Compare commits


3 Commits

Author SHA1 Message Date
leewei d9ef728427 Fix gradle depends 2023-02-20 17:24:26 +08:00
leewei f528567f5d Remove invalid file 2023-02-14 14:21:36 +08:00
leewei 7008677947 Add km module kafka gateway 2023-02-14 11:10:58 +08:00
4828 changed files with 778344 additions and 95159 deletions

.gitignore (vendored, 19 lines changed)

@@ -5,6 +5,7 @@
## Directory-based project format:
.idea/
.gradle/
# if you remove the above rule, at least ignore the following:
# User-specific stuff:
@@ -27,11 +28,12 @@
## File-based project format:
*.ipr
*.iws
*.iml
## Plugin-specific files:
# IntelliJ
/out/
build/
# mpeltonen/sbt-idea plugin
.idea_modules/
@@ -56,7 +58,6 @@ fabric.properties
*.jar
*.war
*.ear
*.tar.gz
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*
@@ -100,13 +101,13 @@ target/
*/velocity.log*
*/*.log
*/*.log.*
node_modules/
node_modules/*
web/node_modules/
web/node_modules/*
workspace.xml
/output/*
.gitversion
out/*
dist/
dist/*
km-rest/src/main/resources/templates/
*dependency-reduced-pom*
*/node_modules/*
*/templates/*
*/out/*
*/dist/*
.DS_Store

APP_META/990-startapp.required.sh (Executable file, 40 lines changed)

@@ -0,0 +1,40 @@
#!/bin/bash
SERVICE_PATH="/home/xiaoju/${APPNAME}"
#nginx logs ln
if [ ! -L /home/xiaoju/nginx/logs ]; then
rm -rf /home/xiaoju/nginx/logs
mkdir -p /home/xiaoju/data1/nginx-logs && \
ln -s /home/xiaoju/data1/nginx-logs /home/xiaoju/nginx/logs
fi
if [ -f "/home/xiaoju/$APPNAME/.deploy/service.json" ]; then
# cp service.json for nginx metric collect.
su xiaoju -c "mkdir -p /home/xiaoju/nginx/.deploy && cp /home/xiaoju/$APPNAME/.deploy/service.json /home/xiaoju/nginx/.deploy"
fi
#tomcat logs ln
if [ ! -L /home/xiaoju/tomcat/logs ]; then
rm -rf /home/xiaoju/tomcat/logs
mkdir -p /home/xiaoju/data1/tomcat-logs && \
ln -s /home/xiaoju/data1/tomcat-logs /home/xiaoju/tomcat/logs
fi
#application logs ln
if [ ! -L /home/xiaoju/${APPNAME}/logs ]; then
mkdir -p /home/xiaoju/data1/${APPNAME}-logs && \
ln -s /home/xiaoju/data1/${APPNAME}-logs /home/xiaoju/${APPNAME}/logs
fi
if [ ! -L /data1 ]; then
ln -s /home/xiaoju/data1 /data1
fi
chown -R xiaoju.xiaoju /home/xiaoju/data1/
chown -R xiaoju.xiaoju /data1/
mkdir -p '/etc/odin-super-agent/'; echo 'consul-client' >> /etc/odin-super-agent/agents.deny; /home/odin/super-agent/data/install/consul-client/current/control stop
su xiaoju -c "cd $SERVICE_PATH && bash -x ./control.sh start"
/usr/bin/monit -c /etc/monitrc
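The symlink layout this script creates can be spot-checked once the container is running; a minimal sketch using the paths from the script above:

```bash
# Each logs directory should now be a symlink into /home/xiaoju/data1
ls -ld /home/xiaoju/nginx/logs /home/xiaoju/tomcat/logs /data1
# The data tree should be owned by the xiaoju user after the chown above
stat -c '%U:%G' /home/xiaoju/data1
```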

APP_META/990-stopapp.sh (Executable file, 8 lines changed)

@@ -0,0 +1,8 @@
#!/bin/bash
SERVICE_PATH="/home/xiaoju/${APPNAME}"
/usr/bin/monit stop all
su xiaoju -c "cd $SERVICE_PATH && ./control.sh stop"

APP_META/Dockerfile (Normal file, 25 lines changed)

@@ -0,0 +1,25 @@
FROM registry.xiaojukeji.com/didionline/bigdatadatabus-didi-jdk11-tomcat-nginx-centos7:stable
MAINTAINER zhuyefeng <zhuyefeng@didichuxing.com>
ENV JAVA_HOME /usr/local/jdk-11.0.2
# TODO: set the module name
ENV APPNAME service-discovery
RUN mkdir -p /etc/container/prestop
ADD ./APP_META/nginx/conf/nginx.conf /home/xiaoju/nginx/conf/
ADD ./APP_META/monit/monitrc /etc/monitrc
#ADD ./APP_META/monit/nginx.cfg /etc/monit.d/
ADD ./APP_META/990-startapp.required.sh /etc/container/init/990-startapp.required.sh
ADD ./APP_META/990-stopapp.sh /etc/container/prestop/990-stopapp.sh
RUN mkdir -p /home/xiaoju/${APPNAME} && \
# TODO: tomcat-based container applications need the following step
#mkdir -p /home/xiaoju/tomcat/webapps && \
chmod 0700 /etc/monitrc && \
chmod a+x /etc/container/init/990-startapp.required.sh && \
chmod a+x /etc/container/prestop/990-stopapp.sh
COPY ./home-xiaoju-${APPNAME} /home/xiaoju/${APPNAME}
# TODO: tomcat-based container applications need the following step
#RUN ln -s /home/xiaoju/${APPNAME} /home/xiaoju/tomcat/webapps/
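The image builds from the repository root so the `./APP_META/...` paths in the ADD steps resolve; a sketch (the tag is an assumption, and the `./home-xiaoju-${APPNAME}` directory must already exist in the build context for the COPY step):

```bash
# Build from the repo root; ADD/COPY paths in the Dockerfile are relative to here
docker build -f APP_META/Dockerfile -t service-discovery:latest .
```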

APP_META/monit/monitrc (Executable file, 13 lines changed)

@@ -0,0 +1,13 @@
set daemon 10 # check services at 10 seconds intervals
set log syslog
set httpd port 2812 and
use address localhost # only accept connection from localhost
allow localhost # allow localhost to connect to the server and
allow admin:monit # require user 'admin' with password 'monit'
#with ssl { # enable SSL/TLS and set path to server certificate
# pemfile: /etc/ssl/certs/monit.pem
#}
include /etc/monit.d/*
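The control file can be checked before it is baked into the image; a minimal sketch, assuming monit is installed locally:

```bash
# Syntax-check the control file without starting the daemon
monit -t -c /etc/monitrc
# Once running, the embedded HTTP interface answers on localhost only,
# using the admin:monit credentials declared above
curl -s --user admin:monit 'http://localhost:2812/_status?format=text'
```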

APP_META/nginx/conf/nginx.conf (Normal file, 127 lines changed)

@@ -0,0 +1,127 @@
#user xiaoju xiaoju;
worker_rlimit_nofile 204800;
worker_processes 4;
error_log /home/xiaoju/nginx/logs/error.log;
pid /home/xiaoju/nginx/run/nginx.pid;
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /home/xiaoju/nginx/modules/*.conf;
events {
use epoll;
worker_connections 204800;
accept_mutex on;
accept_mutex_delay 5ms;
multi_accept on;
}
http {
include mime.types;
default_type application/octet-stream;
server_names_hash_bucket_size 128;
#server_tag off;
#server_info off;
server_tokens off;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
fastcgi_connect_timeout 5;
fastcgi_send_timeout 10;
fastcgi_read_timeout 10;
fastcgi_buffer_size 64k;
fastcgi_buffers 4 64k;
fastcgi_busy_buffers_size 128k;
fastcgi_temp_file_write_size 128k;
keepalive_timeout 60;
keepalive_requests 1024;
client_header_buffer_size 4k;
large_client_header_buffers 4 32k;
client_max_body_size 10m;
client_body_buffer_size 512k;
client_body_timeout 30;
client_header_timeout 10;
send_timeout 240;
proxy_connect_timeout 10s;
proxy_send_timeout 15s;
proxy_read_timeout 15s;
proxy_buffers 64 8k;
proxy_busy_buffers_size 128k;
proxy_temp_file_write_size 64k;
proxy_redirect off;
#proxy_upstream_tries 2;
proxy_next_upstream error invalid_header timeout http_502 http_504;
gzip on;
gzip_min_length 1k;
gzip_buffers 4 16k;
gzip_http_version 1.0;
gzip_comp_level 2;
gzip_types text/plain application/x-javascript text/css text/xml application/xml+css application/json text/javascript;
gzip_vary on;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Real-Port $remote_port;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_pass_header Server;
#operationid on;
#operationid_header didi-header-rid;
#operationid_eth eth0;
#proxy_set_header didi-header-rid $operationid;
log_format main '$server_addr\t$host\t'
'$remote_addr\t$http_x_forwarded_for\t'
'$time_local\t'
'$scheme\t$request\t'
'$status\t$upstream_status\t'
'$request_time\t$upstream_addr\t$upstream_response_time\t'
'$request_length\t$bytes_sent\t'
'$http_referer\t$http_cookie\t$http_user_agent\t'
'$limit_rate\t$http_didi_header_omgid\t$remote_port';
set_real_ip_from 10.0.0.0/8;
set_real_ip_from 100.64.0.0/10;
real_ip_header X-Real-IP;
server {
listen 8080 backlog=4096;
server_name localhost;
access_log logs/access.log main;
location = /status.do {
access_log off;
root /home/xiaoju/nginx/html;
}
location / {
root html;
index index.html index.htm;
if ( $args !~ '^\?' ){
proxy_pass http://127.0.0.1:8888;
}
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
include conf.d/*.conf;
# include servers/*.conf;
# include server_conf/*.conf;
# include upstream_conf/*.conf;
}
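A quick smoke test of this front end once nginx is up, assuming a status.do file exists under the html root (the second path is purely an illustration):

```bash
# The health endpoint is served from the local html root and is not access-logged
curl -i http://localhost:8080/status.do
# Any other request is proxied through to the application on 127.0.0.1:8888
curl -i http://localhost:8080/some/app/path
```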

CONTRIBUTING.md

@@ -1,28 +1,11 @@
# Contribution Guideline
## Contributing to Kafka
Thanks for considering contributing to this project. All issues and pull requests are highly appreciated.
*Before opening a pull request*, review the [Contributing](https://kafka.apache.org/contributing.html) and [Contributing Code Changes](https://cwiki.apache.org/confluence/display/KAFKA/Contributing+Code+Changes) pages.
## Pull Requests
It lists steps that are required before creating a PR.
Before sending a pull request to this project, please read and follow the guidelines below.
1. Branch: We only accept pull requests on the `dev` branch.
2. Coding style: Follow the coding style used in LogiKM.
3. Commit message: Use English and check your spelling.
4. Test: Make sure to test your code.
Include the device model, API version, relevant logs, screenshots, and other related information in your pull request if possible.
NOTE: We assume all your contribution can be licensed under the [Apache License 2.0](LICENSE).
## Issues
We love clearly described issues. :)
The following information helps us resolve issues faster.
* Device model and hardware information.
* API version.
* Logs.
* Screenshots.
* Steps to reproduce the issue.
When you contribute code, you affirm that the contribution is your original work and that you
license the work to the project under the project's open source license. Whether or not you
state this explicitly, by submitting any copyrighted material via pull request, email, or
other means you agree to license the material under the project's open source license and
warrant that you have the legal authority to do so.

HEADER (Normal file, 14 lines changed)

@@ -0,0 +1,14 @@
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

LICENSE (1057 lines changed)

File diff suppressed because it is too large.

NOTICE (Normal file, 8 lines changed)

@@ -0,0 +1,8 @@
Apache Kafka
Copyright 2020 The Apache Software Foundation.
This product includes software developed at
The Apache Software Foundation (https://www.apache.org/).
This distribution has a binary dependency on jersey, which is available under the CDDL
License. The source code of jersey can be found at https://github.com/jersey/jersey/.

PULL_REQUEST_TEMPLATE.md (Normal file, 14 lines changed)

@@ -0,0 +1,14 @@
*More detailed description of your change,
if necessary. The PR title and PR message become
the squashed commit message, so use a separate
comment to ping reviewers.*
*Summary of testing strategy (including rationale)
for the feature or bug fix. Unit and/or integration
tests are expected for any behaviour change and
system tests should be considered for larger changes.*
### Committer Checklist (excluded from commit message)
- [ ] Verify design and implementation
- [ ] Verify test coverage and CI build status
- [ ] Verify documentation (including upgrade notes)

README.md (285 lines changed)

@@ -1,139 +1,220 @@
Apache Kafka
=================
See our [web site](https://kafka.apache.org) for details on the project.
<p align="center">
<img src="https://user-images.githubusercontent.com/71620349/185368586-aed82d30-1534-453d-86ff-ecfa9d0f35bd.png" width = "256" div align=center />
You need to have [Java](http://www.oracle.com/technetwork/java/javase/downloads/index.html) installed.
</p>
Java 8 should be used for building in order to support both Java 8 and Java 11 at runtime.
<p align="center">
<a href="https://knowstreaming.com">产品官网</a> |
<a href="https://github.com/didi/KnowStreaming/releases">下载地址</a> |
<a href="https://doc.knowstreaming.com/product">文档资源</a> |
<a href="https://demo.knowstreaming.com">体验环境</a>
</p>
Scala 2.12 is used by default; see below for how to use a different Scala version or all of the supported Scala versions.
<p align="center">
<!--Last commit-->
<a href="https://img.shields.io/github/last-commit/didi/KnowStreaming">
<img src="https://img.shields.io/github/last-commit/didi/KnowStreaming" alt="LastCommit">
</a>
### Build a jar and run it ###
./gradlew jar
<!--Latest release-->
<a href="https://github.com/didi/KnowStreaming/blob/master/LICENSE">
<img src="https://img.shields.io/github/v/release/didi/KnowStreaming" alt="License">
</a>
Follow instructions in https://kafka.apache.org/documentation.html#quickstart
<!--License info-->
<a href="https://github.com/didi/KnowStreaming/blob/master/LICENSE">
<img src="https://img.shields.io/github/license/didi/KnowStreaming" alt="License">
</a>
### Build source jar ###
./gradlew srcJar
<!--Open-Issue-->
<a href="https://github.com/didi/KnowStreaming/issues">
<img src="https://img.shields.io/github/issues-raw/didi/KnowStreaming" alt="Issues">
</a>
### Build aggregated javadoc ###
./gradlew aggregatedJavadoc
<!--Zhishi Xingqiu community-->
<a href="https://z.didi.cn/5gSF9">
<img src="https://img.shields.io/badge/join-%E7%9F%A5%E8%AF%86%E6%98%9F%E7%90%83-red" alt="Slack">
</a>
### Build javadoc and scaladoc ###
./gradlew javadoc
./gradlew javadocJar # builds a javadoc jar for each module
./gradlew scaladoc
./gradlew scaladocJar # builds a scaladoc jar for each module
./gradlew docsJar # builds both (if applicable) javadoc and scaladoc jars for each module
</p>
### Run unit/integration tests ###
./gradlew test # runs both unit and integration tests
./gradlew unitTest
./gradlew integrationTest
### Force re-running tests without code change ###
./gradlew cleanTest test
./gradlew cleanTest unitTest
./gradlew cleanTest integrationTest
### Running a particular unit/integration test ###
./gradlew clients:test --tests RequestResponseTest
### Running a particular test method within a unit/integration test ###
./gradlew core:test --tests kafka.api.ProducerFailureHandlingTest.testCannotSendToInternalTopic
./gradlew clients:test --tests org.apache.kafka.clients.MetadataTest.testMetadataUpdateWaitTime
### Running a particular unit/integration test with log4j output ###
Change the log4j setting in either `clients/src/test/resources/log4j.properties` or `core/src/test/resources/log4j.properties`
./gradlew clients:test --tests RequestResponseTest
### Generating test coverage reports ###
Generate coverage reports for the whole project:
./gradlew reportCoverage
Generate coverage for a single module, e.g.:
./gradlew clients:reportCoverage
### Building a binary release gzipped tar ball ###
./gradlew clean releaseTarGz
The above command will fail if you haven't set up the signing key. To bypass signing the artifact, you can run:
./gradlew clean releaseTarGz -x signArchives
The release file can be found inside `./core/build/distributions/`.
### Cleaning the build ###
./gradlew clean
### Running a task with one of the Scala versions available (2.12.x or 2.13.x) ###
*Note that if building the jars with a version other than 2.12.x, you need to set the `SCALA_VERSION` variable or change it in `bin/kafka-run-class.sh` to run the quick start.*
You can pass either the major version (e.g. 2.12) or the full version (e.g. 2.12.7):
./gradlew -PscalaVersion=2.12 jar
./gradlew -PscalaVersion=2.12 test
./gradlew -PscalaVersion=2.12 releaseTarGz
### Running a task with all the scala versions enabled by default ###
Append `All` to the task name:
./gradlew testAll
./gradlew jarAll
./gradlew releaseTarGzAll
### Running a task for a specific project ###
This is for `core`, `examples` and `clients`
./gradlew core:jar
./gradlew core:test
### Listing all gradle tasks ###
./gradlew tasks
### Building IDE project ####
*Note that this is not strictly necessary (IntelliJ IDEA has good built-in support for Gradle projects, for example).*
./gradlew eclipse
./gradlew idea
The `eclipse` task has been configured to use `${project_dir}/build_eclipse` as Eclipse's build directory. Eclipse's default
build directory (`${project_dir}/bin`) clashes with Kafka's scripts directory and we don't use Gradle's build directory
to avoid known issues with this configuration.
### Publishing the jar for all versions of Scala and for all projects to maven ###
./gradlew uploadArchivesAll
Please note for this to work you should create/update `${GRADLE_USER_HOME}/gradle.properties` (typically, `~/.gradle/gradle.properties`) and assign the following variables
mavenUrl=
mavenUsername=
mavenPassword=
signing.keyId=
signing.password=
signing.secretKeyRingFile=
### Publishing the streams quickstart archetype artifact to maven ###
For the Streams archetype project, one cannot use gradle to upload to maven; instead the `mvn deploy` command needs to be run in the quickstart folder:
cd streams/quickstart
mvn deploy
Please note for this to work you should create/update user maven settings (typically, `${USER_HOME}/.m2/settings.xml`) to assign the following variables
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
https://maven.apache.org/xsd/settings-1.0.0.xsd">
...
<servers>
...
<server>
<id>apache.snapshots.https</id>
<username>${maven_username}</username>
<password>${maven_password}</password>
</server>
<server>
<id>apache.releases.https</id>
<username>${maven_username}</username>
<password>${maven_password}</password>
</server>
...
</servers>
...
---
### Installing the jars to the local Maven repository ###
./gradlew installAll
### Building the test jar ###
./gradlew testJar
## About `Know Streaming`
### Determining how transitive dependencies are added ###
./gradlew core:dependencies --configuration runtime
`Know Streaming` is a cloud-native Kafka management and control platform, distilled from years of Kafka operations practice inside many internet companies. It focuses on the core scenarios of Kafka operations and control, monitoring and alerting, resource governance, and multi-active disaster recovery. User experience, monitoring, and operations are built out in a platform-based, visual, and intelligent way, with a series of distinctive features that greatly simplify day-to-day work for users and operators, letting an ordinary operator work like a Kafka expert. Its main characteristics:
### Determining if any dependencies could be updated ###
./gradlew dependencyUpdates
- 👀 &nbsp;**Zero intrusion, full coverage**
  - No invasive changes to `Apache Kafka` are required: one click onboards clusters from `0.10.x` through `3.x.x`, covering both `ZK` and `Raft` modes, and the compatibility layer is built to extend, raising your overall cluster-management capability;
### Running code quality checks ###
There are two code quality analysis tools that we regularly run, spotbugs and checkstyle.
- 🌪️ &nbsp;**Zero cost, GUI-driven**
  - Distills high-frequency CLI operations into well-designed product flows and a clean, modern GUI, with GUI management of Cluster, Broker, Topic, Group, Message, ACL and other components; an ordinary user can get started in five minutes;
#### Checkstyle ####
Checkstyle enforces a consistent coding style in Kafka.
You can run checkstyle using:
- 👏 &nbsp;**Cloud native, pluggable**
  - Built cloud-natively with horizontal scalability: adding nodes brings more collection and serving capacity, plus many hot-pluggable enterprise features covering observability integration, resource governance, multi-active disaster recovery, and other core scenarios;
./gradlew checkstyleMain checkstyleTest
- 🚀 &nbsp;**Professional capabilities**
  - Cluster management: one-click cluster onboarding, health analysis, core-component observation, and more;
  - Observability: multi-dimensional metric dashboards and metric best practices, and more;
  - Anomaly inspection: multi-dimensional cluster health checks and health scores, and more;
  - Enhanced operations: Topic replica expansion/shrink and Topic replica migration, and more;
The checkstyle warnings will be found in `reports/checkstyle/reports/main.html` and `reports/checkstyle/reports/test.html` files in the
subproject build directories. They are also printed to the console. The build will fail if Checkstyle fails.
#### Spotbugs ####
Spotbugs uses static analysis to look for bugs in the code.
You can run spotbugs using:
&nbsp;
**Product screenshot**
./gradlew spotbugsMain spotbugsTest -x test
<p align="center">
The spotbugs warnings will be found in `reports/spotbugs/main.html` and `reports/spotbugs/test.html` files in the subproject build
directories. Use -PxmlSpotBugsReport=true to generate an XML report instead of an HTML one.
<img src="http://img-ys011.didistatic.com/static/dc2img/do1_sPmS4SNLX9m1zlpmHaLJ" width = "768" height = "473" div align=center />
### Common build options ###
</p>
The following options should be set with a `-P` switch, for example `./gradlew -PmaxParallelForks=1 test`.
* `commitId`: sets the build commit ID as .git/HEAD might not be correct if there are local commits added for build purposes.
* `mavenUrl`: sets the URL of the maven deployment repository (`file://path/to/repo` can be used to point to a local repository).
* `maxParallelForks`: limits the maximum number of processes for each task.
* `showStandardStreams`: shows standard out and standard error of the test JVM(s) on the console.
* `skipSigning`: skips signing of artifacts.
* `testLoggingEvents`: unit test events to be logged, separated by comma. For example `./gradlew -PtestLoggingEvents=started,passed,skipped,failed test`.
* `xmlSpotBugsReport`: enable XML reports for spotBugs. This also disables HTML reports as only one can be enabled at a time.
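Several of these options can be combined on a single invocation, for example:

```bash
# One forked test JVM, with verbose per-test event logging
./gradlew -PmaxParallelForks=1 -PtestLoggingEvents=started,passed,skipped,failed test
```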
### Dependency Analysis ###
The gradle [dependency debugging documentation](https://docs.gradle.org/current/userguide/viewing_debugging_dependencies.html) mentions using the `dependencies` or `dependencyInsight` tasks to debug dependencies for the root project or individual subprojects.
## Documentation
Alternatively, use the `allDeps` or `allDepInsight` tasks for recursively iterating through all subprojects:
**`Developer guides`**
./gradlew allDeps
- [Build and packaging guide](docs/install_guide/源码编译打包手册.md)
- [Single-node deployment guide](docs/install_guide/单机部署手册.md)
- [Version upgrade guide](docs/install_guide/版本升级手册.md)
- [Running from source locally](docs/dev_guide/本地源码启动手册.md)
./gradlew allDepInsight --configuration runtime --dependency com.fasterxml.jackson.core:jackson-databind
**`Product guides`**
These take the same arguments as the builtin variants.
- [User guide](docs/user_guide/用户使用手册.md)
- [2.x vs. 3.x comparison guide](docs/user_guide/新旧对比手册.md)
- [FAQ](docs/user_guide/faq.md)
### Running system tests ###
See [tests/README.md](tests/README.md).
**Click [here](https://doc.knowstreaming.com/product) to find more documentation on the official site**
### Running in Vagrant ###
See [vagrant/README.md](vagrant/README.md).
### Contribution ###
Apache Kafka is interested in building the community; we would welcome any thoughts or [patches](https://issues.apache.org/jira/browse/KAFKA). You can reach us [on the Apache mailing lists](http://kafka.apache.org/contact.html).
## Becoming a community contributor
Click [here](CONTRIBUTING.md) to learn how to become a Know Streaming contributor
## Join the community
**`1. Zhishi Xingqiu (Knowledge Planet)`**
<p align="left">
<img src="https://user-images.githubusercontent.com/71620349/185357284-fdff1dad-c5e9-4ddf-9a82-0be1c970980d.JPG" height = "180" div align=left />
</p>
<br/>
<br/>
<br/>
<br/>
<br/>
<br/>
<br/>
<br/>
👍 We are building the largest and most authoritative **[Chinese-language Kafka community](https://z.didi.cn/5gSF9)**
Here you can meet Kafka experts from major internet companies and 4000+ Kafka enthusiasts, share knowledge, and stay on top of the latest industry news. 👏 &nbsp; We look forward to your joining: https://z.didi.cn/5gSF9
Every question gets an answer, and there are interactive giveaways.
PS: when asking, please describe the problem completely in one message and include environment details (version in use, steps taken, error/warning messages, etc.) so the experts can answer quickly.
&nbsp;
**`2. WeChat group`**
To join via WeChat, add `mike_zhangliang` or `PenceXie` and mention "KnowStreaming" in the friend request.
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=didi/KnowStreaming&type=Date)](https://star-history.com/#didi/KnowStreaming&Date)
To contribute follow the instructions here:
* https://kafka.apache.org/contributing.html

(deleted file: release changelog, 279 lines removed)

@@ -1,279 +0,0 @@
## v3.0.0-beta.1
**Documentation**
- Added documentation for the Task module
- FAQ: added an explanation of the `Specified key was too long; max key length is 767 bytes` error
- FAQ: added an explanation of the `ESIndexNotFoundException` error
**Bug fixes**
- Fixed Consumer message search not stopping when Stop is clicked
- Fixed errors when creating or editing role permissions
- Fixed the wrong balance-card status on both the multi-cluster list and single-cluster detail pages
- Fixed the version list not being sorted
- Fixed Controller information of Raft clusters being recorded repeatedly
- Fixed consumer-group description retrieval failing on some Kafka versions
- Fixed the Topic name missing from the log when fetching a partition offset fails
- Fixed a wrong image URL and broken image on GitHub
- Fixed the Broker default address not matching its comment
- Fixed pagination not taking effect on the Consumer list
- Fixed the missing default value of the operation_methods column in the operation-record table
- Fixed the move_broker_list column of the cluster-balance table having no effect
- Fixed endlessly repeated "not supported" log lines when fetching KafkaUser and KafkaACL information
- Fixed chart curves dropping to the floor when metrics are missing
**UX improvements**
- Improved front-end build time and bundle size, adding a chunk-splitting strategy for dependencies
- Polished product styling and copy
- Made the number of ES clients configurable
- Reduced the flood of MySQL key-conflict log lines
**Capability improvements**
- Added a periodic task that proactively creates missing ES templates and indices, removing the need for extra scripts
- Added the ability to choose which Broker address JMX connections use
## v3.0.0-beta.0
**1. Multi-cluster management**
- Added a health-monitoring system and GUI display of key components & metrics
- Added onboarding of Kafka clusters above 2.8.x, covering 0.10.x-3.x
- Removed the logical-cluster, shared-cluster, and Region concepts
**2. Cluster management**
- Added a cluster overview and cluster configuration change records
- Added a Cluster health score with customizable health-check rules
- Added statistics and GUI display of key Cluster metrics, with customizable configuration
- Added Cluster-level I/O and Disk Load Rebalance, with support for scheduled balancing tasks (enterprise edition)
- Removed the throttling and authentication features
- Removed the APPID concept
**3. Broker management**
- Added a Broker health score
- Added statistics and GUI display of key Broker metrics, with customizable configuration
- Added Broker parameter configuration, effective after a restart
- Added Controller change records
- Added Broker Datalogs records
- Removed the Leader Rebalance feature
- Removed Broker preferred-replica election
**4. Topic management**
- Added a Topic health score
- Added statistics and GUI display of key Topic metrics, with customizable configuration
- Added Topic parameter configuration, effective immediately
- Added batch Topic migration and batch Topic replica expansion/shrink
- Added a view of system Topics
- Improved the GUI display of Partition distribution
- Improved Topic Message data sampling
- Removed the Topic expiry concept
- Removed Topic quota requests
**5. Consumer management**
- Reworked the ConsumerGroup view and added a GUI display of Consumer Lag
**6. ACL management**
- Added native ACL configuration via the GUI, supporting produce, consume, and custom permission combinations
- Added KafkaUser management, including creating custom KafkaUsers
**7. Message testing (enterprise edition)**
- Added a producer message simulator with customizable Data, Flow, Header, and Options (enterprise edition)
- Added a consumer message simulator with customizable Data, Flow, Header, and Options (enterprise edition)
**8. Job**
- Reworked the Job module with support for task-progress management
**9. System administration**
- Reworked the user and role system, with customizable role pages and operation permissions
- Improved audit-log information
- Removed the multi-tenant system
- Removed the ticket workflow
---
## v2.6.0
Released 2022-01-24
### Capability improvements
- Added a simple fallback utility class
### UX improvements
- Added documentation for the periodic tasks
- Added cluster installation and deployment documentation
- Upgraded the Swagger, Spring Framework, Spring Boot, and ECharts versions
- Improved log output in the Task module
- Added a log message when a cron expression fails to parse instead of exiting silently
- Added department and e-mail information when onboarding LDAP users
- Added a fallback mechanism and better error logging for failed JMX connections
- Made the thread pool and client pool configurable
- Removed the unused jmx_prometheus_javaagent-0.14.0.jar
- Improved migration-task naming
- Fixed Region capacity information not being updated immediately after creating a Region
- Introduced lombok
- Updated the video tutorials
- Made the LogiKM address in the kcm_script.sh script configurable via a program argument
- Added a switch to skip login for third-party and gateway interfaces
- Made the extends-module configuration optional in application.yml
### Bug fixes
- Fixed an SQL syntax error when batch-writing an empty metrics array to the DB
- Fixed the version not changing when gateway configuration is added or modified
- Fixed a tooltip being obscured on the cluster list page
- Fixed Broker metadata protocol parsing failures on newer Broker versions
- Fixed the Dockerfile build complaining about a missing application.yml
- Fixed a null-pointer error when updating a logical cluster
## v2.5.0
Released 2021-07-10
### UX improvements
- Renamed the product to LogiKM
- Updated the product icon
## v2.4.1+
Released 2021-05-21
### Capability improvements
- Added interfaces for granting permissions and quotas directly (v2.4.1)
- Added the ability to call interfaces while bypassing login (v2.4.1)
### UX improvements
- Upgraded Tomcat to 8.5.66 (v2.4.2)
- Reworked the op interfaces, splitting the util interface into topic and leader interfaces (v2.4.1)
- Simplified the key length of Gateway configuration (v2.4.1)
### Bug fixes
- Fixed the wrong version being displayed on the page (v2.4.2)
## v2.4.0
Released 2021-05-18
### Capability improvements
- Added automatic-approval switches for App and Topic requests
- Added Rack information to Broker metadata
- Upgraded the MySQL driver to support MySQL 8+
- Added an operation-record query page
### UX improvements
- Improved the FAQ description of alert groups
- Clarified the shared vs. dedicated cluster concepts in the user guide
- Prevented users from deleting themselves on the user management page (front-end check)
### Bug fixes
- Fixed the Topic-creation interface in the op-util class
- Fixed the periodic Topic-to-DB sync task by querying the Topic list directly from the DB instead of the cache
- Fixed application offboarding approval failures by filtering out entries with permission 0 (no permission)
- Fixed login and permission bypass vulnerabilities
- Fixed the developer role being shown the cluster-onboarding and pause-monitoring buttons
## v2.3.0
Released 2021-02-08
### Capability improvements
- Added support for Docker-based deployment
- Added the ability to designate Brokers as candidate controllers
- Added the ability to create and manage gateway configuration
- Added the ability to retrieve consumer-group state
- Added JMX authentication for clusters
### UX improvements
- Improved the flows for editing user roles and changing passwords
- Added search by consumerID
- Improved the copy for "Topic connection info", "reset consumer-group offsets", and "change Topic retention time"
- Added links to the resource-request documentation in the relevant places
### Bug fixes
- Fixed the time axis on Broker monitoring charts being displayed incorrectly
- Fixed the wrong alert-period unit when creating Nightingale monitoring alert rules
## v2.2.0
Released 2021-01-25
### Capability improvements
- Improved the batch ticket-handling flow
- Added real-time 75th/99th-percentile latency data for Topics
- Added a periodic task that writes ownerless Topics not yet persisted to the DB into the DB
### UX improvements
- Added links to the cluster-onboarding documentation in the relevant places
- Clarified the physical-cluster and logical-cluster concepts
- Showed the Region a Topic belongs to on the Topic detail page and in the partition-expansion dialog
- Improved the retention-time configuration flow during Topic approval
- Improved the error messages shown during Topic/application requests and approvals
- Improved the copy for the Topic data-sampling action
- Improved the prompt shown to operators when deleting a Topic
- Improved the deletion logic and prompt shown to operators when deleting a Region
- Improved the prompt shown to operators when deleting a logical cluster
- Improved the file-type restrictions when uploading cluster configuration files
### Bug fixes
- Fixed validation errors for special characters in application names
- Fixed ordinary users being able to access application details without authorization
- Fixed data-compression-format retrieval failures caused by Kafka version upgrades
- Fixed deleted logical clusters and Topics still being displayed in the UI
- Fixed duplicate result prompts during Leader rebalance operations
## v2.1.0
Released 2020-12-19
### UX improvements
- Improved the background styling shown while pages load
- Improved the flow for ordinary users requesting Topic permissions
- Improved the permission restrictions on Topic quota and partition requests
- Improved the copy for revoking Topic permissions
- Improved the field names on the quota-request form
- Improved the flow for resetting consumer offsets
- Improved the form for creating Topic migration tasks
- Improved the dialog styling for Topic partition expansion
- Improved the styling of cluster Broker monitoring charts
- Improved the form for creating logical clusters
- Improved the prompt for cluster security protocols
### Bug fixes
- Fixed occasional failures when resetting consumer offsets
TROGDOR.md (Normal file, 189 lines changed)

@@ -0,0 +1,189 @@
Trogdor
========================================
Trogdor is a test framework for Apache Kafka.
Trogdor can run benchmarks and other workloads. Trogdor can also inject faults in order to stress test the system.
Quickstart
=========================================================
First, we want to start a single-node Kafka cluster with a ZooKeeper and a broker.
Running ZooKeeper:
> ./bin/zookeeper-server-start.sh ./config/zookeeper.properties &> /tmp/zookeeper.log &
Running Kafka:
> ./bin/kafka-server-start.sh ./config/server.properties &> /tmp/kafka.log &
Then, we want to run a Trogdor Agent, plus a Trogdor Coordinator.
To run the Trogdor Agent:
> ./bin/trogdor.sh agent -c ./config/trogdor.conf -n node0 &> /tmp/trogdor-agent.log &
To run the Trogdor Coordinator:
> ./bin/trogdor.sh coordinator -c ./config/trogdor.conf -n node0 &> /tmp/trogdor-coordinator.log &
Let's confirm that all of the daemons are running:
> jps
116212 Coordinator
115188 QuorumPeerMain
116571 Jps
115420 Kafka
115694 Agent
Now, we can submit a test job to Trogdor.
> ./bin/trogdor.sh client createTask -t localhost:8889 -i produce0 --spec ./tests/spec/simple_produce_bench.json
Sent CreateTaskRequest for task produce0.
We can run showTask to see what the task's status is:
> ./bin/trogdor.sh client showTask -t localhost:8889 -i produce0
Task bar of type org.apache.kafka.trogdor.workload.ProduceBenchSpec is DONE. FINISHED at 2019-01-09T20:38:22.039-08:00 after 6s
To see the results, we use showTask with --show-status:
> ./bin/trogdor.sh client showTask -t localhost:8889 -i produce0 --show-status
Task bar of type org.apache.kafka.trogdor.workload.ProduceBenchSpec is DONE. FINISHED at 2019-01-09T20:38:22.039-08:00 after 6s
Status: {
"totalSent" : 50000,
"averageLatencyMs" : 17.83388,
"p50LatencyMs" : 12,
"p95LatencyMs" : 75,
"p99LatencyMs" : 96,
"transactionsCommitted" : 0
}
Trogdor Architecture
========================================
Trogdor has a single coordinator process which manages multiple agent processes. Each agent process is responsible for a single cluster node.
The Trogdor coordinator manages tasks. A task is anything we might want to do on a cluster, such as running a benchmark, injecting a fault, or running a workload. In order to implement each task, the coordinator creates workers on one or more agent nodes.
The Trogdor agent process implements the tasks. For example, when running a workload, the agent process is the process which produces and consumes messages.
Both the coordinator and the agent expose a REST interface that accepts objects serialized via JSON. There is also a command-line program which makes it easy to send messages to either one without manually crafting the JSON message body.
All Trogdor RPCs are idempotent except the shutdown requests. Sending an idempotent RPC twice in a row has the same effect as sending the RPC once.
Tasks
========================================
Tasks are described by specifications containing:
* A "class" field describing the task type. This contains a full Java class name.
* A "startMs" field describing when the task should start. This is given in terms of milliseconds since the UNIX epoch.
* A "durationMs" field describing how long the task should last. This is given in terms of milliseconds.
* Other fields which are task-specific.
The task specification is usually written as JSON. For example, this task specification describes a network partition between nodes 1 and 2, and 3:
{
"class": "org.apache.kafka.trogdor.fault.NetworkPartitionFaultSpec",
"startMs": 1000,
"durationMs": 30000,
"partitions": [["node1", "node2"], ["node3"]]
}
This task runs a simple ProduceBench test on a cluster with one producer node, 5 topics, and 10,000 messages per second.
The keys are generated sequentially and the configured partitioner (DefaultPartitioner) is used.
{
"class": "org.apache.kafka.trogdor.workload.ProduceBenchSpec",
"durationMs": 10000000,
"producerNode": "node0",
"bootstrapServers": "localhost:9092",
"targetMessagesPerSec": 10000,
"maxMessages": 50000,
"activeTopics": {
"foo[1-3]": {
"numPartitions": 10,
"replicationFactor": 1
}
},
"inactiveTopics": {
"foo[4-5]": {
"numPartitions": 10,
"replicationFactor": 1
}
},
"keyGenerator": {
"type": "sequential",
"size": 8,
"offset": 1
},
"useConfiguredPartitioner": true
}
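A spec like the one above is submitted with the same client command shown in the quickstart; the task id and file name here are placeholders:

```bash
# Save the spec as JSON, then hand it to a running coordinator
./bin/trogdor.sh client createTask -t localhost:8889 -i produce1 --spec ./my-produce-bench.json
```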
Tasks are submitted to the coordinator. Once the coordinator determines that it is time for the task to start, it creates workers on agent processes. The workers run until the task is done.
Task specifications are immutable; they do not change after the task has been created.
Tasks can be in several states:
* PENDING, when the task is waiting to execute,
* RUNNING, when the task is running,
* STOPPING, when the task is in the process of stopping,
* DONE, when the task is done.
Tasks that are DONE also have an error field which will be set if the task failed.
Workloads
========================================
Trogdor can run several workloads. Workloads perform operations on the cluster and measure their performance. Workloads fail when the operations cannot be performed.
### ProduceBench
ProduceBench starts a Kafka producer on a single agent node, producing to several partitions. The workload measures the average produce latency, as well as the median, 95th percentile, and 99th percentile latency.
It can be configured to use a transactional producer which can commit transactions based on a set time interval or number of messages.
### RoundTripWorkload
RoundTripWorkload tests both production and consumption. The workload starts a Kafka producer and consumer on a single node. The consumer will read back the messages that were produced by the producer.
### ConsumeBench
ConsumeBench starts one or more Kafka consumers on a single agent node. Depending on the passed in configuration (see ConsumeBenchSpec), the consumers either subscribe to a set of topics (leveraging consumer group functionality and dynamic partition assignment) or manually assign partitions to themselves.
The workload measures the average consume latency, as well as the median, 95th percentile, and 99th percentile latency.
Faults
========================================
Trogdor can run several faults which deliberately break something in the cluster.
### ProcessStopFault
ProcessStopFault stops a process by sending it a SIGSTOP signal. When the fault ends, the process is resumed with SIGCONT.
### NetworkPartitionFault
NetworkPartitionFault sets up an artificial network partition between one or more sets of nodes. Currently, this is implemented using iptables. The iptables rules are set up on the outbound traffic from the affected nodes. Therefore, the affected nodes should still be reachable from outside the cluster.
External Processes
========================================
Trogdor supports running arbitrary commands in external processes. This is a generic way to run any configurable command in the Trogdor framework - be it a Python program, bash script, docker image, etc.
### ExternalCommandWorker
ExternalCommandWorker starts an external command defined by the ExternalCommandSpec. It essentially allows you to run any command on any Trogdor agent node.
The worker communicates with the external process via its stdin, stdout and stderr in a JSON protocol. It uses stdout for any actionable communication and only logs what it sees in stderr.
On startup the worker will first send a message describing the workload to the external process in this format:
```
{"id":<task ID string>, "workload":<configured workload JSON object>}
```
and will then listen for messages from the external process, again in a JSON format.
Said JSON can contain the following fields:
- status: If the object contains this field, the status of the worker will be set to the given value.
- error: If the object contains this field, the error of the worker will be set to the given value. Once an error occurs, the external process will be terminated.
- log: If the object contains this field, a log message will be issued with this text.
An example:
```json
{"log": "Finished successfully.", "status": {"p99ProduceLatency": "100ms", "messagesSent": 10000}}
```
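For illustration, a minimal external process that speaks this protocol might look like the sketch below (the actual work is elided):

```bash
#!/bin/bash
# Read the {"id":..., "workload":...} line the worker writes to our stdin
read -r workload
# stderr is only logged by the worker; stdout carries protocol messages
echo "received workload: ${workload}" >&2
# ... perform the configured workload here ...
# Report a log line plus a final status object on stdout
echo '{"log": "Finished successfully.", "status": {"messagesSent": 0}}'
```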
Exec Mode
========================================
Sometimes, you just want to run a test quickly on a single node. In this case, you can use "exec mode." This mode allows you to run a single Trogdor Agent without a Coordinator.
When using exec mode, you must pass in a Task specification to use. The Agent will try to start this task.
For example:
> ./bin/trogdor.sh agent -n node0 -c ./config/trogdor.conf --exec ./tests/spec/simple_produce_bench.json
When using exec mode, the Agent will exit once the task is complete.

Vagrantfile (vendored, Normal file, 199 lines changed)

@@ -0,0 +1,199 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- mode: ruby -*-
# vi: set ft=ruby :
require 'socket'
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
# General config
enable_dns = false
# Override to false when bringing up a cluster on AWS
enable_hostmanager = true
enable_jmx = false
num_zookeepers = 1
num_brokers = 3
num_workers = 0 # Generic workers that get the code, but don't start any services
ram_megabytes = 1280
base_box = "ubuntu/trusty64"
# EC2
ec2_access_key = ENV['AWS_ACCESS_KEY']
ec2_secret_key = ENV['AWS_SECRET_KEY']
ec2_session_token = ENV['AWS_SESSION_TOKEN']
ec2_keypair_name = nil
ec2_keypair_file = nil
ec2_region = "us-east-1"
ec2_az = nil # Use the default set by AWS
ec2_ami = "ami-29ebb519"
ec2_instance_type = "m3.medium"
ec2_spot_instance = ENV['SPOT_INSTANCE'] ? ENV['SPOT_INSTANCE'] == 'true' : true
ec2_spot_max_price = "0.113" # On-demand price for instance type
ec2_user = "ubuntu"
ec2_instance_name_prefix = "kafka-vagrant"
ec2_security_groups = nil
ec2_subnet_id = nil
# Only override this by setting it to false if you're running in a VPC and you
# are running Vagrant from within that VPC as well.
ec2_associate_public_ip = nil
jdk_major = '8'
jdk_full = '8u202-linux-x64'
local_config_file = File.join(File.dirname(__FILE__), "Vagrantfile.local")
if File.exists?(local_config_file) then
eval(File.read(local_config_file), binding, "Vagrantfile.local")
end
# TODO(ksweeney): RAM requirements are not empirical and can probably be significantly lowered.
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.hostmanager.enabled = enable_hostmanager
config.hostmanager.manage_host = enable_dns
config.hostmanager.include_offline = false
## Provider-specific global configs
config.vm.provider :virtualbox do |vb,override|
override.vm.box = base_box
override.hostmanager.ignore_private_ip = false
# Brokers started with the standard script currently set Xms and Xmx to 1G,
# plus we need some extra head room.
vb.customize ["modifyvm", :id, "--memory", ram_megabytes.to_s]
if Vagrant.has_plugin?("vagrant-cachier")
override.cache.scope = :box
end
end
config.vm.provider :aws do |aws,override|
# The "box" is specified as an AMI
override.vm.box = "dummy"
override.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
cached_addresses = {}
# Use a custom resolver that SSH's into the machine and finds the IP address
# directly. This lets us get at the private IP address directly, avoiding
# some issues with using the default IP resolver, which uses the public IP
# address.
override.hostmanager.ip_resolver = proc do |vm, resolving_vm|
if !cached_addresses.has_key?(vm.name)
state_id = vm.state.id
if state_id != :not_created && state_id != :stopped && vm.communicate.ready?
contents = ''
vm.communicate.execute("/sbin/ifconfig eth0 | grep 'inet addr' | tail -n 1 | egrep -o '[0-9\.]+' | head -n 1 2>&1") do |type, data|
contents << data
end
cached_addresses[vm.name] = contents.split("\n").first[/(\d+\.\d+\.\d+\.\d+)/, 1]
else
cached_addresses[vm.name] = nil
end
end
cached_addresses[vm.name]
end
override.ssh.username = ec2_user
override.ssh.private_key_path = ec2_keypair_file
aws.access_key_id = ec2_access_key
aws.secret_access_key = ec2_secret_key
aws.session_token = ec2_session_token
aws.keypair_name = ec2_keypair_name
aws.region = ec2_region
aws.availability_zone = ec2_az
aws.instance_type = ec2_instance_type
aws.ami = ec2_ami
aws.security_groups = ec2_security_groups
aws.subnet_id = ec2_subnet_id
# If a subnet is specified, default to turning on a public IP unless the
# user explicitly specifies the option. Without a public IP, Vagrant won't
# be able to SSH into the hosts unless Vagrant is also running in the VPC.
if ec2_associate_public_ip.nil?
aws.associate_public_ip = true unless ec2_subnet_id.nil?
else
aws.associate_public_ip = ec2_associate_public_ip
end
aws.region_config ec2_region do |region|
region.spot_instance = ec2_spot_instance
region.spot_max_price = ec2_spot_max_price
end
# Exclude some directories that can grow very large from syncing
override.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__exclude: ['.git', 'core/data/', 'logs/', 'tests/results/', 'results/']
end
def name_node(node, name, ec2_instance_name_prefix)
node.vm.hostname = name
node.vm.provider :aws do |aws|
aws.tags = {
'Name' => ec2_instance_name_prefix + "-" + Socket.gethostname + "-" + name,
'JenkinsBuildUrl' => ENV['BUILD_URL']
}
end
end
def assign_local_ip(node, ip_address)
node.vm.provider :virtualbox do |vb,override|
override.vm.network :private_network, ip: ip_address
end
end
## Cluster definition
zookeepers = []
(1..num_zookeepers).each { |i|
name = "zk" + i.to_s
zookeepers.push(name)
config.vm.define name do |zookeeper|
name_node(zookeeper, name, ec2_instance_name_prefix)
ip_address = "192.168.50." + (10 + i).to_s
assign_local_ip(zookeeper, ip_address)
zookeeper.vm.provision "shell", path: "vagrant/base.sh", env: {"JDK_MAJOR" => jdk_major, "JDK_FULL" => jdk_full}
zk_jmx_port = enable_jmx ? (8000 + i).to_s : ""
zookeeper.vm.provision "shell", path: "vagrant/zk.sh", :args => [i.to_s, num_zookeepers, zk_jmx_port]
end
}
(1..num_brokers).each { |i|
name = "broker" + i.to_s
config.vm.define name do |broker|
name_node(broker, name, ec2_instance_name_prefix)
ip_address = "192.168.50." + (50 + i).to_s
assign_local_ip(broker, ip_address)
# We need to be careful about what we list as the publicly routable
# address since this is registered in ZK and handed out to clients. If
# host DNS isn't setup, we shouldn't use hostnames -- IP addresses must be
# used to support clients running on the host.
zookeeper_connect = zookeepers.map{ |zk_addr| zk_addr + ":2181"}.join(",")
broker.vm.provision "shell", path: "vagrant/base.sh", env: {"JDK_MAJOR" => jdk_major, "JDK_FULL" => jdk_full}
kafka_jmx_port = enable_jmx ? (9000 + i).to_s : ""
broker.vm.provision "shell", path: "vagrant/broker.sh", :args => [i.to_s, enable_dns ? name : ip_address, zookeeper_connect, kafka_jmx_port]
end
}
(1..num_workers).each { |i|
name = "worker" + i.to_s
config.vm.define name do |worker|
name_node(worker, name, ec2_instance_name_prefix)
ip_address = "192.168.50." + (100 + i).to_s
assign_local_ip(worker, ip_address)
worker.vm.provision "shell", path: "vagrant/base.sh", env: {"JDK_MAJOR" => jdk_major, "JDK_FULL" => jdk_full}
end
}
end
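With this file in place, the cluster comes up through the standard Vagrant workflow; a sketch (the AWS variant assumes the vagrant-aws plugin and the AWS_* environment variables read above):

```bash
# Local VirtualBox cluster: 1 ZooKeeper and 3 brokers by default
vagrant up
# AWS instead; see vagrant/README.md for the full workflow
vagrant up --provider=aws --no-parallel
```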

bin/connect-distributed.sh (Executable file, 45 lines changed)

@@ -0,0 +1,45 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ $# -lt 1 ];
then
echo "USAGE: $0 [-daemon] connect-distributed.properties"
exit 1
fi
base_dir=$(dirname $0)
if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties"
fi
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G"
fi
EXTRA_ARGS=${EXTRA_ARGS-'-name connectDistributed'}
COMMAND=$1
case $COMMAND in
-daemon)
EXTRA_ARGS="-daemon "$EXTRA_ARGS
shift
;;
*)
;;
esac
exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectDistributed "$@"
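Typical invocations follow the USAGE string above; the properties file is the stock one shipped in `config/`:

```bash
# Foreground, for development
./bin/connect-distributed.sh config/connect-distributed.properties
# As a background daemon; -daemon must come first, per the case statement above
./bin/connect-distributed.sh -daemon config/connect-distributed.properties
```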

bin/connect-mirror-maker.sh (Executable file, 45 lines changed)

@@ -0,0 +1,45 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ $# -lt 1 ];
then
echo "USAGE: $0 [-daemon] mm2.properties"
exit 1
fi
base_dir=$(dirname $0)
if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties"
fi
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G"
fi
EXTRA_ARGS=${EXTRA_ARGS-'-name mirrorMaker'}
COMMAND=$1
case $COMMAND in
-daemon)
EXTRA_ARGS="-daemon "$EXTRA_ARGS
shift
;;
*)
;;
esac
exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.mirror.MirrorMaker "$@"

bin/connect-standalone.sh (Executable file, 45 lines changed)

@@ -0,0 +1,45 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ $# -lt 1 ];
then
echo "USAGE: $0 [-daemon] connect-standalone.properties"
exit 1
fi
base_dir=$(dirname $0)
if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties"
fi
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G"
fi
EXTRA_ARGS=${EXTRA_ARGS-'-name connectStandalone'}
COMMAND=$1
case $COMMAND in
-daemon)
EXTRA_ARGS="-daemon "$EXTRA_ARGS
shift
;;
*)
;;
esac
exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectStandalone "$@"

(deleted file: Elasticsearch index-template setup script, 655 lines removed)

@@ -1,655 +0,0 @@
esaddr=127.0.0.1
port=8060
curl -s --connect-timeout 10 -o /dev/null http://${esaddr}:${port}/_cat/nodes >/dev/null 2>&1
if [ "$?" != "0" ];then
echo "Elasticserach 访问失败, 请安装完后检查并重新执行该脚本 "
exit
fi
curl -s --connect-timeout 10 -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_broker_metric -d '{
"order" : 10,
"index_patterns" : [
"ks_kafka_broker_metric*"
],
"settings" : {
"index" : {
"number_of_shards" : "10"
}
},
"mappings" : {
"properties" : {
"brokerId" : {
"type" : "long"
},
"routingValue" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"clusterPhyId" : {
"type" : "long"
},
"metrics" : {
"properties" : {
"NetworkProcessorAvgIdle" : {
"type" : "float"
},
"UnderReplicatedPartitions" : {
"type" : "float"
},
"BytesIn_min_15" : {
"type" : "float"
},
"HealthCheckTotal" : {
"type" : "float"
},
"RequestHandlerAvgIdle" : {
"type" : "float"
},
"connectionsCount" : {
"type" : "float"
},
"BytesIn_min_5" : {
"type" : "float"
},
"HealthScore" : {
"type" : "float"
},
"BytesOut" : {
"type" : "float"
},
"BytesOut_min_15" : {
"type" : "float"
},
"BytesIn" : {
"type" : "float"
},
"BytesOut_min_5" : {
"type" : "float"
},
"TotalRequestQueueSize" : {
"type" : "float"
},
"MessagesIn" : {
"type" : "float"
},
"TotalProduceRequests" : {
"type" : "float"
},
"HealthCheckPassed" : {
"type" : "float"
},
"TotalResponseQueueSize" : {
"type" : "float"
}
}
},
"key" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"timestamp" : {
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
"index" : true,
"type" : "date",
"doc_values" : true
}
}
},
"aliases" : { }
}'
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_cluster_metric -d '{
"order" : 10,
"index_patterns" : [
"ks_kafka_cluster_metric*"
],
"settings" : {
"index" : {
"number_of_shards" : "10"
}
},
"mappings" : {
"properties" : {
"routingValue" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"clusterPhyId" : {
"type" : "long"
},
"metrics" : {
"properties" : {
"Connections" : {
"type" : "double"
},
"BytesIn_min_15" : {
"type" : "double"
},
"PartitionURP" : {
"type" : "double"
},
"HealthScore_Topics" : {
"type" : "double"
},
"EventQueueSize" : {
"type" : "double"
},
"ActiveControllerCount" : {
"type" : "double"
},
"GroupDeads" : {
"type" : "double"
},
"BytesIn_min_5" : {
"type" : "double"
},
"HealthCheckTotal_Topics" : {
"type" : "double"
},
"Partitions" : {
"type" : "double"
},
"BytesOut" : {
"type" : "double"
},
"Groups" : {
"type" : "double"
},
"BytesOut_min_15" : {
"type" : "double"
},
"TotalRequestQueueSize" : {
"type" : "double"
},
"HealthCheckPassed_Groups" : {
"type" : "double"
},
"TotalProduceRequests" : {
"type" : "double"
},
"HealthCheckPassed" : {
"type" : "double"
},
"TotalLogSize" : {
"type" : "double"
},
"GroupEmptys" : {
"type" : "double"
},
"PartitionNoLeader" : {
"type" : "double"
},
"HealthScore_Brokers" : {
"type" : "double"
},
"Messages" : {
"type" : "double"
},
"Topics" : {
"type" : "double"
},
"PartitionMinISR_E" : {
"type" : "double"
},
"HealthCheckTotal" : {
"type" : "double"
},
"Brokers" : {
"type" : "double"
},
"Replicas" : {
"type" : "double"
},
"HealthCheckTotal_Groups" : {
"type" : "double"
},
"GroupRebalances" : {
"type" : "double"
},
"MessageIn" : {
"type" : "double"
},
"HealthScore" : {
"type" : "double"
},
"HealthCheckPassed_Topics" : {
"type" : "double"
},
"HealthCheckTotal_Brokers" : {
"type" : "double"
},
"PartitionMinISR_S" : {
"type" : "double"
},
"BytesIn" : {
"type" : "double"
},
"BytesOut_min_5" : {
"type" : "double"
},
"GroupActives" : {
"type" : "double"
},
"MessagesIn" : {
"type" : "double"
},
"GroupReBalances" : {
"type" : "double"
},
"HealthCheckPassed_Brokers" : {
"type" : "double"
},
"HealthScore_Groups" : {
"type" : "double"
},
"TotalResponseQueueSize" : {
"type" : "double"
},
"Zookeepers" : {
"type" : "double"
},
"LeaderMessages" : {
"type" : "double"
},
"HealthScore_Cluster" : {
"type" : "double"
},
"HealthCheckPassed_Cluster" : {
"type" : "double"
},
"HealthCheckTotal_Cluster" : {
"type" : "double"
}
}
},
"key" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"timestamp" : {
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
"type" : "date"
}
}
},
"aliases" : { }
}'
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_group_metric -d '{
"order" : 10,
"index_patterns" : [
"ks_kafka_group_metric*"
],
"settings" : {
"index" : {
"number_of_shards" : "10"
}
},
"mappings" : {
"properties" : {
"group" : {
"type" : "keyword"
},
"partitionId" : {
"type" : "long"
},
"routingValue" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"clusterPhyId" : {
"type" : "long"
},
"topic" : {
"type" : "keyword"
},
"metrics" : {
"properties" : {
"HealthScore" : {
"type" : "float"
},
"Lag" : {
"type" : "float"
},
"OffsetConsumed" : {
"type" : "float"
},
"HealthCheckTotal" : {
"type" : "float"
},
"HealthCheckPassed" : {
"type" : "float"
}
}
},
"groupMetric" : {
"type" : "keyword"
},
"key" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"timestamp" : {
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
"index" : true,
"type" : "date",
"doc_values" : true
}
}
},
"aliases" : { }
}'
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_partition_metric -d '{
"order" : 10,
"index_patterns" : [
"ks_kafka_partition_metric*"
],
"settings" : {
"index" : {
"number_of_shards" : "10"
}
},
"mappings" : {
"properties" : {
"brokerId" : {
"type" : "long"
},
"partitionId" : {
"type" : "long"
},
"routingValue" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"clusterPhyId" : {
"type" : "long"
},
"topic" : {
"type" : "keyword"
},
"metrics" : {
"properties" : {
"LogStartOffset" : {
"type" : "float"
},
"Messages" : {
"type" : "float"
},
"LogEndOffset" : {
"type" : "float"
}
}
},
"key" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"timestamp" : {
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
"index" : true,
"type" : "date",
"doc_values" : true
}
}
},
"aliases" : { }
}'
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_replication_metric -d '{
"order" : 10,
"index_patterns" : [
"ks_kafka_partition_metric*"
],
"settings" : {
"index" : {
"number_of_shards" : "10"
}
},
"mappings" : {
"properties" : {
"brokerId" : {
"type" : "long"
},
"partitionId" : {
"type" : "long"
},
"routingValue" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"clusterPhyId" : {
"type" : "long"
},
"topic" : {
"type" : "keyword"
},
"metrics" : {
"properties" : {
"LogStartOffset" : {
"type" : "float"
},
"Messages" : {
"type" : "float"
},
"LogEndOffset" : {
"type" : "float"
}
}
},
"key" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"timestamp" : {
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
"index" : true,
"type" : "date",
"doc_values" : true
}
}
},
"aliases" : { }
}'
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_topic_metric -d '{
"order" : 10,
"index_patterns" : [
"ks_kafka_topic_metric*"
],
"settings" : {
"index" : {
"number_of_shards" : "10"
}
},
"mappings" : {
"properties" : {
"brokerId" : {
"type" : "long"
},
"routingValue" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"topic" : {
"type" : "keyword"
},
"clusterPhyId" : {
"type" : "long"
},
"metrics" : {
"properties" : {
"BytesIn_min_15" : {
"type" : "float"
},
"Messages" : {
"type" : "float"
},
"BytesRejected" : {
"type" : "float"
},
"PartitionURP" : {
"type" : "float"
},
"HealthCheckTotal" : {
"type" : "float"
},
"ReplicationCount" : {
"type" : "float"
},
"ReplicationBytesOut" : {
"type" : "float"
},
"ReplicationBytesIn" : {
"type" : "float"
},
"FailedFetchRequests" : {
"type" : "float"
},
"BytesIn_min_5" : {
"type" : "float"
},
"HealthScore" : {
"type" : "float"
},
"LogSize" : {
"type" : "float"
},
"BytesOut" : {
"type" : "float"
},
"BytesOut_min_15" : {
"type" : "float"
},
"FailedProduceRequests" : {
"type" : "float"
},
"BytesIn" : {
"type" : "float"
},
"BytesOut_min_5" : {
"type" : "float"
},
"MessagesIn" : {
"type" : "float"
},
"TotalProduceRequests" : {
"type" : "float"
},
"HealthCheckPassed" : {
"type" : "float"
}
}
},
"brokerAgg" : {
"type" : "keyword"
},
"key" : {
"type" : "text",
"fields" : {
"keyword" : {
"ignore_above" : 256,
"type" : "keyword"
}
}
},
"timestamp" : {
"format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis",
"index" : true,
"type" : "date",
"doc_values" : true
}
}
},
"aliases" : { }
}'
for i in {0..6};
do
logdate=_$(date -d "${i} day ago" +%Y-%m-%d)
curl -s --connect-timeout 10 -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_broker_metric${logdate} && \
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_cluster_metric${logdate} && \
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_group_metric${logdate} && \
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_partition_metric${logdate} && \
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_replication_metric${logdate} && \
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_topic_metric${logdate} || \
exit 2
done
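To sanity-check the result (a minimal sketch reusing the script's own ${esaddr} and ${port} variables), the stock Elasticsearch _cat APIs list what was installed:
# Templates registered by the calls above
curl -s "http://${esaddr}:${port}/_cat/templates/ks_kafka_*?v"
# Daily indices pre-created by the loop
curl -s "http://${esaddr}:${port}/_cat/indices/ks_kafka_*?v"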

17
bin/kafka-acls.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.admin.AclCommand "$@"
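All of the wrapper scripts below share this one-line shape: exec kafka-run-class.sh with a tool-specific main class, passing all arguments through. As an illustrative invocation of this ACL wrapper (the broker address is an assumption, not taken from this repo):
bin/kafka-acls.sh --bootstrap-server localhost:9092 --list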

17
bin/kafka-broker-api-versions.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.admin.BrokerApiVersionsCommand "$@"

17
bin/kafka-configs.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.admin.ConfigCommand "$@"

21
bin/kafka-console-consumer.sh Executable file
View File

@@ -0,0 +1,21 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx512M"
fi
exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsoleConsumer "$@"

20
bin/kafka-console-producer.sh Executable file
View File

@@ -0,0 +1,20 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx512M"
fi
exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsoleProducer "$@"

17
bin/kafka-consumer-groups.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.admin.ConsumerGroupCommand "$@"

20
bin/kafka-consumer-perf-test.sh Executable file
View File

@@ -0,0 +1,20 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx512M"
fi
exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsumerPerformance "$@"

17
bin/kafka-delegation-tokens.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.admin.DelegationTokenCommand "$@"

17
bin/kafka-delete-records.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.admin.DeleteRecordsCommand "$@"

18
bin/kafka-diskload-protector.sh Executable file
View File

@@ -0,0 +1,18 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.admin.DiskLoadProtectorCommand "$@"

17
bin/kafka-dump-log.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.tools.DumpLogSegments "$@"

18
bin/kafka-exmetrics.sh Executable file
View File

@@ -0,0 +1,18 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.admin.KafkaExMetricsCommand "$@"

17
bin/kafka-leader-election.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.admin.LeaderElectionCommand "$@"

17
bin/kafka-log-dirs.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.admin.LogDirsCommand "$@"

17
bin/kafka-mirror-maker.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.tools.MirrorMaker "$@"

17
bin/kafka-preferred-replica-election.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.admin.PreferredReplicaLeaderElectionCommand "$@"

20
bin/kafka-producer-perf-test.sh Executable file
View File

@@ -0,0 +1,20 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx512M"
fi
exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.ProducerPerformance "$@"

17
bin/kafka-reassign-partitions.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.admin.ReassignPartitionsCommand "$@"

17
bin/kafka-replica-verification.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.tools.ReplicaVerificationTool "$@"

316
bin/kafka-run-class.sh Executable file
View File

@@ -0,0 +1,316 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ $# -lt 1 ];
then
echo "USAGE: $0 [-daemon] [-name servicename] [-loggc] classname [opts]"
exit 1
fi
# CYGWIN == 1 if Cygwin is detected, else 0.
if [[ $(uname -a) =~ "CYGWIN" ]]; then
CYGWIN=1
else
CYGWIN=0
fi
if [ -z "$INCLUDE_TEST_JARS" ]; then
INCLUDE_TEST_JARS=false
fi
# Exclude jars not necessary for running commands.
regex="(-(test|test-sources|src|scaladoc|javadoc)\.jar|jar.asc)$"
should_include_file() {
if [ "$INCLUDE_TEST_JARS" = true ]; then
return 0
fi
file=$1
if [ -z "$(echo "$file" | egrep "$regex")" ] ; then
return 0
else
return 1
fi
}
base_dir=$(dirname $0)/..
if [ -z "$SCALA_VERSION" ]; then
SCALA_VERSION=2.12.10
fi
if [ -z "$SCALA_BINARY_VERSION" ]; then
SCALA_BINARY_VERSION=$(echo $SCALA_VERSION | cut -f 1-2 -d '.')
fi
# run ./gradlew copyDependantLibs to get all dependant jars in a local dir
shopt -s nullglob
if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
for dir in "$base_dir"/core/build/dependant-libs-${SCALA_VERSION}*;
do
CLASSPATH="$CLASSPATH:$dir/*"
done
fi
for file in "$base_dir"/examples/build/libs/kafka-examples*.jar;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
clients_lib_dir=$(dirname $0)/../clients/build/libs
streams_lib_dir=$(dirname $0)/../streams/build/libs
streams_dependant_clients_lib_dir=$(dirname $0)/../streams/build/dependant-libs-${SCALA_VERSION}
else
clients_lib_dir=/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs
streams_lib_dir=$clients_lib_dir
streams_dependant_clients_lib_dir=$streams_lib_dir
fi
for file in "$clients_lib_dir"/kafka-clients*.jar;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
for file in "$streams_lib_dir"/kafka-streams*.jar;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
for file in "$base_dir"/streams/examples/build/libs/kafka-streams-examples*.jar;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
else
VERSION_NO_DOTS=`echo $UPGRADE_KAFKA_STREAMS_TEST_VERSION | sed 's/\.//g'`
SHORT_VERSION_NO_DOTS=${VERSION_NO_DOTS:0:((${#VERSION_NO_DOTS} - 1))} # remove last char, ie, bug-fix number
for file in "$base_dir"/streams/upgrade-system-tests-$SHORT_VERSION_NO_DOTS/build/libs/kafka-streams-upgrade-system-tests*.jar;
do
if should_include_file "$file"; then
CLASSPATH="$file":"$CLASSPATH"
fi
done
if [ "$SHORT_VERSION_NO_DOTS" = "0100" ]; then
CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.8.jar":"$CLASSPATH"
CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.6.jar":"$CLASSPATH"
fi
if [ "$SHORT_VERSION_NO_DOTS" = "0101" ]; then
CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.9.jar":"$CLASSPATH"
CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.8.jar":"$CLASSPATH"
fi
fi
for file in "$streams_dependant_clients_lib_dir"/rocksdb*.jar;
do
CLASSPATH="$CLASSPATH":"$file"
done
for file in "$streams_dependant_clients_lib_dir"/*hamcrest*.jar;
do
CLASSPATH="$CLASSPATH":"$file"
done
for file in "$base_dir"/tools/build/libs/kafka-tools*.jar;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
for dir in "$base_dir"/tools/build/dependant-libs-${SCALA_VERSION}*;
do
CLASSPATH="$CLASSPATH:$dir/*"
done
for cc_pkg in "api" "transforms" "runtime" "file" "mirror" "mirror-client" "json" "tools" "basic-auth-extension"
do
for file in "$base_dir"/connect/${cc_pkg}/build/libs/connect-${cc_pkg}*.jar;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
if [ -d "$base_dir/connect/${cc_pkg}/build/dependant-libs" ] ; then
CLASSPATH="$CLASSPATH:$base_dir/connect/${cc_pkg}/build/dependant-libs/*"
fi
done
# classpath addition for release
for file in "$base_dir"/libs/*;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
for file in "$base_dir"/core/build/libs/kafka_${SCALA_BINARY_VERSION}*.jar;
do
if should_include_file "$file"; then
CLASSPATH="$CLASSPATH":"$file"
fi
done
shopt -u nullglob
if [ -z "$CLASSPATH" ] ; then
echo "Classpath is empty. Please build the project first e.g. by running './gradlew jar -PscalaVersion=$SCALA_VERSION'"
exit 1
fi
# JMX settings
if [ -z "$KAFKA_JMX_OPTS" ]; then
KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false "
fi
# JMX port to use
if [ $JMX_PORT ]; then
KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
fi
# Log directory to use
if [ "x$LOG_DIR" = "x" ]; then
LOG_DIR="$base_dir/logs"
fi
# Log4j settings
if [ -z "$KAFKA_LOG4J_OPTS" ]; then
# Log to console. This is a tool.
LOG4J_DIR="$base_dir/config/tools-log4j.properties"
# If Cygwin is detected, LOG4J_DIR is converted to Windows format.
(( CYGWIN )) && LOG4J_DIR=$(cygpath --path --mixed "${LOG4J_DIR}")
KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_DIR}"
else
# create logs directory
if [ ! -d "$LOG_DIR" ]; then
mkdir -p "$LOG_DIR"
fi
fi
# If Cygwin is detected, LOG_DIR is converted to Windows format.
(( CYGWIN )) && LOG_DIR=$(cygpath --path --mixed "${LOG_DIR}")
KAFKA_LOG4J_OPTS="-Dkafka.logs.dir=$LOG_DIR $KAFKA_LOG4J_OPTS"
# Generic jvm settings you want to add
if [ -z "$KAFKA_OPTS" ]; then
KAFKA_OPTS=""
fi
# Set Debug options if enabled
if [ "x$KAFKA_DEBUG" != "x" ]; then
# Use default ports
DEFAULT_JAVA_DEBUG_PORT="5005"
if [ -z "$JAVA_DEBUG_PORT" ]; then
JAVA_DEBUG_PORT="$DEFAULT_JAVA_DEBUG_PORT"
fi
# Use the defaults if JAVA_DEBUG_OPTS was not set
DEFAULT_JAVA_DEBUG_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=${DEBUG_SUSPEND_FLAG:-n},address=0.0.0.0:$JAVA_DEBUG_PORT"
if [ -z "$JAVA_DEBUG_OPTS" ]; then
JAVA_DEBUG_OPTS="$DEFAULT_JAVA_DEBUG_OPTS"
fi
echo "Enabling Java debug options: $JAVA_DEBUG_OPTS"
KAFKA_OPTS="$JAVA_DEBUG_OPTS $KAFKA_OPTS"
fi
# Which java to use
if [ -z "$JAVA_HOME" ]; then
JAVA="java"
else
JAVA="$JAVA_HOME/bin/java"
fi
# Memory options
if [ -z "$KAFKA_HEAP_OPTS" ]; then
KAFKA_HEAP_OPTS="-Xmx256M"
fi
# JVM performance options
# MaxInlineLevel=15 is the default since JDK 14 and can be removed once older JDKs are no longer supported
if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then
KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:InitiatingHeapOccupancyPercent=35 -XX:G1HeapRegionSize=16m -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 -Djava.awt.headless=true"
fi
while [ $# -gt 0 ]; do
COMMAND=$1
case $COMMAND in
-name)
DAEMON_NAME=$2
CONSOLE_OUTPUT_FILE=$LOG_DIR/$DAEMON_NAME.out
shift 2
;;
-loggc)
if [ -z "$KAFKA_GC_LOG_OPTS" ]; then
GC_LOG_ENABLED="true"
fi
shift
;;
-daemon)
DAEMON_MODE="true"
shift
;;
*)
break
;;
esac
done
# GC options
GC_FILE_SUFFIX='-gc.log'
GC_LOG_FILE_NAME=''
if [ "x$GC_LOG_ENABLED" = "xtrue" ]; then
GC_LOG_FILE_NAME=$DAEMON_NAME$GC_FILE_SUFFIX
# The first segment of the version number, which is '1' for releases before Java 9
# it then becomes '9', '10', ...
# Some examples of the first line of `java --version`:
# 8 -> java version "1.8.0_152"
# 9.0.4 -> java version "9.0.4"
# 10 -> java version "10" 2018-03-20
# 10.0.1 -> java version "10.0.1" 2018-04-17
# We need to match to the end of the line to prevent sed from printing the characters that do not match
JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then
KAFKA_GC_LOG_OPTS="-Xlog:gc*:file=$LOG_DIR/$GC_LOG_FILE_NAME:time"
else
KAFKA_GC_LOG_OPTS="-Xloggc:$LOG_DIR/$GC_LOG_FILE_NAME -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
fi
fi
# Remove a possible colon prefix from the classpath (happens at lines like `CLASSPATH="$CLASSPATH:$file"` when CLASSPATH is blank)
# Syntax used on the right side is native Bash string manipulation; for more details see
# http://tldp.org/LDP/abs/html/string-manipulation.html, specifically the section titled "Substring Removal"
CLASSPATH=${CLASSPATH#:}
# If Cygwin is detected, classpath is converted to Windows format.
(( CYGWIN )) && CLASSPATH=$(cygpath --path --mixed "${CLASSPATH}")
# Launch mode
if [ "x$DAEMON_MODE" = "xtrue" ]; then
nohup $JAVA $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp $CLASSPATH $KAFKA_OPTS "$@" > "$CONSOLE_OUTPUT_FILE" 2>&1 < /dev/null &
else
exec $JAVA $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp $CLASSPATH $KAFKA_OPTS "$@"
fi
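Since the launcher above is driven entirely by environment variables, a one-off run can override them inline; a hypothetical example (class, heap size, and JMX port chosen only for illustration):
KAFKA_HEAP_OPTS="-Xmx1G" JMX_PORT=9990 bin/kafka-run-class.sh kafka.admin.TopicCommand --help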

51
bin/kafka-server-start.sh Executable file
View File

@@ -0,0 +1,51 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ $# -lt 1 ];
then
echo "USAGE: $0 [-daemon] server.properties [--override property=value]*"
exit 1
fi
base_dir=$(dirname $0)
if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
fi
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx8G -Xms8G"
export JMX_PORT=8099
#export KAFKA_DEBUG=debug
#export DAEMON_MODE=true
export KAFKA_OPTS="-Djava.security.auth.login.config=$base_dir/../config/kafka_server_jaas.conf"
export DEBUG_SUSPEND_FLAG="n"
export JAVA_DEBUG_PORT="8096"
export GC_LOG_ENABLED=true
fi
EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'}
COMMAND=$1
case $COMMAND in
-daemon)
EXTRA_ARGS="-daemon "$EXTRA_ARGS
shift
;;
*)
;;
esac
exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.ServiceDiscovery "$@"
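Matching the USAGE string above, a typical daemonized start with a property override would look like (paths assumed):
bin/kafka-server-start.sh -daemon config/server.properties --override broker.id=1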

24
bin/kafka-server-stop.sh Executable file
View File

@@ -0,0 +1,24 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SIGNAL=${SIGNAL:-TERM}
PIDS=$(ps ax | grep -i 'kafka\.ServiceDiscovery' | grep java | grep -v grep | awk '{print $1}')
if [ -z "$PIDS" ]; then
echo "No kafka server to stop"
exit 1
else
kill -s $SIGNAL $PIDS
fi
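Because SIGNAL defaults to TERM above, a forced stop is just an environment override:
SIGNAL=KILL bin/kafka-server-stop.sh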

21
bin/kafka-streams-application-reset.sh Executable file
View File

@@ -0,0 +1,21 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx512M"
fi
exec $(dirname $0)/kafka-run-class.sh kafka.tools.StreamsResetter "$@"

17
bin/kafka-topics.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.admin.TopicCommand "$@"

20
bin/kafka-verifiable-consumer.sh Executable file
View File

@@ -0,0 +1,20 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx512M"
fi
exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.VerifiableConsumer "$@"

20
bin/kafka-verifiable-producer.sh Executable file
View File

@@ -0,0 +1,20 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx512M"
fi
exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.VerifiableProducer "$@"

View File

@@ -1,16 +0,0 @@
#!/bin/bash
cd `dirname $0`/../libs
target_dir=`pwd`
pid=`ps ax | grep -i 'ks-km' | grep ${target_dir} | grep java | grep -v grep | awk '{print $1}'`
if [ -z "$pid" ] ; then
echo "No ks-km running."
exit -1;
fi
echo "The ks-km (${pid}) is running..."
kill ${pid}
echo "Send shutdown request to ks-km (${pid}) OK"

View File

@@ -1,82 +0,0 @@
error_exit ()
{
echo "ERROR: $1 !!"
exit 1
}
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=$HOME/jdk/java
[ ! -e "$JAVA_HOME/bin/java" ] && JAVA_HOME=/usr/java
[ ! -e "$JAVA_HOME/bin/java" ] && unset JAVA_HOME
if [ -z "$JAVA_HOME" ]; then
if $darwin; then
if [ -x '/usr/libexec/java_home' ] ; then
export JAVA_HOME=`/usr/libexec/java_home`
elif [ -d "/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home" ]; then
export JAVA_HOME="/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home"
fi
else
JAVA_PATH=`dirname $(readlink -f $(which javac))`
if [ "x$JAVA_PATH" != "x" ]; then
export JAVA_HOME=`dirname $JAVA_PATH 2>/dev/null`
fi
fi
if [ -z "$JAVA_HOME" ]; then
error_exit "Please set the JAVA_HOME variable in your environment, We need java(x64)! jdk8 or later is better!"
fi
fi
export WEB_SERVER="ks-km"
export JAVA_HOME
export JAVA="$JAVA_HOME/bin/java"
export BASE_DIR=`cd $(dirname $0)/..; pwd`
export CUSTOM_SEARCH_LOCATIONS=file:${BASE_DIR}/conf/
#===========================================================================================
# JVM Configuration
#===========================================================================================
JAVA_OPT="${JAVA_OPT} -server -Xms2g -Xmx2g -Xmn1g -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=320m"
JAVA_OPT="${JAVA_OPT} -XX:-OmitStackTraceInFastThrow -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${BASE_DIR}/logs/java_heapdump.hprof"
## Newer JDK versions have deprecated some of these flags, so branch on the major version
JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then
JAVA_OPT="${JAVA_OPT} -Xlog:gc*:file=${BASE_DIR}/logs/km_gc.log:time,tags:filecount=10,filesize=102400"
else
JAVA_OPT="${JAVA_OPT} -Djava.ext.dirs=${JAVA_HOME}/jre/lib/ext:${JAVA_HOME}/lib/ext"
JAVA_OPT="${JAVA_OPT} -Xloggc:${BASE_DIR}/logs/km_gc.log -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
fi
JAVA_OPT="${JAVA_OPT} -jar ${BASE_DIR}/libs/${WEB_SERVER}.jar"
JAVA_OPT="${JAVA_OPT} --spring.config.additional-location=${CUSTOM_SEARCH_LOCATIONS}"
JAVA_OPT="${JAVA_OPT} --logging.config=${BASE_DIR}/conf/logback-spring.xml"
JAVA_OPT="${JAVA_OPT} --server.max-http-header-size=524288"
if [ ! -d "${BASE_DIR}/logs" ]; then
mkdir ${BASE_DIR}/logs
fi
echo "$JAVA ${JAVA_OPT}"
# check the start.out log output file
if [ ! -f "${BASE_DIR}/logs/start.out" ]; then
touch "${BASE_DIR}/logs/start.out"
fi
# start
echo -e "---- 启动脚本 ------\n $JAVA ${JAVA_OPT}" > ${BASE_DIR}/logs/start.out 2>&1 &
nohup $JAVA ${JAVA_OPT} >> ${BASE_DIR}/logs/start.out 2>&1 &
echo "${WEB_SERVER} is startingyou can check the ${BASE_DIR}/logs/start.out"

50
bin/trogdor.sh Executable file
View File

@@ -0,0 +1,50 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
usage() {
cat <<EOF
The Trogdor fault injector.
Usage:
$0 [action] [options]
Actions:
agent: Run the trogdor agent.
coordinator: Run the trogdor coordinator.
client: Run the client which communicates with the trogdor coordinator.
agent-client: Run the client which communicates with the trogdor agent.
help: This help message.
EOF
}
if [[ $# -lt 1 ]]; then
usage
exit 0
fi
action="${1}"
shift
CLASS=""
case ${action} in
agent) CLASS="org.apache.kafka.trogdor.agent.Agent";;
coordinator) CLASS="org.apache.kafka.trogdor.coordinator.Coordinator";;
client) CLASS="org.apache.kafka.trogdor.coordinator.CoordinatorClient";;
agent-client) CLASS="org.apache.kafka.trogdor.agent.AgentClient";;
help) usage; exit 0;;
*) echo "Unknown action '${action}'. Type '$0 help' for help."; exit 1;;
esac
export INCLUDE_TEST_JARS=1
exec $(dirname $0)/kafka-run-class.sh "${CLASS}" "$@"
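Per the usage text above, the first argument selects the role to run; for example:
bin/trogdor.sh help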

34
bin/windows/connect-distributed.bat Executable file
View File

@@ -0,0 +1,34 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
IF [%1] EQU [] (
echo USAGE: %0 connect-distributed.properties
EXIT /B 1
)
SetLocal
rem Using pushd popd to set BASE_DIR to the absolute path
pushd %~dp0..\..
set BASE_DIR=%CD%
popd
rem Log4j settings
IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/tools-log4j.properties
)
"%~dp0kafka-run-class.bat" org.apache.kafka.connect.cli.ConnectDistributed %*
EndLocal

34
bin/windows/connect-standalone.bat Executable file
View File

@@ -0,0 +1,34 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
IF [%1] EQU [] (
echo USAGE: %0 connect-standalone.properties
EXIT /B 1
)
SetLocal
rem Using pushd popd to set BASE_DIR to the absolute path
pushd %~dp0..\..
set BASE_DIR=%CD%
popd
rem Log4j settings
IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/tools-log4j.properties
)
"%~dp0kafka-run-class.bat" org.apache.kafka.connect.cli.ConnectStandalone %*
EndLocal

17
bin/windows/kafka-acls.bat Executable file
View File

@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
"%~dp0kafka-run-class.bat" kafka.admin.AclCommand %*

17
bin/windows/kafka-broker-api-versions.bat Executable file
View File

@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
%~dp0kafka-run-class.bat kafka.admin.BrokerApiVersionsCommand %*

17
bin/windows/kafka-configs.bat Executable file
View File

@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
"%~dp0kafka-run-class.bat" kafka.admin.ConfigCommand %*

20
bin/windows/kafka-console-consumer.bat Executable file
View File

@@ -0,0 +1,20 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
SetLocal
set KAFKA_HEAP_OPTS=-Xmx512M
"%~dp0kafka-run-class.bat" kafka.tools.ConsoleConsumer %*
EndLocal

20
bin/windows/kafka-console-producer.bat Executable file
View File

@@ -0,0 +1,20 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
SetLocal
set KAFKA_HEAP_OPTS=-Xmx512M
"%~dp0kafka-run-class.bat" kafka.tools.ConsoleProducer %*
EndLocal

17
bin/windows/kafka-consumer-groups.bat Executable file
View File

@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
"%~dp0kafka-run-class.bat" kafka.admin.ConsumerGroupCommand %*

20
bin/windows/kafka-consumer-perf-test.bat Executable file
View File

@@ -0,0 +1,20 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
SetLocal
set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M
"%~dp0kafka-run-class.bat" kafka.tools.ConsumerPerformance %*
EndLocal

17
bin/windows/kafka-delegation-tokens.bat Executable file
View File

@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
"%~dp0kafka-run-class.bat" kafka.admin.DelegationTokenCommand %*

17
bin/windows/kafka-delete-records.bat Executable file
View File

@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
"%~dp0kafka-run-class.bat" kafka.admin.DeleteRecordsCommand %*

17
bin/windows/kafka-dump-log.bat Executable file
View File

@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
"%~dp0kafka-run-class.bat" kafka.tools.DumpLogSegments %*

17
bin/windows/kafka-leader-election.bat Executable file
View File

@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
"%~dp0kafka-run-class.bat" kafka.admin.LeaderElectionCommand %*

17
bin/windows/kafka-log-dirs.bat Executable file
View File

@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
"%~dp0kafka-run-class.bat" kafka.admin.LogDirsCommand %*

17
bin/windows/kafka-mirror-maker.bat Executable file
View File

@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
"%~dp0kafka-run-class.bat" kafka.tools.MirrorMaker %*

17
bin/windows/kafka-preferred-replica-election.bat Executable file
View File

@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
"%~dp0kafka-run-class.bat" kafka.admin.PreferredReplicaLeaderElectionCommand %*

20
bin/windows/kafka-producer-perf-test.bat Executable file
View File

@@ -0,0 +1,20 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
SetLocal
set KAFKA_HEAP_OPTS=-Xmx512M
"%~dp0kafka-run-class.bat" org.apache.kafka.tools.ProducerPerformance %*
EndLocal

17
bin/windows/kafka-reassign-partitions.bat Executable file
View File

@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
"%~dp0kafka-run-class.bat" kafka.admin.ReassignPartitionsCommand %*

17
bin/windows/kafka-replica-verification.bat Executable file
View File

@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
"%~dp0kafka-run-class.bat" kafka.tools.ReplicaVerificationTool %*

191
bin/windows/kafka-run-class.bat Executable file
View File

@@ -0,0 +1,191 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
setlocal enabledelayedexpansion
IF [%1] EQU [] (
echo USAGE: %0 classname [opts]
EXIT /B 1
)
rem Using pushd popd to set BASE_DIR to the absolute path
pushd %~dp0..\..
set BASE_DIR=%CD%
popd
IF ["%SCALA_VERSION%"] EQU [""] (
set SCALA_VERSION=2.12.10
)
IF ["%SCALA_BINARY_VERSION%"] EQU [""] (
for /f "tokens=1,2 delims=." %%a in ("%SCALA_VERSION%") do (
set FIRST=%%a
set SECOND=%%b
if ["!SECOND!"] EQU [""] (
set SCALA_BINARY_VERSION=!FIRST!
) else (
set SCALA_BINARY_VERSION=!FIRST!.!SECOND!
)
)
)
rem Classpath addition for kafka-core dependencies
for %%i in ("%BASE_DIR%\core\build\dependant-libs-%SCALA_VERSION%\*.jar") do (
call :concat "%%i"
)
rem Classpath addition for kafka-examples
for %%i in ("%BASE_DIR%\examples\build\libs\kafka-examples*.jar") do (
call :concat "%%i"
)
rem Classpath addition for kafka-clients
for %%i in ("%BASE_DIR%\clients\build\libs\kafka-clients*.jar") do (
call :concat "%%i"
)
rem Classpath addition for kafka-streams
for %%i in ("%BASE_DIR%\streams\build\libs\kafka-streams*.jar") do (
call :concat "%%i"
)
rem Classpath addition for kafka-streams-examples
for %%i in ("%BASE_DIR%\streams\examples\build\libs\kafka-streams-examples*.jar") do (
call :concat "%%i"
)
for %%i in ("%BASE_DIR%\streams\build\dependant-libs-%SCALA_VERSION%\rocksdb*.jar") do (
call :concat "%%i"
)
rem Classpath addition for kafka tools
for %%i in ("%BASE_DIR%\tools\build\libs\kafka-tools*.jar") do (
call :concat "%%i"
)
for %%i in ("%BASE_DIR%\tools\build\dependant-libs-%SCALA_VERSION%\*.jar") do (
call :concat "%%i"
)
for %%p in (api runtime file json tools) do (
for %%i in ("%BASE_DIR%\connect\%%p\build\libs\connect-%%p*.jar") do (
call :concat "%%i"
)
if exist "%BASE_DIR%\connect\%%p\build\dependant-libs\*" (
call :concat "%BASE_DIR%\connect\%%p\build\dependant-libs\*"
)
)
rem Classpath addition for release
for %%i in ("%BASE_DIR%\libs\*") do (
call :concat "%%i"
)
rem Classpath addition for core
for %%i in ("%BASE_DIR%\core\build\libs\kafka_%SCALA_BINARY_VERSION%*.jar") do (
call :concat "%%i"
)
rem JMX settings
IF ["%KAFKA_JMX_OPTS%"] EQU [""] (
set KAFKA_JMX_OPTS=-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false
)
rem JMX port to use
IF ["%JMX_PORT%"] NEQ [""] (
set KAFKA_JMX_OPTS=%KAFKA_JMX_OPTS% -Dcom.sun.management.jmxremote.port=%JMX_PORT%
)
rem Log directory to use
IF ["%LOG_DIR%"] EQU [""] (
set LOG_DIR=%BASE_DIR%/logs
)
rem Log4j settings
IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/tools-log4j.properties
) ELSE (
rem create logs directory
IF not exist "%LOG_DIR%" (
mkdir "%LOG_DIR%"
)
)
set KAFKA_LOG4J_OPTS=-Dkafka.logs.dir="%LOG_DIR%" "%KAFKA_LOG4J_OPTS%"
rem Generic jvm settings you want to add
IF ["%KAFKA_OPTS%"] EQU [""] (
set KAFKA_OPTS=
)
set DEFAULT_JAVA_DEBUG_PORT=5005
set DEFAULT_DEBUG_SUSPEND_FLAG=n
rem Set Debug options if enabled
IF ["%KAFKA_DEBUG%"] NEQ [""] (
IF ["%JAVA_DEBUG_PORT%"] EQU [""] (
set JAVA_DEBUG_PORT=%DEFAULT_JAVA_DEBUG_PORT%
)
IF ["%DEBUG_SUSPEND_FLAG%"] EQU [""] (
set DEBUG_SUSPEND_FLAG=%DEFAULT_DEBUG_SUSPEND_FLAG%
)
set DEFAULT_JAVA_DEBUG_OPTS=-agentlib:jdwp=transport=dt_socket,server=y,suspend=!DEBUG_SUSPEND_FLAG!,address=!JAVA_DEBUG_PORT!
IF ["%JAVA_DEBUG_OPTS%"] EQU [""] (
set JAVA_DEBUG_OPTS=!DEFAULT_JAVA_DEBUG_OPTS!
)
echo Enabling Java debug options: !JAVA_DEBUG_OPTS!
set KAFKA_OPTS=!JAVA_DEBUG_OPTS! !KAFKA_OPTS!
)
rem Which java to use
IF ["%JAVA_HOME%"] EQU [""] (
set JAVA=java
) ELSE (
set JAVA="%JAVA_HOME%/bin/java"
)
rem Memory options
IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
set KAFKA_HEAP_OPTS=-Xmx256M
)
rem JVM performance options
IF ["%KAFKA_JVM_PERFORMANCE_OPTS%"] EQU [""] (
set KAFKA_JVM_PERFORMANCE_OPTS=-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true
)
IF not defined CLASSPATH (
echo Classpath is empty. Please build the project first e.g. by running 'gradlew jarAll'
EXIT /B 2
)
set COMMAND=%JAVA% %KAFKA_HEAP_OPTS% %KAFKA_JVM_PERFORMANCE_OPTS% %KAFKA_JMX_OPTS% %KAFKA_LOG4J_OPTS% -cp "%CLASSPATH%" %KAFKA_OPTS% %*
rem echo.
rem echo %COMMAND%
rem echo.
%COMMAND%
goto :eof
:concat
IF not defined CLASSPATH (
set CLASSPATH="%~1"
) ELSE (
set CLASSPATH=%CLASSPATH%;"%~1"
)
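
All of the wrapper scripts in this drop delegate to kafka-run-class.bat (or its Unix counterpart kafka-run-class.sh), which assembles the classpath and JVM flags before launching the requested class. As a hedged sketch of driving the runner directly on the Unix side, assuming a built tree and using kafka.admin.TopicCommand purely as an illustrative target:

# override the heap and attach a debugger; these env vars are read by the runner itself
KAFKA_HEAP_OPTS="-Xmx256M" \
KAFKA_DEBUG=1 JAVA_DEBUG_PORT=5005 \
bin/kafka-run-class.sh kafka.admin.TopicCommand --help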

View File

@@ -0,0 +1,38 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
IF [%1] EQU [] (
echo USAGE: %0 server.properties
EXIT /B 1
)
SetLocal
IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties
)
IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
rem detect OS architecture
wmic os get osarchitecture | find /i "32-bit" >nul 2>&1
IF NOT ERRORLEVEL 1 (
rem 32-bit OS
set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M
) ELSE (
rem 64-bit OS
set KAFKA_HEAP_OPTS=-Xmx1G -Xms1G
)
)
"%~dp0kafka-run-class.bat" kafka.Kafka %*
EndLocal

View File

@@ -0,0 +1,18 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
wmic process where (commandline like "%%kafka.Kafka%%" and not name="wmic.exe") delete
rem ps ax | grep -i 'kafka.Kafka' | grep -v grep | awk '{print $1}' | xargs kill -SIGTERM

View File

@@ -0,0 +1,23 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
SetLocal
IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
set KAFKA_HEAP_OPTS=-Xmx512M
)
"%~dp0kafka-run-class.bat" kafka.tools.StreamsResetter %*
EndLocal

View File

@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
"%~dp0kafka-run-class.bat" kafka.admin.TopicCommand %*

View File

@@ -0,0 +1,30 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
IF [%1] EQU [] (
echo USAGE: %0 zookeeper.properties
EXIT /B 1
)
SetLocal
IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties
)
IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M
)
"%~dp0kafka-run-class.bat" org.apache.zookeeper.server.quorum.QuorumPeerMain %*
EndLocal

View File

@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
wmic process where (commandline like "%%zookeeper%%" and not name="wmic.exe") delete

View File

@@ -0,0 +1,22 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.
IF [%1] EQU [] (
echo USAGE: %0 zookeeper_host:port[/path] [-zk-tls-config-file file] [args...]
EXIT /B 1
)
"%~dp0kafka-run-class.bat" org.apache.zookeeper.ZooKeeperMainWithTlsSupportForKafka -server %*

View File

@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
exec $(dirname $0)/kafka-run-class.sh kafka.admin.ZkSecurityMigrator "$@"
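
The migrator is driven entirely by arguments passed through to kafka.admin.ZkSecurityMigrator. A minimal sketch, assuming the tool's standard --zookeeper.acl/--zookeeper.connect flags (the tool itself is not shown in this diff):

# switch znode ACLs to secure mode on a local ZooKeeper
bin/zookeeper-security-migration.sh --zookeeper.acl=secure --zookeeper.connect=localhost:2181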

44
bin/zookeeper-server-start.sh Executable file
View File

@@ -0,0 +1,44 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ $# -lt 1 ];
then
echo "USAGE: $0 [-daemon] zookeeper.properties"
exit 1
fi
base_dir=$(dirname $0)
if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
fi
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx512M -Xms512M"
fi
EXTRA_ARGS=${EXTRA_ARGS-'-name zookeeper -loggc'}
COMMAND=$1
case $COMMAND in
-daemon)
EXTRA_ARGS="-daemon "$EXTRA_ARGS
shift
;;
*)
;;
esac
exec $base_dir/kafka-run-class.sh $EXTRA_ARGS org.apache.zookeeper.server.quorum.QuorumPeerMain "$@"
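
Note how the -daemon branch prepends to EXTRA_ARGS and shifts the flag away, so the properties file is always the first remaining argument. Two illustrative invocations, with paths assumed relative to the extracted distribution:

# foreground, with the script's default 512M heap
bin/zookeeper-server-start.sh config/zookeeper.properties

# detached; the heap override wins because the script only sets KAFKA_HEAP_OPTS when unset
KAFKA_HEAP_OPTS="-Xmx1G -Xms1G" bin/zookeeper-server-start.sh -daemon config/zookeeper.properties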

24
bin/zookeeper-server-stop.sh Executable file
View File

@@ -0,0 +1,24 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SIGNAL=${SIGNAL:-TERM}
PIDS=$(ps ax | grep java | grep -i QuorumPeerMain | grep -v grep | awk '{print $1}')
if [ -z "$PIDS" ]; then
echo "No zookeeper server to stop"
exit 1
else
kill -s $SIGNAL $PIDS
fi
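
Since SIGNAL defaults to TERM via ${SIGNAL:-TERM}, the stop script can be escalated without editing it:

# graceful stop first
bin/zookeeper-server-stop.sh

# only if the process ignores SIGTERM
SIGNAL=KILL bin/zookeeper-server-stop.sh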

23
bin/zookeeper-shell.sh Executable file
View File

@@ -0,0 +1,23 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [ $# -lt 1 ];
then
echo "USAGE: $0 zookeeper_host:port[/path] [-zk-tls-config-file file] [args...]"
exit 1
fi
exec $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.ZooKeeperMainWithTlsSupportForKafka -server "$@"
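
Everything after the host:port is forwarded to the ZooKeeper shell, so one-shot commands work without an interactive session. A sketch assuming a local ensemble and Kafka's default znode layout:

# list registered broker ids, then exit
bin/zookeeper-shell.sh localhost:2181 ls /brokers/ids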

1202
build.gradle Normal file

File diff suppressed because it is too large

83
build.sh Normal file
View File

@@ -0,0 +1,83 @@
#!/bin/bash
workspace=$(cd $(dirname $0) && pwd -P)
cd $workspace
## TODO const
APPNAME=service-discovery
module=$APPNAME
app=$module
gitversion=.gitversion
control=./control.sh
ngxfunc=./nginxfunc.sh
## function
function build() {
# build step
# cmd
JVERSION=`java -version 2>&1 | awk 'NR==1{gsub(/"/,"");print $3}'`
major=`echo $JVERSION | awk -F. '{print $1}'`
minor=`echo $JVERSION | awk -F. '{print $2}'`
if [ $major -le 1 ] && [ $minor -lt 11 ]; then
export JAVA_HOME=/usr/local/jdk-11.0.2 # (set this when JDK 11 is required)
export PATH=$JAVA_HOME/bin:$PATH
fi
# XXX build command
# mvn clean install -Ponline -Dmaven.test.skip=true -f ../pom.xml
./gradlew -PscalaVersion=2.12 releaseTarGz
local sc=$?
if [ $sc -ne 0 ];then
## build failed; exit with a non-zero code
echo "$app build error"
exit $sc
else
echo -n "$app build ok, vsn="
gitversion
fi
}
function make_output() {
# create a fresh output directory
local output="./output"
rm -rf $output &>/dev/null
mkdir -p $output &>/dev/null
# populate the output directory; its contents are exactly what gets deployed online
(
cp -rf $control $output && # copy the control.sh script into the output directory
cp -rf $ngxfunc $output &&
cp -rf ./APP_META $output &&
cp -rf ./APP_META/Dockerfile $output &&
# XXX extract the distribution package into the output path
tar -xzvf core/build/distributions/kafka_2.12-sd-2.5.0-d-100.tgz
mv kafka_2.12-sd-2.5.0-d-100 ${output}/service-discovery
# unzip target/${module}.war -d ${output} && # unpack the war into the output directory
echo -e "make output ok."
) || { echo -e "make output error"; exit 2; } # exit non-zero if populating the output directory fails
}
## internals
function gitversion() {
git log -1 --pretty=%h > $gitversion
local gv=`cat $gitversion`
echo "$gv"
}
##########################################
## main
## steps:
## 1. build
## 2. generate the output deployment package
##########################################
# 1. build
build
# 2. generate the output deployment package
make_output
# build succeeded
echo -e "build done"
exit 0
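
End to end, build.sh compiles with Gradle, unpacks the core distribution tarball, and stages the deployable tree under ./output. A hedged dry run, with the artifact name hard-coded as above:

# expect output/service-discovery plus control.sh, nginxfunc.sh and APP_META
./build.sh
ls output/
cat .gitversion   # short commit hash written by gitversion()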

20
checkstyle/.scalafmt.conf Normal file
View File

@@ -0,0 +1,20 @@
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
docstrings = JavaDoc
maxColumn = 120
continuationIndent.defnSite = 2
assumeStandardLibraryStripMargin = true
danglingParentheses = true
rewrite.rules = [SortImports, RedundantBraces, RedundantParens, SortModifiers]

142
checkstyle/checkstyle.xml Normal file
View File

@@ -0,0 +1,142 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE module PUBLIC
"-//Puppy Crawl//DTD Check Configuration 1.3//EN"
"http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
<!--
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-->
<module name="Checker">
<property name="localeLanguage" value="en"/>
<module name="FileTabCharacter"/>
<!-- header -->
<module name="Header">
<property name="headerFile" value="${headerFile}" />
</module>
<module name="TreeWalker">
<!-- code cleanup -->
<module name="UnusedImports">
<property name="processJavadoc" value="true" />
</module>
<module name="RedundantImport"/>
<module name="IllegalImport" />
<module name="EqualsHashCode"/>
<module name="SimplifyBooleanExpression"/>
<module name="OneStatementPerLine"/>
<module name="UnnecessaryParentheses" />
<module name="SimplifyBooleanReturn"/>
<!-- style -->
<module name="DefaultComesLast"/>
<module name="EmptyStatement"/>
<module name="ArrayTypeStyle"/>
<module name="UpperEll"/>
<module name="LeftCurly"/>
<module name="RightCurly"/>
<module name="EmptyStatement"/>
<module name="ConstantName">
<property name="format" value="(^[A-Z][A-Z0-9]*(_[A-Z0-9]+)*$)|(^log$)"/>
</module>
<module name="LocalVariableName"/>
<module name="LocalFinalVariableName"/>
<module name="MemberName"/>
<module name="ClassTypeParameterName">
<property name="format" value="^[A-Z][a-zA-Z0-9]*$$"/>
</module>
<module name="MethodTypeParameterName">
<property name="format" value="^[A-Z][a-zA-Z0-9]*$$"/>
</module>
<module name="InterfaceTypeParameterName">
<property name="format" value="^[A-Z][a-zA-Z0-9]*$$"/>
</module>
<module name="PackageName"/>
<module name="ParameterName"/>
<module name="StaticVariableName"/>
<module name="TypeName"/>
<module name="AvoidStarImport"/>
<!-- variables that can be final should be final (suppressed except for Streams) -->
<module name="FinalLocalVariable">
<property name="tokens" value="VARIABLE_DEF,PARAMETER_DEF"/>
<property name="validateEnhancedForLoopVariable" value="true"/>
</module>
<!-- dependencies -->
<module name="ImportControl">
<property name="file" value="${importControlFile}"/>
</module>
<!-- whitespace -->
<module name="GenericWhitespace"/>
<module name="NoWhitespaceBefore"/>
<module name="WhitespaceAfter" />
<module name="NoWhitespaceAfter"/>
<module name="WhitespaceAround">
<property name="allowEmptyConstructors" value="true"/>
<property name="allowEmptyMethods" value="true"/>
</module>
<module name="Indentation"/>
<module name="MethodParamPad"/>
<module name="ParenPad"/>
<module name="TypecastParenPad"/>
<!-- locale-sensitive methods should specify locale -->
<module name="Regexp">
<property name="format" value="\.to(Lower|Upper)Case\(\)"/>
<property name="illegalPattern" value="true"/>
<property name="ignoreComments" value="true"/>
</module>
<!-- code quality -->
<module name="MethodLength"/>
<module name="ParameterNumber">
<!-- default is 8 -->
<property name="max" value="13"/>
</module>
<module name="ClassDataAbstractionCoupling">
<!-- default is 7 -->
<property name="max" value="25"/>
</module>
<module name="BooleanExpressionComplexity">
<!-- default is 3 -->
<property name="max" value="5"/>
</module>
<module name="ClassFanOutComplexity">
<!-- default is 20 -->
<property name="max" value="50"/>
</module>
<module name="CyclomaticComplexity">
<!-- default is 10-->
<property name="max" value="16"/>
</module>
<module name="JavaNCSS">
<!-- default is 50 -->
<property name="methodMaximum" value="100"/>
</module>
<module name="NPathComplexity">
<!-- default is 200 -->
<property name="max" value="500"/>
</module>
</module>
<module name="SuppressionFilter">
<property name="file" value="${suppressionsFile}"/>
</module>
</module>

View File

@@ -0,0 +1,56 @@
<!DOCTYPE import-control PUBLIC
"-//Puppy Crawl//DTD Import Control 1.1//EN"
"http://www.puppycrawl.com/dtds/import_control_1_1.dtd">
<!--
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-->
<import-control pkg="kafka">
<!-- THINK HARD ABOUT THE LAYERING OF THE PROJECT BEFORE CHANGING THIS FILE -->
<!-- common library dependencies -->
<allow pkg="java" />
<allow pkg="scala" />
<allow pkg="javax.management" />
<allow pkg="org.slf4j" />
<allow pkg="org.junit" />
<allow pkg="org.easymock" />
<allow pkg="java.security" />
<allow pkg="javax.net.ssl" />
<allow pkg="javax.security" />
<allow pkg="kafka.common" />
<allow pkg="kafka.utils" />
<allow pkg="kafka.serializer" />
<allow pkg="org.apache.kafka.common" />
<subpackage name="tools">
<allow pkg="org.apache.kafka.clients.admin" />
<allow pkg="kafka.admin" />
<allow pkg="joptsimple" />
<allow pkg="org.apache.kafka.clients.consumer" />
</subpackage>
<subpackage name="coordinator">
<allow class="kafka.server.MetadataCache" />
</subpackage>
<subpackage name="examples">
<allow pkg="org.apache.kafka.clients" />
</subpackage>
</import-control>

View File

@@ -0,0 +1,47 @@
<!DOCTYPE import-control PUBLIC
"-//Puppy Crawl//DTD Import Control 1.1//EN"
"http://www.puppycrawl.com/dtds/import_control_1_1.dtd">
<!--
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-->
<import-control pkg="org.apache.kafka.jmh">
<allow pkg="java"/>
<allow pkg="scala"/>
<allow pkg="javax.management"/>
<allow pkg="org.slf4j"/>
<allow pkg="org.openjdk.jmh.annotations"/>
<allow pkg="org.openjdk.jmh.runner"/>
<allow pkg="org.openjdk.jmh.infra"/>
<allow pkg="java.security"/>
<allow pkg="javax.net.ssl"/>
<allow pkg="javax.security"/>
<allow pkg="org.apache.kafka.common"/>
<allow pkg="org.apache.kafka.clients.producer"/>
<allow pkg="kafka.cluster"/>
<allow pkg="kafka.log"/>
<allow pkg="kafka.server"/>
<allow pkg="kafka.api"/>
<allow class="kafka.utils.Pool"/>
<allow class="kafka.utils.KafkaScheduler"/>
<allow class="org.apache.kafka.clients.FetchSessionHandler"/>
<allow pkg="org.mockito"/>
<subpackage name="cache">
</subpackage>
</import-control>

View File

@@ -0,0 +1,456 @@
<!DOCTYPE import-control PUBLIC
"-//Puppy Crawl//DTD Import Control 1.1//EN"
"http://www.puppycrawl.com/dtds/import_control_1_1.dtd">
<!--
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-->
<import-control pkg="org.apache.kafka">
<!-- THINK HARD ABOUT THE LAYERING OF THE PROJECT BEFORE CHANGING THIS FILE -->
<!-- common library dependencies -->
<allow pkg="java" />
<allow pkg="javax.management" />
<allow pkg="org.slf4j" />
<allow pkg="org.junit" />
<allow pkg="org.hamcrest" />
<allow pkg="org.mockito" />
<allow pkg="org.easymock" />
<allow pkg="org.powermock" />
<allow pkg="java.security" />
<allow pkg="javax.net.ssl" />
<allow pkg="javax.security" />
<allow pkg="org.ietf.jgss" />
<!-- no one depends on the server -->
<disallow pkg="kafka" />
<!-- anyone can use public classes -->
<allow pkg="org.apache.kafka.common" exact-match="true" />
<allow pkg="org.apache.kafka.common.security" />
<allow pkg="org.apache.kafka.common.serialization" />
<allow pkg="org.apache.kafka.common.utils" />
<allow pkg="org.apache.kafka.common.errors" exact-match="true" />
<allow pkg="org.apache.kafka.common.memory" />
<subpackage name="common">
<disallow pkg="org.apache.kafka.clients" />
<allow pkg="org.apache.kafka.common" exact-match="true" />
<allow pkg="org.apache.kafka.common.annotation" />
<allow pkg="org.apache.kafka.common.config" exact-match="true" />
<allow pkg="org.apache.kafka.common.internals" exact-match="true" />
<allow pkg="org.apache.kafka.test" />
<subpackage name="acl">
<allow pkg="org.apache.kafka.common.annotation" />
<allow pkg="org.apache.kafka.common.acl" />
<allow pkg="org.apache.kafka.common.resource" />
</subpackage>
<subpackage name="config">
<allow pkg="org.apache.kafka.common.config" />
<!-- for testing -->
<allow pkg="org.apache.kafka.common.metrics" />
</subpackage>
<subpackage name="message">
<allow pkg="com.fasterxml.jackson" />
<allow pkg="org.apache.kafka.common.protocol" />
<allow pkg="org.apache.kafka.common.protocol.types" />
<allow pkg="org.apache.kafka.common.message" />
</subpackage>
<subpackage name="metrics">
<allow pkg="org.apache.kafka.common.metrics" />
</subpackage>
<subpackage name="memory">
<allow pkg="org.apache.kafka.common.metrics" />
</subpackage>
<subpackage name="network">
<allow pkg="org.apache.kafka.common.security.auth" />
<allow pkg="org.apache.kafka.common.protocol" />
<allow pkg="org.apache.kafka.common.config" />
<allow pkg="org.apache.kafka.common.metrics" />
<allow pkg="org.apache.kafka.common.security" />
</subpackage>
<subpackage name="resource">
<allow pkg="org.apache.kafka.common.annotation" />
<allow pkg="org.apache.kafka.common.resource" />
</subpackage>
<subpackage name="security">
<allow pkg="org.apache.kafka.common.annotation" />
<allow pkg="org.apache.kafka.common.network" />
<allow pkg="org.apache.kafka.common.config" />
<allow pkg="org.apache.kafka.common.protocol" />
<allow pkg="org.apache.kafka.common.errors" />
<subpackage name="authenticator">
<allow pkg="org.apache.kafka.common.message" />
<allow pkg="org.apache.kafka.common.protocol.types" />
<allow pkg="org.apache.kafka.common.requests" />
<allow pkg="org.apache.kafka.clients" />
</subpackage>
<subpackage name="scram">
<allow pkg="javax.crypto" />
</subpackage>
<subpackage name="oauthbearer">
<allow pkg="com.fasterxml.jackson.databind" />
</subpackage>
</subpackage>
<subpackage name="protocol">
<allow pkg="org.apache.kafka.common.errors" />
<allow pkg="org.apache.kafka.common.message" />
<allow pkg="org.apache.kafka.common.protocol" />
<allow pkg="org.apache.kafka.common.protocol.types" />
<allow pkg="org.apache.kafka.common.record" />
<allow pkg="org.apache.kafka.common.requests" />
<allow pkg="org.apache.kafka.common.resource" />
</subpackage>
<subpackage name="record">
<allow pkg="net.jpountz" />
<allow pkg="org.apache.kafka.common.header" />
<allow pkg="org.apache.kafka.common.record" />
<allow pkg="org.apache.kafka.common.network" />
<allow pkg="org.apache.kafka.common.protocol" />
<allow pkg="org.apache.kafka.common.protocol.types" />
<allow pkg="org.apache.kafka.common.errors" />
</subpackage>
<subpackage name="header">
<allow pkg="org.apache.kafka.common.header" />
<allow pkg="org.apache.kafka.common.record" />
</subpackage>
<subpackage name="requests">
<allow pkg="org.apache.kafka.common.acl" />
<allow pkg="org.apache.kafka.common.protocol" />
<allow pkg="org.apache.kafka.common.message" />
<allow pkg="org.apache.kafka.common.network" />
<allow pkg="org.apache.kafka.common.requests" />
<allow pkg="org.apache.kafka.common.resource" />
<allow pkg="org.apache.kafka.common.record" />
<!-- for AuthorizableRequestContext interface -->
<allow pkg="org.apache.kafka.server.authorizer" />
<!-- for testing -->
<allow pkg="org.apache.kafka.common.errors" />
</subpackage>
<subpackage name="serialization">
<allow class="org.apache.kafka.common.errors.SerializationException" />
<allow class="org.apache.kafka.common.header.Headers" />
</subpackage>
<subpackage name="utils">
<allow pkg="org.apache.kafka.common" />
</subpackage>
</subpackage>
<subpackage name="clients">
<allow pkg="org.slf4j" />
<allow pkg="org.apache.kafka.common" />
<allow pkg="org.apache.kafka.clients" exact-match="true"/>
<allow pkg="org.apache.kafka.test" />
<subpackage name="consumer">
<allow pkg="org.apache.kafka.clients.consumer" />
</subpackage>
<subpackage name="producer">
<allow pkg="org.apache.kafka.clients.consumer" />
<allow pkg="org.apache.kafka.clients.producer" />
</subpackage>
<subpackage name="admin">
<allow pkg="org.apache.kafka.clients.admin" />
<allow pkg="org.apache.kafka.clients.consumer.internals" />
<allow pkg="org.apache.kafka.clients.consumer" />
</subpackage>
</subpackage>
<subpackage name="server">
<allow pkg="org.slf4j" />
<allow pkg="org.apache.kafka.common" />
<allow pkg="org.apache.kafka.test" />
</subpackage>
<subpackage name="tools">
<allow pkg="org.apache.kafka.common"/>
<allow pkg="org.apache.kafka.clients.admin" />
<allow pkg="org.apache.kafka.clients.producer" />
<allow pkg="org.apache.kafka.clients.consumer" />
<allow pkg="com.fasterxml.jackson" />
<allow pkg="net.sourceforge.argparse4j" />
<allow pkg="org.apache.log4j" />
</subpackage>
<subpackage name="trogdor">
<allow pkg="com.fasterxml.jackson" />
<allow pkg="javax.servlet" />
<allow pkg="javax.ws.rs" />
<allow pkg="net.sourceforge.argparse4j" />
<allow pkg="org.apache.kafka.clients" />
<allow pkg="org.apache.kafka.clients.admin" />
<allow pkg="org.apache.kafka.clients.consumer" exact-match="true"/>
<allow pkg="org.apache.kafka.clients.producer" exact-match="true"/>
<allow pkg="org.apache.kafka.common" />
<allow pkg="org.apache.kafka.test"/>
<allow pkg="org.apache.kafka.trogdor" />
<allow pkg="org.apache.log4j" />
<allow pkg="org.eclipse.jetty" />
<allow pkg="org.glassfish.jersey" />
</subpackage>
<subpackage name="message">
<allow pkg="com.fasterxml.jackson" />
<allow pkg="com.fasterxml.jackson.annotation" />
</subpackage>
<subpackage name="streams">
<allow pkg="org.apache.kafka.common"/>
<allow pkg="org.apache.kafka.test"/>
<allow pkg="org.apache.kafka.clients"/>
<allow pkg="org.apache.kafka.clients.producer" exact-match="true"/>
<allow pkg="org.apache.kafka.clients.consumer" exact-match="true"/>
<allow pkg="org.apache.kafka.streams"/>
<subpackage name="examples">
<allow pkg="com.fasterxml.jackson" />
<allow pkg="org.apache.kafka.connect.json" />
</subpackage>
<subpackage name="perf">
<allow pkg="com.fasterxml.jackson.databind" />
</subpackage>
<subpackage name="integration">
<allow pkg="kafka.admin" />
<allow pkg="kafka.api" />
<allow pkg="kafka.server" />
<allow pkg="kafka.tools" />
<allow pkg="kafka.utils" />
<allow pkg="kafka.log" />
<allow pkg="scala" />
<allow class="kafka.zk.EmbeddedZookeeper"/>
</subpackage>
<subpackage name="test">
<allow pkg="kafka.admin" />
</subpackage>
<subpackage name="tools">
<allow pkg="kafka.tools" />
</subpackage>
<subpackage name="state">
<allow pkg="org.rocksdb" />
</subpackage>
<subpackage name="processor">
<subpackage name="internals">
<allow pkg="com.fasterxml.jackson" />
<allow pkg="kafka.utils" />
<allow pkg="org.apache.zookeeper" />
<allow pkg="org.apache.zookeeper" />
<allow pkg="org.apache.log4j" />
<subpackage name="testutil">
<allow pkg="org.apache.log4j" />
</subpackage>
</subpackage>
</subpackage>
</subpackage>
<subpackage name="jmh">
<allow pkg="org.openjdk.jmh.annotations" />
<allow pkg="org.openjdk.jmh.runner" />
<allow pkg="org.openjdk.jmh.runner.options" />
<allow pkg="org.openjdk.jmh.infra" />
<allow pkg="org.apache.kafka.common" />
<allow pkg="org.apache.kafka.clients" />
<allow pkg="org.apache.kafka.streams" />
<allow pkg="org.github.jamm" />
</subpackage>
<subpackage name="log4jappender">
<allow pkg="org.apache.log4j" />
<allow pkg="org.apache.kafka.clients" />
<allow pkg="org.apache.kafka.common" />
<allow pkg="org.apache.kafka.test" />
</subpackage>
<subpackage name="test">
<allow pkg="org.apache.kafka" />
<allow pkg="org.bouncycastle" />
</subpackage>
<subpackage name="connect">
<allow pkg="org.apache.kafka.common" />
<allow pkg="org.apache.kafka.connect.data" />
<allow pkg="org.apache.kafka.connect.errors" />
<allow pkg="org.apache.kafka.connect.header" />
<allow pkg="org.apache.kafka.connect.components"/>
<allow pkg="org.apache.kafka.clients" />
<allow pkg="org.apache.kafka.test"/>
<subpackage name="source">
<allow pkg="org.apache.kafka.connect.connector" />
<allow pkg="org.apache.kafka.connect.storage" />
</subpackage>
<subpackage name="sink">
<allow pkg="org.apache.kafka.clients.consumer" />
<allow pkg="org.apache.kafka.connect.connector" />
<allow pkg="org.apache.kafka.connect.storage" />
</subpackage>
<subpackage name="converters">
<allow pkg="org.apache.kafka.connect.storage" />
</subpackage>
<subpackage name="connector.policy">
<allow pkg="org.apache.kafka.connect.health" />
<allow pkg="org.apache.kafka.connect.connector" />
<!-- for testing -->
<allow pkg="org.apache.kafka.connect.runtime" />
</subpackage>
<subpackage name="rest">
<allow pkg="org.apache.kafka.connect.health" />
<allow pkg="javax.ws.rs" />
<allow pkg= "javax.security.auth"/>
<subpackage name="basic">
<allow pkg="org.apache.kafka.connect.rest"/>
</subpackage>
</subpackage>
<subpackage name="mirror">
<allow pkg="org.apache.kafka.clients.consumer" />
<allow pkg="org.apache.kafka.connect.source" />
<allow pkg="org.apache.kafka.connect.sink" />
<allow pkg="org.apache.kafka.connect.storage" />
<allow pkg="org.apache.kafka.connect.connector" />
<allow pkg="org.apache.kafka.connect.runtime" />
<allow pkg="org.apache.kafka.connect.runtime.distributed" />
<allow pkg="org.apache.kafka.connect.util" />
<allow pkg="org.apache.kafka.connect.converters" />
<allow pkg="net.sourceforge.argparse4j" />
<!-- for tests -->
<allow pkg="org.apache.kafka.connect.integration" />
<allow pkg="org.apache.kafka.connect.mirror" />
</subpackage>
<subpackage name="runtime">
<allow pkg="org.apache.kafka.connect" />
<allow pkg="org.reflections"/>
<allow pkg="org.reflections.util"/>
<allow pkg="javax.crypto"/>
<subpackage name="rest">
<allow pkg="org.eclipse.jetty" />
<allow pkg="javax.ws.rs" />
<allow pkg="javax.servlet" />
<allow pkg="org.glassfish.jersey" />
<allow pkg="com.fasterxml.jackson" />
<allow pkg="org.apache.http"/>
<subpackage name="resources">
<allow pkg="org.apache.log4j" />
</subpackage>
</subpackage>
<subpackage name="isolation">
<allow pkg="com.fasterxml.jackson" />
<allow pkg="org.apache.maven.artifact.versioning" />
<allow pkg="javax.tools" />
</subpackage>
<subpackage name="distributed">
<allow pkg="javax.ws.rs.core" />
</subpackage>
</subpackage>
<subpackage name="cli">
<allow pkg="org.apache.kafka.connect.runtime" />
<allow pkg="org.apache.kafka.connect.storage" />
<allow pkg="org.apache.kafka.connect.util" />
<allow pkg="org.apache.kafka.common" />
<allow pkg="org.apache.kafka.connect.connector.policy" />
</subpackage>
<subpackage name="storage">
<allow pkg="org.apache.kafka.connect" />
<allow pkg="org.apache.kafka.common.serialization" />
<allow pkg="javax.crypto.spec"/>
</subpackage>
<subpackage name="util">
<allow pkg="org.apache.kafka.connect" />
<allow pkg="org.reflections.vfs" />
<!-- for annotations to avoid code duplication -->
<allow pkg="com.fasterxml.jackson.annotation" />
<allow pkg="com.fasterxml.jackson.databind" />
<subpackage name="clusters">
<allow pkg="kafka.server" />
<allow pkg="kafka.zk" />
<allow pkg="kafka.utils" />
<allow class="javax.servlet.http.HttpServletResponse" />
<allow class="javax.ws.rs.core.Response" />
<allow pkg="com.fasterxml.jackson.core.type" />
</subpackage>
</subpackage>
<subpackage name="integration">
<allow pkg="org.apache.kafka.connect.util.clusters" />
<allow pkg="org.apache.kafka.connect" />
<allow pkg="org.apache.kafka.tools" />
<allow pkg="javax.ws.rs" />
</subpackage>
<subpackage name="json">
<allow pkg="com.fasterxml.jackson" />
<allow pkg="org.apache.kafka.common.serialization" />
<allow pkg="org.apache.kafka.common.errors" />
<allow pkg="org.apache.kafka.connect.storage" />
</subpackage>
<subpackage name="file">
<allow pkg="org.apache.kafka.connect" />
<allow pkg="org.apache.kafka.clients.consumer" />
<!-- for tests -->
<allow pkg="org.easymock" />
<allow pkg="org.powermock" />
</subpackage>
<subpackage name="tools">
<allow pkg="org.apache.kafka.connect" />
<allow pkg="org.apache.kafka.tools" />
<allow pkg="com.fasterxml.jackson" />
</subpackage>
<subpackage name="transforms">
<allow class="org.apache.kafka.connect.connector.ConnectRecord" />
<allow class="org.apache.kafka.connect.source.SourceRecord" />
<allow class="org.apache.kafka.connect.sink.SinkRecord" />
<allow pkg="org.apache.kafka.connect.transforms.util" />
</subpackage>
</subpackage>
</import-control>

16
checkstyle/java.header Normal file
View File

@@ -0,0 +1,16 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

267
checkstyle/suppressions.xml Normal file
View File

@@ -0,0 +1,267 @@
<!DOCTYPE suppressions PUBLIC
"-//Puppy Crawl//DTD Suppressions 1.1//EN"
"http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
<suppressions>
<!-- Note that [/\\] must be used as the path separator for cross-platform support -->
<!-- Generator -->
<suppress checks="CyclomaticComplexity|BooleanExpressionComplexity"
files="(SchemaGenerator|MessageDataGenerator|FieldSpec).java"/>
<suppress checks="NPathComplexity"
files="(MessageDataGenerator|FieldSpec).java"/>
<suppress checks="JavaNCSS"
files="(ApiMessageType).java|MessageDataGenerator.java"/>
<suppress checks="MethodLength"
files="MessageDataGenerator.java"/>
<!-- Clients -->
<suppress checks="ClassFanOutComplexity"
files="(Fetcher|Sender|SenderTest|ConsumerCoordinator|KafkaConsumer|KafkaProducer|Utils|TransactionManager|TransactionManagerTest|KafkaAdminClient|NetworkClient|Admin).java"/>
<suppress checks="ClassFanOutComplexity"
files="(SaslServerAuthenticator|SaslAuthenticatorTest).java"/>
<suppress checks="ClassFanOutComplexity"
files="Errors.java"/>
<suppress checks="ClassFanOutComplexity"
files="Utils.java"/>
<suppress checks="ClassFanOutComplexity"
files="AbstractRequest.java"/>
<suppress checks="ClassFanOutComplexity"
files="AbstractResponse.java"/>
<suppress checks="MethodLength"
files="KerberosLogin.java|RequestResponseTest.java|ConnectMetricsRegistry.java|KafkaConsumer.java"/>
<suppress checks="ParameterNumber"
files="NetworkClient.java|FieldSpec.java"/>
<suppress checks="ParameterNumber"
files="KafkaConsumer.java"/>
<suppress checks="ParameterNumber"
files="Fetcher.java"/>
<suppress checks="ParameterNumber"
files="Sender.java"/>
<suppress checks="ParameterNumber"
files="ConfigDef.java"/>
<suppress checks="ParameterNumber"
files="DefaultRecordBatch.java"/>
<suppress checks="ParameterNumber"
files="Sender.java"/>
<suppress checks="ClassDataAbstractionCoupling"
files="(KafkaConsumer|ConsumerCoordinator|Fetcher|KafkaProducer|AbstractRequest|AbstractResponse|TransactionManager|Admin|KafkaAdminClient).java"/>
<suppress checks="ClassDataAbstractionCoupling"
files="(Errors|SaslAuthenticatorTest|AgentTest|CoordinatorTest).java"/>
<suppress checks="BooleanExpressionComplexity"
files="(Utils|Topic|KafkaLZ4BlockOutputStream|AclData|JoinGroupRequest).java"/>
<suppress checks="CyclomaticComplexity"
files="(ConsumerCoordinator|Fetcher|Sender|KafkaProducer|BufferPool|ConfigDef|RecordAccumulator|KerberosLogin|AbstractRequest|AbstractResponse|Selector|SslFactory|SslTransportLayer|SaslClientAuthenticator|SaslClientCallbackHandler|SaslServerAuthenticator|AbstractCoordinator|TransactionManager).java"/>
<suppress checks="JavaNCSS"
files="(AbstractRequest|KerberosLogin|WorkerSinkTaskTest|TransactionManagerTest|SenderTest|KafkaAdminClient|ConsumerCoordinatorTest).java"/>
<suppress checks="NPathComplexity"
files="(BufferPool|Fetcher|MetricName|Node|ConfigDef|RecordBatch|SslFactory|SslTransportLayer|MetadataResponse|KerberosLogin|Selector|Sender|Serdes|TokenInformation|Agent|Values|PluginUtils|MiniTrogdorCluster|TasksRequest|KafkaProducer).java"/>
<suppress checks="(JavaNCSS|CyclomaticComplexity|MethodLength)"
files="CoordinatorClient.java"/>
<suppress checks="(UnnecessaryParentheses|BooleanExpressionComplexity|CyclomaticComplexity|WhitespaceAfter|LocalVariableName)"
files="Murmur3.java"/>
<suppress checks="(NPathComplexity|ClassFanOutComplexity|CyclomaticComplexity|ClassDataAbstractionCoupling|LocalVariableName|MemberName|ParameterName|MethodLength|JavaNCSS)"
files="clients[\\/]src[\\/](generated|generated-test)[\\/].+.java$"/>
<suppress checks="NPathComplexity"
files="MessageTest.java"/>
<!-- clients tests -->
<suppress checks="ClassDataAbstractionCoupling"
files="(Sender|Fetcher|KafkaConsumer|Metrics|RequestResponse|TransactionManager|KafkaAdminClient|Message|KafkaProducer)Test.java"/>
<suppress checks="ClassFanOutComplexity"
files="(ConsumerCoordinator|KafkaConsumer|RequestResponse|Fetcher|KafkaAdminClient|Message|KafkaProducer)Test.java"/>
<suppress checks="ClassFanOutComplexity"
files="MockAdminClient.java"/>
<suppress checks="JavaNCSS"
files="RequestResponseTest.java|FetcherTest.java"/>
<suppress checks="NPathComplexity"
files="MemoryRecordsTest|MetricsTest"/>
<suppress checks="(WhitespaceAround|LocalVariableName|ImportControl|AvoidStarImport)"
files="Murmur3Test.java"/>
<!-- Connect -->
<suppress checks="ClassFanOutComplexity"
files="DistributedHerder(|Test).java"/>
<suppress checks="ClassFanOutComplexity"
files="Worker.java"/>
<suppress checks="MethodLength"
files="(KafkaConfigBackingStore|RequestResponseTest|WorkerSinkTaskTest).java"/>
<suppress checks="ParameterNumber"
files="(WorkerSinkTask|WorkerSourceTask).java"/>
<suppress checks="ParameterNumber"
files="WorkerCoordinator.java"/>
<suppress checks="ParameterNumber"
files="ConfigKeyInfo.java"/>
<suppress checks="ClassDataAbstractionCoupling"
files="(RestServer|AbstractHerder|DistributedHerder).java"/>
<suppress checks="BooleanExpressionComplexity"
files="JsonConverter.java"/>
<suppress checks="CyclomaticComplexity"
files="ConnectRecord.java"/>
<suppress checks="CyclomaticComplexity"
files="JsonConverter.java"/>
<suppress checks="CyclomaticComplexity"
files="FileStreamSourceTask.java"/>
<suppress checks="CyclomaticComplexity"
files="DistributedHerder.java"/>
<suppress checks="CyclomaticComplexity"
files="KafkaConfigBackingStore.java"/>
<suppress checks="CyclomaticComplexity"
files="(Values|ConnectHeader|ConnectHeaders).java"/>
<suppress checks="CyclomaticComplexity"
files="RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java"/>
<suppress checks="JavaNCSS"
files="KafkaConfigBackingStore.java"/>
<suppress checks="JavaNCSS"
files="Values.java"/>
<suppress checks="NPathComplexity"
files="(DistributedHerder|RestClient|JsonConverter|KafkaConfigBackingStore|FileStreamSourceTask).java"/>
<suppress checks="MethodLength"
files="Values.java"/>
<!-- connect tests-->
<suppress checks="ClassDataAbstractionCoupling"
files="(DistributedHerder|KafkaBasedLog)Test.java"/>
<suppress checks="ClassFanOutComplexity"
files="(WorkerSinkTask|WorkerSourceTask)Test.java"/>
<!-- Streams -->
<suppress checks="ClassFanOutComplexity"
files="(TopologyBuilder|KafkaStreams|KStreamImpl|KTableImpl|StreamThread|StreamTask).java"/>
<suppress checks="MethodLength"
files="(KTableImpl|StreamsPartitionAssignor.java)"/>
<suppress checks="ParameterNumber"
files="StreamTask.java"/>
<suppress checks="ParameterNumber"
files="RocksDBWindowStoreSupplier.java"/>
<suppress checks="ClassDataAbstractionCoupling"
files="(TopologyBuilder|KStreamImpl|StreamsPartitionAssignor|KafkaStreams|KTableImpl).java"/>
<suppress checks="CyclomaticComplexity"
files="TopologyBuilder.java"/>
<suppress checks="CyclomaticComplexity"
files="StreamsPartitionAssignor.java"/>
<suppress checks="CyclomaticComplexity"
files="StreamThread.java"/>
<suppress checks="JavaNCSS"
files="StreamsPartitionAssignor.java"/>
<suppress checks="NPathComplexity"
files="(ProcessorStateManager|InternalTopologyBuilder|StreamsPartitionAssignor|StreamThread).java"/>
<suppress checks="(FinalLocalVariable|UnnecessaryParentheses|BooleanExpressionComplexity|CyclomaticComplexity|WhitespaceAfter|LocalVariableName)"
files="Murmur3.java"/>
<!-- suppress FinalLocalVariable outside of the streams package. -->
<suppress checks="FinalLocalVariable"
files="^(?!.*[\\/]org[\\/]apache[\\/]kafka[\\/]streams[\\/].*$)"/>
<!-- generated code -->
<suppress checks="(NPathComplexity|ClassFanOutComplexity|CyclomaticComplexity|ClassDataAbstractionCoupling|FinalLocalVariable|LocalVariableName|MemberName|ParameterName|MethodLength|JavaNCSS)"
files="streams[\\/]src[\\/](generated|generated-test)[\\/].+.java$"/>
<!-- Streams tests -->
<suppress checks="ClassFanOutComplexity"
files="(StreamThreadTest|StreamTaskTest|ProcessorTopologyTestDriver).java"/>
<suppress checks="MethodLength"
files="KStreamKTableJoinIntegrationTest.java"/>
<suppress checks="MethodLength"
files="KStreamKStreamJoinTest.java"/>
<suppress checks="MethodLength"
files="KStreamWindowAggregateTest.java"/>
<suppress checks="MethodLength"
files="RocksDBWindowStoreTest.java"/>
<suppress checks="MemberName"
files="StreamsPartitionAssignorTest.java"/>
<suppress checks="ClassDataAbstractionCoupling"
files=".*[/\\]streams[/\\].*test[/\\].*.java"/>
<suppress checks="BooleanExpressionComplexity"
files="SmokeTestDriver.java"/>
<suppress checks="CyclomaticComplexity"
files="KStreamKStreamJoinTest.java|KTableKTableForeignKeyJoinIntegrationTest.java"/>
<suppress checks="CyclomaticComplexity"
files="RelationalSmokeTest.java|SmokeTestDriver.java"/>
<suppress checks="JavaNCSS"
files="KStreamKStreamJoinTest.java"/>
<suppress checks="JavaNCSS"
files="SmokeTestDriver.java"/>
<suppress checks="NPathComplexity"
files="EosTestDriver|KStreamKStreamJoinTest.java|RelationalSmokeTest.java|SmokeTestDriver.java|KStreamKStreamLeftJoinTest.java|KTableKTableForeignKeyJoinIntegrationTest.java"/>
<suppress checks="(FinalLocalVariable|WhitespaceAround|LocalVariableName|ImportControl|AvoidStarImport)"
files="Murmur3Test.java"/>
<!-- Streams Test-Utils -->
<suppress checks="ClassFanOutComplexity"
files="TopologyTestDriver.java"/>
<suppress checks="ClassDataAbstractionCoupling"
files="TopologyTestDriver.java"/>
<!-- Tools -->
<suppress checks="ClassDataAbstractionCoupling"
files="VerifiableConsumer.java"/>
<suppress checks="CyclomaticComplexity"
files="(StreamsResetter|ProducerPerformance|Agent).java"/>
<suppress checks="BooleanExpressionComplexity"
files="StreamsResetter.java"/>
<suppress checks="NPathComplexity"
files="(ProducerPerformance|StreamsResetter|Agent|TransactionalMessageCopier).java"/>
<suppress checks="ImportControl"
files="SignalLogger.java"/>
<suppress checks="IllegalImport"
files="SignalLogger.java"/>
<suppress checks="ParameterNumber"
files="ProduceBenchSpec.java"/>
<suppress checks="ParameterNumber"
files="SustainedConnectionSpec.java"/>
<!-- Log4J-Appender -->
<suppress checks="CyclomaticComplexity"
files="KafkaLog4jAppender.java"/>
<suppress checks="NPathComplexity"
files="KafkaLog4jAppender.java"/>
<suppress checks="JavaNCSS"
files="RequestResponseTest.java"/>
</suppressions>

1
clients/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
/bin/

View File

@@ -0,0 +1,952 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.NavigableMap;
import java.util.Objects;
import java.util.TreeMap;
import java.util.UUID;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.Bytes;
import static java.util.Map.Entry;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
public class SimpleExampleMessageData implements ApiMessage {
private UUID processId;
private List<Integer> myTaggedIntArray;
private String myNullableString;
private short myInt16;
private double myFloat64;
private String myString;
private byte[] myBytes;
private UUID taggedUuid;
private long taggedLong;
private ByteBuffer zeroCopyByteBuffer;
private ByteBuffer nullableZeroCopyByteBuffer;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
);
public static final Schema SCHEMA_1 =
new Schema(
new Field("process_id", Type.UUID, ""),
new Field("zero_copy_byte_buffer", Type.COMPACT_BYTES, ""),
new Field("nullable_zero_copy_byte_buffer", Type.COMPACT_NULLABLE_BYTES, ""),
TaggedFieldsSection.of(
0, new Field("my_tagged_int_array", new CompactArrayOf(Type.INT32), ""),
1, new Field("my_nullable_string", Type.COMPACT_NULLABLE_STRING, ""),
2, new Field("my_int16", Type.INT16, ""),
3, new Field("my_float64", Type.FLOAT64, ""),
4, new Field("my_string", Type.COMPACT_STRING, ""),
5, new Field("my_bytes", Type.COMPACT_NULLABLE_BYTES, ""),
6, new Field("tagged_uuid", Type.UUID, ""),
7, new Field("tagged_long", Type.INT64, "")
)
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public SimpleExampleMessageData(Readable _readable, short _version) {
read(_readable, _version);
}
public SimpleExampleMessageData(Struct struct, short _version) {
fromStruct(struct, _version);
}
public SimpleExampleMessageData() {
this.processId = MessageUtil.ZERO_UUID;
this.myTaggedIntArray = new ArrayList<Integer>();
this.myNullableString = null;
this.myInt16 = (short) 123;
this.myFloat64 = Double.parseDouble("12.34");
this.myString = "";
this.myBytes = Bytes.EMPTY;
this.taggedUuid = UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367");
this.taggedLong = 0xcafcacafcacafcaL;
this.zeroCopyByteBuffer = ByteUtils.EMPTY_BUF;
this.nullableZeroCopyByteBuffer = ByteUtils.EMPTY_BUF;
}
@Override
public short apiKey() {
return -1;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
if (_version >= 1) {
this.processId = _readable.readUUID();
} else {
this.processId = MessageUtil.ZERO_UUID;
}
{
this.myTaggedIntArray = new ArrayList<Integer>();
}
{
this.myNullableString = null;
}
this.myInt16 = (short) 123;
this.myFloat64 = Double.parseDouble("12.34");
{
this.myString = "";
}
{
this.myBytes = Bytes.EMPTY;
}
this.taggedUuid = UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367");
this.taggedLong = 0xcafcacafcacafcaL;
if (_version >= 1) {
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
throw new RuntimeException("non-nullable field zeroCopyByteBuffer was serialized as null");
} else {
this.zeroCopyByteBuffer = _readable.readByteBuffer(length);
}
} else {
this.zeroCopyByteBuffer = ByteUtils.EMPTY_BUF;
}
if (_version >= 1) {
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
this.nullableZeroCopyByteBuffer = null;
} else {
this.nullableZeroCopyByteBuffer = _readable.readByteBuffer(length);
}
} else {
this.nullableZeroCopyByteBuffer = ByteUtils.EMPTY_BUF;
}
this._unknownTaggedFields = null;
if (_version >= 1) {
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
case 0: {
int arrayLength;
arrayLength = _readable.readUnsignedVarint() - 1;
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field myTaggedIntArray was serialized as null");
} else {
ArrayList<Integer> newCollection = new ArrayList<Integer>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(_readable.readInt());
}
this.myTaggedIntArray = newCollection;
}
break;
}
case 1: {
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
this.myNullableString = null;
} else if (length > 0x7fff) {
throw new RuntimeException("string field myNullableString had invalid length " + length);
} else {
this.myNullableString = _readable.readString(length);
}
break;
}
case 2: {
this.myInt16 = _readable.readShort();
break;
}
case 3: {
this.myFloat64 = _readable.readDouble();
break;
}
case 4: {
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
throw new RuntimeException("non-nullable field myString was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field myString had invalid length " + length);
} else {
this.myString = _readable.readString(length);
}
break;
}
case 5: {
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
this.myBytes = null;
} else {
byte[] newBytes = new byte[length];
_readable.readArray(newBytes);
this.myBytes = newBytes;
}
break;
}
case 6: {
this.taggedUuid = _readable.readUUID();
break;
}
case 7: {
this.taggedLong = _readable.readLong();
break;
}
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
if (_version >= 1) {
_writable.writeUUID(processId);
} else {
if (processId != MessageUtil.ZERO_UUID) {
throw new UnsupportedVersionException("Attempted to write a non-default processId at version " + _version);
}
}
if (_version >= 1) {
if (!myTaggedIntArray.isEmpty()) {
_numTaggedFields++;
}
} else {
if (!myTaggedIntArray.isEmpty()) {
throw new UnsupportedVersionException("Attempted to write a non-default myTaggedIntArray at version " + _version);
}
}
if (_version >= 1) {
if (myNullableString != null) {
_numTaggedFields++;
}
} else {
if (myNullableString != null) {
throw new UnsupportedVersionException("Attempted to write a non-default myNullableString at version " + _version);
}
}
if (_version >= 1) {
if (myInt16 != (short) 123) {
_numTaggedFields++;
}
} else {
if (myInt16 != (short) 123) {
throw new UnsupportedVersionException("Attempted to write a non-default myInt16 at version " + _version);
}
}
if (_version >= 1) {
if (myFloat64 != Double.parseDouble("12.34")) {
_numTaggedFields++;
}
} else {
if (myFloat64 != Double.parseDouble("12.34")) {
throw new UnsupportedVersionException("Attempted to write a non-default myFloat64 at version " + _version);
}
}
if (_version >= 1) {
if (!myString.equals("")) {
_numTaggedFields++;
}
} else {
if (!myString.equals("")) {
throw new UnsupportedVersionException("Attempted to write a non-default myString at version " + _version);
}
}
if (_version >= 1) {
if (myBytes == null || myBytes.length != 0) {
_numTaggedFields++;
}
} else {
if (myBytes == null || myBytes.length != 0) {
throw new UnsupportedVersionException("Attempted to write a non-default myBytes at version " + _version);
}
}
if (_version >= 1) {
if (taggedUuid != UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367")) {
_numTaggedFields++;
}
} else {
if (taggedUuid != UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367")) {
throw new UnsupportedVersionException("Attempted to write a non-default taggedUuid at version " + _version);
}
}
if (_version >= 1) {
if (taggedLong != 0xcafcacafcacafcaL) {
_numTaggedFields++;
}
} else {
if (taggedLong != 0xcafcacafcacafcaL) {
throw new UnsupportedVersionException("Attempted to write a non-default taggedLong at version " + _version);
}
}
if (_version >= 1) {
_writable.writeUnsignedVarint(zeroCopyByteBuffer.remaining() + 1);
_writable.writeByteBuffer(zeroCopyByteBuffer);
} else {
if (zeroCopyByteBuffer.hasRemaining()) {
throw new UnsupportedVersionException("Attempted to write a non-default zeroCopyByteBuffer at version " + _version);
}
}
if (_version >= 1) {
if (nullableZeroCopyByteBuffer == null) {
_writable.writeUnsignedVarint(0);
} else {
_writable.writeUnsignedVarint(nullableZeroCopyByteBuffer.remaining() + 1);
_writable.writeByteBuffer(nullableZeroCopyByteBuffer);
}
} else {
if (nullableZeroCopyByteBuffer == null || nullableZeroCopyByteBuffer.remaining() > 0) {
throw new UnsupportedVersionException("Attempted to write a non-default nullableZeroCopyByteBuffer at version " + _version);
}
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_version >= 1) {
_writable.writeUnsignedVarint(_numTaggedFields);
{
if (!myTaggedIntArray.isEmpty()) {
_writable.writeUnsignedVarint(0);
_writable.writeUnsignedVarint(_cache.getArraySizeInBytes(this.myTaggedIntArray));
_writable.writeUnsignedVarint(myTaggedIntArray.size() + 1);
for (Integer myTaggedIntArrayElement : myTaggedIntArray) {
_writable.writeInt(myTaggedIntArrayElement);
}
}
}
if (myNullableString != null) {
_writable.writeUnsignedVarint(1);
byte[] _stringBytes = _cache.getSerializedValue(this.myNullableString);
_writable.writeUnsignedVarint(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
_writable.writeUnsignedVarint(_stringBytes.length + 1);
_writable.writeByteArray(_stringBytes);
}
{
if (myInt16 != (short) 123) {
_writable.writeUnsignedVarint(2);
_writable.writeUnsignedVarint(2);
_writable.writeShort(myInt16);
}
}
{
if (myFloat64 != Double.parseDouble("12.34")) {
_writable.writeUnsignedVarint(3);
_writable.writeUnsignedVarint(8);
_writable.writeDouble(myFloat64);
}
}
{
if (!myString.equals("")) {
_writable.writeUnsignedVarint(4);
byte[] _stringBytes = _cache.getSerializedValue(this.myString);
_writable.writeUnsignedVarint(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
_writable.writeUnsignedVarint(_stringBytes.length + 1);
_writable.writeByteArray(_stringBytes);
}
}
if (myBytes == null) {
_writable.writeUnsignedVarint(5);
_writable.writeUnsignedVarint(1);
_writable.writeUnsignedVarint(0);
} else {
if (myBytes.length != 0) {
_writable.writeUnsignedVarint(5);
_writable.writeUnsignedVarint(this.myBytes.length + ByteUtils.sizeOfUnsignedVarint(this.myBytes.length + 1));
_writable.writeUnsignedVarint(this.myBytes.length + 1);
_writable.writeByteArray(this.myBytes);
}
}
{
if (taggedUuid != UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367")) {
_writable.writeUnsignedVarint(6);
_writable.writeUnsignedVarint(16);
_writable.writeUUID(taggedUuid);
}
}
{
if (taggedLong != 0xcafcacafcacafcaL) {
_writable.writeUnsignedVarint(7);
_writable.writeUnsignedVarint(8);
_writable.writeLong(taggedLong);
}
}
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
NavigableMap<Integer, Object> _taggedFields = null;
this._unknownTaggedFields = null;
if (_version >= 1) {
_taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
}
if (_version >= 1) {
this.processId = struct.getUUID("process_id");
} else {
this.processId = MessageUtil.ZERO_UUID;
}
if (_version >= 1) {
if (_taggedFields.containsKey(0)) {
Object[] _nestedObjects = (Object[]) _taggedFields.remove(0);
this.myTaggedIntArray = new ArrayList<Integer>(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.myTaggedIntArray.add((Integer) nestedObject);
}
} else {
this.myTaggedIntArray = new ArrayList<Integer>();
}
} else {
this.myTaggedIntArray = new ArrayList<Integer>();
}
if (_version >= 1) {
if (_taggedFields.containsKey(1)) {
this.myNullableString = (String) _taggedFields.remove(1);
} else {
this.myNullableString = null;
}
} else {
this.myNullableString = null;
}
if (_version >= 1) {
if (_taggedFields.containsKey(2)) {
this.myInt16 = (Short) _taggedFields.remove(2);
} else {
this.myInt16 = (short) 123;
}
} else {
this.myInt16 = (short) 123;
}
if (_version >= 1) {
if (_taggedFields.containsKey(3)) {
this.myFloat64 = (Double) _taggedFields.remove(3);
} else {
this.myFloat64 = Double.parseDouble("12.34");
}
} else {
this.myFloat64 = Double.parseDouble("12.34");
}
if (_version >= 1) {
if (_taggedFields.containsKey(4)) {
this.myString = (String) _taggedFields.remove(4);
} else {
this.myString = "";
}
} else {
this.myString = "";
}
if (_version >= 1) {
if (_taggedFields.containsKey(5)) {
this.myBytes = MessageUtil.byteBufferToArray((ByteBuffer) _taggedFields.remove(5));
} else {
this.myBytes = Bytes.EMPTY;
}
} else {
this.myBytes = Bytes.EMPTY;
}
if (_version >= 1) {
if (_taggedFields.containsKey(6)) {
this.taggedUuid = (UUID) _taggedFields.remove(6);
} else {
this.taggedUuid = UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367");
}
} else {
this.taggedUuid = UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367");
}
if (_version >= 1) {
if (_taggedFields.containsKey(7)) {
this.taggedLong = (Long) _taggedFields.remove(7);
} else {
this.taggedLong = 0xcafcacafcacafcaL;
}
} else {
this.taggedLong = 0xcafcacafcacafcaL;
}
if (_version >= 1) {
this.zeroCopyByteBuffer = struct.getBytes("zero_copy_byte_buffer");
} else {
this.zeroCopyByteBuffer = ByteUtils.EMPTY_BUF;
}
if (_version >= 1) {
this.nullableZeroCopyByteBuffer = struct.getBytes("nullable_zero_copy_byte_buffer");
} else {
this.nullableZeroCopyByteBuffer = ByteUtils.EMPTY_BUF;
}
if (_version >= 1) {
if (!_taggedFields.isEmpty()) {
this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
}
}
}
}
@Override
public Struct toStruct(short _version) {
TreeMap<Integer, Object> _taggedFields = null;
if (_version >= 1) {
_taggedFields = new TreeMap<>();
}
Struct struct = new Struct(SCHEMAS[_version]);
if (_version >= 1) {
struct.set("process_id", this.processId);
} else {
if (processId != MessageUtil.ZERO_UUID) {
throw new UnsupportedVersionException("Attempted to write a non-default processId at version " + _version);
}
}
if (_version >= 1) {
if (!myTaggedIntArray.isEmpty()) {
Integer[] _nestedObjects = new Integer[myTaggedIntArray.size()];
int i = 0;
for (Integer element : this.myTaggedIntArray) {
_nestedObjects[i++] = element;
}
_taggedFields.put(0, _nestedObjects);
}
} else {
if (!myTaggedIntArray.isEmpty()) {
throw new UnsupportedVersionException("Attempted to write a non-default myTaggedIntArray at version " + _version);
}
}
if (_version >= 1) {
if (myNullableString != null) {
_taggedFields.put(1, myNullableString);
}
} else {
if (myNullableString != null) {
throw new UnsupportedVersionException("Attempted to write a non-default myNullableString at version " + _version);
}
}
if (_version >= 1) {
if (myInt16 != (short) 123) {
_taggedFields.put(2, myInt16);
}
} else {
if (myInt16 != (short) 123) {
throw new UnsupportedVersionException("Attempted to write a non-default myInt16 at version " + _version);
}
}
if (_version >= 1) {
if (myFloat64 != Double.parseDouble("12.34")) {
_taggedFields.put(3, myFloat64);
}
} else {
if (myFloat64 != Double.parseDouble("12.34")) {
throw new UnsupportedVersionException("Attempted to write a non-default myFloat64 at version " + _version);
}
}
if (_version >= 1) {
if (!myString.equals("")) {
_taggedFields.put(4, myString);
}
} else {
if (!myString.equals("")) {
throw new UnsupportedVersionException("Attempted to write a non-default myString at version " + _version);
}
}
if (_version >= 1) {
if (myBytes == null || myBytes.length != 0) {
_taggedFields.put(5, (myBytes == null) ? null : ByteBuffer.wrap(myBytes));
}
} else {
if (myBytes == null || myBytes.length != 0) {
throw new UnsupportedVersionException("Attempted to write a non-default myBytes at version " + _version);
}
}
if (_version >= 1) {
if (taggedUuid != UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367")) {
_taggedFields.put(6, taggedUuid);
}
} else {
if (taggedUuid != UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367")) {
throw new UnsupportedVersionException("Attempted to write a non-default taggedUuid at version " + _version);
}
}
if (_version >= 1) {
if (taggedLong != 0xcafcacafcacafcaL) {
_taggedFields.put(7, taggedLong);
}
} else {
if (taggedLong != 0xcafcacafcacafcaL) {
throw new UnsupportedVersionException("Attempted to write a non-default taggedLong at version " + _version);
}
}
if (_version >= 1) {
struct.set("zero_copy_byte_buffer", this.zeroCopyByteBuffer);
} else {
if (zeroCopyByteBuffer.hasRemaining()) {
throw new UnsupportedVersionException("Attempted to write a non-default zeroCopyByteBuffer at version " + _version);
}
}
if (_version >= 1) {
struct.set("nullable_zero_copy_byte_buffer", this.nullableZeroCopyByteBuffer);
} else {
if (nullableZeroCopyByteBuffer == null || nullableZeroCopyByteBuffer.remaining() > 0) {
throw new UnsupportedVersionException("Attempted to write a non-default nullableZeroCopyByteBuffer at version " + _version);
}
}
if (_version >= 1) {
struct.set("_tagged_fields", _taggedFields);
}
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version >= 1) {
_size += 16;
}
if (_version >= 1) {
{
if (!myTaggedIntArray.isEmpty()) {
_numTaggedFields++;
_size += 1;
int _arraySize = 0;
_arraySize += ByteUtils.sizeOfUnsignedVarint(myTaggedIntArray.size() + 1);
_arraySize += myTaggedIntArray.size() * 4;
_cache.setArraySizeInBytes(myTaggedIntArray, _arraySize);
_size += _arraySize + ByteUtils.sizeOfUnsignedVarint(_arraySize);
}
}
}
if (_version >= 1) {
if (myNullableString == null) {
} else {
_numTaggedFields++;
_size += 1;
byte[] _stringBytes = myNullableString.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'myNullableString' field is too long to be serialized");
}
_cache.cacheSerializedValue(myNullableString, _stringBytes);
int _stringPrefixSize = ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1);
_size += _stringBytes.length + _stringPrefixSize + ByteUtils.sizeOfUnsignedVarint(_stringPrefixSize);
}
}
if (_version >= 1) {
if (myInt16 != (short) 123) {
_numTaggedFields++;
_size += 1;
_size += 1;
_size += 2;
}
}
if (_version >= 1) {
if (myFloat64 != Double.parseDouble("12.34")) {
_numTaggedFields++;
_size += 1;
_size += 1;
_size += 8;
}
}
if (_version >= 1) {
{
if (!myString.equals("")) {
_numTaggedFields++;
_size += 1;
byte[] _stringBytes = myString.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'myString' field is too long to be serialized");
}
_cache.cacheSerializedValue(myString, _stringBytes);
int _stringPrefixSize = ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1);
_size += _stringBytes.length + _stringPrefixSize + ByteUtils.sizeOfUnsignedVarint(_stringPrefixSize);
}
}
}
if (_version >= 1) {
if (myBytes == null) {
_numTaggedFields++;
_size += 1;
_size += 1;
_size += 1;
} else {
if (myBytes.length != 0) {
_numTaggedFields++;
_size += 1;
int _bytesSize = myBytes.length;
_bytesSize += ByteUtils.sizeOfUnsignedVarint(myBytes.length + 1);
_size += _bytesSize + ByteUtils.sizeOfUnsignedVarint(_bytesSize);
}
}
}
if (_version >= 1) {
if (taggedUuid != UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367")) {
_numTaggedFields++;
_size += 1;
_size += 1;
_size += 16;
}
}
if (_version >= 1) {
if (taggedLong != 0xcafcacafcacafcaL) {
_numTaggedFields++;
_size += 1;
_size += 1;
_size += 8;
}
}
if (_version >= 1) {
{
int _bytesSize = zeroCopyByteBuffer.remaining();
_bytesSize += ByteUtils.sizeOfUnsignedVarint(zeroCopyByteBuffer.remaining() + 1);
_size += _bytesSize;
}
}
if (_version >= 1) {
if (nullableZeroCopyByteBuffer == null) {
_size += 1;
} else {
int _bytesSize = nullableZeroCopyByteBuffer.remaining();
_bytesSize += ByteUtils.sizeOfUnsignedVarint(nullableZeroCopyByteBuffer.remaining() + 1);
_size += _bytesSize;
}
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_version >= 1) {
_size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof SimpleExampleMessageData)) return false;
SimpleExampleMessageData other = (SimpleExampleMessageData) obj;
if (!this.processId.equals(other.processId)) return false;
if (this.myTaggedIntArray == null) {
if (other.myTaggedIntArray != null) return false;
} else {
if (!this.myTaggedIntArray.equals(other.myTaggedIntArray)) return false;
}
if (this.myNullableString == null) {
if (other.myNullableString != null) return false;
} else {
if (!this.myNullableString.equals(other.myNullableString)) return false;
}
if (myInt16 != other.myInt16) return false;
if (myFloat64 != other.myFloat64) return false;
if (this.myString == null) {
if (other.myString != null) return false;
} else {
if (!this.myString.equals(other.myString)) return false;
}
if (!Arrays.equals(this.myBytes, other.myBytes)) return false;
if (!this.taggedUuid.equals(other.taggedUuid)) return false;
if (taggedLong != other.taggedLong) return false;
if (!Objects.equals(this.zeroCopyByteBuffer, other.zeroCopyByteBuffer)) return false;
if (!Objects.equals(this.nullableZeroCopyByteBuffer, other.nullableZeroCopyByteBuffer)) return false;
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + processId.hashCode();
hashCode = 31 * hashCode + (myTaggedIntArray == null ? 0 : myTaggedIntArray.hashCode());
hashCode = 31 * hashCode + (myNullableString == null ? 0 : myNullableString.hashCode());
hashCode = 31 * hashCode + myInt16;
hashCode = 31 * hashCode + Double.hashCode(myFloat64);
hashCode = 31 * hashCode + (myString == null ? 0 : myString.hashCode());
hashCode = 31 * hashCode + Arrays.hashCode(myBytes);
hashCode = 31 * hashCode + taggedUuid.hashCode();
hashCode = 31 * hashCode + ((int) (taggedLong >> 32) ^ (int) taggedLong);
hashCode = 31 * hashCode + Objects.hashCode(zeroCopyByteBuffer);
hashCode = 31 * hashCode + Objects.hashCode(nullableZeroCopyByteBuffer);
return hashCode;
}
@Override
public String toString() {
return "SimpleExampleMessageData("
+ ", myTaggedIntArray=" + MessageUtil.deepToString(myTaggedIntArray.iterator())
+ ", myNullableString=" + ((myNullableString == null) ? "null" : "'" + myNullableString.toString() + "'")
+ ", myInt16=" + myInt16
+ ", myFloat64=" + myFloat64
+ ", myString=" + ((myString == null) ? "null" : "'" + myString.toString() + "'")
+ ", myBytes=" + Arrays.toString(myBytes)
+ ", taggedLong=" + taggedLong
+ ", zeroCopyByteBuffer=" + zeroCopyByteBuffer
+ ", nullableZeroCopyByteBuffer=" + nullableZeroCopyByteBuffer
+ ")";
}
public UUID processId() {
return this.processId;
}
public List<Integer> myTaggedIntArray() {
return this.myTaggedIntArray;
}
public String myNullableString() {
return this.myNullableString;
}
public short myInt16() {
return this.myInt16;
}
public double myFloat64() {
return this.myFloat64;
}
public String myString() {
return this.myString;
}
public byte[] myBytes() {
return this.myBytes;
}
public UUID taggedUuid() {
return this.taggedUuid;
}
public long taggedLong() {
return this.taggedLong;
}
public ByteBuffer zeroCopyByteBuffer() {
return this.zeroCopyByteBuffer;
}
public ByteBuffer nullableZeroCopyByteBuffer() {
return this.nullableZeroCopyByteBuffer;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public SimpleExampleMessageData setProcessId(UUID v) {
this.processId = v;
return this;
}
public SimpleExampleMessageData setMyTaggedIntArray(List<Integer> v) {
this.myTaggedIntArray = v;
return this;
}
public SimpleExampleMessageData setMyNullableString(String v) {
this.myNullableString = v;
return this;
}
public SimpleExampleMessageData setMyInt16(short v) {
this.myInt16 = v;
return this;
}
public SimpleExampleMessageData setMyFloat64(double v) {
this.myFloat64 = v;
return this;
}
public SimpleExampleMessageData setMyString(String v) {
this.myString = v;
return this;
}
public SimpleExampleMessageData setMyBytes(byte[] v) {
this.myBytes = v;
return this;
}
public SimpleExampleMessageData setTaggedUuid(UUID v) {
this.taggedUuid = v;
return this;
}
public SimpleExampleMessageData setTaggedLong(long v) {
this.taggedLong = v;
return this;
}
public SimpleExampleMessageData setZeroCopyByteBuffer(ByteBuffer v) {
this.zeroCopyByteBuffer = v;
return this;
}
public SimpleExampleMessageData setNullableZeroCopyByteBuffer(ByteBuffer v) {
this.nullableZeroCopyByteBuffer = v;
return this;
}
}
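
A pattern worth noting in the generated code above: in flexible versions, compact strings, bytes, and arrays are length-prefixed with an unsigned varint holding length + 1, so a prefix of 0 encodes null and every reader subtracts 1 (the recurring readUnsignedVarint() - 1). Below is a minimal standalone sketch of that framing; the class and method names are illustrative and not part of Kafka's API.

import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Illustrative sketch (not Kafka API): nullable fields are framed as
// varint(length + 1); a prefix of 0 therefore means null.
public class VarintFramingSketch {
    static void writeUnsignedVarint(int value, ByteArrayOutputStream out) {
        while ((value & 0xffffff80) != 0) {   // emit 7 bits per byte,
            out.write((value & 0x7f) | 0x80); // high bit set = more follows
            value >>>= 7;
        }
        out.write(value);
    }

    static int readUnsignedVarint(ByteBuffer in) {
        int value = 0, shift = 0, b;
        while (((b = in.get()) & 0x80) != 0) {
            value |= (b & 0x7f) << shift;
            shift += 7;
        }
        return value | (b << shift);
    }

    public static void main(String[] args) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
        writeUnsignedVarint(payload.length + 1, out);   // non-null: length + 1
        out.write(payload, 0, payload.length);
        writeUnsignedVarint(0, out);                    // null field: prefix 0

        ByteBuffer in = ByteBuffer.wrap(out.toByteArray());
        int length = readUnsignedVarint(in) - 1;        // the same "- 1" as above
        byte[] decoded = new byte[length];
        in.get(decoded);
        System.out.println(new String(decoded, StandardCharsets.UTF_8)); // hello
        System.out.println(readUnsignedVarint(in) - 1);                  // -1, i.e. null
    }
}
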

View File

@@ -0,0 +1,281 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
public class AddOffsetsToTxnRequestData implements ApiMessage {
private String transactionalId;
private long producerId;
private short producerEpoch;
private String groupId;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("transactional_id", Type.STRING, "The transactional id corresponding to the transaction."),
new Field("producer_id", Type.INT64, "Current producer id in use by the transactional id."),
new Field("producer_epoch", Type.INT16, "Current epoch associated with the producer id."),
new Field("group_id", Type.STRING, "The unique group identifier.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AddOffsetsToTxnRequestData(Readable _readable, short _version) {
read(_readable, _version);
}
public AddOffsetsToTxnRequestData(Struct struct, short _version) {
fromStruct(struct, _version);
}
public AddOffsetsToTxnRequestData() {
this.transactionalId = "";
this.producerId = 0L;
this.producerEpoch = (short) 0;
this.groupId = "";
}
@Override
public short apiKey() {
return 25;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
{
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field transactionalId was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field transactionalId had invalid length " + length);
} else {
this.transactionalId = _readable.readString(length);
}
}
this.producerId = _readable.readLong();
this.producerEpoch = _readable.readShort();
{
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field groupId was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field groupId had invalid length " + length);
} else {
this.groupId = _readable.readString(length);
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
{
byte[] _stringBytes = _cache.getSerializedValue(transactionalId);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
_writable.writeLong(producerId);
_writable.writeShort(producerEpoch);
{
byte[] _stringBytes = _cache.getSerializedValue(groupId);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
this._unknownTaggedFields = null;
this.transactionalId = struct.getString("transactional_id");
this.producerId = struct.getLong("producer_id");
this.producerEpoch = struct.getShort("producer_epoch");
this.groupId = struct.getString("group_id");
}
@Override
public Struct toStruct(short _version) {
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("transactional_id", this.transactionalId);
struct.set("producer_id", this.producerId);
struct.set("producer_epoch", this.producerEpoch);
struct.set("group_id", this.groupId);
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
{
byte[] _stringBytes = transactionalId.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'transactionalId' field is too long to be serialized");
}
_cache.cacheSerializedValue(transactionalId, _stringBytes);
_size += _stringBytes.length + 2;
}
_size += 8;
_size += 2;
{
byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'groupId' field is too long to be serialized");
}
_cache.cacheSerializedValue(groupId, _stringBytes);
_size += _stringBytes.length + 2;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AddOffsetsToTxnRequestData)) return false;
AddOffsetsToTxnRequestData other = (AddOffsetsToTxnRequestData) obj;
if (this.transactionalId == null) {
if (other.transactionalId != null) return false;
} else {
if (!this.transactionalId.equals(other.transactionalId)) return false;
}
if (producerId != other.producerId) return false;
if (producerEpoch != other.producerEpoch) return false;
if (this.groupId == null) {
if (other.groupId != null) return false;
} else {
if (!this.groupId.equals(other.groupId)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (transactionalId == null ? 0 : transactionalId.hashCode());
hashCode = 31 * hashCode + ((int) (producerId >> 32) ^ (int) producerId);
hashCode = 31 * hashCode + producerEpoch;
hashCode = 31 * hashCode + (groupId == null ? 0 : groupId.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AddOffsetsToTxnRequestData("
+ "transactionalId=" + ((transactionalId == null) ? "null" : "'" + transactionalId.toString() + "'")
+ ", producerId=" + producerId
+ ", producerEpoch=" + producerEpoch
+ ", groupId=" + ((groupId == null) ? "null" : "'" + groupId.toString() + "'")
+ ")";
}
public String transactionalId() {
return this.transactionalId;
}
public long producerId() {
return this.producerId;
}
public short producerEpoch() {
return this.producerEpoch;
}
public String groupId() {
return this.groupId;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AddOffsetsToTxnRequestData setTransactionalId(String v) {
this.transactionalId = v;
return this;
}
public AddOffsetsToTxnRequestData setProducerId(long v) {
this.producerId = v;
return this;
}
public AddOffsetsToTxnRequestData setProducerEpoch(short v) {
this.producerEpoch = v;
return this;
}
public AddOffsetsToTxnRequestData setGroupId(String v) {
this.groupId = v;
return this;
}
}
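
Every generated setter returns this, so a request body can be populated fluently and then measured with size(). A minimal usage sketch, assuming the generated classes and ObjectSerializationCache (with its no-argument constructor) are on the classpath; the ids below are invented values:

import org.apache.kafka.common.message.AddOffsetsToTxnRequestData;
import org.apache.kafka.common.protocol.ObjectSerializationCache;

public class AddOffsetsToTxnRequestSketch {
    public static void main(String[] args) {
        AddOffsetsToTxnRequestData request = new AddOffsetsToTxnRequestData()
            .setTransactionalId("payments-txn")   // invented id
            .setProducerId(4000L)
            .setProducerEpoch((short) 1)
            .setGroupId("payments-group");

        // Two length-prefixed strings (2-byte short prefix each) plus the
        // fixed 8-byte producer id and 2-byte epoch; size() also caches the
        // UTF-8 encodings so a later write() does not re-encode them.
        ObjectSerializationCache cache = new ObjectSerializationCache();
        System.out.println(request.size(cache, (short) 1));
    }
}
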

View File

@@ -0,0 +1,190 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
public class AddOffsetsToTxnResponseData implements ApiMessage {
private int throttleTimeMs;
private short errorCode;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
new Field("error_code", Type.INT16, "The response error code, or 0 if there was no error.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AddOffsetsToTxnResponseData(Readable _readable, short _version) {
read(_readable, _version);
}
public AddOffsetsToTxnResponseData(Struct struct, short _version) {
fromStruct(struct, _version);
}
public AddOffsetsToTxnResponseData() {
this.throttleTimeMs = 0;
this.errorCode = (short) 0;
}
@Override
public short apiKey() {
return 25;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
this.throttleTimeMs = _readable.readInt();
this.errorCode = _readable.readShort();
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeInt(throttleTimeMs);
_writable.writeShort(errorCode);
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
this._unknownTaggedFields = null;
this.throttleTimeMs = struct.getInt("throttle_time_ms");
this.errorCode = struct.getShort("error_code");
}
@Override
public Struct toStruct(short _version) {
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("throttle_time_ms", this.throttleTimeMs);
struct.set("error_code", this.errorCode);
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
_size += 4;
_size += 2;
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AddOffsetsToTxnResponseData)) return false;
AddOffsetsToTxnResponseData other = (AddOffsetsToTxnResponseData) obj;
if (throttleTimeMs != other.throttleTimeMs) return false;
if (errorCode != other.errorCode) return false;
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + throttleTimeMs;
hashCode = 31 * hashCode + errorCode;
return hashCode;
}
@Override
public String toString() {
return "AddOffsetsToTxnResponseData("
+ "throttleTimeMs=" + throttleTimeMs
+ ", errorCode=" + errorCode
+ ")";
}
public int throttleTimeMs() {
return this.throttleTimeMs;
}
public short errorCode() {
return this.errorCode;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AddOffsetsToTxnResponseData setThrottleTimeMs(int v) {
this.throttleTimeMs = v;
return this;
}
public AddOffsetsToTxnResponseData setErrorCode(short v) {
this.errorCode = v;
return this;
}
}
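
Both response fields are fixed-width, so in versions 0 and 1 the body is always 4 (throttle_time_ms) + 2 (error_code) = 6 bytes; there is no tagged-field section because neither version is flexible. A quick sketch under the same classpath assumptions as above:

import org.apache.kafka.common.message.AddOffsetsToTxnResponseData;
import org.apache.kafka.common.protocol.ObjectSerializationCache;

public class AddOffsetsToTxnResponseSketch {
    public static void main(String[] args) {
        AddOffsetsToTxnResponseData response = new AddOffsetsToTxnResponseData()
            .setThrottleTimeMs(0)
            .setErrorCode((short) 0);
        // Prints 6 regardless of the field values.
        System.out.println(response.size(new ObjectSerializationCache(), (short) 0));
    }
}
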

View File

@@ -0,0 +1,576 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.ImplicitLinkedHashCollection;
import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection;
public class AddPartitionsToTxnRequestData implements ApiMessage {
private String transactionalId;
private long producerId;
private short producerEpoch;
private AddPartitionsToTxnTopicCollection topics;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("transactional_id", Type.STRING, "The transactional id corresponding to the transaction."),
new Field("producer_id", Type.INT64, "Current producer id in use by the transactional id."),
new Field("producer_epoch", Type.INT16, "Current epoch associated with the producer id."),
new Field("topics", new ArrayOf(AddPartitionsToTxnTopic.SCHEMA_0), "The partitions to add to the transation.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AddPartitionsToTxnRequestData(Readable _readable, short _version) {
read(_readable, _version);
}
public AddPartitionsToTxnRequestData(Struct struct, short _version) {
fromStruct(struct, _version);
}
public AddPartitionsToTxnRequestData() {
this.transactionalId = "";
this.producerId = 0L;
this.producerEpoch = (short) 0;
this.topics = new AddPartitionsToTxnTopicCollection(0);
}
@Override
public short apiKey() {
return 24;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
{
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field transactionalId was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field transactionalId had invalid length " + length);
} else {
this.transactionalId = _readable.readString(length);
}
}
this.producerId = _readable.readLong();
this.producerEpoch = _readable.readShort();
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field topics was serialized as null");
} else {
AddPartitionsToTxnTopicCollection newCollection = new AddPartitionsToTxnTopicCollection(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new AddPartitionsToTxnTopic(_readable, _version));
}
this.topics = newCollection;
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
{
byte[] _stringBytes = _cache.getSerializedValue(transactionalId);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
_writable.writeLong(producerId);
_writable.writeShort(producerEpoch);
_writable.writeInt(topics.size());
for (AddPartitionsToTxnTopic topicsElement : topics) {
topicsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
this._unknownTaggedFields = null;
this.transactionalId = struct.getString("transactional_id");
this.producerId = struct.getLong("producer_id");
this.producerEpoch = struct.getShort("producer_epoch");
{
Object[] _nestedObjects = struct.getArray("topics");
this.topics = new AddPartitionsToTxnTopicCollection(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.topics.add(new AddPartitionsToTxnTopic((Struct) nestedObject, _version));
}
}
}
@Override
public Struct toStruct(short _version) {
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("transactional_id", this.transactionalId);
struct.set("producer_id", this.producerId);
struct.set("producer_epoch", this.producerEpoch);
{
Struct[] _nestedObjects = new Struct[topics.size()];
int i = 0;
for (AddPartitionsToTxnTopic element : this.topics) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("topics", (Object[]) _nestedObjects);
}
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
{
byte[] _stringBytes = transactionalId.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'transactionalId' field is too long to be serialized");
}
_cache.cacheSerializedValue(transactionalId, _stringBytes);
_size += _stringBytes.length + 2;
}
_size += 8;
_size += 2;
{
int _arraySize = 0;
_arraySize += 4;
for (AddPartitionsToTxnTopic topicsElement : topics) {
_arraySize += topicsElement.size(_cache, _version);
}
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AddPartitionsToTxnRequestData)) return false;
AddPartitionsToTxnRequestData other = (AddPartitionsToTxnRequestData) obj;
if (this.transactionalId == null) {
if (other.transactionalId != null) return false;
} else {
if (!this.transactionalId.equals(other.transactionalId)) return false;
}
if (producerId != other.producerId) return false;
if (producerEpoch != other.producerEpoch) return false;
if (this.topics == null) {
if (other.topics != null) return false;
} else {
if (!this.topics.equals(other.topics)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (transactionalId == null ? 0 : transactionalId.hashCode());
hashCode = 31 * hashCode + ((int) (producerId >> 32) ^ (int) producerId);
hashCode = 31 * hashCode + producerEpoch;
hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AddPartitionsToTxnRequestData("
+ "transactionalId=" + ((transactionalId == null) ? "null" : "'" + transactionalId.toString() + "'")
+ ", producerId=" + producerId
+ ", producerEpoch=" + producerEpoch
+ ", topics=" + MessageUtil.deepToString(topics.iterator())
+ ")";
}
public String transactionalId() {
return this.transactionalId;
}
public long producerId() {
return this.producerId;
}
public short producerEpoch() {
return this.producerEpoch;
}
public AddPartitionsToTxnTopicCollection topics() {
return this.topics;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AddPartitionsToTxnRequestData setTransactionalId(String v) {
this.transactionalId = v;
return this;
}
public AddPartitionsToTxnRequestData setProducerId(long v) {
this.producerId = v;
return this;
}
public AddPartitionsToTxnRequestData setProducerEpoch(short v) {
this.producerEpoch = v;
return this;
}
public AddPartitionsToTxnRequestData setTopics(AddPartitionsToTxnTopicCollection v) {
this.topics = v;
return this;
}
static public class AddPartitionsToTxnTopic implements Message, ImplicitLinkedHashMultiCollection.Element {
private String name;
private List<Integer> partitions;
private List<RawTaggedField> _unknownTaggedFields;
private int next;
private int prev;
public static final Schema SCHEMA_0 =
new Schema(
new Field("name", Type.STRING, "The name of the topic."),
new Field("partitions", new ArrayOf(Type.INT32), "The partition indexes to add to the transaction")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AddPartitionsToTxnTopic(Readable _readable, short _version) {
read(_readable, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
public AddPartitionsToTxnTopic(Struct struct, short _version) {
fromStruct(struct, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
public AddPartitionsToTxnTopic() {
this.name = "";
this.partitions = new ArrayList<Integer>();
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnTopic");
}
{
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field name was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field name had invalid length " + length);
} else {
this.name = _readable.readString(length);
}
}
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field partitions was serialized as null");
} else {
ArrayList<Integer> newCollection = new ArrayList<Integer>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(_readable.readInt());
}
this.partitions = newCollection;
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnTopic");
}
int _numTaggedFields = 0;
{
byte[] _stringBytes = _cache.getSerializedValue(name);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
_writable.writeInt(partitions.size());
for (Integer partitionsElement : partitions) {
_writable.writeInt(partitionsElement);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnTopic");
}
this._unknownTaggedFields = null;
this.name = struct.getString("name");
{
Object[] _nestedObjects = struct.getArray("partitions");
this.partitions = new ArrayList<Integer>(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.partitions.add((Integer) nestedObject);
}
}
}
@Override
public Struct toStruct(short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnTopic");
}
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("name", this.name);
{
Integer[] _nestedObjects = new Integer[partitions.size()];
int i = 0;
for (Integer element : this.partitions) {
_nestedObjects[i++] = element;
}
struct.set("partitions", (Object[]) _nestedObjects);
}
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 1) {
throw new UnsupportedVersionException("Can't size version " + _version + " of AddPartitionsToTxnTopic");
}
{
byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'name' field is too long to be serialized");
}
_cache.cacheSerializedValue(name, _stringBytes);
_size += _stringBytes.length + 2;
}
{
int _arraySize = 0;
_arraySize += 4;
_arraySize += partitions.size() * 4;
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AddPartitionsToTxnTopic)) return false;
AddPartitionsToTxnTopic other = (AddPartitionsToTxnTopic) obj;
if (this.name == null) {
if (other.name != null) return false;
} else {
if (!this.name.equals(other.name)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AddPartitionsToTxnTopic("
+ "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
+ ", partitions=" + MessageUtil.deepToString(partitions.iterator())
+ ")";
}
public String name() {
return this.name;
}
public List<Integer> partitions() {
return this.partitions;
}
@Override
public int next() {
return this.next;
}
@Override
public int prev() {
return this.prev;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AddPartitionsToTxnTopic setName(String v) {
this.name = v;
return this;
}
public AddPartitionsToTxnTopic setPartitions(List<Integer> v) {
this.partitions = v;
return this;
}
@Override
public void setNext(int v) {
this.next = v;
}
@Override
public void setPrev(int v) {
this.prev = v;
}
}
public static class AddPartitionsToTxnTopicCollection extends ImplicitLinkedHashMultiCollection<AddPartitionsToTxnTopic> {
public AddPartitionsToTxnTopicCollection() {
super();
}
public AddPartitionsToTxnTopicCollection(int expectedNumElements) {
super(expectedNumElements);
}
public AddPartitionsToTxnTopicCollection(Iterator<AddPartitionsToTxnTopic> iterator) {
super(iterator);
}
public AddPartitionsToTxnTopic find(String name) {
AddPartitionsToTxnTopic _key = new AddPartitionsToTxnTopic();
_key.setName(name);
return find(_key);
}
public List<AddPartitionsToTxnTopic> findAll(String name) {
AddPartitionsToTxnTopic _key = new AddPartitionsToTxnTopic();
_key.setName(name);
return findAll(_key);
}
}
}
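
AddPartitionsToTxnTopic's equals() and hashCode() consider only the topic name, which is what lets the ImplicitLinkedHashMultiCollection-based find(String) locate an element by name alone. A usage sketch with invented topic and transaction ids:

import java.util.Arrays;
import org.apache.kafka.common.message.AddPartitionsToTxnRequestData;
import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.AddPartitionsToTxnTopic;
import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.AddPartitionsToTxnTopicCollection;

public class AddPartitionsToTxnRequestSketch {
    public static void main(String[] args) {
        AddPartitionsToTxnTopicCollection topics = new AddPartitionsToTxnTopicCollection();
        topics.add(new AddPartitionsToTxnTopic()
            .setName("orders")                    // invented topic name
            .setPartitions(Arrays.asList(0, 1, 2)));

        AddPartitionsToTxnRequestData request = new AddPartitionsToTxnRequestData()
            .setTransactionalId("orders-txn")     // invented id
            .setProducerId(4000L)
            .setProducerEpoch((short) 1)
            .setTopics(topics);

        // find() builds a key element carrying only the name, so the lookup
        // succeeds regardless of the partitions stored on the element.
        AddPartitionsToTxnTopic hit = topics.find("orders");
        System.out.println(hit.partitions());     // [0, 1, 2]
    }
}
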

View File

@@ -0,0 +1,730 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.ImplicitLinkedHashCollection;
import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection;
public class AddPartitionsToTxnResponseData implements ApiMessage {
private int throttleTimeMs;
private AddPartitionsToTxnTopicResultCollection results;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
new Field("results", new ArrayOf(AddPartitionsToTxnTopicResult.SCHEMA_0), "The results for each topic.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AddPartitionsToTxnResponseData(Readable _readable, short _version) {
read(_readable, _version);
}
public AddPartitionsToTxnResponseData(Struct struct, short _version) {
fromStruct(struct, _version);
}
public AddPartitionsToTxnResponseData() {
this.throttleTimeMs = 0;
this.results = new AddPartitionsToTxnTopicResultCollection(0);
}
@Override
public short apiKey() {
return 24;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
this.throttleTimeMs = _readable.readInt();
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field results was serialized as null");
} else {
AddPartitionsToTxnTopicResultCollection newCollection = new AddPartitionsToTxnTopicResultCollection(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new AddPartitionsToTxnTopicResult(_readable, _version));
}
this.results = newCollection;
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeInt(throttleTimeMs);
_writable.writeInt(results.size());
for (AddPartitionsToTxnTopicResult resultsElement : results) {
resultsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
this._unknownTaggedFields = null;
this.throttleTimeMs = struct.getInt("throttle_time_ms");
{
Object[] _nestedObjects = struct.getArray("results");
this.results = new AddPartitionsToTxnTopicResultCollection(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.results.add(new AddPartitionsToTxnTopicResult((Struct) nestedObject, _version));
}
}
}
@Override
public Struct toStruct(short _version) {
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("throttle_time_ms", this.throttleTimeMs);
{
Struct[] _nestedObjects = new Struct[results.size()];
int i = 0;
for (AddPartitionsToTxnTopicResult element : this.results) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("results", (Object[]) _nestedObjects);
}
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
_size += 4;
{
int _arraySize = 0;
_arraySize += 4;
for (AddPartitionsToTxnTopicResult resultsElement : results) {
_arraySize += resultsElement.size(_cache, _version);
}
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AddPartitionsToTxnResponseData)) return false;
AddPartitionsToTxnResponseData other = (AddPartitionsToTxnResponseData) obj;
if (throttleTimeMs != other.throttleTimeMs) return false;
if (this.results == null) {
if (other.results != null) return false;
} else {
if (!this.results.equals(other.results)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + throttleTimeMs;
hashCode = 31 * hashCode + (results == null ? 0 : results.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AddPartitionsToTxnResponseData("
+ "throttleTimeMs=" + throttleTimeMs
+ ", results=" + MessageUtil.deepToString(results.iterator())
+ ")";
}
public int throttleTimeMs() {
return this.throttleTimeMs;
}
public AddPartitionsToTxnTopicResultCollection results() {
return this.results;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AddPartitionsToTxnResponseData setThrottleTimeMs(int v) {
this.throttleTimeMs = v;
return this;
}
public AddPartitionsToTxnResponseData setResults(AddPartitionsToTxnTopicResultCollection v) {
this.results = v;
return this;
}
public static class AddPartitionsToTxnTopicResult implements Message, ImplicitLinkedHashMultiCollection.Element {
private String name;
private AddPartitionsToTxnPartitionResultCollection results;
private List<RawTaggedField> _unknownTaggedFields;
private int next;
private int prev;
public static final Schema SCHEMA_0 =
new Schema(
new Field("name", Type.STRING, "The topic name."),
new Field("results", new ArrayOf(AddPartitionsToTxnPartitionResult.SCHEMA_0), "The results for each partition")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AddPartitionsToTxnTopicResult(Readable _readable, short _version) {
read(_readable, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
public AddPartitionsToTxnTopicResult(Struct struct, short _version) {
fromStruct(struct, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
public AddPartitionsToTxnTopicResult() {
this.name = "";
this.results = new AddPartitionsToTxnPartitionResultCollection(0);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnTopicResult");
}
{
int length;
length = _readable.readShort();
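// Non-compact strings are length-prefixed with a signed short, so 0x7fff
// (Short.MAX_VALUE) is the largest encodable length.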
if (length < 0) {
throw new RuntimeException("non-nullable field name was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field name had invalid length " + length);
} else {
this.name = _readable.readString(length);
}
}
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field results was serialized as null");
} else {
AddPartitionsToTxnPartitionResultCollection newCollection = new AddPartitionsToTxnPartitionResultCollection(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new AddPartitionsToTxnPartitionResult(_readable, _version));
}
this.results = newCollection;
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnTopicResult");
}
int _numTaggedFields = 0;
{
byte[] _stringBytes = _cache.getSerializedValue(name);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
_writable.writeInt(results.size());
for (AddPartitionsToTxnPartitionResult resultsElement : results) {
resultsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnTopicResult");
}
this._unknownTaggedFields = null;
this.name = struct.getString("name");
{
Object[] _nestedObjects = struct.getArray("results");
this.results = new AddPartitionsToTxnPartitionResultCollection(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.results.add(new AddPartitionsToTxnPartitionResult((Struct) nestedObject, _version));
}
}
}
@Override
public Struct toStruct(short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnTopicResult");
}
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("name", this.name);
{
Struct[] _nestedObjects = new Struct[results.size()];
int i = 0;
for (AddPartitionsToTxnPartitionResult element : this.results) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("results", (Object[]) _nestedObjects);
}
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 1) {
throw new UnsupportedVersionException("Can't size version " + _version + " of AddPartitionsToTxnTopicResult");
}
{
byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'name' field is too long to be serialized");
}
_cache.cacheSerializedValue(name, _stringBytes);
_size += _stringBytes.length + 2;
}
{
int _arraySize = 0;
_arraySize += 4;
for (AddPartitionsToTxnPartitionResult resultsElement : results) {
_arraySize += resultsElement.size(_cache, _version);
}
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AddPartitionsToTxnTopicResult)) return false;
AddPartitionsToTxnTopicResult other = (AddPartitionsToTxnTopicResult) obj;
if (this.name == null) {
if (other.name != null) return false;
} else {
if (!this.name.equals(other.name)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AddPartitionsToTxnTopicResult("
+ "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
+ ", results=" + MessageUtil.deepToString(results.iterator())
+ ")";
}
public String name() {
return this.name;
}
public AddPartitionsToTxnPartitionResultCollection results() {
return this.results;
}
@Override
public int next() {
return this.next;
}
@Override
public int prev() {
return this.prev;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AddPartitionsToTxnTopicResult setName(String v) {
this.name = v;
return this;
}
public AddPartitionsToTxnTopicResult setResults(AddPartitionsToTxnPartitionResultCollection v) {
this.results = v;
return this;
}
@Override
public void setNext(int v) {
this.next = v;
}
@Override
public void setPrev(int v) {
this.prev = v;
}
}
public static class AddPartitionsToTxnPartitionResult implements Message, ImplicitLinkedHashMultiCollection.Element {
private int partitionIndex;
private short errorCode;
private List<RawTaggedField> _unknownTaggedFields;
private int next;
private int prev;
public static final Schema SCHEMA_0 =
new Schema(
new Field("partition_index", Type.INT32, "The partition indexes."),
new Field("error_code", Type.INT16, "The response error code.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AddPartitionsToTxnPartitionResult(Readable _readable, short _version) {
read(_readable, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
public AddPartitionsToTxnPartitionResult(Struct struct, short _version) {
fromStruct(struct, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
public AddPartitionsToTxnPartitionResult() {
this.partitionIndex = 0;
this.errorCode = (short) 0;
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnPartitionResult");
}
this.partitionIndex = _readable.readInt();
this.errorCode = _readable.readShort();
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnPartitionResult");
}
int _numTaggedFields = 0;
_writable.writeInt(partitionIndex);
_writable.writeShort(errorCode);
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnPartitionResult");
}
this._unknownTaggedFields = null;
this.partitionIndex = struct.getInt("partition_index");
this.errorCode = struct.getShort("error_code");
}
@Override
public Struct toStruct(short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnPartitionResult");
}
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("partition_index", this.partitionIndex);
struct.set("error_code", this.errorCode);
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 1) {
throw new UnsupportedVersionException("Can't size version " + _version + " of AddPartitionsToTxnPartitionResult");
}
_size += 4;
_size += 2;
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AddPartitionsToTxnPartitionResult)) return false;
AddPartitionsToTxnPartitionResult other = (AddPartitionsToTxnPartitionResult) obj;
if (partitionIndex != other.partitionIndex) return false;
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + partitionIndex;
return hashCode;
}
@Override
public String toString() {
return "AddPartitionsToTxnPartitionResult("
+ "partitionIndex=" + partitionIndex
+ ", errorCode=" + errorCode
+ ")";
}
public int partitionIndex() {
return this.partitionIndex;
}
public short errorCode() {
return this.errorCode;
}
@Override
public int next() {
return this.next;
}
@Override
public int prev() {
return this.prev;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AddPartitionsToTxnPartitionResult setPartitionIndex(int v) {
this.partitionIndex = v;
return this;
}
public AddPartitionsToTxnPartitionResult setErrorCode(short v) {
this.errorCode = v;
return this;
}
@Override
public void setNext(int v) {
this.next = v;
}
@Override
public void setPrev(int v) {
this.prev = v;
}
}
public static class AddPartitionsToTxnPartitionResultCollection extends ImplicitLinkedHashMultiCollection<AddPartitionsToTxnPartitionResult> {
public AddPartitionsToTxnPartitionResultCollection() {
super();
}
public AddPartitionsToTxnPartitionResultCollection(int expectedNumElements) {
super(expectedNumElements);
}
public AddPartitionsToTxnPartitionResultCollection(Iterator<AddPartitionsToTxnPartitionResult> iterator) {
super(iterator);
}
public AddPartitionsToTxnPartitionResult find(int partitionIndex) {
AddPartitionsToTxnPartitionResult _key = new AddPartitionsToTxnPartitionResult();
_key.setPartitionIndex(partitionIndex);
return find(_key);
}
public List<AddPartitionsToTxnPartitionResult> findAll(int partitionIndex) {
AddPartitionsToTxnPartitionResult _key = new AddPartitionsToTxnPartitionResult();
_key.setPartitionIndex(partitionIndex);
return findAll(_key);
}
}
public static class AddPartitionsToTxnTopicResultCollection extends ImplicitLinkedHashMultiCollection<AddPartitionsToTxnTopicResult> {
public AddPartitionsToTxnTopicResultCollection() {
super();
}
public AddPartitionsToTxnTopicResultCollection(int expectedNumElements) {
super(expectedNumElements);
}
public AddPartitionsToTxnTopicResultCollection(Iterator<AddPartitionsToTxnTopicResult> iterator) {
super(iterator);
}
public AddPartitionsToTxnTopicResult find(String name) {
AddPartitionsToTxnTopicResult _key = new AddPartitionsToTxnTopicResult();
_key.setName(name);
return find(_key);
}
public List<AddPartitionsToTxnTopicResult> findAll(String name) {
AddPartitionsToTxnTopicResult _key = new AddPartitionsToTxnTopicResult();
_key.setName(name);
return findAll(_key);
}
}
}
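
A minimal assembly sketch (not part of the generated file), using only the fluent setters and keyed collections defined above; the topic name is an example value, and error code 0 corresponds to NONE.

import org.apache.kafka.common.message.AddPartitionsToTxnResponseData;
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnPartitionResult;
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnTopicResult;

public class ResponseAssemblySketch {
    public static void main(String[] args) {
        AddPartitionsToTxnResponseData resp = new AddPartitionsToTxnResponseData()
            .setThrottleTimeMs(0);
        AddPartitionsToTxnTopicResult topic = new AddPartitionsToTxnTopicResult()
            .setName("orders"); // assumed example topic name
        topic.results().add(new AddPartitionsToTxnPartitionResult()
            .setPartitionIndex(0)
            .setErrorCode((short) 0)); // 0 == NONE
        resp.results().add(topic);
        // Keyed lookup mirrors the generated find(name) helper on the collection.
        System.out.println(resp.results().find("orders"));
    }
}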


@@ -0,0 +1,802 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.ImplicitLinkedHashCollection;
import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection;
public class AlterConfigsRequestData implements ApiMessage {
private AlterConfigsResourceCollection resources;
private boolean validateOnly;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("resources", new ArrayOf(AlterConfigsResource.SCHEMA_0), "The updates for each resource."),
new Field("validate_only", Type.BOOLEAN, "True if we should validate the request, but not change the configurations.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AlterConfigsRequestData(Readable _readable, short _version) {
read(_readable, _version);
}
public AlterConfigsRequestData(Struct struct, short _version) {
fromStruct(struct, _version);
}
public AlterConfigsRequestData() {
this.resources = new AlterConfigsResourceCollection(0);
this.validateOnly = false;
}
@Override
public short apiKey() {
return 33;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field resources was serialized as null");
} else {
AlterConfigsResourceCollection newCollection = new AlterConfigsResourceCollection(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new AlterConfigsResource(_readable, _version));
}
this.resources = newCollection;
}
}
this.validateOnly = _readable.readByte() != 0;
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeInt(resources.size());
for (AlterConfigsResource resourcesElement : resources) {
resourcesElement.write(_writable, _cache, _version);
}
_writable.writeByte(validateOnly ? (byte) 1 : (byte) 0);
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
this._unknownTaggedFields = null;
{
Object[] _nestedObjects = struct.getArray("resources");
this.resources = new AlterConfigsResourceCollection(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.resources.add(new AlterConfigsResource((Struct) nestedObject, _version));
}
}
this.validateOnly = struct.getBoolean("validate_only");
}
@Override
public Struct toStruct(short _version) {
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
{
Struct[] _nestedObjects = new Struct[resources.size()];
int i = 0;
for (AlterConfigsResource element : this.resources) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("resources", (Object[]) _nestedObjects);
}
struct.set("validate_only", this.validateOnly);
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
{
int _arraySize = 0;
_arraySize += 4;
for (AlterConfigsResource resourcesElement : resources) {
_arraySize += resourcesElement.size(_cache, _version);
}
_size += _arraySize;
}
_size += 1;
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AlterConfigsRequestData)) return false;
AlterConfigsRequestData other = (AlterConfigsRequestData) obj;
if (this.resources == null) {
if (other.resources != null) return false;
} else {
if (!this.resources.equals(other.resources)) return false;
}
if (validateOnly != other.validateOnly) return false;
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (resources == null ? 0 : resources.hashCode());
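// 1231 and 1237 are the JDK's conventional primes for boolean hashing
// (Boolean.hashCode() returns 1231 for true, 1237 for false).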
hashCode = 31 * hashCode + (validateOnly ? 1231 : 1237);
return hashCode;
}
@Override
public String toString() {
return "AlterConfigsRequestData("
+ "resources=" + MessageUtil.deepToString(resources.iterator())
+ ", validateOnly=" + (validateOnly ? "true" : "false")
+ ")";
}
public AlterConfigsResourceCollection resources() {
return this.resources;
}
public boolean validateOnly() {
return this.validateOnly;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AlterConfigsRequestData setResources(AlterConfigsResourceCollection v) {
this.resources = v;
return this;
}
public AlterConfigsRequestData setValidateOnly(boolean v) {
this.validateOnly = v;
return this;
}
public static class AlterConfigsResource implements Message, ImplicitLinkedHashMultiCollection.Element {
private byte resourceType;
private String resourceName;
private AlterableConfigCollection configs;
private List<RawTaggedField> _unknownTaggedFields;
private int next;
private int prev;
public static final Schema SCHEMA_0 =
new Schema(
new Field("resource_type", Type.INT8, "The resource type."),
new Field("resource_name", Type.STRING, "The resource name."),
new Field("configs", new ArrayOf(AlterableConfig.SCHEMA_0), "The configurations.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AlterConfigsResource(Readable _readable, short _version) {
read(_readable, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
public AlterConfigsResource(Struct struct, short _version) {
fromStruct(struct, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
public AlterConfigsResource() {
this.resourceType = (byte) 0;
this.resourceName = "";
this.configs = new AlterableConfigCollection(0);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AlterConfigsResource");
}
this.resourceType = _readable.readByte();
{
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field resourceName was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field resourceName had invalid length " + length);
} else {
this.resourceName = _readable.readString(length);
}
}
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field configs was serialized as null");
} else {
AlterableConfigCollection newCollection = new AlterableConfigCollection(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new AlterableConfig(_readable, _version));
}
this.configs = newCollection;
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AlterConfigsResource");
}
int _numTaggedFields = 0;
_writable.writeByte(resourceType);
{
byte[] _stringBytes = _cache.getSerializedValue(resourceName);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
_writable.writeInt(configs.size());
for (AlterableConfig configsElement : configs) {
configsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AlterConfigsResource");
}
this._unknownTaggedFields = null;
this.resourceType = struct.getByte("resource_type");
this.resourceName = struct.getString("resource_name");
{
Object[] _nestedObjects = struct.getArray("configs");
this.configs = new AlterableConfigCollection(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.configs.add(new AlterableConfig((Struct) nestedObject, _version));
}
}
}
@Override
public Struct toStruct(short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AlterConfigsResource");
}
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("resource_type", this.resourceType);
struct.set("resource_name", this.resourceName);
{
Struct[] _nestedObjects = new Struct[configs.size()];
int i = 0;
for (AlterableConfig element : this.configs) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("configs", (Object[]) _nestedObjects);
}
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 1) {
throw new UnsupportedVersionException("Can't size version " + _version + " of AlterConfigsResource");
}
_size += 1;
{
byte[] _stringBytes = resourceName.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'resourceName' field is too long to be serialized");
}
_cache.cacheSerializedValue(resourceName, _stringBytes);
_size += _stringBytes.length + 2;
}
{
int _arraySize = 0;
_arraySize += 4;
for (AlterableConfig configsElement : configs) {
_arraySize += configsElement.size(_cache, _version);
}
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AlterConfigsResource)) return false;
AlterConfigsResource other = (AlterConfigsResource) obj;
if (resourceType != other.resourceType) return false;
if (this.resourceName == null) {
if (other.resourceName != null) return false;
} else {
if (!this.resourceName.equals(other.resourceName)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + resourceType;
hashCode = 31 * hashCode + (resourceName == null ? 0 : resourceName.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AlterConfigsResource("
+ "resourceType=" + resourceType
+ ", resourceName=" + ((resourceName == null) ? "null" : "'" + resourceName.toString() + "'")
+ ", configs=" + MessageUtil.deepToString(configs.iterator())
+ ")";
}
public byte resourceType() {
return this.resourceType;
}
public String resourceName() {
return this.resourceName;
}
public AlterableConfigCollection configs() {
return this.configs;
}
@Override
public int next() {
return this.next;
}
@Override
public int prev() {
return this.prev;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AlterConfigsResource setResourceType(byte v) {
this.resourceType = v;
return this;
}
public AlterConfigsResource setResourceName(String v) {
this.resourceName = v;
return this;
}
public AlterConfigsResource setConfigs(AlterableConfigCollection v) {
this.configs = v;
return this;
}
@Override
public void setNext(int v) {
this.next = v;
}
@Override
public void setPrev(int v) {
this.prev = v;
}
}
public static class AlterableConfig implements Message, ImplicitLinkedHashMultiCollection.Element {
private String name;
private String value;
private List<RawTaggedField> _unknownTaggedFields;
private int next;
private int prev;
public static final Schema SCHEMA_0 =
new Schema(
new Field("name", Type.STRING, "The configuration key name."),
new Field("value", Type.NULLABLE_STRING, "The value to set for the configuration key.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AlterableConfig(Readable _readable, short _version) {
read(_readable, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
public AlterableConfig(Struct struct, short _version) {
fromStruct(struct, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
public AlterableConfig() {
this.name = "";
this.value = "";
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AlterableConfig");
}
{
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field name was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field name had invalid length " + length);
} else {
this.name = _readable.readString(length);
}
}
{
int length;
length = _readable.readShort();
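// For this nullable string, a negative length (-1 on the wire) encodes null.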
if (length < 0) {
this.value = null;
} else if (length > 0x7fff) {
throw new RuntimeException("string field value had invalid length " + length);
} else {
this.value = _readable.readString(length);
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AlterableConfig");
}
int _numTaggedFields = 0;
{
byte[] _stringBytes = _cache.getSerializedValue(name);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
if (value == null) {
_writable.writeShort((short) -1);
} else {
byte[] _stringBytes = _cache.getSerializedValue(value);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AlterableConfig");
}
this._unknownTaggedFields = null;
this.name = struct.getString("name");
this.value = struct.getString("value");
}
@Override
public Struct toStruct(short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AlterableConfig");
}
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("name", this.name);
struct.set("value", this.value);
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 1) {
throw new UnsupportedVersionException("Can't size version " + _version + " of AlterableConfig");
}
{
byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'name' field is too long to be serialized");
}
_cache.cacheSerializedValue(name, _stringBytes);
_size += _stringBytes.length + 2;
}
if (value == null) {
_size += 2;
} else {
byte[] _stringBytes = value.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'value' field is too long to be serialized");
}
_cache.cacheSerializedValue(value, _stringBytes);
_size += _stringBytes.length + 2;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AlterableConfig)) return false;
AlterableConfig other = (AlterableConfig) obj;
if (this.name == null) {
if (other.name != null) return false;
} else {
if (!this.name.equals(other.name)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AlterableConfig("
+ "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
+ ", value=" + ((value == null) ? "null" : "'" + value.toString() + "'")
+ ")";
}
public String name() {
return this.name;
}
public String value() {
return this.value;
}
@Override
public int next() {
return this.next;
}
@Override
public int prev() {
return this.prev;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AlterableConfig setName(String v) {
this.name = v;
return this;
}
public AlterableConfig setValue(String v) {
this.value = v;
return this;
}
@Override
public void setNext(int v) {
this.next = v;
}
@Override
public void setPrev(int v) {
this.prev = v;
}
}
public static class AlterableConfigCollection extends ImplicitLinkedHashMultiCollection<AlterableConfig> {
public AlterableConfigCollection() {
super();
}
public AlterableConfigCollection(int expectedNumElements) {
super(expectedNumElements);
}
public AlterableConfigCollection(Iterator<AlterableConfig> iterator) {
super(iterator);
}
public AlterableConfig find(String name) {
AlterableConfig _key = new AlterableConfig();
_key.setName(name);
return find(_key);
}
public List<AlterableConfig> findAll(String name) {
AlterableConfig _key = new AlterableConfig();
_key.setName(name);
return findAll(_key);
}
}
public static class AlterConfigsResourceCollection extends ImplicitLinkedHashMultiCollection<AlterConfigsResource> {
public AlterConfigsResourceCollection() {
super();
}
public AlterConfigsResourceCollection(int expectedNumElements) {
super(expectedNumElements);
}
public AlterConfigsResourceCollection(Iterator<AlterConfigsResource> iterator) {
super(iterator);
}
public AlterConfigsResource find(byte resourceType, String resourceName) {
AlterConfigsResource _key = new AlterConfigsResource();
_key.setResourceType(resourceType);
_key.setResourceName(resourceName);
return find(_key);
}
public List<AlterConfigsResource> findAll(byte resourceType, String resourceName) {
AlterConfigsResource _key = new AlterConfigsResource();
_key.setResourceType(resourceType);
_key.setResourceName(resourceName);
return findAll(_key);
}
}
}
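
A minimal request sketch (not part of the generated file). The resource-type byte is an assumption taken from ConfigResource.Type (2 == TOPIC), not from this listing; the topic name and config values are example values.

import org.apache.kafka.common.message.AlterConfigsRequestData;
import org.apache.kafka.common.message.AlterConfigsRequestData.AlterConfigsResource;
import org.apache.kafka.common.message.AlterConfigsRequestData.AlterableConfig;

public class AlterConfigsRequestSketch {
    public static void main(String[] args) {
        AlterConfigsResource resource = new AlterConfigsResource()
            .setResourceType((byte) 2)   // assumed: 2 == TOPIC in ConfigResource.Type
            .setResourceName("orders");  // assumed example topic name
        resource.configs().add(new AlterableConfig()
            .setName("retention.ms")
            .setValue("86400000"));
        AlterConfigsRequestData request = new AlterConfigsRequestData()
            .setValidateOnly(true);      // dry run: validate but do not apply
        request.resources().add(resource);
        System.out.println(request);
    }
}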


@@ -0,0 +1,491 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
public class AlterConfigsResponseData implements ApiMessage {
private int throttleTimeMs;
private List<AlterConfigsResourceResponse> responses;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
new Field("responses", new ArrayOf(AlterConfigsResourceResponse.SCHEMA_0), "The responses for each resource.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AlterConfigsResponseData(Readable _readable, short _version) {
read(_readable, _version);
}
public AlterConfigsResponseData(Struct struct, short _version) {
fromStruct(struct, _version);
}
public AlterConfigsResponseData() {
this.throttleTimeMs = 0;
this.responses = new ArrayList<AlterConfigsResourceResponse>();
}
@Override
public short apiKey() {
return 33;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
this.throttleTimeMs = _readable.readInt();
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field responses was serialized as null");
} else {
ArrayList<AlterConfigsResourceResponse> newCollection = new ArrayList<AlterConfigsResourceResponse>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new AlterConfigsResourceResponse(_readable, _version));
}
this.responses = newCollection;
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeInt(throttleTimeMs);
_writable.writeInt(responses.size());
for (AlterConfigsResourceResponse responsesElement : responses) {
responsesElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
this._unknownTaggedFields = null;
this.throttleTimeMs = struct.getInt("throttle_time_ms");
{
Object[] _nestedObjects = struct.getArray("responses");
this.responses = new ArrayList<AlterConfigsResourceResponse>(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.responses.add(new AlterConfigsResourceResponse((Struct) nestedObject, _version));
}
}
}
@Override
public Struct toStruct(short _version) {
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("throttle_time_ms", this.throttleTimeMs);
{
Struct[] _nestedObjects = new Struct[responses.size()];
int i = 0;
for (AlterConfigsResourceResponse element : this.responses) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("responses", (Object[]) _nestedObjects);
}
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
_size += 4;
{
int _arraySize = 0;
_arraySize += 4;
for (AlterConfigsResourceResponse responsesElement : responses) {
_arraySize += responsesElement.size(_cache, _version);
}
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AlterConfigsResponseData)) return false;
AlterConfigsResponseData other = (AlterConfigsResponseData) obj;
if (throttleTimeMs != other.throttleTimeMs) return false;
if (this.responses == null) {
if (other.responses != null) return false;
} else {
if (!this.responses.equals(other.responses)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + throttleTimeMs;
hashCode = 31 * hashCode + (responses == null ? 0 : responses.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AlterConfigsResponseData("
+ "throttleTimeMs=" + throttleTimeMs
+ ", responses=" + MessageUtil.deepToString(responses.iterator())
+ ")";
}
public int throttleTimeMs() {
return this.throttleTimeMs;
}
public List<AlterConfigsResourceResponse> responses() {
return this.responses;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AlterConfigsResponseData setThrottleTimeMs(int v) {
this.throttleTimeMs = v;
return this;
}
public AlterConfigsResponseData setResponses(List<AlterConfigsResourceResponse> v) {
this.responses = v;
return this;
}
public static class AlterConfigsResourceResponse implements Message {
private short errorCode;
private String errorMessage;
private byte resourceType;
private String resourceName;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("error_code", Type.INT16, "The resource error code."),
new Field("error_message", Type.NULLABLE_STRING, "The resource error message, or null if there was no error."),
new Field("resource_type", Type.INT8, "The resource type."),
new Field("resource_name", Type.STRING, "The resource name.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AlterConfigsResourceResponse(Readable _readable, short _version) {
read(_readable, _version);
}
public AlterConfigsResourceResponse(Struct struct, short _version) {
fromStruct(struct, _version);
}
public AlterConfigsResourceResponse() {
this.errorCode = (short) 0;
this.errorMessage = "";
this.resourceType = (byte) 0;
this.resourceName = "";
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AlterConfigsResourceResponse");
}
this.errorCode = _readable.readShort();
{
int length;
length = _readable.readShort();
if (length < 0) {
this.errorMessage = null;
} else if (length > 0x7fff) {
throw new RuntimeException("string field errorMessage had invalid length " + length);
} else {
this.errorMessage = _readable.readString(length);
}
}
this.resourceType = _readable.readByte();
{
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field resourceName was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field resourceName had invalid length " + length);
} else {
this.resourceName = _readable.readString(length);
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AlterConfigsResourceResponse");
}
int _numTaggedFields = 0;
_writable.writeShort(errorCode);
if (errorMessage == null) {
_writable.writeShort((short) -1);
} else {
byte[] _stringBytes = _cache.getSerializedValue(errorMessage);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
_writable.writeByte(resourceType);
{
byte[] _stringBytes = _cache.getSerializedValue(resourceName);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AlterConfigsResourceResponse");
}
this._unknownTaggedFields = null;
this.errorCode = struct.getShort("error_code");
this.errorMessage = struct.getString("error_message");
this.resourceType = struct.getByte("resource_type");
this.resourceName = struct.getString("resource_name");
}
@Override
public Struct toStruct(short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AlterConfigsResourceResponse");
}
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("error_code", this.errorCode);
struct.set("error_message", this.errorMessage);
struct.set("resource_type", this.resourceType);
struct.set("resource_name", this.resourceName);
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 1) {
throw new UnsupportedVersionException("Can't size version " + _version + " of AlterConfigsResourceResponse");
}
_size += 2;
if (errorMessage == null) {
_size += 2;
} else {
byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'errorMessage' field is too long to be serialized");
}
_cache.cacheSerializedValue(errorMessage, _stringBytes);
_size += _stringBytes.length + 2;
}
_size += 1;
{
byte[] _stringBytes = resourceName.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'resourceName' field is too long to be serialized");
}
_cache.cacheSerializedValue(resourceName, _stringBytes);
_size += _stringBytes.length + 2;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AlterConfigsResourceResponse)) return false;
AlterConfigsResourceResponse other = (AlterConfigsResourceResponse) obj;
if (errorCode != other.errorCode) return false;
if (this.errorMessage == null) {
if (other.errorMessage != null) return false;
} else {
if (!this.errorMessage.equals(other.errorMessage)) return false;
}
if (resourceType != other.resourceType) return false;
if (this.resourceName == null) {
if (other.resourceName != null) return false;
} else {
if (!this.resourceName.equals(other.resourceName)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + errorCode;
hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode());
hashCode = 31 * hashCode + resourceType;
hashCode = 31 * hashCode + (resourceName == null ? 0 : resourceName.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AlterConfigsResourceResponse("
+ "errorCode=" + errorCode
+ ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'")
+ ", resourceType=" + resourceType
+ ", resourceName=" + ((resourceName == null) ? "null" : "'" + resourceName.toString() + "'")
+ ")";
}
public short errorCode() {
return this.errorCode;
}
public String errorMessage() {
return this.errorMessage;
}
public byte resourceType() {
return this.resourceType;
}
public String resourceName() {
return this.resourceName;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AlterConfigsResourceResponse setErrorCode(short v) {
this.errorCode = v;
return this;
}
public AlterConfigsResourceResponse setErrorMessage(String v) {
this.errorMessage = v;
return this;
}
public AlterConfigsResourceResponse setResourceType(byte v) {
this.resourceType = v;
return this;
}
public AlterConfigsResourceResponse setResourceName(String v) {
this.resourceName = v;
return this;
}
}
}
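
A minimal error-scan sketch (not part of the generated file), iterating the plain List accessor defined above; the resource-type value is the same assumption as in the request sketch.

import org.apache.kafka.common.message.AlterConfigsResponseData;
import org.apache.kafka.common.message.AlterConfigsResponseData.AlterConfigsResourceResponse;

public class AlterConfigsResponseSketch {
    public static void main(String[] args) {
        AlterConfigsResponseData resp = new AlterConfigsResponseData().setThrottleTimeMs(0);
        resp.responses().add(new AlterConfigsResourceResponse()
            .setResourceType((byte) 2)  // assumed: 2 == TOPIC
            .setResourceName("orders")
            .setErrorCode((short) 0)
            .setErrorMessage(null));    // a null message accompanies a success code
        for (AlterConfigsResourceResponse r : resp.responses()) {
            if (r.errorCode() != 0) {
                System.out.println("failed: " + r.resourceName() + ": " + r.errorMessage());
            }
        }
    }
}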


@@ -0,0 +1,728 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static java.util.Map.Entry;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
public class AlterPartitionReassignmentsRequestData implements ApiMessage {
private int timeoutMs;
private List<ReassignableTopic> topics;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("timeout_ms", Type.INT32, "The time in ms to wait for the request to complete."),
new Field("topics", new CompactArrayOf(ReassignableTopic.SCHEMA_0), "The topics to reassign."),
TaggedFieldsSection.of(
)
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public AlterPartitionReassignmentsRequestData(Readable _readable, short _version) {
read(_readable, _version);
}
public AlterPartitionReassignmentsRequestData(Struct struct, short _version) {
fromStruct(struct, _version);
}
public AlterPartitionReassignmentsRequestData() {
this.timeoutMs = 60000;
this.topics = new ArrayList<ReassignableTopic>();
}
@Override
public short apiKey() {
return 45;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
@Override
public void read(Readable _readable, short _version) {
this.timeoutMs = _readable.readInt();
{
int arrayLength;
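// Compact (flexible-version) encoding: the wire carries length + 1 as an unsigned varint, so a decoded -1 means null.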
arrayLength = _readable.readUnsignedVarint() - 1;
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field topics was serialized as null");
} else {
ArrayList<ReassignableTopic> newCollection = new ArrayList<ReassignableTopic>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new ReassignableTopic(_readable, _version));
}
this.topics = newCollection;
}
}
this._unknownTaggedFields = null;
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeInt(timeoutMs);
_writable.writeUnsignedVarint(topics.size() + 1);
for (ReassignableTopic topicsElement : topics) {
topicsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
NavigableMap<Integer, Object> _taggedFields = null;
this._unknownTaggedFields = null;
_taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
this.timeoutMs = struct.getInt("timeout_ms");
{
Object[] _nestedObjects = struct.getArray("topics");
this.topics = new ArrayList<ReassignableTopic>(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.topics.add(new ReassignableTopic((Struct) nestedObject, _version));
}
}
if (!_taggedFields.isEmpty()) {
this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
}
}
}
@Override
public Struct toStruct(short _version) {
TreeMap<Integer, Object> _taggedFields = null;
_taggedFields = new TreeMap<>();
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("timeout_ms", this.timeoutMs);
{
Struct[] _nestedObjects = new Struct[topics.size()];
int i = 0;
for (ReassignableTopic element : this.topics) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("topics", (Object[]) _nestedObjects);
}
struct.set("_tagged_fields", _taggedFields);
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
_size += 4;
{
int _arraySize = 0;
_arraySize += ByteUtils.sizeOfUnsignedVarint(topics.size() + 1);
for (ReassignableTopic topicsElement : topics) {
_arraySize += topicsElement.size(_cache, _version);
}
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
_size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AlterPartitionReassignmentsRequestData)) return false;
AlterPartitionReassignmentsRequestData other = (AlterPartitionReassignmentsRequestData) obj;
if (timeoutMs != other.timeoutMs) return false;
if (this.topics == null) {
if (other.topics != null) return false;
} else {
if (!this.topics.equals(other.topics)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + timeoutMs;
hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AlterPartitionReassignmentsRequestData("
+ "timeoutMs=" + timeoutMs
+ ", topics=" + MessageUtil.deepToString(topics.iterator())
+ ")";
}
public int timeoutMs() {
return this.timeoutMs;
}
public List<ReassignableTopic> topics() {
return this.topics;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AlterPartitionReassignmentsRequestData setTimeoutMs(int v) {
this.timeoutMs = v;
return this;
}
public AlterPartitionReassignmentsRequestData setTopics(List<ReassignableTopic> v) {
this.topics = v;
return this;
}
static public class ReassignableTopic implements Message {
private String name;
private List<ReassignablePartition> partitions;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("name", Type.COMPACT_STRING, "The topic name."),
new Field("partitions", new CompactArrayOf(ReassignablePartition.SCHEMA_0), "The partitions to reassign."),
TaggedFieldsSection.of(
)
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public ReassignableTopic(Readable _readable, short _version) {
read(_readable, _version);
}
public ReassignableTopic(Struct struct, short _version) {
fromStruct(struct, _version);
}
public ReassignableTopic() {
this.name = "";
this.partitions = new ArrayList<ReassignablePartition>();
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignableTopic");
}
{
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
throw new RuntimeException("non-nullable field name was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field name had invalid length " + length);
} else {
this.name = _readable.readString(length);
}
}
{
int arrayLength;
arrayLength = _readable.readUnsignedVarint() - 1;
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field partitions was serialized as null");
} else {
ArrayList<ReassignablePartition> newCollection = new ArrayList<ReassignablePartition>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new ReassignablePartition(_readable, _version));
}
this.partitions = newCollection;
}
}
this._unknownTaggedFields = null;
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignableTopic");
}
int _numTaggedFields = 0;
{
byte[] _stringBytes = _cache.getSerializedValue(name);
_writable.writeUnsignedVarint(_stringBytes.length + 1);
_writable.writeByteArray(_stringBytes);
}
_writable.writeUnsignedVarint(partitions.size() + 1);
for (ReassignablePartition partitionsElement : partitions) {
partitionsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignableTopic");
}
NavigableMap<Integer, Object> _taggedFields = null;
this._unknownTaggedFields = null;
_taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
this.name = struct.getString("name");
{
Object[] _nestedObjects = struct.getArray("partitions");
this.partitions = new ArrayList<ReassignablePartition>(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.partitions.add(new ReassignablePartition((Struct) nestedObject, _version));
}
}
if (!_taggedFields.isEmpty()) {
this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
}
}
}
@Override
public Struct toStruct(short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignableTopic");
}
TreeMap<Integer, Object> _taggedFields = null;
_taggedFields = new TreeMap<>();
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("name", this.name);
{
Struct[] _nestedObjects = new Struct[partitions.size()];
int i = 0;
for (ReassignablePartition element : this.partitions) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("partitions", (Object[]) _nestedObjects);
}
struct.set("_tagged_fields", _taggedFields);
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 0) {
throw new UnsupportedVersionException("Can't size version " + _version + " of ReassignableTopic");
}
{
byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'name' field is too long to be serialized");
}
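// Cache the UTF-8 bytes now so the matching write() call can reuse them via getSerializedValue().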
_cache.cacheSerializedValue(name, _stringBytes);
_size += _stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1);
}
{
int _arraySize = 0;
_arraySize += ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1);
for (ReassignablePartition partitionsElement : partitions) {
_arraySize += partitionsElement.size(_cache, _version);
}
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
_size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof ReassignableTopic)) return false;
ReassignableTopic other = (ReassignableTopic) obj;
if (this.name == null) {
if (other.name != null) return false;
} else {
if (!this.name.equals(other.name)) return false;
}
if (this.partitions == null) {
if (other.partitions != null) return false;
} else {
if (!this.partitions.equals(other.partitions)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode());
return hashCode;
}
@Override
public String toString() {
return "ReassignableTopic("
+ "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
+ ", partitions=" + MessageUtil.deepToString(partitions.iterator())
+ ")";
}
public String name() {
return this.name;
}
public List<ReassignablePartition> partitions() {
return this.partitions;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public ReassignableTopic setName(String v) {
this.name = v;
return this;
}
public ReassignableTopic setPartitions(List<ReassignablePartition> v) {
this.partitions = v;
return this;
}
}
static public class ReassignablePartition implements Message {
private int partitionIndex;
private List<Integer> replicas;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("partition_index", Type.INT32, "The partition index."),
new Field("replicas", CompactArrayOf.nullable(Type.INT32), "The replicas to place the partitions on, or null to cancel a pending reassignment for this partition."),
TaggedFieldsSection.of(
)
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public ReassignablePartition(Readable _readable, short _version) {
read(_readable, _version);
}
public ReassignablePartition(Struct struct, short _version) {
fromStruct(struct, _version);
}
public ReassignablePartition() {
this.partitionIndex = 0;
this.replicas = null;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignablePartition");
}
this.partitionIndex = _readable.readInt();
{
int arrayLength;
arrayLength = _readable.readUnsignedVarint() - 1;
if (arrayLength < 0) {
this.replicas = null;
} else {
ArrayList<Integer> newCollection = new ArrayList<Integer>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(_readable.readInt());
}
this.replicas = newCollection;
}
}
this._unknownTaggedFields = null;
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignablePartition");
}
int _numTaggedFields = 0;
_writable.writeInt(partitionIndex);
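// A null replica list is meaningful here: it cancels a pending reassignment and is encoded as varint 0.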
if (replicas == null) {
_writable.writeUnsignedVarint(0);
} else {
_writable.writeUnsignedVarint(replicas.size() + 1);
for (Integer replicasElement : replicas) {
_writable.writeInt(replicasElement);
}
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignablePartition");
}
NavigableMap<Integer, Object> _taggedFields = null;
this._unknownTaggedFields = null;
_taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
this.partitionIndex = struct.getInt("partition_index");
{
Object[] _nestedObjects = struct.getArray("replicas");
if (_nestedObjects == null) {
this.replicas = null;
} else {
this.replicas = new ArrayList<Integer>(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.replicas.add((Integer) nestedObject);
}
}
}
if (!_taggedFields.isEmpty()) {
this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
}
}
}
@Override
public Struct toStruct(short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignablePartition");
}
TreeMap<Integer, Object> _taggedFields = null;
_taggedFields = new TreeMap<>();
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("partition_index", this.partitionIndex);
{
if (replicas == null) {
struct.set("replicas", null);
} else {
Integer[] _nestedObjects = new Integer[replicas.size()];
int i = 0;
for (Integer element : this.replicas) {
_nestedObjects[i++] = element;
}
struct.set("replicas", (Object[]) _nestedObjects);
}
}
struct.set("_tagged_fields", _taggedFields);
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 0) {
throw new UnsupportedVersionException("Can't size version " + _version + " of ReassignablePartition");
}
_size += 4;
if (replicas == null) {
_size += 1;
} else {
int _arraySize = 0;
_arraySize += ByteUtils.sizeOfUnsignedVarint(replicas.size() + 1);
_arraySize += replicas.size() * 4;
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
_size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof ReassignablePartition)) return false;
ReassignablePartition other = (ReassignablePartition) obj;
if (partitionIndex != other.partitionIndex) return false;
if (this.replicas == null) {
if (other.replicas != null) return false;
} else {
if (!this.replicas.equals(other.replicas)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + partitionIndex;
hashCode = 31 * hashCode + (replicas == null ? 0 : replicas.hashCode());
return hashCode;
}
@Override
public String toString() {
return "ReassignablePartition("
+ "partitionIndex=" + partitionIndex
+ ", replicas=" + ((replicas == null) ? "null" : MessageUtil.deepToString(replicas.iterator()))
+ ")";
}
public int partitionIndex() {
return this.partitionIndex;
}
public List<Integer> replicas() {
return this.replicas;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public ReassignablePartition setPartitionIndex(int v) {
this.partitionIndex = v;
return this;
}
public ReassignablePartition setReplicas(List<Integer> v) {
this.replicas = v;
return this;
}
}
}
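A round-trip sketch for the request class above (not part of the diff), showing how these generated classes are sized, written, and read back; it assumes upstream Kafka's ByteBufferAccessor, which implements both Readable and Writable over a ByteBuffer:

import java.nio.ByteBuffer;
import java.util.Arrays;
import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData;
import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.ReassignablePartition;
import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.ReassignableTopic;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.ObjectSerializationCache;

public class ReassignmentRoundTrip {
    public static void main(String[] args) {
        short version = 0; // the only version this schema defines
        AlterPartitionReassignmentsRequestData request = new AlterPartitionReassignmentsRequestData()
                .setTimeoutMs(30000)
                .setTopics(Arrays.asList(new ReassignableTopic()
                        .setName("orders") // hypothetical topic
                        .setPartitions(Arrays.asList(
                                new ReassignablePartition()
                                        .setPartitionIndex(0)
                                        .setReplicas(Arrays.asList(1, 2, 3)), // target brokers
                                new ReassignablePartition()
                                        .setPartitionIndex(1)
                                        .setReplicas(null))))); // null cancels a pending reassignment

        // size() must run with the same cache as write(): it stashes each string's
        // UTF-8 bytes so write() can fetch them via getSerializedValue().
        ObjectSerializationCache cache = new ObjectSerializationCache();
        ByteBuffer buffer = ByteBuffer.allocate(request.size(cache, version));
        request.write(new ByteBufferAccessor(buffer), cache, version);
        buffer.flip();

        AlterPartitionReassignmentsRequestData decoded =
                new AlterPartitionReassignmentsRequestData(new ByteBufferAccessor(buffer), version);
        System.out.println(decoded.equals(request)); // expected: true
    }
}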

795
org/apache/kafka/common/message/AlterPartitionReassignmentsResponseData.java
View File

@@ -0,0 +1,795 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static java.util.Map.Entry;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
public class AlterPartitionReassignmentsResponseData implements ApiMessage {
private int throttleTimeMs;
private short errorCode;
private String errorMessage;
private List<ReassignableTopicResponse> responses;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
new Field("error_code", Type.INT16, "The top-level error code, or 0 if there was no error."),
new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The top-level error message, or null if there was no error."),
new Field("responses", new CompactArrayOf(ReassignableTopicResponse.SCHEMA_0), "The responses to topics to reassign."),
TaggedFieldsSection.of(
)
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public AlterPartitionReassignmentsResponseData(Readable _readable, short _version) {
read(_readable, _version);
}
public AlterPartitionReassignmentsResponseData(Struct struct, short _version) {
fromStruct(struct, _version);
}
public AlterPartitionReassignmentsResponseData() {
this.throttleTimeMs = 0;
this.errorCode = (short) 0;
this.errorMessage = "";
this.responses = new ArrayList<ReassignableTopicResponse>();
}
@Override
public short apiKey() {
return 45;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
@Override
public void read(Readable _readable, short _version) {
this.throttleTimeMs = _readable.readInt();
this.errorCode = _readable.readShort();
{
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
this.errorMessage = null;
} else if (length > 0x7fff) {
throw new RuntimeException("string field errorMessage had invalid length " + length);
} else {
this.errorMessage = _readable.readString(length);
}
}
{
int arrayLength;
arrayLength = _readable.readUnsignedVarint() - 1;
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field responses was serialized as null");
} else {
ArrayList<ReassignableTopicResponse> newCollection = new ArrayList<ReassignableTopicResponse>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new ReassignableTopicResponse(_readable, _version));
}
this.responses = newCollection;
}
}
this._unknownTaggedFields = null;
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeInt(throttleTimeMs);
_writable.writeShort(errorCode);
if (errorMessage == null) {
_writable.writeUnsignedVarint(0);
} else {
byte[] _stringBytes = _cache.getSerializedValue(errorMessage);
_writable.writeUnsignedVarint(_stringBytes.length + 1);
_writable.writeByteArray(_stringBytes);
}
_writable.writeUnsignedVarint(responses.size() + 1);
for (ReassignableTopicResponse responsesElement : responses) {
responsesElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
NavigableMap<Integer, Object> _taggedFields = null;
this._unknownTaggedFields = null;
_taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
this.throttleTimeMs = struct.getInt("throttle_time_ms");
this.errorCode = struct.getShort("error_code");
this.errorMessage = struct.getString("error_message");
{
Object[] _nestedObjects = struct.getArray("responses");
this.responses = new ArrayList<ReassignableTopicResponse>(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.responses.add(new ReassignableTopicResponse((Struct) nestedObject, _version));
}
}
if (!_taggedFields.isEmpty()) {
this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
}
}
}
@Override
public Struct toStruct(short _version) {
TreeMap<Integer, Object> _taggedFields = null;
_taggedFields = new TreeMap<>();
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("throttle_time_ms", this.throttleTimeMs);
struct.set("error_code", this.errorCode);
struct.set("error_message", this.errorMessage);
{
Struct[] _nestedObjects = new Struct[responses.size()];
int i = 0;
for (ReassignableTopicResponse element : this.responses) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("responses", (Object[]) _nestedObjects);
}
struct.set("_tagged_fields", _taggedFields);
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
_size += 4;
_size += 2;
if (errorMessage == null) {
_size += 1;
} else {
byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'errorMessage' field is too long to be serialized");
}
_cache.cacheSerializedValue(errorMessage, _stringBytes);
_size += _stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1);
}
{
int _arraySize = 0;
_arraySize += ByteUtils.sizeOfUnsignedVarint(responses.size() + 1);
for (ReassignableTopicResponse responsesElement : responses) {
_arraySize += responsesElement.size(_cache, _version);
}
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
_size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AlterPartitionReassignmentsResponseData)) return false;
AlterPartitionReassignmentsResponseData other = (AlterPartitionReassignmentsResponseData) obj;
if (throttleTimeMs != other.throttleTimeMs) return false;
if (errorCode != other.errorCode) return false;
if (this.errorMessage == null) {
if (other.errorMessage != null) return false;
} else {
if (!this.errorMessage.equals(other.errorMessage)) return false;
}
if (this.responses == null) {
if (other.responses != null) return false;
} else {
if (!this.responses.equals(other.responses)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + throttleTimeMs;
hashCode = 31 * hashCode + errorCode;
hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode());
hashCode = 31 * hashCode + (responses == null ? 0 : responses.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AlterPartitionReassignmentsResponseData("
+ "throttleTimeMs=" + throttleTimeMs
+ ", errorCode=" + errorCode
+ ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'")
+ ", responses=" + MessageUtil.deepToString(responses.iterator())
+ ")";
}
public int throttleTimeMs() {
return this.throttleTimeMs;
}
public short errorCode() {
return this.errorCode;
}
public String errorMessage() {
return this.errorMessage;
}
public List<ReassignableTopicResponse> responses() {
return this.responses;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AlterPartitionReassignmentsResponseData setThrottleTimeMs(int v) {
this.throttleTimeMs = v;
return this;
}
public AlterPartitionReassignmentsResponseData setErrorCode(short v) {
this.errorCode = v;
return this;
}
public AlterPartitionReassignmentsResponseData setErrorMessage(String v) {
this.errorMessage = v;
return this;
}
public AlterPartitionReassignmentsResponseData setResponses(List<ReassignableTopicResponse> v) {
this.responses = v;
return this;
}
static public class ReassignableTopicResponse implements Message {
private String name;
private List<ReassignablePartitionResponse> partitions;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("name", Type.COMPACT_STRING, "The topic name"),
new Field("partitions", new CompactArrayOf(ReassignablePartitionResponse.SCHEMA_0), "The responses to partitions to reassign"),
TaggedFieldsSection.of(
)
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public ReassignableTopicResponse(Readable _readable, short _version) {
read(_readable, _version);
}
public ReassignableTopicResponse(Struct struct, short _version) {
fromStruct(struct, _version);
}
public ReassignableTopicResponse() {
this.name = "";
this.partitions = new ArrayList<ReassignablePartitionResponse>();
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignableTopicResponse");
}
{
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
throw new RuntimeException("non-nullable field name was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field name had invalid length " + length);
} else {
this.name = _readable.readString(length);
}
}
{
int arrayLength;
arrayLength = _readable.readUnsignedVarint() - 1;
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field partitions was serialized as null");
} else {
ArrayList<ReassignablePartitionResponse> newCollection = new ArrayList<ReassignablePartitionResponse>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new ReassignablePartitionResponse(_readable, _version));
}
this.partitions = newCollection;
}
}
this._unknownTaggedFields = null;
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignableTopicResponse");
}
int _numTaggedFields = 0;
{
byte[] _stringBytes = _cache.getSerializedValue(name);
_writable.writeUnsignedVarint(_stringBytes.length + 1);
_writable.writeByteArray(_stringBytes);
}
_writable.writeUnsignedVarint(partitions.size() + 1);
for (ReassignablePartitionResponse partitionsElement : partitions) {
partitionsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignableTopicResponse");
}
NavigableMap<Integer, Object> _taggedFields = null;
this._unknownTaggedFields = null;
_taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
this.name = struct.getString("name");
{
Object[] _nestedObjects = struct.getArray("partitions");
this.partitions = new ArrayList<ReassignablePartitionResponse>(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.partitions.add(new ReassignablePartitionResponse((Struct) nestedObject, _version));
}
}
if (!_taggedFields.isEmpty()) {
this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
}
}
}
@Override
public Struct toStruct(short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignableTopicResponse");
}
TreeMap<Integer, Object> _taggedFields = null;
_taggedFields = new TreeMap<>();
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("name", this.name);
{
Struct[] _nestedObjects = new Struct[partitions.size()];
int i = 0;
for (ReassignablePartitionResponse element : this.partitions) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("partitions", (Object[]) _nestedObjects);
}
struct.set("_tagged_fields", _taggedFields);
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 0) {
throw new UnsupportedVersionException("Can't size version " + _version + " of ReassignableTopicResponse");
}
{
byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'name' field is too long to be serialized");
}
_cache.cacheSerializedValue(name, _stringBytes);
_size += _stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1);
}
{
int _arraySize = 0;
_arraySize += ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1);
for (ReassignablePartitionResponse partitionsElement : partitions) {
_arraySize += partitionsElement.size(_cache, _version);
}
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
_size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof ReassignableTopicResponse)) return false;
ReassignableTopicResponse other = (ReassignableTopicResponse) obj;
if (this.name == null) {
if (other.name != null) return false;
} else {
if (!this.name.equals(other.name)) return false;
}
if (this.partitions == null) {
if (other.partitions != null) return false;
} else {
if (!this.partitions.equals(other.partitions)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode());
return hashCode;
}
@Override
public String toString() {
return "ReassignableTopicResponse("
+ "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
+ ", partitions=" + MessageUtil.deepToString(partitions.iterator())
+ ")";
}
public String name() {
return this.name;
}
public List<ReassignablePartitionResponse> partitions() {
return this.partitions;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public ReassignableTopicResponse setName(String v) {
this.name = v;
return this;
}
public ReassignableTopicResponse setPartitions(List<ReassignablePartitionResponse> v) {
this.partitions = v;
return this;
}
}
static public class ReassignablePartitionResponse implements Message {
private int partitionIndex;
private short errorCode;
private String errorMessage;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("partition_index", Type.INT32, "The partition index."),
new Field("error_code", Type.INT16, "The error code for this partition, or 0 if there was no error."),
new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The error message for this partition, or null if there was no error."),
TaggedFieldsSection.of(
)
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public ReassignablePartitionResponse(Readable _readable, short _version) {
read(_readable, _version);
}
public ReassignablePartitionResponse(Struct struct, short _version) {
fromStruct(struct, _version);
}
public ReassignablePartitionResponse() {
this.partitionIndex = 0;
this.errorCode = (short) 0;
this.errorMessage = "";
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignablePartitionResponse");
}
this.partitionIndex = _readable.readInt();
this.errorCode = _readable.readShort();
{
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
this.errorMessage = null;
} else if (length > 0x7fff) {
throw new RuntimeException("string field errorMessage had invalid length " + length);
} else {
this.errorMessage = _readable.readString(length);
}
}
this._unknownTaggedFields = null;
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignablePartitionResponse");
}
int _numTaggedFields = 0;
_writable.writeInt(partitionIndex);
_writable.writeShort(errorCode);
if (errorMessage == null) {
_writable.writeUnsignedVarint(0);
} else {
byte[] _stringBytes = _cache.getSerializedValue(errorMessage);
_writable.writeUnsignedVarint(_stringBytes.length + 1);
_writable.writeByteArray(_stringBytes);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignablePartitionResponse");
}
NavigableMap<Integer, Object> _taggedFields = null;
this._unknownTaggedFields = null;
_taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
this.partitionIndex = struct.getInt("partition_index");
this.errorCode = struct.getShort("error_code");
this.errorMessage = struct.getString("error_message");
if (!_taggedFields.isEmpty()) {
this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
}
}
}
@Override
public Struct toStruct(short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignablePartitionResponse");
}
TreeMap<Integer, Object> _taggedFields = null;
_taggedFields = new TreeMap<>();
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("partition_index", this.partitionIndex);
struct.set("error_code", this.errorCode);
struct.set("error_message", this.errorMessage);
struct.set("_tagged_fields", _taggedFields);
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 0) {
throw new UnsupportedVersionException("Can't size version " + _version + " of ReassignablePartitionResponse");
}
_size += 4;
_size += 2;
if (errorMessage == null) {
_size += 1;
} else {
byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'errorMessage' field is too long to be serialized");
}
_cache.cacheSerializedValue(errorMessage, _stringBytes);
_size += _stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1);
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
_size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof ReassignablePartitionResponse)) return false;
ReassignablePartitionResponse other = (ReassignablePartitionResponse) obj;
if (partitionIndex != other.partitionIndex) return false;
if (errorCode != other.errorCode) return false;
if (this.errorMessage == null) {
if (other.errorMessage != null) return false;
} else {
if (!this.errorMessage.equals(other.errorMessage)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + partitionIndex;
hashCode = 31 * hashCode + errorCode;
hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode());
return hashCode;
}
@Override
public String toString() {
return "ReassignablePartitionResponse("
+ "partitionIndex=" + partitionIndex
+ ", errorCode=" + errorCode
+ ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'")
+ ")";
}
public int partitionIndex() {
return this.partitionIndex;
}
public short errorCode() {
return this.errorCode;
}
public String errorMessage() {
return this.errorMessage;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public ReassignablePartitionResponse setPartitionIndex(int v) {
this.partitionIndex = v;
return this;
}
public ReassignablePartitionResponse setErrorCode(short v) {
this.errorCode = v;
return this;
}
public ReassignablePartitionResponse setErrorMessage(String v) {
this.errorMessage = v;
return this;
}
}
}
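A sketch of walking a decoded response of the class above for failures (not part of the diff; Errors.forCode is upstream Kafka's mapping from wire error codes to the Errors enum):

import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData;
import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignablePartitionResponse;
import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignableTopicResponse;
import org.apache.kafka.common.protocol.Errors;

public class ReassignmentFailures {
    static void logFailures(AlterPartitionReassignmentsResponseData response) {
        // A non-zero top-level code means the whole request failed.
        if (response.errorCode() != Errors.NONE.code()) {
            System.err.println("request failed: " + Errors.forCode(response.errorCode())
                    + " (" + response.errorMessage() + ")");
            return;
        }
        for (ReassignableTopicResponse topic : response.responses()) {
            for (ReassignablePartitionResponse partition : topic.partitions()) {
                if (partition.errorCode() != Errors.NONE.code()) {
                    System.err.printf("%s-%d failed: %s (%s)%n",
                            topic.name(), partition.partitionIndex(),
                            Errors.forCode(partition.errorCode()), partition.errorMessage());
                }
            }
        }
    }
}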

768
org/apache/kafka/common/message/AlterReplicaLogDirsRequestData.java
View File

@@ -0,0 +1,768 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.ImplicitLinkedHashCollection;
import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection;
public class AlterReplicaLogDirsRequestData implements ApiMessage {
private AlterReplicaLogDirCollection dirs;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("dirs", new ArrayOf(AlterReplicaLogDir.SCHEMA_0), "The alterations to make for each directory.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AlterReplicaLogDirsRequestData(Readable _readable, short _version) {
read(_readable, _version);
}
public AlterReplicaLogDirsRequestData(Struct struct, short _version) {
fromStruct(struct, _version);
}
public AlterReplicaLogDirsRequestData() {
this.dirs = new AlterReplicaLogDirCollection(0);
}
@Override
public short apiKey() {
return 34;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field dirs was serialized as null");
} else {
AlterReplicaLogDirCollection newCollection = new AlterReplicaLogDirCollection(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new AlterReplicaLogDir(_readable, _version));
}
this.dirs = newCollection;
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeInt(dirs.size());
for (AlterReplicaLogDir dirsElement : dirs) {
dirsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
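// Versions 0-1 of this message are not flexible, so tagged fields cannot be written; their presence is an error.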
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
this._unknownTaggedFields = null;
{
Object[] _nestedObjects = struct.getArray("dirs");
this.dirs = new AlterReplicaLogDirCollection(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.dirs.add(new AlterReplicaLogDir((Struct) nestedObject, _version));
}
}
}
@Override
public Struct toStruct(short _version) {
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
{
Struct[] _nestedObjects = new Struct[dirs.size()];
int i = 0;
for (AlterReplicaLogDir element : this.dirs) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("dirs", (Object[]) _nestedObjects);
}
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
{
int _arraySize = 0;
_arraySize += 4;
for (AlterReplicaLogDir dirsElement : dirs) {
_arraySize += dirsElement.size(_cache, _version);
}
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AlterReplicaLogDirsRequestData)) return false;
AlterReplicaLogDirsRequestData other = (AlterReplicaLogDirsRequestData) obj;
if (this.dirs == null) {
if (other.dirs != null) return false;
} else {
if (!this.dirs.equals(other.dirs)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (dirs == null ? 0 : dirs.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AlterReplicaLogDirsRequestData("
+ "dirs=" + MessageUtil.deepToString(dirs.iterator())
+ ")";
}
public AlterReplicaLogDirCollection dirs() {
return this.dirs;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AlterReplicaLogDirsRequestData setDirs(AlterReplicaLogDirCollection v) {
this.dirs = v;
return this;
}
static public class AlterReplicaLogDir implements Message, ImplicitLinkedHashMultiCollection.Element {
private String path;
private AlterReplicaLogDirTopicCollection topics;
private List<RawTaggedField> _unknownTaggedFields;
private int next;
private int prev;
public static final Schema SCHEMA_0 =
new Schema(
new Field("path", Type.STRING, "The absolute directory path."),
new Field("topics", new ArrayOf(AlterReplicaLogDirTopic.SCHEMA_0), "The topics to add to the directory.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AlterReplicaLogDir(Readable _readable, short _version) {
read(_readable, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
public AlterReplicaLogDir(Struct struct, short _version) {
fromStruct(struct, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
public AlterReplicaLogDir() {
this.path = "";
this.topics = new AlterReplicaLogDirTopicCollection(0);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDir");
}
{
int length;
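// Non-flexible encoding: strings carry a two-byte signed length prefix; -1 would mean null, which this non-nullable field rejects.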
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field path was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field path had invalid length " + length);
} else {
this.path = _readable.readString(length);
}
}
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field topics was serialized as null");
} else {
AlterReplicaLogDirTopicCollection newCollection = new AlterReplicaLogDirTopicCollection(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new AlterReplicaLogDirTopic(_readable, _version));
}
this.topics = newCollection;
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDir");
}
int _numTaggedFields = 0;
{
byte[] _stringBytes = _cache.getSerializedValue(path);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
_writable.writeInt(topics.size());
for (AlterReplicaLogDirTopic topicsElement : topics) {
topicsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDir");
}
this._unknownTaggedFields = null;
this.path = struct.getString("path");
{
Object[] _nestedObjects = struct.getArray("topics");
this.topics = new AlterReplicaLogDirTopicCollection(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.topics.add(new AlterReplicaLogDirTopic((Struct) nestedObject, _version));
}
}
}
@Override
public Struct toStruct(short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDir");
}
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("path", this.path);
{
Struct[] _nestedObjects = new Struct[topics.size()];
int i = 0;
for (AlterReplicaLogDirTopic element : this.topics) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("topics", (Object[]) _nestedObjects);
}
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 1) {
throw new UnsupportedVersionException("Can't size version " + _version + " of AlterReplicaLogDir");
}
{
byte[] _stringBytes = path.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'path' field is too long to be serialized");
}
_cache.cacheSerializedValue(path, _stringBytes);
_size += _stringBytes.length + 2;
}
{
int _arraySize = 0;
_arraySize += 4;
for (AlterReplicaLogDirTopic topicsElement : topics) {
_arraySize += topicsElement.size(_cache, _version);
}
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AlterReplicaLogDir)) return false;
AlterReplicaLogDir other = (AlterReplicaLogDir) obj;
if (this.path == null) {
if (other.path != null) return false;
} else {
if (!this.path.equals(other.path)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (path == null ? 0 : path.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AlterReplicaLogDir("
+ "path=" + ((path == null) ? "null" : "'" + path.toString() + "'")
+ ", topics=" + MessageUtil.deepToString(topics.iterator())
+ ")";
}
public String path() {
return this.path;
}
public AlterReplicaLogDirTopicCollection topics() {
return this.topics;
}
@Override
public int next() {
return this.next;
}
@Override
public int prev() {
return this.prev;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AlterReplicaLogDir setPath(String v) {
this.path = v;
return this;
}
public AlterReplicaLogDir setTopics(AlterReplicaLogDirTopicCollection v) {
this.topics = v;
return this;
}
@Override
public void setNext(int v) {
this.next = v;
}
@Override
public void setPrev(int v) {
this.prev = v;
}
}
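// Per-topic entry of a log-dir move. Implements the intrusive-collection Element
// interface: 'prev' and 'next' are slot indexes maintained by the enclosing
// ImplicitLinkedHashMultiCollection rather than object references.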
public static class AlterReplicaLogDirTopic implements Message, ImplicitLinkedHashMultiCollection.Element {
private String name;
private List<Integer> partitions;
private List<RawTaggedField> _unknownTaggedFields;
private int next;
private int prev;
public static final Schema SCHEMA_0 =
new Schema(
new Field("name", Type.STRING, "The topic name."),
new Field("partitions", new ArrayOf(Type.INT32), "The partition indexes.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AlterReplicaLogDirTopic(Readable _readable, short _version) {
read(_readable, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
public AlterReplicaLogDirTopic(Struct struct, short _version) {
fromStruct(struct, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
public AlterReplicaLogDirTopic() {
this.name = "";
this.partitions = new ArrayList<Integer>();
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDirTopic");
}
{
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field name was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field name had invalid length " + length);
} else {
this.name = _readable.readString(length);
}
}
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field partitions was serialized as null");
} else {
ArrayList<Integer> newCollection = new ArrayList<Integer>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(_readable.readInt());
}
this.partitions = newCollection;
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDirTopic");
}
int _numTaggedFields = 0;
{
byte[] _stringBytes = _cache.getSerializedValue(name);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
_writable.writeInt(partitions.size());
for (Integer partitionsElement : partitions) {
_writable.writeInt(partitionsElement);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDirTopic");
}
this._unknownTaggedFields = null;
this.name = struct.getString("name");
{
Object[] _nestedObjects = struct.getArray("partitions");
this.partitions = new ArrayList<Integer>(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.partitions.add((Integer) nestedObject);
}
}
}
@Override
public Struct toStruct(short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDirTopic");
}
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("name", this.name);
{
Integer[] _nestedObjects = new Integer[partitions.size()];
int i = 0;
for (Integer element : this.partitions) {
_nestedObjects[i++] = element;
}
struct.set("partitions", (Object[]) _nestedObjects);
}
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 1) {
throw new UnsupportedVersionException("Can't size version " + _version + " of AlterReplicaLogDirTopic");
}
{
byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'name' field is too long to be serialized");
}
_cache.cacheSerializedValue(name, _stringBytes);
_size += _stringBytes.length + 2;
}
{
int _arraySize = 0;
_arraySize += 4;
_arraySize += partitions.size() * 4;
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AlterReplicaLogDirTopic)) return false;
AlterReplicaLogDirTopic other = (AlterReplicaLogDirTopic) obj;
if (this.name == null) {
if (other.name != null) return false;
} else {
if (!this.name.equals(other.name)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AlterReplicaLogDirTopic("
+ "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
+ ", partitions=" + MessageUtil.deepToString(partitions.iterator())
+ ")";
}
public String name() {
return this.name;
}
public List<Integer> partitions() {
return this.partitions;
}
@Override
public int next() {
return this.next;
}
@Override
public int prev() {
return this.prev;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AlterReplicaLogDirTopic setName(String v) {
this.name = v;
return this;
}
public AlterReplicaLogDirTopic setPartitions(List<Integer> v) {
this.partitions = v;
return this;
}
@Override
public void setNext(int v) {
this.next = v;
}
@Override
public void setPrev(int v) {
this.prev = v;
}
}
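// Multi-collection keyed by topic name: find() returns one matching element,
// findAll() returns every element whose name equals the key.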
public static class AlterReplicaLogDirTopicCollection extends ImplicitLinkedHashMultiCollection<AlterReplicaLogDirTopic> {
public AlterReplicaLogDirTopicCollection() {
super();
}
public AlterReplicaLogDirTopicCollection(int expectedNumElements) {
super(expectedNumElements);
}
public AlterReplicaLogDirTopicCollection(Iterator<AlterReplicaLogDirTopic> iterator) {
super(iterator);
}
public AlterReplicaLogDirTopic find(String name) {
AlterReplicaLogDirTopic _key = new AlterReplicaLogDirTopic();
_key.setName(name);
return find(_key);
}
public List<AlterReplicaLogDirTopic> findAll(String name) {
AlterReplicaLogDirTopic _key = new AlterReplicaLogDirTopic();
_key.setName(name);
return findAll(_key);
}
}
public static class AlterReplicaLogDirCollection extends ImplicitLinkedHashMultiCollection<AlterReplicaLogDir> {
public AlterReplicaLogDirCollection() {
super();
}
public AlterReplicaLogDirCollection(int expectedNumElements) {
super(expectedNumElements);
}
public AlterReplicaLogDirCollection(Iterator<AlterReplicaLogDir> iterator) {
super(iterator);
}
public AlterReplicaLogDir find(String path) {
AlterReplicaLogDir _key = new AlterReplicaLogDir();
_key.setPath(path);
return find(_key);
}
public List<AlterReplicaLogDir> findAll(String path) {
AlterReplicaLogDir _key = new AlterReplicaLogDir();
_key.setPath(path);
return findAll(_key);
}
}
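// Usage sketch (editor's illustration, not part of the generated file): building
// one log-dir entry that moves partition 0 of a topic. The topic name and log-dir
// path below are hypothetical; only setters visible above are used.
//
//   AlterReplicaLogDirTopicCollection topics = new AlterReplicaLogDirTopicCollection();
//   topics.add(new AlterReplicaLogDirTopic()
//       .setName("my-topic")                              // hypothetical topic
//       .setPartitions(java.util.Arrays.asList(0)));
//   AlterReplicaLogDir dir = new AlterReplicaLogDir()
//       .setPath("/data1/kafka-logs")                     // hypothetical log dir
//       .setTopics(topics);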
}

View File

@@ -0,0 +1,625 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
public class AlterReplicaLogDirsResponseData implements ApiMessage {
private int throttleTimeMs;
private List<AlterReplicaLogDirTopicResult> results;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
new Field("results", new ArrayOf(AlterReplicaLogDirTopicResult.SCHEMA_0), "The results for each topic.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AlterReplicaLogDirsResponseData(Readable _readable, short _version) {
read(_readable, _version);
}
public AlterReplicaLogDirsResponseData(Struct struct, short _version) {
fromStruct(struct, _version);
}
public AlterReplicaLogDirsResponseData() {
this.throttleTimeMs = 0;
this.results = new ArrayList<AlterReplicaLogDirTopicResult>();
}
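// API key 34 identifies AlterReplicaLogDirs in the Kafka protocol.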
@Override
public short apiKey() {
return 34;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
this.throttleTimeMs = _readable.readInt();
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field results was serialized as null");
} else {
ArrayList<AlterReplicaLogDirTopicResult> newCollection = new ArrayList<AlterReplicaLogDirTopicResult>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new AlterReplicaLogDirTopicResult(_readable, _version));
}
this.results = newCollection;
}
}
this._unknownTaggedFields = null;
}
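// Versions 0 and 1 share one schema: throttleTimeMs as an int32 followed by an
// int32-prefixed results array. Tagged fields are unsupported in these versions,
// so write() throws UnsupportedVersionException if any have been set.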
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeInt(throttleTimeMs);
_writable.writeInt(results.size());
for (AlterReplicaLogDirTopicResult resultsElement : results) {
resultsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
this._unknownTaggedFields = null;
this.throttleTimeMs = struct.getInt("throttle_time_ms");
{
Object[] _nestedObjects = struct.getArray("results");
this.results = new ArrayList<AlterReplicaLogDirTopicResult>(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.results.add(new AlterReplicaLogDirTopicResult((Struct) nestedObject, _version));
}
}
}
@Override
public Struct toStruct(short _version) {
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("throttle_time_ms", this.throttleTimeMs);
{
Struct[] _nestedObjects = new Struct[results.size()];
int i = 0;
for (AlterReplicaLogDirTopicResult element : this.results) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("results", (Object[]) _nestedObjects);
}
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
_size += 4;
{
int _arraySize = 0;
_arraySize += 4;
for (AlterReplicaLogDirTopicResult resultsElement : results) {
_arraySize += resultsElement.size(_cache, _version);
}
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AlterReplicaLogDirsResponseData)) return false;
AlterReplicaLogDirsResponseData other = (AlterReplicaLogDirsResponseData) obj;
if (throttleTimeMs != other.throttleTimeMs) return false;
if (this.results == null) {
if (other.results != null) return false;
} else {
if (!this.results.equals(other.results)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + throttleTimeMs;
hashCode = 31 * hashCode + (results == null ? 0 : results.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AlterReplicaLogDirsResponseData("
+ "throttleTimeMs=" + throttleTimeMs
+ ", results=" + MessageUtil.deepToString(results.iterator())
+ ")";
}
public int throttleTimeMs() {
return this.throttleTimeMs;
}
public List<AlterReplicaLogDirTopicResult> results() {
return this.results;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AlterReplicaLogDirsResponseData setThrottleTimeMs(int v) {
this.throttleTimeMs = v;
return this;
}
public AlterReplicaLogDirsResponseData setResults(List<AlterReplicaLogDirTopicResult> v) {
this.results = v;
return this;
}
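// Per-topic result wrapper: one entry for each topic named in the request, each
// carrying the per-partition outcomes.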
public static class AlterReplicaLogDirTopicResult implements Message {
private String topicName;
private List<AlterReplicaLogDirPartitionResult> partitions;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("topic_name", Type.STRING, "The name of the topic."),
new Field("partitions", new ArrayOf(AlterReplicaLogDirPartitionResult.SCHEMA_0), "The results for each partition.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AlterReplicaLogDirTopicResult(Readable _readable, short _version) {
read(_readable, _version);
}
public AlterReplicaLogDirTopicResult(Struct struct, short _version) {
fromStruct(struct, _version);
}
public AlterReplicaLogDirTopicResult() {
this.topicName = "";
this.partitions = new ArrayList<AlterReplicaLogDirPartitionResult>();
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDirTopicResult");
}
{
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field topicName was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field topicName had invalid length " + length);
} else {
this.topicName = _readable.readString(length);
}
}
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field partitions was serialized as null");
} else {
ArrayList<AlterReplicaLogDirPartitionResult> newCollection = new ArrayList<AlterReplicaLogDirPartitionResult>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new AlterReplicaLogDirPartitionResult(_readable, _version));
}
this.partitions = newCollection;
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDirTopicResult");
}
int _numTaggedFields = 0;
{
byte[] _stringBytes = _cache.getSerializedValue(topicName);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
_writable.writeInt(partitions.size());
for (AlterReplicaLogDirPartitionResult partitionsElement : partitions) {
partitionsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDirTopicResult");
}
this._unknownTaggedFields = null;
this.topicName = struct.getString("topic_name");
{
Object[] _nestedObjects = struct.getArray("partitions");
this.partitions = new ArrayList<AlterReplicaLogDirPartitionResult>(_nestedObjects.length);
for (Object nestedObject : _nestedObjects) {
this.partitions.add(new AlterReplicaLogDirPartitionResult((Struct) nestedObject, _version));
}
}
}
@Override
public Struct toStruct(short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDirTopicResult");
}
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("topic_name", this.topicName);
{
Struct[] _nestedObjects = new Struct[partitions.size()];
int i = 0;
for (AlterReplicaLogDirPartitionResult element : this.partitions) {
_nestedObjects[i++] = element.toStruct(_version);
}
struct.set("partitions", (Object[]) _nestedObjects);
}
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 1) {
throw new UnsupportedVersionException("Can't size version " + _version + " of AlterReplicaLogDirTopicResult");
}
{
byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'topicName' field is too long to be serialized");
}
_cache.cacheSerializedValue(topicName, _stringBytes);
_size += _stringBytes.length + 2;
}
{
int _arraySize = 0;
_arraySize += 4;
for (AlterReplicaLogDirPartitionResult partitionsElement : partitions) {
_arraySize += partitionsElement.size(_cache, _version);
}
_size += _arraySize;
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AlterReplicaLogDirTopicResult)) return false;
AlterReplicaLogDirTopicResult other = (AlterReplicaLogDirTopicResult) obj;
if (this.topicName == null) {
if (other.topicName != null) return false;
} else {
if (!this.topicName.equals(other.topicName)) return false;
}
if (this.partitions == null) {
if (other.partitions != null) return false;
} else {
if (!this.partitions.equals(other.partitions)) return false;
}
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (topicName == null ? 0 : topicName.hashCode());
hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode());
return hashCode;
}
@Override
public String toString() {
return "AlterReplicaLogDirTopicResult("
+ "topicName=" + ((topicName == null) ? "null" : "'" + topicName.toString() + "'")
+ ", partitions=" + MessageUtil.deepToString(partitions.iterator())
+ ")";
}
public String topicName() {
return this.topicName;
}
public List<AlterReplicaLogDirPartitionResult> partitions() {
return this.partitions;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AlterReplicaLogDirTopicResult setTopicName(String v) {
this.topicName = v;
return this;
}
public AlterReplicaLogDirTopicResult setPartitions(List<AlterReplicaLogDirPartitionResult> v) {
this.partitions = v;
return this;
}
}
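// Per-partition outcome: the partition index plus an int16 Kafka error code,
// where 0 means the replica move was accepted.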
public static class AlterReplicaLogDirPartitionResult implements Message {
private int partitionIndex;
private short errorCode;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("partition_index", Type.INT32, "The partition index."),
new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public AlterReplicaLogDirPartitionResult(Readable _readable, short _version) {
read(_readable, _version);
}
public AlterReplicaLogDirPartitionResult(Struct struct, short _version) {
fromStruct(struct, _version);
}
public AlterReplicaLogDirPartitionResult() {
this.partitionIndex = 0;
this.errorCode = (short) 0;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDirPartitionResult");
}
this.partitionIndex = _readable.readInt();
this.errorCode = _readable.readShort();
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDirPartitionResult");
}
int _numTaggedFields = 0;
_writable.writeInt(partitionIndex);
_writable.writeShort(errorCode);
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@SuppressWarnings("unchecked")
@Override
public void fromStruct(Struct struct, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDirPartitionResult");
}
this._unknownTaggedFields = null;
this.partitionIndex = struct.getInt("partition_index");
this.errorCode = struct.getShort("error_code");
}
@Override
public Struct toStruct(short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDirPartitionResult");
}
TreeMap<Integer, Object> _taggedFields = null;
Struct struct = new Struct(SCHEMAS[_version]);
struct.set("partition_index", this.partitionIndex);
struct.set("error_code", this.errorCode);
return struct;
}
@Override
public int size(ObjectSerializationCache _cache, short _version) {
int _size = 0, _numTaggedFields = 0;
if (_version > 1) {
throw new UnsupportedVersionException("Can't size version " + _version + " of AlterReplicaLogDirPartitionResult");
}
_size += 4;
_size += 2;
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
_size += _field.size();
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
return _size;
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof AlterReplicaLogDirPartitionResult)) return false;
AlterReplicaLogDirPartitionResult other = (AlterReplicaLogDirPartitionResult) obj;
if (partitionIndex != other.partitionIndex) return false;
if (errorCode != other.errorCode) return false;
return true;
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + partitionIndex;
hashCode = 31 * hashCode + errorCode;
return hashCode;
}
@Override
public String toString() {
return "AlterReplicaLogDirPartitionResult("
+ "partitionIndex=" + partitionIndex
+ ", errorCode=" + errorCode
+ ")";
}
public int partitionIndex() {
return this.partitionIndex;
}
public short errorCode() {
return this.errorCode;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public AlterReplicaLogDirPartitionResult setPartitionIndex(int v) {
this.partitionIndex = v;
return this;
}
public AlterReplicaLogDirPartitionResult setErrorCode(short v) {
this.errorCode = v;
return this;
}
}
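// Usage sketch (editor's illustration, not part of the generated file): walking
// the per-partition error codes, assuming 'response' is a decoded
// AlterReplicaLogDirsResponseData.
//
//   for (AlterReplicaLogDirTopicResult topic : response.results()) {
//       for (AlterReplicaLogDirPartitionResult partition : topic.partitions()) {
//           if (partition.errorCode() != 0) {
//               // move of topic.topicName() / partition.partitionIndex() failed
//           }
//       }
//   }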
}

Some files were not shown because too many files have changed in this diff.