Mirror of https://github.com/didi/KnowStreaming.git (synced 2025-12-24 20:22:12 +08:00)

Compare commits: v1.0.0...ve_kafka_g (3 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | d9ef728427 |  |
|  | f528567f5d |  |
|  | 7008677947 |  |
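The same range can be reproduced locally from a clone of the mirror; a minimal sketch using the refs listed above (assuming the ve_kafka_g branch still exists on the remote):

git clone https://github.com/didi/KnowStreaming.git
cd KnowStreaming
# Triple-dot shows commits reachable from either ref but not both, matching a compare view.
git log --oneline v1.0.0...origin/ve_kafka_g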
.gitignore (vendored): 224 diff lines
@@ -1,111 +1,113 @@
### Intellij ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm

*.iml

## Directory-based project format:
.idea/
# if you remove the above rule, at least ignore the following:

# User-specific stuff:
# .idea/workspace.xml
# .idea/tasks.xml
# .idea/dictionaries
# .idea/shelf

# Sensitive or high-churn files:
.idea/dataSources.ids
.idea/dataSources.xml
.idea/sqlDataSources.xml
.idea/dynamic.xml
.idea/uiDesigner.xml


# Mongo Explorer plugin:
.idea/mongoSettings.xml

## File-based project format:
*.ipr
*.iws

## Plugin-specific files:

# IntelliJ
/out/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties


### Java ###
*.class

# Mobile Tools for Java (J2ME)
.mtj.tmp/

# Package Files #
*.jar
*.war
*.ear

# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*


### OSX ###
.DS_Store
.AppleDouble
.LSOverride

# Icon must end with two \r
Icon


# Thumbnails
._*

# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns

# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk

/target
target/
*.log
*.log.*
*.bak
*.vscode
*/.vscode/*
*/.vscode
*/velocity.log*
*/*.log
*/*.log.*
web/node_modules/
web/node_modules/*
workspace.xml
/output/*
.gitversion
*/node_modules/*
web/src/main/resources/templates/*
*/out/*
*/dist/*
.DS_Store

### Intellij ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm

*.iml

## Directory-based project format:
.idea/
.gradle/
# if you remove the above rule, at least ignore the following:

# User-specific stuff:
# .idea/workspace.xml
# .idea/tasks.xml
# .idea/dictionaries
# .idea/shelf

# Sensitive or high-churn files:
.idea/dataSources.ids
.idea/dataSources.xml
.idea/sqlDataSources.xml
.idea/dynamic.xml
.idea/uiDesigner.xml


# Mongo Explorer plugin:
.idea/mongoSettings.xml

## File-based project format:
*.ipr
*.iws
*.iml
## Plugin-specific files:

# IntelliJ
/out/
build/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties


### Java ###
*.class

# Mobile Tools for Java (J2ME)
.mtj.tmp/

# Package Files #
*.jar
*.war
*.ear

# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*


### OSX ###
.DS_Store
.AppleDouble
.LSOverride

# Icon must end with two \r
Icon


# Thumbnails
._*

# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns

# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk

/target
target/
*.log
*.log.*
*.bak
*.vscode
*/.vscode/*
*/.vscode
*/velocity.log*
*/*.log
*/*.log.*
web/node_modules/
web/node_modules/*
workspace.xml
/output/*
.gitversion
*/node_modules/*
*/templates/*
*/out/*
*/dist/*
.DS_Store
APP_META/990-startapp.required.sh (executable file): 40 lines added
@@ -0,0 +1,40 @@
#!/bin/bash

SERVICE_PATH="/home/xiaoju/${APPNAME}"

#nginx logs ln
if [ ! -L /home/xiaoju/nginx/logs ]; then
    rm -rf /home/xiaoju/nginx/logs
    mkdir -p /home/xiaoju/data1/nginx-logs && \
    ln -s /home/xiaoju/data1/nginx-logs /home/xiaoju/nginx/logs
fi

if [ -f "/home/xiaoju/$APPNAME/.deploy/service.json" ]; then
    # cp service.json for nginx metric collect.
    su xiaoju -c "mkdir -p /home/xiaoju/nginx/.deploy && cp /home/xiaoju/$APPNAME/.deploy/service.json /home/xiaoju/nginx/.deploy"
fi

#tomcat logs ln
if [ ! -L /home/xiaoju/tomcat/logs ]; then
    rm -rf /home/xiaoju/tomcat/logs
    mkdir -p /home/xiaoju/data1/tomcat-logs && \
    ln -s /home/xiaoju/data1/tomcat-logs /home/xiaoju/tomcat/logs
fi

#application logs ln
if [ ! -L /home/xiaoju/${APPNAME}/logs ]; then
    mkdir -p /home/xiaoju/data1/${APPNAME}-logs && \
    ln -s /home/xiaoju/data1/${APPNAME}-logs /home/xiaoju/${APPNAME}/logs
fi

if [ ! -L /data1 ]; then
    ln -s /home/xiaoju/data1 /data1
fi

chown -R xiaoju.xiaoju /home/xiaoju/data1/
chown -R xiaoju.xiaoju /data1/

mkdir -p '/etc/odin-super-agent/'; echo 'consul-client' >> /etc/odin-super-agent/agents.deny; /home/odin/super-agent/data/install/consul-client/current/control stop

su xiaoju -c "cd $SERVICE_PATH && bash -x ./control.sh start"

/usr/bin/monit -c /etc/monitrc
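The Dockerfile below installs this script as a container init hook under /etc/container/init/, so it normally runs automatically at startup. For a manual smoke test, a hedged sketch; the APPNAME value is taken from the Dockerfile's ENV, and the paths follow the script above:

# Hypothetical by-hand invocation inside the container (init normally does this).
export APPNAME=service-discovery
bash -x /etc/container/init/990-startapp.required.sh
# The log directories should now be symlinks into /home/xiaoju/data1:
ls -l /home/xiaoju/nginx/logs /home/xiaoju/tomcat/logs /home/xiaoju/${APPNAME}/logs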
APP_META/990-stopapp.sh (executable file): 8 lines added
@@ -0,0 +1,8 @@
#!/bin/bash

SERVICE_PATH="/home/xiaoju/${APPNAME}"

/usr/bin/monit stop all

su xiaoju -c "cd $SERVICE_PATH && ./control.sh stop"
APP_META/Dockerfile (new file): 25 lines added
@@ -0,0 +1,25 @@
FROM registry.xiaojukeji.com/didionline/bigdatadatabus-didi-jdk11-tomcat-nginx-centos7:stable
MAINTAINER zhuyefeng <zhuyefeng@didichuxing.com>

ENV JAVA_HOME /usr/local/jdk-11.0.2
# TODO: set the module name
ENV APPNAME service-discovery

RUN mkdir -p /etc/container/prestop
ADD ./APP_META/nginx/conf/nginx.conf /home/xiaoju/nginx/conf/
ADD ./APP_META/monit/monitrc /etc/monitrc
#ADD ./APP_META/monit/nginx.cfg /etc/monit.d/
ADD ./APP_META/990-startapp.required.sh /etc/container/init/990-startapp.required.sh
ADD ./APP_META/990-stopapp.sh /etc/container/prestop/990-stopapp.sh

RUN mkdir -p /home/xiaoju/${APPNAME} && \
    # TODO: Tomcat container applications need the step below
    #mkdir -p /home/xiaoju/tomcat/webapps && \
    chmod 0700 /etc/monitrc && \
    chmod a+x /etc/container/init/990-startapp.required.sh && \
    chmod a+x /etc/container/prestop/990-stopapp.sh

COPY ./home-xiaoju-${APPNAME} /home/xiaoju/${APPNAME}

# TODO: Tomcat container applications need the step below
#RUN ln -s /home/xiaoju/${APPNAME} /home/xiaoju/tomcat/webapps/
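Since the Dockerfile ADDs files via ./APP_META/... paths and COPYs ./home-xiaoju-${APPNAME}, the build context must be the directory containing both. A minimal build sketch; the image tag is illustrative, and the payload directory is assumed to have been assembled beforehand by the deploy pipeline:

# Hypothetical build from the repository root; -f picks the nested Dockerfile.
docker build -f APP_META/Dockerfile -t knowstreaming/service-discovery:dev .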
APP_META/monit/monitrc (executable file): 13 lines added
@@ -0,0 +1,13 @@
set daemon 10            # check services at 10 seconds intervals
set log syslog

set httpd port 2812 and
    use address localhost  # only accept connection from localhost
    allow localhost        # allow localhost to connect to the server and
    allow admin:monit      # require user 'admin' with password 'monit'
    #with ssl {            # enable SSL/TLS and set path to server certificate
    #    pemfile: /etc/ssl/certs/monit.pem
    #}

include /etc/monit.d/*
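The trailing include picks up per-service checks from /etc/monit.d/ (the Dockerfile above keeps an ADD for such a file commented out). A hedged sketch of what one could look like, using the pid file and listen port from the nginx.conf in this change; the file name, binary path, and check body are assumptions, not part of the repo:

# Hypothetical service check consumed by "include /etc/monit.d/*".
cat > /etc/monit.d/nginx.cfg <<'EOF'
check process nginx with pidfile /home/xiaoju/nginx/run/nginx.pid
  start program = "/home/xiaoju/nginx/sbin/nginx"
  stop program = "/home/xiaoju/nginx/sbin/nginx -s stop"
  if failed host localhost port 8080 then restart
EOF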
APP_META/nginx/conf/nginx.conf (new file): 127 lines added
@@ -0,0 +1,127 @@
#user xiaoju xiaoju;

worker_rlimit_nofile 204800;
worker_processes 4;
error_log /home/xiaoju/nginx/logs/error.log;
pid /home/xiaoju/nginx/run/nginx.pid;

# Load dynamic modules. See /usr/share/nginx/README.dynamic.
include /home/xiaoju/nginx/modules/*.conf;


events {
    use epoll;
    worker_connections 204800;

    accept_mutex on;
    accept_mutex_delay 5ms;
    multi_accept on;
}


http {
    include mime.types;
    default_type application/octet-stream;

    server_names_hash_bucket_size 128;
    #server_tag off;
    #server_info off;
    server_tokens off;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;

    fastcgi_connect_timeout 5;
    fastcgi_send_timeout 10;
    fastcgi_read_timeout 10;
    fastcgi_buffer_size 64k;
    fastcgi_buffers 4 64k;
    fastcgi_busy_buffers_size 128k;
    fastcgi_temp_file_write_size 128k;

    keepalive_timeout 60;
    keepalive_requests 1024;
    client_header_buffer_size 4k;
    large_client_header_buffers 4 32k;
    client_max_body_size 10m;

    client_body_buffer_size 512k;
    client_body_timeout 30;
    client_header_timeout 10;
    send_timeout 240;

    proxy_connect_timeout 10s;
    proxy_send_timeout 15s;
    proxy_read_timeout 15s;
    proxy_buffers 64 8k;
    proxy_busy_buffers_size 128k;
    proxy_temp_file_write_size 64k;
    proxy_redirect off;
    #proxy_upstream_tries 2;
    proxy_next_upstream error invalid_header timeout http_502 http_504;

    gzip on;
    gzip_min_length 1k;
    gzip_buffers 4 16k;
    gzip_http_version 1.0;
    gzip_comp_level 2;
    gzip_types text/plain application/x-javascript text/css text/xml application/xml+css application/json text/javascript;
    gzip_vary on;

    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Real-Port $remote_port;
    proxy_set_header Host $http_host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_pass_header Server;

    #operationid on;
    #operationid_header didi-header-rid;
    #operationid_eth eth0;
    #proxy_set_header didi-header-rid $operationid;

    log_format main '$server_addr\t$host\t'
                    '$remote_addr\t$http_x_forwarded_for\t'
                    '$time_local\t'
                    '$scheme\t$request\t'
                    '$status\t$upstream_status\t'
                    '$request_time\t$upstream_addr\t$upstream_response_time\t'
                    '$request_length\t$bytes_sent\t'
                    '$http_referer\t$http_cookie\t$http_user_agent\t'
                    '$limit_rate\t$http_didi_header_omgid\t$remote_port';


    set_real_ip_from 10.0.0.0/8;
    set_real_ip_from 100.64.0.0/10;
    real_ip_header X-Real-IP;

    server {
        listen 8080 backlog=4096;
        server_name localhost;
        access_log logs/access.log main;

        location = /status.do {
            access_log off;
            root /home/xiaoju/nginx/html;
        }

        location / {
            root html;
            index index.html index.htm;
            if ( $args !~ '^\?' ){
                proxy_pass http://127.0.0.1:8888;
            }
        }

        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root html;
        }
    }

    include conf.d/*.conf;
    # include servers/*.conf;
    # include server_conf/*.conf;
    # include upstream_conf/*.conf;
}
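The configuration can be syntax-checked in place before monit or the start script launches nginx; a sketch assuming the bundled binary lives under /home/xiaoju/nginx/sbin (the binary path is an assumption):

/home/xiaoju/nginx/sbin/nginx -t -c /home/xiaoju/nginx/conf/nginx.conf
# Probe the health-check location served by the 8080 server block.
curl -s http://localhost:8080/status.do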
@@ -1,28 +1,11 @@
# Contribution Guideline

Thanks for considering contributing to this project. All issues and pull requests are highly appreciated.

## Pull Requests

Before sending a pull request to this project, please read and follow the guidelines below.

1. Branch: We only accept pull requests on the `dev` branch.
2. Coding style: Follow the coding style used in kafka-manager.
3. Commit message: Use English and mind your spelling.
4. Test: Make sure to test your code.

If possible, add the device model, API version, related logs, screenshots, and other relevant information to your pull request.

NOTE: We assume all your contributions can be licensed under the [Apache License 2.0](LICENSE).

## Issues

We love clearly described issues. :)

The following information can help us resolve an issue faster:

* Device model and hardware information.
* API version.
* Logs.
* Screenshots.
* Steps to reproduce the issue.

## Contributing to Kafka

*Before opening a pull request*, review the [Contributing](https://kafka.apache.org/contributing.html) and [Contributing Code Changes](https://cwiki.apache.org/confluence/display/KAFKA/Contributing+Code+Changes) pages.
They list the steps required before creating a PR.

When you contribute code, you affirm that the contribution is your original work and that you
license the work to the project under the project's open source license. Whether or not you
state this explicitly, by submitting any copyrighted material via pull request, email, or
other means you agree to license the material under the project's open source license and
warrant that you have the legal authority to do so.
HEADER (new file): 14 lines added
@@ -0,0 +1,14 @@
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
LICENSE: 829 diff lines
@@ -1,433 +1,396 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

------------------------------------------------------------------------------------
This distribution has a binary dependency on jersey, which is available under the CDDL
License as described below.

COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL - Version 1.1)
1. Definitions.
1.1. “Contributor” means each individual or entity that creates or contributes to the creation of Modifications.

1.2. “Contributor Version” means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor.

1.3. “Covered Software” means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof.

1.4. “Executable” means the Covered Software in any form other than Source Code.

1.5. “Initial Developer” means the individual or entity that first makes Original Software available under this License.

1.6. “Larger Work” means a work which combines Covered Software or portions thereof with code not governed by the terms of this License.

1.7. “License” means this document.

1.8. “Licensable” means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein.

1.9. “Modifications” means the Source Code and Executable form of any of the following:

A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications;

B. Any new file that contains any part of the Original Software or previous Modification; or

C. Any new file that is contributed or otherwise made available under the terms of this License.

1.10. “Original Software” means the Source Code and Executable form of computer software code that is originally released under this License.

1.11. “Patent Claims” means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor.

1.12. “Source Code” means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code.

1.13. “You” (or “Your”) means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, “You” includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.

2. License Grants.
2.1. The Initial Developer Grant.

Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license:

(a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and

(b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof).

(c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License.

(d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices.

2.2. Contributor Grant.

Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:

(a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and

(b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination).

(c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party.

(d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor.

3. Distribution Obligations.
3.1. Availability of Source Code.

Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange.

3.2. Modifications.

The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License.

3.3. Required Notices.

You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer.

3.4. Application of Additional Terms.

You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients’ rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer.

3.5. Distribution of Executable Versions.

You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient’s rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer.

3.6. Larger Works.

You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software.

4. Versions of the License.
4.1. New Versions.

Oracle is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License.

4.2. Effect of New Versions.

You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward.

4.3. Modified Versions.

When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License.

5. DISCLAIMER OF WARRANTY.
COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN “AS IS” BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.

6. TERMINATION.
6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive.

6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as “Participant”) alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant.

6.3. If You assert a patent infringement claim against Participant alleging that the Participant Software directly or indirectly infringes any patent where such claim is resolved (such as by license or settlement) prior to the initiation of patent infringement litigation, then the reasonable value of the licenses granted by such Participant under Sections 2.1 or 2.2 shall be taken into account in determining the amount or value of any payment or license.

6.4. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination.

7. LIMITATION OF LIABILITY.
UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY’S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.

8. U.S. GOVERNMENT END USERS.
The Covered Software is a “commercial item,” as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of “commercial computer software” (as that term is defined at 48 C.F.R. § 252.227-7014(a)(1)) and “commercial computer software documentation” as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License.

9. MISCELLANEOUS.
This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction’s conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys’ fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software.

10. RESPONSIBILITY FOR CLAIMS.
As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability.

NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL)

The code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California.

------------------------------------------------------------------------------------
This distribution has a binary dependency on zstd, which is available under the BSD 3-Clause License as described below.

BSD License

For Zstandard software

Copyright (c) 2016-present, Facebook, Inc. All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

* Neither the name Facebook nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

------------------------------------------------------------------------------------
This distribution has a binary dependency on zstd-jni, which is available under the BSD 2-Clause License
as described below.

Zstd-jni: JNI bindings to Zstd Library

Copyright (c) 2015-2016, Luben Karavelov/ All rights reserved.

BSD License

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or
|
||||
other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
8
NOTICE
Normal file
@@ -0,0 +1,8 @@
Apache Kafka
Copyright 2020 The Apache Software Foundation.

This product includes software developed at
The Apache Software Foundation (https://www.apache.org/).

This distribution has a binary dependency on jersey, which is available under the CDDL
License. The source code of jersey can be found at https://github.com/jersey/jersey/.
14
PULL_REQUEST_TEMPLATE.md
Normal file
@@ -0,0 +1,14 @@
*More detailed description of your change, if necessary. The PR title and PR message become the squashed commit message, so use a separate comment to ping reviewers.*

*Summary of testing strategy (including rationale) for the feature or bug fix. Unit and/or integration tests are expected for any behaviour change and system tests should be considered for larger changes.*

### Committer Checklist (excluded from commit message)
- [ ] Verify design and implementation 
- [ ] Verify test coverage and CI build status
- [ ] Verify documentation (including upgrade notes)
321
README.md
@@ -1,115 +1,220 @@

---

**A one-stop `Apache Kafka` cluster metrics monitoring and operations management platform**

---

## Main Features

### Cluster Monitoring

- Multi-version cluster management, supporting versions from `0.10.2` to `2.4`;
- Historical and real-time key metrics for Topics, Brokers, and other cluster dimensions;

### Cluster Operations

- Cluster operations, including managing clusters through logical Regions;
- Broker operations, including preferred-replica election;
- Topic operations, including creation, querying, partition expansion, property modification, data sampling, and migration;
- Consumer-group operations, including resetting consume offsets to either a specified time or a specified offset;

### User Experience

- Separate views for administrator and ordinary users;
- Separate permissions for administrator and ordinary users;

Apache Kafka
=================
See our [web site](https://kafka.apache.org) for details on the project.

---

## kafka-manager Architecture

You need to have [Java](http://www.oracle.com/technetwork/java/javase/downloads/index.html) installed.

---

## Installation Guide

### Prerequisites

- `Maven 3.5.0+` (required to build the backend)
- `node v8.12.0+` (required to build the frontend)
- `Java 8+` (required at runtime)
- `MySQL` (data storage)

---

### Environment Initialization

Run the SQL commands in [create_mysql_table.sql](doc/create_mysql_table.sql) to create the required MySQL database and tables; the database name defaults to `kafka_manager`.

```
############# Example:
mysql -uXXXX -pXXX -h XXX.XXX.XXX.XXX -PXXXX < ./create_mysql_table.sql
```

---
Java 8 should be used for building in order to support both Java 8 and Java 11 at runtime.

### Packaging

Simply run `mvn install`.

Scala 2.12 is used by default, see below for how to use a different Scala version or all of the supported Scala versions.

Note: every `mvn install` run regenerates the latest frontend resources under `web/src/main/resources/templates`. If nothing under the `console` module has changed, you can edit `./pom.xml` to skip packaging the `console` module.

---

### Startup

```
############# application.yml is the configuration file
cp web/src/main/resources/application.yml web/target/
cd web/target/
nohup java -jar kafka-manager-web-1.0.0-SNAPSHOT.jar --spring.config.location=./application.yml > /dev/null 2>&1 &
```

### Usage

If started locally, visit `http://localhost:8080` and log in with your account and password. See the [kafka-manager user guide](doc/user_cn_guide.md) for more.

### Build a jar and run it ###
    ./gradlew jar

---

## Related Documentation

- [kafka-manager user guide](doc/user_cn_guide.md)

## DingTalk Group

Follow instructions in https://kafka.apache.org/documentation.html#quickstart

Search for group number `32821440`, or scan the QR code to join the discussion.

### Build source jar ###
    ./gradlew srcJar

## Project Members

### Core Members

`iceyuhui`, `liuyaguang`, `limengmonty`, `zhangliangmike`, `nullhuangyiming`, `zengqiao`, `eilenexuzhe`, `huangjiaweihjw`

### External Contributors

`fangjunyu`, `zhoutaiyang`

## License

`kafka-manager` is distributed and used under the `Apache-2.0` license. For more information, see the [license file](./LICENSE).
### Build aggregated javadoc ###
    ./gradlew aggregatedJavadoc

### Build javadoc and scaladoc ###
    ./gradlew javadoc
    ./gradlew javadocJar # builds a javadoc jar for each module
    ./gradlew scaladoc
    ./gradlew scaladocJar # builds a scaladoc jar for each module
    ./gradlew docsJar # builds both (if applicable) javadoc and scaladoc jars for each module

### Run unit/integration tests ###
    ./gradlew test # runs both unit and integration tests
    ./gradlew unitTest
    ./gradlew integrationTest

### Force re-running tests without code change ###
    ./gradlew cleanTest test
    ./gradlew cleanTest unitTest
    ./gradlew cleanTest integrationTest

### Running a particular unit/integration test ###
    ./gradlew clients:test --tests RequestResponseTest

### Running a particular test method within a unit/integration test ###
    ./gradlew core:test --tests kafka.api.ProducerFailureHandlingTest.testCannotSendToInternalTopic
    ./gradlew clients:test --tests org.apache.kafka.clients.MetadataTest.testMetadataUpdateWaitTime

### Running a particular unit/integration test with log4j output ###
Change the log4j setting in either `clients/src/test/resources/log4j.properties` or `core/src/test/resources/log4j.properties`

    ./gradlew clients:test --tests RequestResponseTest

### Generating test coverage reports ###
Generate coverage reports for the whole project:

    ./gradlew reportCoverage

Generate coverage for a single module, i.e.:

    ./gradlew clients:reportCoverage

### Building a binary release gzipped tar ball ###
    ./gradlew clean releaseTarGz

The above command will fail if you haven't set up the signing key. To bypass signing the artifact, you can run:

    ./gradlew clean releaseTarGz -x signArchives

The release file can be found inside `./core/build/distributions/`.

### Cleaning the build ###
    ./gradlew clean

### Running a task with one of the Scala versions available (2.12.x or 2.13.x) ###
*Note that if building the jars with a version other than 2.12.x, you need to set the `SCALA_VERSION` variable or change it in `bin/kafka-run-class.sh` to run the quick start.*

You can pass either the major version (eg 2.12) or the full version (eg 2.12.7):

    ./gradlew -PscalaVersion=2.12 jar
    ./gradlew -PscalaVersion=2.12 test
    ./gradlew -PscalaVersion=2.12 releaseTarGz
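For example, to run the quick start against jars built with a non-default Scala version, the variable can be set inline (a sketch; use the full Scala version your jars were actually built with, e.g. 2.13.1):

    SCALA_VERSION=2.13.1 ./bin/kafka-server-start.sh config/server.properties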
### Running a task with all the Scala versions enabled by default ###

Append `All` to the task name:

    ./gradlew testAll
    ./gradlew jarAll
    ./gradlew releaseTarGzAll

### Running a task for a specific project ###
This is for `core`, `examples` and `clients`:

    ./gradlew core:jar
    ./gradlew core:test

### Listing all gradle tasks ###
    ./gradlew tasks

### Building IDE project ###
*Note that this is not strictly necessary (IntelliJ IDEA has good built-in support for Gradle projects, for example).*

    ./gradlew eclipse
    ./gradlew idea

The `eclipse` task has been configured to use `${project_dir}/build_eclipse` as Eclipse's build directory. Eclipse's default build directory (`${project_dir}/bin`) clashes with Kafka's scripts directory, and we don't use Gradle's build directory to avoid known issues with this configuration.

### Publishing the jar for all versions of Scala and for all projects to maven ###
    ./gradlew uploadArchivesAll

Please note that for this to work you should create/update `${GRADLE_USER_HOME}/gradle.properties` (typically, `~/.gradle/gradle.properties`) and assign the following variables:

    mavenUrl=
    mavenUsername=
    mavenPassword=
    signing.keyId=
    signing.password=
    signing.secretKeyRingFile=

### Publishing the streams quickstart archetype artifact to maven ###
For the Streams archetype project, one cannot use gradle to upload to maven; instead the `mvn deploy` command needs to be called at the quickstart folder:

    cd streams/quickstart
    mvn deploy

Please note that for this to work you should create/update user maven settings (typically, `${USER_HOME}/.m2/settings.xml`) to assign the following variables:

    <settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
       xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
                           https://maven.apache.org/xsd/settings-1.0.0.xsd">
    ...
    <servers>
       ...
       <server>
          <id>apache.snapshots.https</id>
          <username>${maven_username}</username>
          <password>${maven_password}</password>
       </server>
       <server>
          <id>apache.releases.https</id>
          <username>${maven_username}</username>
          <password>${maven_password}</password>
       </server>
       ...
    </servers>
    ...

### Installing the jars to the local Maven repository ###
    ./gradlew installAll

### Building the test jar ###
    ./gradlew testJar

### Determining how transitive dependencies are added ###
    ./gradlew core:dependencies --configuration runtime

### Determining if any dependencies could be updated ###
    ./gradlew dependencyUpdates

### Running code quality checks ###
There are two code quality analysis tools that we regularly run: spotbugs and checkstyle.

#### Checkstyle ####
Checkstyle enforces a consistent coding style in Kafka. You can run checkstyle using:

    ./gradlew checkstyleMain checkstyleTest

The checkstyle warnings will be found in `reports/checkstyle/reports/main.html` and `reports/checkstyle/reports/test.html` files in the subproject build directories. They are also printed to the console. The build will fail if Checkstyle fails.

#### Spotbugs ####
Spotbugs uses static analysis to look for bugs in the code. You can run spotbugs using:

    ./gradlew spotbugsMain spotbugsTest -x test

The spotbugs warnings will be found in `reports/spotbugs/main.html` and `reports/spotbugs/test.html` files in the subproject build directories. Use `-PxmlSpotBugsReport=true` to generate an XML report instead of an HTML one.

### Common build options ###

The following options should be set with a `-P` switch, for example `./gradlew -PmaxParallelForks=1 test`.

* `commitId`: sets the build commit ID as .git/HEAD might not be correct if there are local commits added for build purposes.
* `mavenUrl`: sets the URL of the maven deployment repository (`file://path/to/repo` can be used to point to a local repository).
* `maxParallelForks`: limits the maximum number of processes for each task.
* `showStandardStreams`: shows standard out and standard error of the test JVM(s) on the console.
* `skipSigning`: skips signing of artifacts.
* `testLoggingEvents`: unit test events to be logged, separated by comma. For example `./gradlew -PtestLoggingEvents=started,passed,skipped,failed test`.
* `xmlSpotBugsReport`: enable XML reports for spotBugs. This also disables HTML reports as only one can be enabled at a time.

### Dependency Analysis ###

The gradle [dependency debugging documentation](https://docs.gradle.org/current/userguide/viewing_debugging_dependencies.html) mentions using the `dependencies` or `dependencyInsight` tasks to debug dependencies for the root project or individual subprojects.

Alternatively, use the `allDeps` or `allDepInsight` tasks for recursively iterating through all subprojects:

    ./gradlew allDeps

    ./gradlew allDepInsight --configuration runtime --dependency com.fasterxml.jackson.core:jackson-databind

These take the same arguments as the builtin variants.

### Running system tests ###

See [tests/README.md](tests/README.md).

### Running in Vagrant ###

See [vagrant/README.md](vagrant/README.md).

### Contribution ###

Apache Kafka is interested in building the community; we would welcome any thoughts or [patches](https://issues.apache.org/jira/browse/KAFKA). You can reach us [on the Apache mailing lists](http://kafka.apache.org/contact.html).

To contribute, follow the instructions here:
* https://kafka.apache.org/contributing.html
189
TROGDOR.md
Normal file
@@ -0,0 +1,189 @@
Trogdor
========================================
Trogdor is a test framework for Apache Kafka.

Trogdor can run benchmarks and other workloads. Trogdor can also inject faults in order to stress test the system.

Quickstart
=========================================================
First, we want to start a single-node Kafka cluster with a ZooKeeper and a broker.

Running ZooKeeper:

    > ./bin/zookeeper-server-start.sh ./config/zookeeper.properties &> /tmp/zookeeper.log &

Running Kafka:

    > ./bin/kafka-server-start.sh ./config/server.properties &> /tmp/kafka.log &

Then, we want to run a Trogdor Agent, plus a Trogdor Coordinator.

To run the Trogdor Agent:

    > ./bin/trogdor.sh agent -c ./config/trogdor.conf -n node0 &> /tmp/trogdor-agent.log &

To run the Trogdor Coordinator:

    > ./bin/trogdor.sh coordinator -c ./config/trogdor.conf -n node0 &> /tmp/trogdor-coordinator.log &

Let's confirm that all of the daemons are running:

    > jps
    116212 Coordinator
    115188 QuorumPeerMain
    116571 Jps
    115420 Kafka
    115694 Agent

Now, we can submit a test job to Trogdor.

    > ./bin/trogdor.sh client createTask -t localhost:8889 -i produce0 --spec ./tests/spec/simple_produce_bench.json
    Sent CreateTaskRequest for task produce0.

We can run showTask to see what the task's status is:

    > ./bin/trogdor.sh client showTask -t localhost:8889 -i produce0
    Task bar of type org.apache.kafka.trogdor.workload.ProduceBenchSpec is DONE. FINISHED at 2019-01-09T20:38:22.039-08:00 after 6s

To see the results, we use showTask with --show-status:

    > ./bin/trogdor.sh client showTask -t localhost:8889 -i produce0 --show-status
    Task bar of type org.apache.kafka.trogdor.workload.ProduceBenchSpec is DONE. FINISHED at 2019-01-09T20:38:22.039-08:00 after 6s
    Status: {
      "totalSent" : 50000,
      "averageLatencyMs" : 17.83388,
      "p50LatencyMs" : 12,
      "p95LatencyMs" : 75,
      "p99LatencyMs" : 96,
      "transactionsCommitted" : 0
    }

Trogdor Architecture
========================================
Trogdor has a single coordinator process which manages multiple agent processes. Each agent process is responsible for a single cluster node.

The Trogdor coordinator manages tasks. A task is anything we might want to do on a cluster, such as running a benchmark, injecting a fault, or running a workload. In order to implement each task, the coordinator creates workers on one or more agent nodes.

The Trogdor agent process implements the tasks. For example, when running a workload, the agent process is the process which produces and consumes messages.

Both the coordinator and the agent expose a REST interface that accepts objects serialized via JSON. There is also a command-line program which makes it easy to send messages to either one without manually crafting the JSON message body.
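As a sketch of what one of those raw JSON/REST calls could look like, the createTask request from the quickstart might be issued with `curl` as below. The `/coordinator/task/create` path and the `{"id", "spec"}` request shape are assumptions for illustration; in practice `./bin/trogdor.sh client` builds these requests for you.

    > curl -X POST -H "Content-Type: application/json" \
          -d "{\"id\": \"produce0\", \"spec\": $(cat ./tests/spec/simple_produce_bench.json)}" \
          http://localhost:8889/coordinator/task/create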
All Trogdor RPCs are idempotent except the shutdown requests. Sending an idempotent RPC twice in a row has the same effect as sending the RPC once.

Tasks
========================================
Tasks are described by specifications containing:

* A "class" field describing the task type. This contains a full Java class name.
* A "startMs" field describing when the task should start. This is given in terms of milliseconds since the UNIX epoch.
* A "durationMs" field describing how long the task should last. This is given in terms of milliseconds.
* Other fields which are task-specific.

The task specification is usually written as JSON. For example, this task specification describes a network partition separating nodes 1 and 2 from node 3:

    {
      "class": "org.apache.kafka.trogdor.fault.NetworkPartitionFaultSpec",
      "startMs": 1000,
      "durationMs": 30000,
      "partitions": [["node1", "node2"], ["node3"]]
    }

This next task specification runs a simple ProduceBench test on a cluster with one producer node, 5 topics, and 10,000 messages per second. The keys are generated sequentially and the configured partitioner (DefaultPartitioner) is used.

    {
      "class": "org.apache.kafka.trogdor.workload.ProduceBenchSpec",
      "durationMs": 10000000,
      "producerNode": "node0",
      "bootstrapServers": "localhost:9092",
      "targetMessagesPerSec": 10000,
      "maxMessages": 50000,
      "activeTopics": {
        "foo[1-3]": {
          "numPartitions": 10,
          "replicationFactor": 1
        }
      },
      "inactiveTopics": {
        "foo[4-5]": {
          "numPartitions": 10,
          "replicationFactor": 1
        }
      },
      "keyGenerator": {
        "type": "sequential",
        "size": 8,
        "offset": 1
      },
      "useConfiguredPartitioner": true
    }

Tasks are submitted to the coordinator. Once the coordinator determines that it is time for the task to start, it creates workers on agent processes. The workers run until the task is done.

Task specifications are immutable; they do not change after the task has been created.

Tasks can be in several states:
* PENDING, when the task is waiting to execute,
* RUNNING, when the task is running,
* STOPPING, when the task is in the process of stopping,
* DONE, when the task is done.

Tasks that are DONE also have an error field which will be set if the task failed.

Workloads
========================================
Trogdor can run several workloads. Workloads perform operations on the cluster and measure their performance. Workloads fail when the operations cannot be performed.

### ProduceBench
ProduceBench starts a Kafka producer on a single agent node, producing to several partitions. The workload measures the average produce latency, as well as the median, 95th percentile, and 99th percentile latency. It can be configured to use a transactional producer which can commit transactions based on a set time interval or number of messages.

### RoundTripWorkload
RoundTripWorkload tests both production and consumption. The workload starts a Kafka producer and consumer on a single node. The consumer will read back the messages that were produced by the producer.

### ConsumeBench
ConsumeBench starts one or more Kafka consumers on a single agent node. Depending on the passed-in configuration (see ConsumeBenchSpec), the consumers either subscribe to a set of topics (leveraging consumer group functionality and dynamic partition assignment) or manually assign partitions to themselves. The workload measures the average consume latency, as well as the median, 95th percentile, and 99th percentile latency.
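A ConsumeBench specification follows the same shape as the ProduceBench example above; here is a minimal sketch (the exact field set is an assumption modeled on that example, so check ConsumeBenchSpec for your version):

    {
      "class": "org.apache.kafka.trogdor.workload.ConsumeBenchSpec",
      "durationMs": 10000000,
      "consumerNode": "node0",
      "bootstrapServers": "localhost:9092",
      "maxMessages": 10000,
      "activeTopics": ["foo[1-3]"]
    }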
Faults
========================================
Trogdor can run several faults which deliberately break something in the cluster.

### ProcessStopFault
ProcessStopFault stops a process by sending it a SIGSTOP signal. When the fault ends, the process is resumed with SIGCONT.

### NetworkPartitionFault
NetworkPartitionFault sets up an artificial network partition between one or more sets of nodes. Currently, this is implemented using iptables. The iptables rules are set up on the outbound traffic from the affected nodes. Therefore, the affected nodes should still be reachable from outside the cluster.

External Processes
========================================
Trogdor supports running arbitrary commands in external processes. This is a generic way to run any configurable command in the Trogdor framework - be it a Python program, bash script, docker image, etc.

### ExternalCommandWorker
ExternalCommandWorker starts an external command defined by the ExternalCommandSpec. It essentially allows you to run any command on any Trogdor agent node. The worker communicates with the external process via its stdin, stdout and stderr in a JSON protocol. It uses stdout for any actionable communication and only logs what it sees in stderr. On startup the worker will first send a message describing the workload to the external process in this format:
```
{"id":<task ID string>, "workload":<configured workload JSON object>}
```
It will then listen for messages from the external process, again in a JSON format. Said JSON can contain the following fields:
- status: If the object contains this field, the status of the worker will be set to the given value.
- error: If the object contains this field, the error of the worker will be set to the given value. Once an error occurs, the external process will be terminated.
- log: If the object contains this field, a log message will be issued with this text.

An example:
```json
{"log": "Finished successfully.", "status": {"p99ProduceLatency": "100ms", "messagesSent": 10000}}
```
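To make the protocol concrete, here is a minimal sketch of an external process written as a shell script. The payload values are illustrative; a real worker would do its work where indicated:
```
#!/bin/sh
# Read the initial {"id": ..., "workload": ...} message from stdin.
read -r init_message
# Anything echoed to stdout as a JSON object is actionable for the worker.
echo '{"log": "external worker started"}'
# ... perform the actual workload here, using $init_message as needed ...
# A final status object is recorded as the worker status.
echo '{"status": {"messagesSent": 10000}}'
```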
Exec Mode
========================================
Sometimes, you just want to run a test quickly on a single node. In this case, you can use "exec mode." This mode allows you to run a single Trogdor Agent without a Coordinator.

When using exec mode, you must pass in a Task specification to use. The Agent will try to start this task.

For example:

    > ./bin/trogdor.sh agent -n node0 -c ./config/trogdor.conf --exec ./tests/spec/simple_produce_bench.json

When using exec mode, the Agent will exit once the task is complete.
199
Vagrantfile
vendored
Normal file
@@ -0,0 +1,199 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# -*- mode: ruby -*-
# vi: set ft=ruby :

require 'socket'

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

# General config
enable_dns = false
# Override to false when bringing up a cluster on AWS
enable_hostmanager = true
enable_jmx = false
num_zookeepers = 1
num_brokers = 3
num_workers = 0 # Generic workers that get the code, but don't start any services
ram_megabytes = 1280
base_box = "ubuntu/trusty64"

# EC2
ec2_access_key = ENV['AWS_ACCESS_KEY']
ec2_secret_key = ENV['AWS_SECRET_KEY']
ec2_session_token = ENV['AWS_SESSION_TOKEN']
ec2_keypair_name = nil
ec2_keypair_file = nil

ec2_region = "us-east-1"
ec2_az = nil # Use the default availability zone chosen by AWS
ec2_ami = "ami-29ebb519"
ec2_instance_type = "m3.medium"
ec2_spot_instance = ENV['SPOT_INSTANCE'] ? ENV['SPOT_INSTANCE'] == 'true' : true
ec2_spot_max_price = "0.113" # On-demand price for instance type
ec2_user = "ubuntu"
ec2_instance_name_prefix = "kafka-vagrant"
ec2_security_groups = nil
ec2_subnet_id = nil
# Only override this by setting it to false if you're running in a VPC and you
# are running Vagrant from within that VPC as well.
ec2_associate_public_ip = nil

jdk_major = '8'
jdk_full = '8u202-linux-x64'

local_config_file = File.join(File.dirname(__FILE__), "Vagrantfile.local")
if File.exists?(local_config_file) then
  eval(File.read(local_config_file), binding, "Vagrantfile.local")
end

# TODO(ksweeney): RAM requirements are not empirical and can probably be significantly lowered.
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.hostmanager.enabled = enable_hostmanager
  config.hostmanager.manage_host = enable_dns
  config.hostmanager.include_offline = false

  ## Provider-specific global configs
  config.vm.provider :virtualbox do |vb,override|
    override.vm.box = base_box

    override.hostmanager.ignore_private_ip = false

    # Brokers started with the standard script currently set Xms and Xmx to 1G,
    # plus we need some extra head room.
    vb.customize ["modifyvm", :id, "--memory", ram_megabytes.to_s]

    if Vagrant.has_plugin?("vagrant-cachier")
      override.cache.scope = :box
    end
  end

  config.vm.provider :aws do |aws,override|
    # The "box" is specified as an AMI
    override.vm.box = "dummy"
    override.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"

    cached_addresses = {}
    # Use a custom resolver that SSH's into the machine and finds the IP address
    # directly. This lets us get at the private IP address directly, avoiding
    # some issues with using the default IP resolver, which uses the public IP
    # address.
    override.hostmanager.ip_resolver = proc do |vm, resolving_vm|
      if !cached_addresses.has_key?(vm.name)
        state_id = vm.state.id
        if state_id != :not_created && state_id != :stopped && vm.communicate.ready?
          contents = ''
          vm.communicate.execute("/sbin/ifconfig eth0 | grep 'inet addr' | tail -n 1 | egrep -o '[0-9\.]+' | head -n 1 2>&1") do |type, data|
            contents << data
          end
          cached_addresses[vm.name] = contents.split("\n").first[/(\d+\.\d+\.\d+\.\d+)/, 1]
        else
          cached_addresses[vm.name] = nil
        end
      end
      cached_addresses[vm.name]
    end

    override.ssh.username = ec2_user
    override.ssh.private_key_path = ec2_keypair_file

    aws.access_key_id = ec2_access_key
    aws.secret_access_key = ec2_secret_key
    aws.session_token = ec2_session_token
    aws.keypair_name = ec2_keypair_name

    aws.region = ec2_region
    aws.availability_zone = ec2_az
    aws.instance_type = ec2_instance_type
    aws.ami = ec2_ami
    aws.security_groups = ec2_security_groups
    aws.subnet_id = ec2_subnet_id
    # If a subnet is specified, default to turning on a public IP unless the
    # user explicitly specifies the option. Without a public IP, Vagrant won't
    # be able to SSH into the hosts unless Vagrant is also running in the VPC.
    if ec2_associate_public_ip.nil?
      aws.associate_public_ip = true unless ec2_subnet_id.nil?
    else
      aws.associate_public_ip = ec2_associate_public_ip
    end
    aws.region_config ec2_region do |region|
      region.spot_instance = ec2_spot_instance
      region.spot_max_price = ec2_spot_max_price
    end

    # Exclude some directories that can grow very large from syncing
    override.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__exclude: ['.git', 'core/data/', 'logs/', 'tests/results/', 'results/']
  end

  def name_node(node, name, ec2_instance_name_prefix)
    node.vm.hostname = name
    node.vm.provider :aws do |aws|
      aws.tags = {
        'Name' => ec2_instance_name_prefix + "-" + Socket.gethostname + "-" + name,
        'JenkinsBuildUrl' => ENV['BUILD_URL']
      }
    end
  end

  def assign_local_ip(node, ip_address)
    node.vm.provider :virtualbox do |vb,override|
      override.vm.network :private_network, ip: ip_address
    end
  end

  ## Cluster definition
  zookeepers = []
  (1..num_zookeepers).each { |i|
    name = "zk" + i.to_s
    zookeepers.push(name)
    config.vm.define name do |zookeeper|
      name_node(zookeeper, name, ec2_instance_name_prefix)
      ip_address = "192.168.50." + (10 + i).to_s
      assign_local_ip(zookeeper, ip_address)
      zookeeper.vm.provision "shell", path: "vagrant/base.sh", env: {"JDK_MAJOR" => jdk_major, "JDK_FULL" => jdk_full}
      zk_jmx_port = enable_jmx ? (8000 + i).to_s : ""
      zookeeper.vm.provision "shell", path: "vagrant/zk.sh", :args => [i.to_s, num_zookeepers, zk_jmx_port]
    end
  }

  (1..num_brokers).each { |i|
    name = "broker" + i.to_s
    config.vm.define name do |broker|
      name_node(broker, name, ec2_instance_name_prefix)
      ip_address = "192.168.50." + (50 + i).to_s
      assign_local_ip(broker, ip_address)
      # We need to be careful about what we list as the publicly routable
      # address since this is registered in ZK and handed out to clients. If
      # host DNS isn't setup, we shouldn't use hostnames -- IP addresses must be
      # used to support clients running on the host.
      zookeeper_connect = zookeepers.map{ |zk_addr| zk_addr + ":2181"}.join(",")
      broker.vm.provision "shell", path: "vagrant/base.sh", env: {"JDK_MAJOR" => jdk_major, "JDK_FULL" => jdk_full}
      kafka_jmx_port = enable_jmx ? (9000 + i).to_s : ""
      broker.vm.provision "shell", path: "vagrant/broker.sh", :args => [i.to_s, enable_dns ? name : ip_address, zookeeper_connect, kafka_jmx_port]
    end
  }

  (1..num_workers).each { |i|
    name = "worker" + i.to_s
    config.vm.define name do |worker|
      name_node(worker, name, ec2_instance_name_prefix)
      ip_address = "192.168.50." + (100 + i).to_s
      assign_local_ip(worker, ip_address)
      worker.vm.provision "shell", path: "vagrant/base.sh", env: {"JDK_MAJOR" => jdk_major, "JDK_FULL" => jdk_full}
    end
  }

end
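With this Vagrantfile in place, the usual Vagrant flow brings up the ZooKeeper, broker, and worker VMs defined above (a sketch for the local VirtualBox provider; see vagrant/README.md for provider-specific setup such as AWS):

    > vagrant up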
45
bin/connect-distributed.sh
Executable file
@@ -0,0 +1,45 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ $# -lt 1 ];
then
    echo "USAGE: $0 [-daemon] connect-distributed.properties"
    exit 1
fi
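# Example invocation (an illustrative sketch; run from the distribution root,
# where the worker config ships as config/connect-distributed.properties):
#   ./bin/connect-distributed.sh -daemon config/connect-distributed.properties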
base_dir=$(dirname $0)

if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
    export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties"
fi

if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G"
fi

EXTRA_ARGS=${EXTRA_ARGS-'-name connectDistributed'}

COMMAND=$1
case $COMMAND in
  -daemon)
    EXTRA_ARGS="-daemon "$EXTRA_ARGS
    shift
    ;;
  *)
    ;;
esac

exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectDistributed "$@"
45
bin/connect-mirror-maker.sh
Executable file
@@ -0,0 +1,45 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ $# -lt 1 ];
then
    echo "USAGE: $0 [-daemon] mm2.properties"
    exit 1
fi

base_dir=$(dirname $0)

if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
    export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties"
fi

if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G"
fi

EXTRA_ARGS=${EXTRA_ARGS-'-name mirrorMaker'}

COMMAND=$1
case $COMMAND in
  -daemon)
    EXTRA_ARGS="-daemon "$EXTRA_ARGS
    shift
    ;;
  *)
    ;;
esac

exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.mirror.MirrorMaker "$@"
45
bin/connect-standalone.sh
Executable file
@@ -0,0 +1,45 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ $# -lt 1 ];
then
    echo "USAGE: $0 [-daemon] connect-standalone.properties"
    exit 1
fi

base_dir=$(dirname $0)

if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
    export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties"
fi

if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G"
fi

EXTRA_ARGS=${EXTRA_ARGS-'-name connectStandalone'}

COMMAND=$1
case $COMMAND in
  -daemon)
    EXTRA_ARGS="-daemon "$EXTRA_ARGS
    shift
    ;;
  *)
    ;;
esac

exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectStandalone "$@"
17
bin/kafka-acls.sh
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.admin.AclCommand "$@"
17
bin/kafka-broker-api-versions.sh
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.admin.BrokerApiVersionsCommand "$@"
17
bin/kafka-configs.sh
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.admin.ConfigCommand "$@"
21
bin/kafka-console-consumer.sh
Executable file
@@ -0,0 +1,21 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xmx512M"
fi
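
# Example (an illustrative sketch, consuming a topic from the beginning):
#   ./bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test --from-beginning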
exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsoleConsumer "$@"
20
bin/kafka-console-producer.sh
Executable file
@@ -0,0 +1,20 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xmx512M"
fi
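# Example (an illustrative sketch, producing console input to a topic):
#   ./bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test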
exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsoleProducer "$@"
17
bin/kafka-consumer-groups.sh
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.admin.ConsumerGroupCommand "$@"
20
bin/kafka-consumer-perf-test.sh
Executable file
@@ -0,0 +1,20 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xmx512M"
fi
exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsumerPerformance "$@"
17
bin/kafka-delegation-tokens.sh
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.admin.DelegationTokenCommand "$@"
17
bin/kafka-delete-records.sh
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.admin.DeleteRecordsCommand "$@"
18
bin/kafka-diskload-protector.sh
Executable file
@@ -0,0 +1,18 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.admin.DiskLoadProtectorCommand "$@"
17
bin/kafka-dump-log.sh
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.tools.DumpLogSegments "$@"
18
bin/kafka-exmetrics.sh
Executable file
@@ -0,0 +1,18 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.admin.KafkaExMetricsCommand "$@"
17
bin/kafka-leader-election.sh
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.admin.LeaderElectionCommand "$@"
17
bin/kafka-log-dirs.sh
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.admin.LogDirsCommand "$@"
17
bin/kafka-mirror-maker.sh
Executable file
17
bin/kafka-mirror-maker.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.tools.MirrorMaker "$@"
|
||||
17
bin/kafka-preferred-replica-election.sh
Executable file
17
bin/kafka-preferred-replica-election.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.admin.PreferredReplicaLeaderElectionCommand "$@"
|
||||
20
bin/kafka-producer-perf-test.sh
Executable file
@@ -0,0 +1,20 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
  export KAFKA_HEAP_OPTS="-Xmx512M"
fi
exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.ProducerPerformance "$@"
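A typical smoke test with the perf script above might look like this (topic name, record counts, and broker address are placeholders; the flags are the usual ProducerPerformance options):

bin/kafka-producer-perf-test.sh --topic perf-test --num-records 100000 \
  --record-size 100 --throughput -1 \
  --producer-props bootstrap.servers=localhost:9092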
17
bin/kafka-reassign-partitions.sh
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.admin.ReassignPartitionsCommand "$@"
17
bin/kafka-replica-verification.sh
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.tools.ReplicaVerificationTool "$@"
316
bin/kafka-run-class.sh
Executable file
@@ -0,0 +1,316 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ $# -lt 1 ];
then
  echo "USAGE: $0 [-daemon] [-name servicename] [-loggc] classname [opts]"
  exit 1
fi

# CYGWIN == 1 if Cygwin is detected, else 0.
if [[ $(uname -a) =~ "CYGWIN" ]]; then
  CYGWIN=1
else
  CYGWIN=0
fi

if [ -z "$INCLUDE_TEST_JARS" ]; then
  INCLUDE_TEST_JARS=false
fi

# Exclude jars not necessary for running commands.
regex="(-(test|test-sources|src|scaladoc|javadoc)\.jar|jar.asc)$"
should_include_file() {
  if [ "$INCLUDE_TEST_JARS" = true ]; then
    return 0
  fi
  file=$1
  if [ -z "$(echo "$file" | egrep "$regex")" ] ; then
    return 0
  else
    return 1
  fi
}

base_dir=$(dirname $0)/..

if [ -z "$SCALA_VERSION" ]; then
  SCALA_VERSION=2.12.10
fi

if [ -z "$SCALA_BINARY_VERSION" ]; then
  SCALA_BINARY_VERSION=$(echo $SCALA_VERSION | cut -f 1-2 -d '.')
fi

# run ./gradlew copyDependantLibs to get all dependant jars in a local dir
shopt -s nullglob
if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
  for dir in "$base_dir"/core/build/dependant-libs-${SCALA_VERSION}*;
  do
    CLASSPATH="$CLASSPATH:$dir/*"
  done
fi

for file in "$base_dir"/examples/build/libs/kafka-examples*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
  clients_lib_dir=$(dirname $0)/../clients/build/libs
  streams_lib_dir=$(dirname $0)/../streams/build/libs
  streams_dependant_clients_lib_dir=$(dirname $0)/../streams/build/dependant-libs-${SCALA_VERSION}
else
  clients_lib_dir=/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs
  streams_lib_dir=$clients_lib_dir
  streams_dependant_clients_lib_dir=$streams_lib_dir
fi


for file in "$clients_lib_dir"/kafka-clients*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

for file in "$streams_lib_dir"/kafka-streams*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
  for file in "$base_dir"/streams/examples/build/libs/kafka-streams-examples*.jar;
  do
    if should_include_file "$file"; then
      CLASSPATH="$CLASSPATH":"$file"
    fi
  done
else
  VERSION_NO_DOTS=`echo $UPGRADE_KAFKA_STREAMS_TEST_VERSION | sed 's/\.//g'`
  SHORT_VERSION_NO_DOTS=${VERSION_NO_DOTS:0:((${#VERSION_NO_DOTS} - 1))} # remove last char, ie, bug-fix number
  for file in "$base_dir"/streams/upgrade-system-tests-$SHORT_VERSION_NO_DOTS/build/libs/kafka-streams-upgrade-system-tests*.jar;
  do
    if should_include_file "$file"; then
      CLASSPATH="$file":"$CLASSPATH"
    fi
  done
  if [ "$SHORT_VERSION_NO_DOTS" = "0100" ]; then
    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.8.jar":"$CLASSPATH"
    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.6.jar":"$CLASSPATH"
  fi
  if [ "$SHORT_VERSION_NO_DOTS" = "0101" ]; then
    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.9.jar":"$CLASSPATH"
    CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.8.jar":"$CLASSPATH"
  fi
fi

for file in "$streams_dependant_clients_lib_dir"/rocksdb*.jar;
do
  CLASSPATH="$CLASSPATH":"$file"
done

for file in "$streams_dependant_clients_lib_dir"/*hamcrest*.jar;
do
  CLASSPATH="$CLASSPATH":"$file"
done

for file in "$base_dir"/tools/build/libs/kafka-tools*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

for dir in "$base_dir"/tools/build/dependant-libs-${SCALA_VERSION}*;
do
  CLASSPATH="$CLASSPATH:$dir/*"
done

for cc_pkg in "api" "transforms" "runtime" "file" "mirror" "mirror-client" "json" "tools" "basic-auth-extension"
do
  for file in "$base_dir"/connect/${cc_pkg}/build/libs/connect-${cc_pkg}*.jar;
  do
    if should_include_file "$file"; then
      CLASSPATH="$CLASSPATH":"$file"
    fi
  done
  if [ -d "$base_dir/connect/${cc_pkg}/build/dependant-libs" ] ; then
    CLASSPATH="$CLASSPATH:$base_dir/connect/${cc_pkg}/build/dependant-libs/*"
  fi
done

# classpath addition for release
for file in "$base_dir"/libs/*;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done

for file in "$base_dir"/core/build/libs/kafka_${SCALA_BINARY_VERSION}*.jar;
do
  if should_include_file "$file"; then
    CLASSPATH="$CLASSPATH":"$file"
  fi
done
shopt -u nullglob

if [ -z "$CLASSPATH" ] ; then
  echo "Classpath is empty. Please build the project first e.g. by running './gradlew jar -PscalaVersion=$SCALA_VERSION'"
  exit 1
fi

# JMX settings
if [ -z "$KAFKA_JMX_OPTS" ]; then
  KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false "
fi

# JMX port to use
if [ $JMX_PORT ]; then
  KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
fi

# Log directory to use
if [ "x$LOG_DIR" = "x" ]; then
  LOG_DIR="$base_dir/logs"
fi

# Log4j settings
if [ -z "$KAFKA_LOG4J_OPTS" ]; then
  # Log to console. This is a tool.
  LOG4J_DIR="$base_dir/config/tools-log4j.properties"
  # If Cygwin is detected, LOG4J_DIR is converted to Windows format.
  (( CYGWIN )) && LOG4J_DIR=$(cygpath --path --mixed "${LOG4J_DIR}")
  KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_DIR}"
else
  # create logs directory
  if [ ! -d "$LOG_DIR" ]; then
    mkdir -p "$LOG_DIR"
  fi
fi

# If Cygwin is detected, LOG_DIR is converted to Windows format.
(( CYGWIN )) && LOG_DIR=$(cygpath --path --mixed "${LOG_DIR}")
KAFKA_LOG4J_OPTS="-Dkafka.logs.dir=$LOG_DIR $KAFKA_LOG4J_OPTS"

# Generic jvm settings you want to add
if [ -z "$KAFKA_OPTS" ]; then
  KAFKA_OPTS=""
fi

# Set Debug options if enabled
if [ "x$KAFKA_DEBUG" != "x" ]; then

  # Use default ports
  DEFAULT_JAVA_DEBUG_PORT="5005"

  if [ -z "$JAVA_DEBUG_PORT" ]; then
    JAVA_DEBUG_PORT="$DEFAULT_JAVA_DEBUG_PORT"
  fi

  # Use the defaults if JAVA_DEBUG_OPTS was not set
  DEFAULT_JAVA_DEBUG_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=${DEBUG_SUSPEND_FLAG:-n},address=0.0.0.0:$JAVA_DEBUG_PORT"
  if [ -z "$JAVA_DEBUG_OPTS" ]; then
    JAVA_DEBUG_OPTS="$DEFAULT_JAVA_DEBUG_OPTS"
  fi

  echo "Enabling Java debug options: $JAVA_DEBUG_OPTS"
  KAFKA_OPTS="$JAVA_DEBUG_OPTS $KAFKA_OPTS"
fi

# Which java to use
if [ -z "$JAVA_HOME" ]; then
  JAVA="java"
else
  JAVA="$JAVA_HOME/bin/java"
fi

# Memory options
if [ -z "$KAFKA_HEAP_OPTS" ]; then
  KAFKA_HEAP_OPTS="-Xmx256M"
fi

# JVM performance options
# MaxInlineLevel=15 is the default since JDK 14 and can be removed once older JDKs are no longer supported
if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then
  KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:InitiatingHeapOccupancyPercent=35 -XX:G1HeapRegionSize=16m -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 -Djava.awt.headless=true"
fi

while [ $# -gt 0 ]; do
  COMMAND=$1
  case $COMMAND in
    -name)
      DAEMON_NAME=$2
      CONSOLE_OUTPUT_FILE=$LOG_DIR/$DAEMON_NAME.out
      shift 2
      ;;
    -loggc)
      if [ -z "$KAFKA_GC_LOG_OPTS" ]; then
        GC_LOG_ENABLED="true"
      fi
      shift
      ;;
    -daemon)
      DAEMON_MODE="true"
      shift
      ;;
    *)
      break
      ;;
  esac
done

# GC options
GC_FILE_SUFFIX='-gc.log'
GC_LOG_FILE_NAME=''
if [ "x$GC_LOG_ENABLED" = "xtrue" ]; then
  GC_LOG_FILE_NAME=$DAEMON_NAME$GC_FILE_SUFFIX

  # The first segment of the version number, which is '1' for releases before Java 9
  # it then becomes '9', '10', ...
  # Some examples of the first line of `java --version`:
  # 8 -> java version "1.8.0_152"
  # 9.0.4 -> java version "9.0.4"
  # 10 -> java version "10" 2018-03-20
  # 10.0.1 -> java version "10.0.1" 2018-04-17
  # We need to match to the end of the line to prevent sed from printing the characters that do not match
  JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
  if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then
    KAFKA_GC_LOG_OPTS="-Xlog:gc*:file=$LOG_DIR/$GC_LOG_FILE_NAME:time"
  else
    KAFKA_GC_LOG_OPTS="-Xloggc:$LOG_DIR/$GC_LOG_FILE_NAME -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
  fi
fi

# Remove a possible colon prefix from the classpath (happens at lines like `CLASSPATH="$CLASSPATH:$file"` when CLASSPATH is blank)
# Syntax used on the right side is native Bash string manipulation; for more details see
# http://tldp.org/LDP/abs/html/string-manipulation.html, specifically the section titled "Substring Removal"
CLASSPATH=${CLASSPATH#:}

# If Cygwin is detected, classpath is converted to Windows format.
(( CYGWIN )) && CLASSPATH=$(cygpath --path --mixed "${CLASSPATH}")

# Launch mode
if [ "x$DAEMON_MODE" = "xtrue" ]; then
  nohup $JAVA $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp $CLASSPATH $KAFKA_OPTS "$@" > "$CONSOLE_OUTPUT_FILE" 2>&1 < /dev/null &
else
  exec $JAVA $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp $CLASSPATH $KAFKA_OPTS "$@"
fi
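kafka-run-class.sh is the common launcher behind every wrapper above: it assembles the classpath, applies heap/JMX/log4j/GC settings, parses -daemon/-name/-loggc, and then either execs the JVM or backgrounds it with nohup. A sketch of driving it directly (class names are taken from the scripts in this diff; the log paths and ports are placeholders):

# Foreground run with a custom heap and a JMX port
KAFKA_HEAP_OPTS="-Xmx1G" JMX_PORT=9999 \
  bin/kafka-run-class.sh kafka.tools.DumpLogSegments --files /tmp/kafka-logs/test-0/00000000000000000000.log

# Daemonized run: stdout/stderr go to $LOG_DIR/myTool.out and GC logging is enabled
bin/kafka-run-class.sh -daemon -name myTool -loggc kafka.admin.TopicCommand --zookeeper localhost:2181 --list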
51
bin/kafka-server-start.sh
Executable file
@@ -0,0 +1,51 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ $# -lt 1 ];
then
  echo "USAGE: $0 [-daemon] server.properties [--override property=value]*"
  exit 1
fi
base_dir=$(dirname $0)

if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
  export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
fi

if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
  export KAFKA_HEAP_OPTS="-Xmx8G -Xms8G"
  export JMX_PORT=8099
  #export KAFKA_DEBUG=debug
  #export DAEMON_MODE=true
  export KAFKA_OPTS="-Djava.security.auth.login.config=$base_dir/../config/kafka_server_jaas.conf"
  export DEBUG_SUSPEND_FLAG="n"
  export JAVA_DEBUG_PORT="8096"
  export GC_LOG_ENABLED=true
fi

EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'}

COMMAND=$1
case $COMMAND in
  -daemon)
    EXTRA_ARGS="-daemon "$EXTRA_ARGS
    shift
    ;;
  *)
    ;;
esac

exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.ServiceDiscovery "$@"
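Note that unlike the upstream script, this fork's start script execs kafka.ServiceDiscovery rather than kafka.Kafka, and its fallback defaults (8G heap, JMX on port 8099, a JAAS login config, GC logging) look tuned for didi's own deployments. Assuming the entry point keeps the upstream argument contract, an invocation would be:

# Start as a background daemon, overriding a broker property on the command line
bin/kafka-server-start.sh -daemon config/server.properties --override broker.id=1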
24
bin/kafka-server-stop.sh
Executable file
@@ -0,0 +1,24 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SIGNAL=${SIGNAL:-TERM}
PIDS=$(ps ax | grep -i 'kafka\.ServiceDiscovery' | grep java | grep -v grep | awk '{print $1}')

if [ -z "$PIDS" ]; then
  echo "No kafka server to stop"
  exit 1
else
  kill -s $SIGNAL $PIDS
fi
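The stop script greps for the same kafka.ServiceDiscovery main class that the start script launches, and the signal it sends can be overridden through the SIGNAL environment variable:

bin/kafka-server-stop.sh              # graceful: sends TERM
SIGNAL=KILL bin/kafka-server-stop.sh  # force-kill if a graceful shutdown hangs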
21
bin/kafka-streams-application-reset.sh
Executable file
@@ -0,0 +1,21 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
  export KAFKA_HEAP_OPTS="-Xmx512M"
fi

exec $(dirname $0)/kafka-run-class.sh kafka.tools.StreamsResetter "$@"
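A usage sketch for the reset tool above (application id, topic, and broker address are placeholders; the flags are the standard StreamsResetter options, not verified against this fork):

bin/kafka-streams-application-reset.sh --application-id my-streams-app \
  --bootstrap-servers localhost:9092 --input-topics my-input-topic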
17
bin/kafka-topics.sh
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.admin.TopicCommand "$@"
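For reference, a create/describe round trip with the topic script above (assuming a local broker; TopicCommand in this Kafka line also still accepts --zookeeper):

bin/kafka-topics.sh --bootstrap-server localhost:9092 --create --topic test --partitions 3 --replication-factor 2
bin/kafka-topics.sh --bootstrap-server localhost:9092 --describe --topic test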
20
bin/kafka-verifiable-consumer.sh
Executable file
@@ -0,0 +1,20 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
  export KAFKA_HEAP_OPTS="-Xmx512M"
fi
exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.VerifiableConsumer "$@"
20
bin/kafka-verifiable-producer.sh
Executable file
@@ -0,0 +1,20 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
  export KAFKA_HEAP_OPTS="-Xmx512M"
fi
exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.VerifiableProducer "$@"
50
bin/trogdor.sh
Executable file
@@ -0,0 +1,50 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

usage() {
  cat <<EOF
The Trogdor fault injector.

Usage:
  $0 [action] [options]

Actions:
  agent: Run the trogdor agent.
  coordinator: Run the trogdor coordinator.
  client: Run the client which communicates with the trogdor coordinator.
  agent-client: Run the client which communicates with the trogdor agent.
  help: This help message.
EOF
}

if [[ $# -lt 1 ]]; then
  usage
  exit 0
fi
action="${1}"
shift
CLASS=""
case ${action} in
  agent) CLASS="org.apache.kafka.trogdor.agent.Agent";;
  coordinator) CLASS="org.apache.kafka.trogdor.coordinator.Coordinator";;
  client) CLASS="org.apache.kafka.trogdor.coordinator.CoordinatorClient";;
  agent-client) CLASS="org.apache.kafka.trogdor.agent.AgentClient";;
  help) usage; exit 0;;
  *) echo "Unknown action '${action}'. Type '$0 help' for help."; exit 1;;
esac

export INCLUDE_TEST_JARS=1
exec $(dirname $0)/kafka-run-class.sh "${CLASS}" "$@"
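Trogdor runs as one agent per node plus a coordinator. The invocation below follows the upstream TROGDOR.md convention (the config path and node name are placeholders):

bin/trogdor.sh agent -c config/trogdor.conf -n node0
bin/trogdor.sh coordinator -c config/trogdor.conf -n node0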
34
bin/windows/connect-distributed.bat
Normal file
@@ -0,0 +1,34 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

IF [%1] EQU [] (
    echo USAGE: %0 connect-distributed.properties
    EXIT /B 1
)

SetLocal
rem Using pushd popd to set BASE_DIR to the absolute path
pushd %~dp0..\..
set BASE_DIR=%CD%
popd

rem Log4j settings
IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
    set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/tools-log4j.properties
)

"%~dp0kafka-run-class.bat" org.apache.kafka.connect.cli.ConnectDistributed %*
EndLocal
34
bin/windows/connect-standalone.bat
Normal file
@@ -0,0 +1,34 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

IF [%1] EQU [] (
    echo USAGE: %0 connect-standalone.properties
    EXIT /B 1
)

SetLocal
rem Using pushd popd to set BASE_DIR to the absolute path
pushd %~dp0..\..
set BASE_DIR=%CD%
popd

rem Log4j settings
IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
    set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/tools-log4j.properties
)

"%~dp0kafka-run-class.bat" org.apache.kafka.connect.cli.ConnectStandalone %*
EndLocal
17
bin/windows/kafka-acls.bat
Normal file
@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

"%~dp0kafka-run-class.bat" kafka.admin.AclCommand %*
17
bin/windows/kafka-broker-api-versions.bat
Normal file
@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

%~dp0kafka-run-class.bat kafka.admin.BrokerApiVersionsCommand %*
17
bin/windows/kafka-configs.bat
Normal file
@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

"%~dp0kafka-run-class.bat" kafka.admin.ConfigCommand %*
20
bin/windows/kafka-console-consumer.bat
Normal file
@@ -0,0 +1,20 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

SetLocal
set KAFKA_HEAP_OPTS=-Xmx512M
"%~dp0kafka-run-class.bat" kafka.tools.ConsoleConsumer %*
EndLocal
20
bin/windows/kafka-console-producer.bat
Normal file
@@ -0,0 +1,20 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

SetLocal
set KAFKA_HEAP_OPTS=-Xmx512M
"%~dp0kafka-run-class.bat" kafka.tools.ConsoleProducer %*
EndLocal
17
bin/windows/kafka-consumer-groups.bat
Normal file
@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

"%~dp0kafka-run-class.bat" kafka.admin.ConsumerGroupCommand %*
20
bin/windows/kafka-consumer-perf-test.bat
Normal file
@@ -0,0 +1,20 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

SetLocal
set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M
"%~dp0kafka-run-class.bat" kafka.tools.ConsumerPerformance %*
EndLocal
17
bin/windows/kafka-delegation-tokens.bat
Normal file
@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

"%~dp0kafka-run-class.bat" kafka.admin.DelegationTokenCommand %*
17
bin/windows/kafka-delete-records.bat
Normal file
@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

"%~dp0kafka-run-class.bat" kafka.admin.DeleteRecordsCommand %*
17
bin/windows/kafka-dump-log.bat
Normal file
@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

"%~dp0kafka-run-class.bat" kafka.tools.DumpLogSegments %*
17
bin/windows/kafka-leader-election.bat
Normal file
@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

"%~dp0kafka-run-class.bat" kafka.admin.LeaderElectionCommand %*
17
bin/windows/kafka-log-dirs.bat
Normal file
@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

"%~dp0kafka-run-class.bat" kafka.admin.LogDirsCommand %*
17
bin/windows/kafka-mirror-maker.bat
Normal file
@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

"%~dp0kafka-run-class.bat" kafka.tools.MirrorMaker %*
17
bin/windows/kafka-preferred-replica-election.bat
Normal file
@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

"%~dp0kafka-run-class.bat" kafka.admin.PreferredReplicaLeaderElectionCommand %*
20
bin/windows/kafka-producer-perf-test.bat
Normal file
@@ -0,0 +1,20 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

SetLocal
set KAFKA_HEAP_OPTS=-Xmx512M
"%~dp0kafka-run-class.bat" org.apache.kafka.tools.ProducerPerformance %*
EndLocal
17
bin/windows/kafka-reassign-partitions.bat
Normal file
@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

"%~dp0kafka-run-class.bat" kafka.admin.ReassignPartitionsCommand %*
17
bin/windows/kafka-replica-verification.bat
Normal file
@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

"%~dp0kafka-run-class.bat" kafka.tools.ReplicaVerificationTool %*
191
bin/windows/kafka-run-class.bat
Executable file
191
bin/windows/kafka-run-class.bat
Executable file
@@ -0,0 +1,191 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
setlocal enabledelayedexpansion
|
||||
|
||||
IF [%1] EQU [] (
|
||||
echo USAGE: %0 classname [opts]
|
||||
EXIT /B 1
|
||||
)
|
||||
|
||||
rem Using pushd popd to set BASE_DIR to the absolute path
|
||||
pushd %~dp0..\..
|
||||
set BASE_DIR=%CD%
|
||||
popd
|
||||
|
||||
IF ["%SCALA_VERSION%"] EQU [""] (
|
||||
set SCALA_VERSION=2.12.10
|
||||
)
|
||||
|
||||
IF ["%SCALA_BINARY_VERSION%"] EQU [""] (
|
||||
for /f "tokens=1,2 delims=." %%a in ("%SCALA_VERSION%") do (
|
||||
set FIRST=%%a
|
||||
set SECOND=%%b
|
||||
if ["!SECOND!"] EQU [""] (
|
||||
set SCALA_BINARY_VERSION=!FIRST!
|
||||
) else (
|
||||
set SCALA_BINARY_VERSION=!FIRST!.!SECOND!
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
rem Classpath addition for kafka-core dependencies
|
||||
for %%i in ("%BASE_DIR%\core\build\dependant-libs-%SCALA_VERSION%\*.jar") do (
|
||||
call :concat "%%i"
|
||||
)
|
||||
|
||||
rem Classpath addition for kafka-examples
|
||||
for %%i in ("%BASE_DIR%\examples\build\libs\kafka-examples*.jar") do (
|
||||
call :concat "%%i"
|
||||
)
|
||||
|
||||
rem Classpath addition for kafka-clients
|
||||
for %%i in ("%BASE_DIR%\clients\build\libs\kafka-clients*.jar") do (
|
||||
call :concat "%%i"
|
||||
)
|
||||
|
||||
rem Classpath addition for kafka-streams
|
||||
for %%i in ("%BASE_DIR%\streams\build\libs\kafka-streams*.jar") do (
|
||||
call :concat "%%i"
|
||||
)
|
||||
|
||||
rem Classpath addition for kafka-streams-examples
|
||||
for %%i in ("%BASE_DIR%\streams\examples\build\libs\kafka-streams-examples*.jar") do (
|
||||
call :concat "%%i"
|
||||
)
for %%i in ("%BASE_DIR%\streams\build\dependant-libs-%SCALA_VERSION%\rocksdb*.jar") do (
	call :concat "%%i"
)

rem Classpath addition for kafka tools
for %%i in ("%BASE_DIR%\tools\build\libs\kafka-tools*.jar") do (
	call :concat "%%i"
)

for %%i in ("%BASE_DIR%\tools\build\dependant-libs-%SCALA_VERSION%\*.jar") do (
	call :concat "%%i"
)

for %%p in (api runtime file json tools) do (
	for %%i in ("%BASE_DIR%\connect\%%p\build\libs\connect-%%p*.jar") do (
		call :concat "%%i"
	)
	if exist "%BASE_DIR%\connect\%%p\build\dependant-libs\*" (
		call :concat "%BASE_DIR%\connect\%%p\build\dependant-libs\*"
	)
)

rem Classpath addition for release
for %%i in ("%BASE_DIR%\libs\*") do (
	call :concat "%%i"
)

rem Classpath addition for core
for %%i in ("%BASE_DIR%\core\build\libs\kafka_%SCALA_BINARY_VERSION%*.jar") do (
	call :concat "%%i"
)

rem JMX settings
IF ["%KAFKA_JMX_OPTS%"] EQU [""] (
	set KAFKA_JMX_OPTS=-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false
)

rem JMX port to use
IF ["%JMX_PORT%"] NEQ [""] (
	set KAFKA_JMX_OPTS=%KAFKA_JMX_OPTS% -Dcom.sun.management.jmxremote.port=%JMX_PORT%
)

rem Log directory to use
IF ["%LOG_DIR%"] EQU [""] (
	set LOG_DIR=%BASE_DIR%/logs
)

rem Log4j settings
IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
	set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/tools-log4j.properties
) ELSE (
	rem create logs directory
	IF not exist "%LOG_DIR%" (
		mkdir "%LOG_DIR%"
	)
)

set KAFKA_LOG4J_OPTS=-Dkafka.logs.dir="%LOG_DIR%" "%KAFKA_LOG4J_OPTS%"

rem Generic jvm settings you want to add
IF ["%KAFKA_OPTS%"] EQU [""] (
	set KAFKA_OPTS=
)

set DEFAULT_JAVA_DEBUG_PORT=5005
set DEFAULT_DEBUG_SUSPEND_FLAG=n
rem Set Debug options if enabled
IF ["%KAFKA_DEBUG%"] NEQ [""] (

	IF ["%JAVA_DEBUG_PORT%"] EQU [""] (
		set JAVA_DEBUG_PORT=%DEFAULT_JAVA_DEBUG_PORT%
	)

	IF ["%DEBUG_SUSPEND_FLAG%"] EQU [""] (
		set DEBUG_SUSPEND_FLAG=%DEFAULT_DEBUG_SUSPEND_FLAG%
	)
	set DEFAULT_JAVA_DEBUG_OPTS=-agentlib:jdwp=transport=dt_socket,server=y,suspend=!DEBUG_SUSPEND_FLAG!,address=!JAVA_DEBUG_PORT!

	IF ["%JAVA_DEBUG_OPTS%"] EQU [""] (
		set JAVA_DEBUG_OPTS=!DEFAULT_JAVA_DEBUG_OPTS!
	)

	echo Enabling Java debug options: !JAVA_DEBUG_OPTS!
	set KAFKA_OPTS=!JAVA_DEBUG_OPTS! !KAFKA_OPTS!
)

rem Which java to use
IF ["%JAVA_HOME%"] EQU [""] (
	set JAVA=java
) ELSE (
	set JAVA="%JAVA_HOME%/bin/java"
)

rem Memory options
IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
	set KAFKA_HEAP_OPTS=-Xmx256M
)

rem JVM performance options
IF ["%KAFKA_JVM_PERFORMANCE_OPTS%"] EQU [""] (
	set KAFKA_JVM_PERFORMANCE_OPTS=-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true
)

IF not defined CLASSPATH (
	echo Classpath is empty. Please build the project first e.g. by running 'gradlew jarAll'
	EXIT /B 2
)

set COMMAND=%JAVA% %KAFKA_HEAP_OPTS% %KAFKA_JVM_PERFORMANCE_OPTS% %KAFKA_JMX_OPTS% %KAFKA_LOG4J_OPTS% -cp "%CLASSPATH%" %KAFKA_OPTS% %*
rem echo.
rem echo %COMMAND%
rem echo.
%COMMAND%

goto :eof
:concat
IF not defined CLASSPATH (
	set CLASSPATH="%~1"
) ELSE (
	set CLASSPATH=%CLASSPATH%;"%~1"
)
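The :concat subroutine above appends each jar to CLASSPATH, and the KAFKA_DEBUG block wires in JDWP only when explicitly requested. A minimal usage sketch in a POSIX shell, assuming the companion kafka-run-class.sh (not shown in this hunk) honors the same KAFKA_DEBUG/JAVA_DEBUG_PORT/DEBUG_SUSPEND_FLAG variables as the batch script:

    # Attach a remote debugger on the default port 5005 without suspending startup.
    KAFKA_DEBUG=true DEBUG_SUSPEND_FLAG=n JAVA_DEBUG_PORT=5005 \
      bin/kafka-run-class.sh kafka.admin.TopicCommand --help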
38 bin/windows/kafka-server-start.bat Normal file
@@ -0,0 +1,38 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

IF [%1] EQU [] (
	echo USAGE: %0 server.properties
	EXIT /B 1
)

SetLocal
IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
	set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties
)
IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
	rem detect OS architecture
	wmic os get osarchitecture | find /i "32-bit" >nul 2>&1
	IF NOT ERRORLEVEL 1 (
		rem 32-bit OS
		set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M
	) ELSE (
		rem 64-bit OS
		set KAFKA_HEAP_OPTS=-Xmx1G -Xms1G
	)
)
"%~dp0kafka-run-class.bat" kafka.Kafka %*
EndLocal
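The heap block above autodetects 32-bit vs 64-bit Windows only when KAFKA_HEAP_OPTS is unset, so the default can be overridden per invocation. A sketch using the Unix launcher of the same name (bin/kafka-server-start.sh, which is not part of this hunk):

    # Pre-setting KAFKA_HEAP_OPTS skips the script's default heap selection.
    KAFKA_HEAP_OPTS="-Xmx2G -Xms2G" bin/kafka-server-start.sh config/server.properties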
18 bin/windows/kafka-server-stop.bat Normal file
@@ -0,0 +1,18 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

wmic process where (commandline like "%%kafka.Kafka%%" and not name="wmic.exe") delete
rem ps ax | grep -i 'kafka.Kafka' | grep -v grep | awk '{print $1}' | xargs kill -SIGTERM
23 bin/windows/kafka-streams-application-reset.bat Normal file
@@ -0,0 +1,23 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

SetLocal
IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
	set KAFKA_HEAP_OPTS=-Xmx512M
)

"%~dp0kafka-run-class.bat" kafka.tools.StreamsResetter %*
EndLocal
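A usage sketch for the reset tool via the Unix wrapper; the flag names follow the upstream kafka.tools.StreamsResetter CLI and should be verified against the bundled version, and the application id and topic are hypothetical:

    bin/kafka-streams-application-reset.sh \
      --application-id my-streams-app \
      --bootstrap-servers localhost:9092 \
      --input-topics my-input-topic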
17 bin/windows/kafka-topics.bat Normal file
@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

"%~dp0kafka-run-class.bat" kafka.admin.TopicCommand %*
30 bin/windows/zookeeper-server-start.bat Normal file
@@ -0,0 +1,30 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

IF [%1] EQU [] (
	echo USAGE: %0 zookeeper.properties
	EXIT /B 1
)

SetLocal
IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
	set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties
)
IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
	set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M
)
"%~dp0kafka-run-class.bat" org.apache.zookeeper.server.quorum.QuorumPeerMain %*
EndLocal
17 bin/windows/zookeeper-server-stop.bat Normal file
@@ -0,0 +1,17 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

wmic process where (commandline like "%%zookeeper%%" and not name="wmic.exe") delete
22 bin/windows/zookeeper-shell.bat Normal file
@@ -0,0 +1,22 @@
@echo off
rem Licensed to the Apache Software Foundation (ASF) under one or more
rem contributor license agreements. See the NOTICE file distributed with
rem this work for additional information regarding copyright ownership.
rem The ASF licenses this file to You under the Apache License, Version 2.0
rem (the "License"); you may not use this file except in compliance with
rem the License. You may obtain a copy of the License at
rem
rem http://www.apache.org/licenses/LICENSE-2.0
rem
rem Unless required by applicable law or agreed to in writing, software
rem distributed under the License is distributed on an "AS IS" BASIS,
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
rem See the License for the specific language governing permissions and
rem limitations under the License.

IF [%1] EQU [] (
	echo USAGE: %0 zookeeper_host:port[/path] [-zk-tls-config-file file] [args...]
	EXIT /B 1
)

"%~dp0kafka-run-class.bat" org.apache.zookeeper.ZooKeeperMainWithTlsSupportForKafka -server %*
17 bin/zookeeper-security-migration.sh Executable file
@@ -0,0 +1,17 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

exec $(dirname $0)/kafka-run-class.sh kafka.admin.ZkSecurityMigrator "$@"
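A usage sketch; the option names follow the upstream kafka.admin.ZkSecurityMigrator tool and should be checked against this build, and the host/port is a placeholder:

    # Switch znode ACLs to secure mode for the cluster rooted at this ZooKeeper.
    bin/zookeeper-security-migration.sh --zookeeper.acl=secure --zookeeper.connect=localhost:2181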
44 bin/zookeeper-server-start.sh Executable file
@@ -0,0 +1,44 @@
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ $# -lt 1 ];
then
	echo "USAGE: $0 [-daemon] zookeeper.properties"
	exit 1
fi
base_dir=$(dirname $0)

if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
    export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
fi

if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
    export KAFKA_HEAP_OPTS="-Xmx512M -Xms512M"
fi

EXTRA_ARGS=${EXTRA_ARGS-'-name zookeeper -loggc'}

COMMAND=$1
case $COMMAND in
  -daemon)
    EXTRA_ARGS="-daemon "$EXTRA_ARGS
    shift
    ;;
  *)
    ;;
esac

exec $base_dir/kafka-run-class.sh $EXTRA_ARGS org.apache.zookeeper.server.quorum.QuorumPeerMain "$@"
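The case block consumes an optional leading -daemon flag before handing the remaining arguments to kafka-run-class.sh. Usage, with paths assumed relative to the distribution root:

    # Foreground (useful when debugging the config):
    bin/zookeeper-server-start.sh config/zookeeper.properties
    # Detached; -daemon is stripped here and forwarded as part of EXTRA_ARGS:
    bin/zookeeper-server-start.sh -daemon config/zookeeper.properties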
24 bin/zookeeper-server-stop.sh Executable file
@@ -0,0 +1,24 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
SIGNAL=${SIGNAL:-TERM}
PIDS=$(ps ax | grep java | grep -i QuorumPeerMain | grep -v grep | awk '{print $1}')

if [ -z "$PIDS" ]; then
  echo "No zookeeper server to stop"
  exit 1
else
  kill -s $SIGNAL $PIDS
fi
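Because the signal is read from the SIGNAL environment variable with TERM as the default, a stuck process can be escalated without editing the script:

    # Graceful stop (SIGTERM):
    bin/zookeeper-server-stop.sh
    # Last resort, if the JVM ignores TERM:
    SIGNAL=KILL bin/zookeeper-server-stop.sh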
23 bin/zookeeper-shell.sh Executable file
@@ -0,0 +1,23 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [ $# -lt 1 ];
then
	echo "USAGE: $0 zookeeper_host:port[/path] [-zk-tls-config-file file] [args...]"
	exit 1
fi

exec $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.ZooKeeperMainWithTlsSupportForKafka -server "$@"
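A usage sketch (the chroot path and the TLS properties file are placeholders):

    bin/zookeeper-shell.sh localhost:2181/kafka
    # With TLS to ZooKeeper, supplying client-side TLS settings from a file:
    bin/zookeeper-shell.sh localhost:2182 -zk-tls-config-file ./zk-client-tls.properties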
1202 build.gradle Normal file
File diff suppressed because it is too large
83 build.sh Normal file
@@ -0,0 +1,83 @@
#!/bin/bash
workspace=$(cd $(dirname $0) && pwd -P)
cd $workspace

## TODO const
APPNAME=service-discovery
module=$APPNAME
app=$module

gitversion=.gitversion
control=./control.sh
ngxfunc=./nginxfunc.sh

## function
function build() {
    # compile the project
    JVERSION=`java -version 2>&1 | awk 'NR==1{gsub(/"/,"");print $3}'`
    major=`echo $JVERSION | awk -F. '{print $1}'`
    minor=`echo $JVERSION | awk -F. '{print $2}'`
    if [ $major -le 1 ] && [ $minor -lt 11 ]; then
        export JAVA_HOME=/usr/local/jdk-11.0.2   # (set this when JDK 11 is required)
        export PATH=$JAVA_HOME/bin:$PATH
    fi
    # XXX build command
    # mvn clean install -Ponline -Dmaven.test.skip=true -f ../pom.xml
    ./gradlew -PscalaVersion=2.12 releaseTarGz

    local sc=$?
    if [ $sc -ne 0 ]; then
        ## build failed; exit with a non-zero code
        echo "$app build error"
        exit $sc
    else
        echo -n "$app build ok, vsn="
        gitversion
    fi
}

function make_output() {
    # create the output directory
    local output="./output"
    rm -rf $output &>/dev/null
    mkdir -p $output &>/dev/null

    # populate the output directory; its contents are exactly what gets deployed
    (
        cp -rf $control $output &&   # copy the control.sh script into output
        cp -rf $ngxfunc $output &&
        cp -rf ./APP_META $output &&
        cp -rf ./APP_META/Dockerfile $output &&
        # XXX unpack the release tarball into the output path
        tar -xzvf core/build/distributions/kafka_2.12-sd-2.5.0-d-100.tgz
        mv kafka_2.12-sd-2.5.0-d-100 ${output}/service-discovery
        # unzip target/${module}.war -d ${output} &&   # unpack the war into output
        echo -e "make output ok."
    ) || { echo -e "make output error"; exit 2; }   # exit non-zero if populating output failed
}

## internals
function gitversion() {
    git log -1 --pretty=%h > $gitversion
    local gv=`cat $gitversion`
    echo "$gv"
}


##########################################
## main
## Steps:
## 1. compile
## 2. produce the deployment package in output
##########################################

# 1. compile
build

# 2. produce the deployment package in output
make_output

# build succeeded
echo -e "build done"
exit 0
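Putting it together, a typical invocation from the repository root (assuming the Gradle wrapper and the APP_META directory the script expects are present):

    ./build.sh    # runs build() and then make_output()
    ls output     # control.sh, nginxfunc.sh, APP_META, Dockerfile, service-discovery, ...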
20 checkstyle/.scalafmt.conf Normal file
@@ -0,0 +1,20 @@
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
docstrings = JavaDoc
maxColumn = 120
continuationIndent.defnSite = 2
assumeStandardLibraryStripMargin = true
danglingParentheses = true
rewrite.rules = [SortImports, RedundantBraces, RedundantParens, SortModifiers]
142 checkstyle/checkstyle.xml Normal file
@@ -0,0 +1,142 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE module PUBLIC
  "-//Puppy Crawl//DTD Check Configuration 1.3//EN"
  "http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
<!--
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-->
<module name="Checker">
  <property name="localeLanguage" value="en"/>

  <module name="FileTabCharacter"/>

  <!-- header -->
  <module name="Header">
    <property name="headerFile" value="${headerFile}" />
  </module>

  <module name="TreeWalker">

    <!-- code cleanup -->
    <module name="UnusedImports">
      <property name="processJavadoc" value="true" />
    </module>
    <module name="RedundantImport"/>
    <module name="IllegalImport" />
    <module name="EqualsHashCode"/>
    <module name="SimplifyBooleanExpression"/>
    <module name="OneStatementPerLine"/>
    <module name="UnnecessaryParentheses" />
    <module name="SimplifyBooleanReturn"/>

    <!-- style -->
    <module name="DefaultComesLast"/>
    <module name="EmptyStatement"/>
    <module name="ArrayTypeStyle"/>
    <module name="UpperEll"/>
    <module name="LeftCurly"/>
    <module name="RightCurly"/>
    <module name="EmptyStatement"/>
    <module name="ConstantName">
      <property name="format" value="(^[A-Z][A-Z0-9]*(_[A-Z0-9]+)*$)|(^log$)"/>
    </module>
    <module name="LocalVariableName"/>
    <module name="LocalFinalVariableName"/>
    <module name="MemberName"/>
    <module name="ClassTypeParameterName">
      <property name="format" value="^[A-Z][a-zA-Z0-9]*$$"/>
    </module>
    <module name="MethodTypeParameterName">
      <property name="format" value="^[A-Z][a-zA-Z0-9]*$$"/>
    </module>
    <module name="InterfaceTypeParameterName">
      <property name="format" value="^[A-Z][a-zA-Z0-9]*$$"/>
    </module>
    <module name="PackageName"/>
    <module name="ParameterName"/>
    <module name="StaticVariableName"/>
    <module name="TypeName"/>
    <module name="AvoidStarImport"/>

    <!-- variables that can be final should be final (suppressed except for Streams) -->
    <module name="FinalLocalVariable">
      <property name="tokens" value="VARIABLE_DEF,PARAMETER_DEF"/>
      <property name="validateEnhancedForLoopVariable" value="true"/>
    </module>

    <!-- dependencies -->
    <module name="ImportControl">
      <property name="file" value="${importControlFile}"/>
    </module>

    <!-- whitespace -->
    <module name="GenericWhitespace"/>
    <module name="NoWhitespaceBefore"/>
    <module name="WhitespaceAfter" />
    <module name="NoWhitespaceAfter"/>
    <module name="WhitespaceAround">
      <property name="allowEmptyConstructors" value="true"/>
      <property name="allowEmptyMethods" value="true"/>
    </module>
    <module name="Indentation"/>
    <module name="MethodParamPad"/>
    <module name="ParenPad"/>
    <module name="TypecastParenPad"/>

    <!-- locale-sensitive methods should specify locale -->
    <module name="Regexp">
      <property name="format" value="\.to(Lower|Upper)Case\(\)"/>
      <property name="illegalPattern" value="true"/>
      <property name="ignoreComments" value="true"/>
    </module>

    <!-- code quality -->
    <module name="MethodLength"/>
    <module name="ParameterNumber">
      <!-- default is 8 -->
      <property name="max" value="13"/>
    </module>
    <module name="ClassDataAbstractionCoupling">
      <!-- default is 7 -->
      <property name="max" value="25"/>
    </module>
    <module name="BooleanExpressionComplexity">
      <!-- default is 3 -->
      <property name="max" value="5"/>
    </module>

    <module name="ClassFanOutComplexity">
      <!-- default is 20 -->
      <property name="max" value="50"/>
    </module>
    <module name="CyclomaticComplexity">
      <!-- default is 10 -->
      <property name="max" value="16"/>
    </module>
    <module name="JavaNCSS">
      <!-- default is 50 -->
      <property name="methodMaximum" value="100"/>
    </module>
    <module name="NPathComplexity">
      <!-- default is 200 -->
      <property name="max" value="500"/>
    </module>
  </module>

  <module name="SuppressionFilter">
    <property name="file" value="${suppressionsFile}"/>
  </module>
</module>
56 checkstyle/import-control-core.xml Normal file
@@ -0,0 +1,56 @@
<!DOCTYPE import-control PUBLIC
  "-//Puppy Crawl//DTD Import Control 1.1//EN"
  "http://www.puppycrawl.com/dtds/import_control_1_1.dtd">
<!--
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-->

<import-control pkg="kafka">

  <!-- THINK HARD ABOUT THE LAYERING OF THE PROJECT BEFORE CHANGING THIS FILE -->

  <!-- common library dependencies -->
  <allow pkg="java" />
  <allow pkg="scala" />
  <allow pkg="javax.management" />
  <allow pkg="org.slf4j" />
  <allow pkg="org.junit" />
  <allow pkg="org.easymock" />
  <allow pkg="java.security" />
  <allow pkg="javax.net.ssl" />
  <allow pkg="javax.security" />

  <allow pkg="kafka.common" />
  <allow pkg="kafka.utils" />
  <allow pkg="kafka.serializer" />
  <allow pkg="org.apache.kafka.common" />

  <subpackage name="tools">
    <allow pkg="org.apache.kafka.clients.admin" />
    <allow pkg="kafka.admin" />
    <allow pkg="joptsimple" />
    <allow pkg="org.apache.kafka.clients.consumer" />
  </subpackage>

  <subpackage name="coordinator">
    <allow class="kafka.server.MetadataCache" />
  </subpackage>

  <subpackage name="examples">
    <allow pkg="org.apache.kafka.clients" />
  </subpackage>

</import-control>
47 checkstyle/import-control-jmh-benchmarks.xml Normal file
@@ -0,0 +1,47 @@
<!DOCTYPE import-control PUBLIC
  "-//Puppy Crawl//DTD Import Control 1.1//EN"
  "http://www.puppycrawl.com/dtds/import_control_1_1.dtd">
<!--
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-->

<import-control pkg="org.apache.kafka.jmh">

  <allow pkg="java"/>
  <allow pkg="scala"/>
  <allow pkg="javax.management"/>
  <allow pkg="org.slf4j"/>
  <allow pkg="org.openjdk.jmh.annotations"/>
  <allow pkg="org.openjdk.jmh.runner"/>
  <allow pkg="org.openjdk.jmh.infra"/>
  <allow pkg="java.security"/>
  <allow pkg="javax.net.ssl"/>
  <allow pkg="javax.security"/>
  <allow pkg="org.apache.kafka.common"/>
  <allow pkg="org.apache.kafka.clients.producer"/>
  <allow pkg="kafka.cluster"/>
  <allow pkg="kafka.log"/>
  <allow pkg="kafka.server"/>
  <allow pkg="kafka.api"/>
  <allow class="kafka.utils.Pool"/>
  <allow class="kafka.utils.KafkaScheduler"/>
  <allow class="org.apache.kafka.clients.FetchSessionHandler"/>
  <allow pkg="org.mockito"/>


  <subpackage name="cache">
  </subpackage>
</import-control>
456 checkstyle/import-control.xml Normal file
@@ -0,0 +1,456 @@
<!DOCTYPE import-control PUBLIC
  "-//Puppy Crawl//DTD Import Control 1.1//EN"
  "http://www.puppycrawl.com/dtds/import_control_1_1.dtd">
<!--
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-->

<import-control pkg="org.apache.kafka">

  <!-- THINK HARD ABOUT THE LAYERING OF THE PROJECT BEFORE CHANGING THIS FILE -->

  <!-- common library dependencies -->
  <allow pkg="java" />
  <allow pkg="javax.management" />
  <allow pkg="org.slf4j" />
  <allow pkg="org.junit" />
  <allow pkg="org.hamcrest" />
  <allow pkg="org.mockito" />
  <allow pkg="org.easymock" />
  <allow pkg="org.powermock" />
  <allow pkg="java.security" />
  <allow pkg="javax.net.ssl" />
  <allow pkg="javax.security" />
  <allow pkg="org.ietf.jgss" />

  <!-- no one depends on the server -->
  <disallow pkg="kafka" />

  <!-- anyone can use public classes -->
  <allow pkg="org.apache.kafka.common" exact-match="true" />
  <allow pkg="org.apache.kafka.common.security" />
  <allow pkg="org.apache.kafka.common.serialization" />
  <allow pkg="org.apache.kafka.common.utils" />
  <allow pkg="org.apache.kafka.common.errors" exact-match="true" />
  <allow pkg="org.apache.kafka.common.memory" />

  <subpackage name="common">
    <disallow pkg="org.apache.kafka.clients" />
    <allow pkg="org.apache.kafka.common" exact-match="true" />
    <allow pkg="org.apache.kafka.common.annotation" />
    <allow pkg="org.apache.kafka.common.config" exact-match="true" />
    <allow pkg="org.apache.kafka.common.internals" exact-match="true" />
    <allow pkg="org.apache.kafka.test" />

    <subpackage name="acl">
      <allow pkg="org.apache.kafka.common.annotation" />
      <allow pkg="org.apache.kafka.common.acl" />
      <allow pkg="org.apache.kafka.common.resource" />
    </subpackage>

    <subpackage name="config">
      <allow pkg="org.apache.kafka.common.config" />
      <!-- for testing -->
      <allow pkg="org.apache.kafka.common.metrics" />
    </subpackage>

    <subpackage name="message">
      <allow pkg="com.fasterxml.jackson" />
      <allow pkg="org.apache.kafka.common.protocol" />
      <allow pkg="org.apache.kafka.common.protocol.types" />
      <allow pkg="org.apache.kafka.common.message" />
    </subpackage>

    <subpackage name="metrics">
      <allow pkg="org.apache.kafka.common.metrics" />
    </subpackage>

    <subpackage name="memory">
      <allow pkg="org.apache.kafka.common.metrics" />
    </subpackage>

    <subpackage name="network">
      <allow pkg="org.apache.kafka.common.security.auth" />
      <allow pkg="org.apache.kafka.common.protocol" />
      <allow pkg="org.apache.kafka.common.config" />
      <allow pkg="org.apache.kafka.common.metrics" />
      <allow pkg="org.apache.kafka.common.security" />
    </subpackage>

    <subpackage name="resource">
      <allow pkg="org.apache.kafka.common.annotation" />
      <allow pkg="org.apache.kafka.common.resource" />
    </subpackage>

    <subpackage name="security">
      <allow pkg="org.apache.kafka.common.annotation" />
      <allow pkg="org.apache.kafka.common.network" />
      <allow pkg="org.apache.kafka.common.config" />
      <allow pkg="org.apache.kafka.common.protocol" />
      <allow pkg="org.apache.kafka.common.errors" />
      <subpackage name="authenticator">
        <allow pkg="org.apache.kafka.common.message" />
        <allow pkg="org.apache.kafka.common.protocol.types" />
        <allow pkg="org.apache.kafka.common.requests" />
        <allow pkg="org.apache.kafka.clients" />
      </subpackage>
      <subpackage name="scram">
        <allow pkg="javax.crypto" />
      </subpackage>
      <subpackage name="oauthbearer">
        <allow pkg="com.fasterxml.jackson.databind" />
      </subpackage>
    </subpackage>

    <subpackage name="protocol">
      <allow pkg="org.apache.kafka.common.errors" />
      <allow pkg="org.apache.kafka.common.message" />
      <allow pkg="org.apache.kafka.common.protocol" />
      <allow pkg="org.apache.kafka.common.protocol.types" />
      <allow pkg="org.apache.kafka.common.record" />
      <allow pkg="org.apache.kafka.common.requests" />
      <allow pkg="org.apache.kafka.common.resource" />
    </subpackage>

    <subpackage name="record">
      <allow pkg="net.jpountz" />
      <allow pkg="org.apache.kafka.common.header" />
      <allow pkg="org.apache.kafka.common.record" />
      <allow pkg="org.apache.kafka.common.network" />
      <allow pkg="org.apache.kafka.common.protocol" />
      <allow pkg="org.apache.kafka.common.protocol.types" />
      <allow pkg="org.apache.kafka.common.errors" />
    </subpackage>

    <subpackage name="header">
      <allow pkg="org.apache.kafka.common.header" />
      <allow pkg="org.apache.kafka.common.record" />
    </subpackage>

    <subpackage name="requests">
      <allow pkg="org.apache.kafka.common.acl" />
      <allow pkg="org.apache.kafka.common.protocol" />
      <allow pkg="org.apache.kafka.common.message" />
      <allow pkg="org.apache.kafka.common.network" />
      <allow pkg="org.apache.kafka.common.requests" />
      <allow pkg="org.apache.kafka.common.resource" />
      <allow pkg="org.apache.kafka.common.record" />
      <!-- for AuthorizableRequestContext interface -->
      <allow pkg="org.apache.kafka.server.authorizer" />
      <!-- for testing -->
      <allow pkg="org.apache.kafka.common.errors" />
    </subpackage>

    <subpackage name="serialization">
      <allow class="org.apache.kafka.common.errors.SerializationException" />
      <allow class="org.apache.kafka.common.header.Headers" />
    </subpackage>

    <subpackage name="utils">
      <allow pkg="org.apache.kafka.common" />
    </subpackage>
  </subpackage>

  <subpackage name="clients">
    <allow pkg="org.slf4j" />
    <allow pkg="org.apache.kafka.common" />
    <allow pkg="org.apache.kafka.clients" exact-match="true"/>
    <allow pkg="org.apache.kafka.test" />

    <subpackage name="consumer">
      <allow pkg="org.apache.kafka.clients.consumer" />
    </subpackage>

    <subpackage name="producer">
      <allow pkg="org.apache.kafka.clients.consumer" />
      <allow pkg="org.apache.kafka.clients.producer" />
    </subpackage>

    <subpackage name="admin">
      <allow pkg="org.apache.kafka.clients.admin" />
      <allow pkg="org.apache.kafka.clients.consumer.internals" />
      <allow pkg="org.apache.kafka.clients.consumer" />
    </subpackage>
  </subpackage>

  <subpackage name="server">
    <allow pkg="org.slf4j" />
    <allow pkg="org.apache.kafka.common" />
    <allow pkg="org.apache.kafka.test" />
  </subpackage>

  <subpackage name="tools">
    <allow pkg="org.apache.kafka.common"/>
    <allow pkg="org.apache.kafka.clients.admin" />
    <allow pkg="org.apache.kafka.clients.producer" />
    <allow pkg="org.apache.kafka.clients.consumer" />
    <allow pkg="com.fasterxml.jackson" />
    <allow pkg="net.sourceforge.argparse4j" />
    <allow pkg="org.apache.log4j" />
  </subpackage>

  <subpackage name="trogdor">
    <allow pkg="com.fasterxml.jackson" />
    <allow pkg="javax.servlet" />
    <allow pkg="javax.ws.rs" />
    <allow pkg="net.sourceforge.argparse4j" />
    <allow pkg="org.apache.kafka.clients" />
    <allow pkg="org.apache.kafka.clients.admin" />
    <allow pkg="org.apache.kafka.clients.consumer" exact-match="true"/>
    <allow pkg="org.apache.kafka.clients.producer" exact-match="true"/>
    <allow pkg="org.apache.kafka.common" />
    <allow pkg="org.apache.kafka.test"/>
    <allow pkg="org.apache.kafka.trogdor" />
    <allow pkg="org.apache.log4j" />
    <allow pkg="org.eclipse.jetty" />
    <allow pkg="org.glassfish.jersey" />
  </subpackage>

  <subpackage name="message">
    <allow pkg="com.fasterxml.jackson" />
    <allow pkg="com.fasterxml.jackson.annotation" />
  </subpackage>

  <subpackage name="streams">
    <allow pkg="org.apache.kafka.common"/>
    <allow pkg="org.apache.kafka.test"/>
    <allow pkg="org.apache.kafka.clients"/>
    <allow pkg="org.apache.kafka.clients.producer" exact-match="true"/>
    <allow pkg="org.apache.kafka.clients.consumer" exact-match="true"/>

    <allow pkg="org.apache.kafka.streams"/>

    <subpackage name="examples">
      <allow pkg="com.fasterxml.jackson" />
      <allow pkg="org.apache.kafka.connect.json" />
    </subpackage>

    <subpackage name="perf">
      <allow pkg="com.fasterxml.jackson.databind" />
    </subpackage>

    <subpackage name="integration">
      <allow pkg="kafka.admin" />
      <allow pkg="kafka.api" />
      <allow pkg="kafka.server" />
      <allow pkg="kafka.tools" />
      <allow pkg="kafka.utils" />
      <allow pkg="kafka.log" />
      <allow pkg="scala" />
      <allow class="kafka.zk.EmbeddedZookeeper"/>
    </subpackage>

    <subpackage name="test">
      <allow pkg="kafka.admin" />
    </subpackage>

    <subpackage name="tools">
      <allow pkg="kafka.tools" />
    </subpackage>

    <subpackage name="state">
      <allow pkg="org.rocksdb" />
    </subpackage>

    <subpackage name="processor">
      <subpackage name="internals">
        <allow pkg="com.fasterxml.jackson" />
        <allow pkg="kafka.utils" />
        <allow pkg="org.apache.zookeeper" />
        <allow pkg="org.apache.zookeeper" />
        <allow pkg="org.apache.log4j" />
        <subpackage name="testutil">
          <allow pkg="org.apache.log4j" />
        </subpackage>
      </subpackage>
    </subpackage>
  </subpackage>

  <subpackage name="jmh">
    <allow pkg="org.openjdk.jmh.annotations" />
    <allow pkg="org.openjdk.jmh.runner" />
    <allow pkg="org.openjdk.jmh.runner.options" />
    <allow pkg="org.openjdk.jmh.infra" />
    <allow pkg="org.apache.kafka.common" />
    <allow pkg="org.apache.kafka.clients" />
    <allow pkg="org.apache.kafka.streams" />
    <allow pkg="org.github.jamm" />
  </subpackage>

  <subpackage name="log4jappender">
    <allow pkg="org.apache.log4j" />
    <allow pkg="org.apache.kafka.clients" />
    <allow pkg="org.apache.kafka.common" />
    <allow pkg="org.apache.kafka.test" />
  </subpackage>

  <subpackage name="test">
    <allow pkg="org.apache.kafka" />
    <allow pkg="org.bouncycastle" />
  </subpackage>

  <subpackage name="connect">
    <allow pkg="org.apache.kafka.common" />
    <allow pkg="org.apache.kafka.connect.data" />
    <allow pkg="org.apache.kafka.connect.errors" />
    <allow pkg="org.apache.kafka.connect.header" />
    <allow pkg="org.apache.kafka.connect.components"/>
    <allow pkg="org.apache.kafka.clients" />
    <allow pkg="org.apache.kafka.test"/>

    <subpackage name="source">
      <allow pkg="org.apache.kafka.connect.connector" />
      <allow pkg="org.apache.kafka.connect.storage" />
    </subpackage>

    <subpackage name="sink">
      <allow pkg="org.apache.kafka.clients.consumer" />
      <allow pkg="org.apache.kafka.connect.connector" />
      <allow pkg="org.apache.kafka.connect.storage" />
    </subpackage>

    <subpackage name="converters">
      <allow pkg="org.apache.kafka.connect.storage" />
    </subpackage>

    <subpackage name="connector.policy">
      <allow pkg="org.apache.kafka.connect.health" />
      <allow pkg="org.apache.kafka.connect.connector" />
      <!-- for testing -->
      <allow pkg="org.apache.kafka.connect.runtime" />
    </subpackage>

    <subpackage name="rest">
      <allow pkg="org.apache.kafka.connect.health" />
      <allow pkg="javax.ws.rs" />
      <allow pkg="javax.security.auth"/>
      <subpackage name="basic">
        <allow pkg="org.apache.kafka.connect.rest"/>
      </subpackage>
    </subpackage>

    <subpackage name="mirror">
      <allow pkg="org.apache.kafka.clients.consumer" />
      <allow pkg="org.apache.kafka.connect.source" />
      <allow pkg="org.apache.kafka.connect.sink" />
      <allow pkg="org.apache.kafka.connect.storage" />
      <allow pkg="org.apache.kafka.connect.connector" />
      <allow pkg="org.apache.kafka.connect.runtime" />
      <allow pkg="org.apache.kafka.connect.runtime.distributed" />
      <allow pkg="org.apache.kafka.connect.util" />
      <allow pkg="org.apache.kafka.connect.converters" />
      <allow pkg="net.sourceforge.argparse4j" />
      <!-- for tests -->
      <allow pkg="org.apache.kafka.connect.integration" />
      <allow pkg="org.apache.kafka.connect.mirror" />
    </subpackage>

    <subpackage name="runtime">
      <allow pkg="org.apache.kafka.connect" />
      <allow pkg="org.reflections"/>
      <allow pkg="org.reflections.util"/>
      <allow pkg="javax.crypto"/>

      <subpackage name="rest">
        <allow pkg="org.eclipse.jetty" />
        <allow pkg="javax.ws.rs" />
        <allow pkg="javax.servlet" />
        <allow pkg="org.glassfish.jersey" />
        <allow pkg="com.fasterxml.jackson" />
        <allow pkg="org.apache.http"/>
        <subpackage name="resources">
          <allow pkg="org.apache.log4j" />
        </subpackage>
      </subpackage>

      <subpackage name="isolation">
        <allow pkg="com.fasterxml.jackson" />
        <allow pkg="org.apache.maven.artifact.versioning" />
        <allow pkg="javax.tools" />
      </subpackage>

      <subpackage name="distributed">
        <allow pkg="javax.ws.rs.core" />
      </subpackage>
    </subpackage>

    <subpackage name="cli">
      <allow pkg="org.apache.kafka.connect.runtime" />
      <allow pkg="org.apache.kafka.connect.storage" />
      <allow pkg="org.apache.kafka.connect.util" />
      <allow pkg="org.apache.kafka.common" />
      <allow pkg="org.apache.kafka.connect.connector.policy" />
    </subpackage>

    <subpackage name="storage">
      <allow pkg="org.apache.kafka.connect" />
      <allow pkg="org.apache.kafka.common.serialization" />
      <allow pkg="javax.crypto.spec"/>
    </subpackage>

    <subpackage name="util">
      <allow pkg="org.apache.kafka.connect" />
      <allow pkg="org.reflections.vfs" />
      <!-- for annotations to avoid code duplication -->
      <allow pkg="com.fasterxml.jackson.annotation" />
      <allow pkg="com.fasterxml.jackson.databind" />
      <subpackage name="clusters">
        <allow pkg="kafka.server" />
        <allow pkg="kafka.zk" />
        <allow pkg="kafka.utils" />
        <allow class="javax.servlet.http.HttpServletResponse" />
        <allow class="javax.ws.rs.core.Response" />
        <allow pkg="com.fasterxml.jackson.core.type" />
      </subpackage>
    </subpackage>

    <subpackage name="integration">
      <allow pkg="org.apache.kafka.connect.util.clusters" />
      <allow pkg="org.apache.kafka.connect" />
      <allow pkg="org.apache.kafka.tools" />
      <allow pkg="javax.ws.rs" />
    </subpackage>

    <subpackage name="json">
      <allow pkg="com.fasterxml.jackson" />
      <allow pkg="org.apache.kafka.common.serialization" />
      <allow pkg="org.apache.kafka.common.errors" />
      <allow pkg="org.apache.kafka.connect.storage" />
    </subpackage>

    <subpackage name="file">
      <allow pkg="org.apache.kafka.connect" />
      <allow pkg="org.apache.kafka.clients.consumer" />
      <!-- for tests -->
      <allow pkg="org.easymock" />
      <allow pkg="org.powermock" />
    </subpackage>

    <subpackage name="tools">
      <allow pkg="org.apache.kafka.connect" />
      <allow pkg="org.apache.kafka.tools" />
      <allow pkg="com.fasterxml.jackson" />
    </subpackage>

    <subpackage name="transforms">
      <allow class="org.apache.kafka.connect.connector.ConnectRecord" />
      <allow class="org.apache.kafka.connect.source.SourceRecord" />
      <allow class="org.apache.kafka.connect.sink.SinkRecord" />
      <allow pkg="org.apache.kafka.connect.transforms.util" />
    </subpackage>
  </subpackage>

</import-control>
16 checkstyle/java.header Normal file
@@ -0,0 +1,16 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
267 checkstyle/suppressions.xml Normal file
@@ -0,0 +1,267 @@
<!DOCTYPE suppressions PUBLIC
  "-//Puppy Crawl//DTD Suppressions 1.1//EN"
  "http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">

<suppressions>

  <!-- Note that [/\\] must be used as the path separator for cross-platform support -->

  <!-- Generator -->
  <suppress checks="CyclomaticComplexity|BooleanExpressionComplexity"
            files="(SchemaGenerator|MessageDataGenerator|FieldSpec).java"/>
  <suppress checks="NPathComplexity"
            files="(MessageDataGenerator|FieldSpec).java"/>
  <suppress checks="JavaNCSS"
            files="(ApiMessageType).java|MessageDataGenerator.java"/>
  <suppress checks="MethodLength"
            files="MessageDataGenerator.java"/>

  <!-- Clients -->
  <suppress checks="ClassFanOutComplexity"
            files="(Fetcher|Sender|SenderTest|ConsumerCoordinator|KafkaConsumer|KafkaProducer|Utils|TransactionManager|TransactionManagerTest|KafkaAdminClient|NetworkClient|Admin).java"/>
  <suppress checks="ClassFanOutComplexity"
            files="(SaslServerAuthenticator|SaslAuthenticatorTest).java"/>
  <suppress checks="ClassFanOutComplexity"
            files="Errors.java"/>
  <suppress checks="ClassFanOutComplexity"
            files="Utils.java"/>
  <suppress checks="ClassFanOutComplexity"
            files="AbstractRequest.java"/>
  <suppress checks="ClassFanOutComplexity"
            files="AbstractResponse.java"/>

  <suppress checks="MethodLength"
            files="KerberosLogin.java|RequestResponseTest.java|ConnectMetricsRegistry.java|KafkaConsumer.java"/>

  <suppress checks="ParameterNumber"
            files="NetworkClient.java|FieldSpec.java"/>
  <suppress checks="ParameterNumber"
            files="KafkaConsumer.java"/>
  <suppress checks="ParameterNumber"
            files="Fetcher.java"/>
  <suppress checks="ParameterNumber"
            files="Sender.java"/>
  <suppress checks="ParameterNumber"
            files="ConfigDef.java"/>
  <suppress checks="ParameterNumber"
            files="DefaultRecordBatch.java"/>
  <suppress checks="ParameterNumber"
            files="Sender.java"/>

  <suppress checks="ClassDataAbstractionCoupling"
            files="(KafkaConsumer|ConsumerCoordinator|Fetcher|KafkaProducer|AbstractRequest|AbstractResponse|TransactionManager|Admin|KafkaAdminClient).java"/>
  <suppress checks="ClassDataAbstractionCoupling"
            files="(Errors|SaslAuthenticatorTest|AgentTest|CoordinatorTest).java"/>

  <suppress checks="BooleanExpressionComplexity"
            files="(Utils|Topic|KafkaLZ4BlockOutputStream|AclData|JoinGroupRequest).java"/>

  <suppress checks="CyclomaticComplexity"
            files="(ConsumerCoordinator|Fetcher|Sender|KafkaProducer|BufferPool|ConfigDef|RecordAccumulator|KerberosLogin|AbstractRequest|AbstractResponse|Selector|SslFactory|SslTransportLayer|SaslClientAuthenticator|SaslClientCallbackHandler|SaslServerAuthenticator|AbstractCoordinator|TransactionManager).java"/>

  <suppress checks="JavaNCSS"
            files="(AbstractRequest|KerberosLogin|WorkerSinkTaskTest|TransactionManagerTest|SenderTest|KafkaAdminClient|ConsumerCoordinatorTest).java"/>

  <suppress checks="NPathComplexity"
            files="(BufferPool|Fetcher|MetricName|Node|ConfigDef|RecordBatch|SslFactory|SslTransportLayer|MetadataResponse|KerberosLogin|Selector|Sender|Serdes|TokenInformation|Agent|Values|PluginUtils|MiniTrogdorCluster|TasksRequest|KafkaProducer).java"/>

  <suppress checks="(JavaNCSS|CyclomaticComplexity|MethodLength)"
            files="CoordinatorClient.java"/>
  <suppress checks="(UnnecessaryParentheses|BooleanExpressionComplexity|CyclomaticComplexity|WhitespaceAfter|LocalVariableName)"
            files="Murmur3.java"/>

  <suppress checks="(NPathComplexity|ClassFanOutComplexity|CyclomaticComplexity|ClassDataAbstractionCoupling|LocalVariableName|MemberName|ParameterName|MethodLength|JavaNCSS)"
            files="clients[\\/]src[\\/](generated|generated-test)[\\/].+.java$"/>

  <suppress checks="NPathComplexity"
            files="MessageTest.java"/>

  <!-- clients tests -->
  <suppress checks="ClassDataAbstractionCoupling"
            files="(Sender|Fetcher|KafkaConsumer|Metrics|RequestResponse|TransactionManager|KafkaAdminClient|Message|KafkaProducer)Test.java"/>

  <suppress checks="ClassFanOutComplexity"
            files="(ConsumerCoordinator|KafkaConsumer|RequestResponse|Fetcher|KafkaAdminClient|Message|KafkaProducer)Test.java"/>

  <suppress checks="ClassFanOutComplexity"
            files="MockAdminClient.java"/>

  <suppress checks="JavaNCSS"
            files="RequestResponseTest.java|FetcherTest.java"/>

  <suppress checks="NPathComplexity"
            files="MemoryRecordsTest|MetricsTest"/>

  <suppress checks="(WhitespaceAround|LocalVariableName|ImportControl|AvoidStarImport)"
            files="Murmur3Test.java"/>

  <!-- Connect -->
  <suppress checks="ClassFanOutComplexity"
            files="DistributedHerder(|Test).java"/>
  <suppress checks="ClassFanOutComplexity"
            files="Worker.java"/>
  <suppress checks="MethodLength"
            files="(KafkaConfigBackingStore|RequestResponseTest|WorkerSinkTaskTest).java"/>

  <suppress checks="ParameterNumber"
            files="(WorkerSinkTask|WorkerSourceTask).java"/>
  <suppress checks="ParameterNumber"
            files="WorkerCoordinator.java"/>
  <suppress checks="ParameterNumber"
            files="ConfigKeyInfo.java"/>

  <suppress checks="ClassDataAbstractionCoupling"
            files="(RestServer|AbstractHerder|DistributedHerder).java"/>

  <suppress checks="BooleanExpressionComplexity"
            files="JsonConverter.java"/>

  <suppress checks="CyclomaticComplexity"
            files="ConnectRecord.java"/>
  <suppress checks="CyclomaticComplexity"
            files="JsonConverter.java"/>
  <suppress checks="CyclomaticComplexity"
            files="FileStreamSourceTask.java"/>
  <suppress checks="CyclomaticComplexity"
            files="DistributedHerder.java"/>
  <suppress checks="CyclomaticComplexity"
            files="KafkaConfigBackingStore.java"/>
  <suppress checks="CyclomaticComplexity"
            files="(Values|ConnectHeader|ConnectHeaders).java"/>
  <suppress checks="CyclomaticComplexity"
            files="RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java"/>

  <suppress checks="JavaNCSS"
            files="KafkaConfigBackingStore.java"/>
  <suppress checks="JavaNCSS"
            files="Values.java"/>

  <suppress checks="NPathComplexity"
            files="(DistributedHerder|RestClient|JsonConverter|KafkaConfigBackingStore|FileStreamSourceTask).java"/>

  <suppress checks="MethodLength"
            files="Values.java"/>

  <!-- connect tests -->
  <suppress checks="ClassDataAbstractionCoupling"
            files="(DistributedHerder|KafkaBasedLog)Test.java"/>

  <suppress checks="ClassFanOutComplexity"
            files="(WorkerSinkTask|WorkerSourceTask)Test.java"/>

  <!-- Streams -->
  <suppress checks="ClassFanOutComplexity"
            files="(TopologyBuilder|KafkaStreams|KStreamImpl|KTableImpl|StreamThread|StreamTask).java"/>

  <suppress checks="MethodLength"
            files="(KTableImpl|StreamsPartitionAssignor.java)"/>

  <suppress checks="ParameterNumber"
            files="StreamTask.java"/>
  <suppress checks="ParameterNumber"
            files="RocksDBWindowStoreSupplier.java"/>

  <suppress checks="ClassDataAbstractionCoupling"
            files="(TopologyBuilder|KStreamImpl|StreamsPartitionAssignor|KafkaStreams|KTableImpl).java"/>

  <suppress checks="CyclomaticComplexity"
            files="TopologyBuilder.java"/>
  <suppress checks="CyclomaticComplexity"
            files="StreamsPartitionAssignor.java"/>
  <suppress checks="CyclomaticComplexity"
            files="StreamThread.java"/>

  <suppress checks="JavaNCSS"
            files="StreamsPartitionAssignor.java"/>

  <suppress checks="NPathComplexity"
            files="(ProcessorStateManager|InternalTopologyBuilder|StreamsPartitionAssignor|StreamThread).java"/>

  <suppress checks="(FinalLocalVariable|UnnecessaryParentheses|BooleanExpressionComplexity|CyclomaticComplexity|WhitespaceAfter|LocalVariableName)"
            files="Murmur3.java"/>

  <!-- suppress FinalLocalVariable outside of the streams package. -->
  <suppress checks="FinalLocalVariable"
            files="^(?!.*[\\/]org[\\/]apache[\\/]kafka[\\/]streams[\\/].*$)"/>

  <!-- generated code -->
  <suppress checks="(NPathComplexity|ClassFanOutComplexity|CyclomaticComplexity|ClassDataAbstractionCoupling|FinalLocalVariable|LocalVariableName|MemberName|ParameterName|MethodLength|JavaNCSS)"
            files="streams[\\/]src[\\/](generated|generated-test)[\\/].+.java$"/>

  <!-- Streams tests -->
  <suppress checks="ClassFanOutComplexity"
            files="(StreamThreadTest|StreamTaskTest|ProcessorTopologyTestDriver).java"/>

  <suppress checks="MethodLength"
            files="KStreamKTableJoinIntegrationTest.java"/>
  <suppress checks="MethodLength"
            files="KStreamKStreamJoinTest.java"/>
  <suppress checks="MethodLength"
            files="KStreamWindowAggregateTest.java"/>
  <suppress checks="MethodLength"
            files="RocksDBWindowStoreTest.java"/>

  <suppress checks="MemberName"
            files="StreamsPartitionAssignorTest.java"/>

  <suppress checks="ClassDataAbstractionCoupling"
            files=".*[/\\]streams[/\\].*test[/\\].*.java"/>

  <suppress checks="BooleanExpressionComplexity"
            files="SmokeTestDriver.java"/>

  <suppress checks="CyclomaticComplexity"
            files="KStreamKStreamJoinTest.java|KTableKTableForeignKeyJoinIntegrationTest.java"/>
  <suppress checks="CyclomaticComplexity"
            files="RelationalSmokeTest.java|SmokeTestDriver.java"/>

  <suppress checks="JavaNCSS"
            files="KStreamKStreamJoinTest.java"/>
  <suppress checks="JavaNCSS"
            files="SmokeTestDriver.java"/>

  <suppress checks="NPathComplexity"
            files="EosTestDriver|KStreamKStreamJoinTest.java|RelationalSmokeTest.java|SmokeTestDriver.java|KStreamKStreamLeftJoinTest.java|KTableKTableForeignKeyJoinIntegrationTest.java"/>

  <suppress checks="(FinalLocalVariable|WhitespaceAround|LocalVariableName|ImportControl|AvoidStarImport)"
            files="Murmur3Test.java"/>

  <!-- Streams Test-Utils -->
  <suppress checks="ClassFanOutComplexity"
            files="TopologyTestDriver.java"/>
  <suppress checks="ClassDataAbstractionCoupling"
            files="TopologyTestDriver.java"/>

  <!-- Tools -->
  <suppress checks="ClassDataAbstractionCoupling"
            files="VerifiableConsumer.java"/>

  <suppress checks="CyclomaticComplexity"
            files="(StreamsResetter|ProducerPerformance|Agent).java"/>
  <suppress checks="BooleanExpressionComplexity"
            files="StreamsResetter.java"/>
  <suppress checks="NPathComplexity"
            files="(ProducerPerformance|StreamsResetter|Agent|TransactionalMessageCopier).java"/>
  <suppress checks="ImportControl"
            files="SignalLogger.java"/>
  <suppress checks="IllegalImport"
            files="SignalLogger.java"/>
  <suppress checks="ParameterNumber"
            files="ProduceBenchSpec.java"/>
  <suppress checks="ParameterNumber"
            files="SustainedConnectionSpec.java"/>

  <!-- Log4J-Appender -->
  <suppress checks="CyclomaticComplexity"
            files="KafkaLog4jAppender.java"/>

  <suppress checks="NPathComplexity"
            files="KafkaLog4jAppender.java"/>
  <suppress checks="JavaNCSS"
            files="RequestResponseTest.java"/>

</suppressions>
1 clients/.gitignore vendored Normal file
@@ -0,0 +1 @@
/bin/
@@ -0,0 +1,952 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.NavigableMap;
import java.util.Objects;
import java.util.TreeMap;
import java.util.UUID;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.Bytes;

import static java.util.Map.Entry;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;


public class SimpleExampleMessageData implements ApiMessage {
    private UUID processId;
    private List<Integer> myTaggedIntArray;
    private String myNullableString;
    private short myInt16;
    private double myFloat64;
    private String myString;
    private byte[] myBytes;
    private UUID taggedUuid;
    private long taggedLong;
    private ByteBuffer zeroCopyByteBuffer;
    private ByteBuffer nullableZeroCopyByteBuffer;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
        );

    public static final Schema SCHEMA_1 =
        new Schema(
            new Field("process_id", Type.UUID, ""),
            new Field("zero_copy_byte_buffer", Type.COMPACT_BYTES, ""),
            new Field("nullable_zero_copy_byte_buffer", Type.COMPACT_NULLABLE_BYTES, ""),
            TaggedFieldsSection.of(
                0, new Field("my_tagged_int_array", new CompactArrayOf(Type.INT32), ""),
                1, new Field("my_nullable_string", Type.COMPACT_NULLABLE_STRING, ""),
                2, new Field("my_int16", Type.INT16, ""),
                3, new Field("my_float64", Type.FLOAT64, ""),
                4, new Field("my_string", Type.COMPACT_STRING, ""),
                5, new Field("my_bytes", Type.COMPACT_NULLABLE_BYTES, ""),
                6, new Field("tagged_uuid", Type.UUID, ""),
                7, new Field("tagged_long", Type.INT64, "")
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1
    };

    public SimpleExampleMessageData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public SimpleExampleMessageData(Struct struct, short _version) {
        fromStruct(struct, _version);
    }

    public SimpleExampleMessageData() {
        this.processId = MessageUtil.ZERO_UUID;
        this.myTaggedIntArray = new ArrayList<Integer>();
        this.myNullableString = null;
        this.myInt16 = (short) 123;
        this.myFloat64 = Double.parseDouble("12.34");
        this.myString = "";
        this.myBytes = Bytes.EMPTY;
        this.taggedUuid = UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367");
        this.taggedLong = 0xcafcacafcacafcaL;
        this.zeroCopyByteBuffer = ByteUtils.EMPTY_BUF;
        this.nullableZeroCopyByteBuffer = ByteUtils.EMPTY_BUF;
    }

    @Override
    public short apiKey() {
        return -1;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 1;
    }

    @Override
    public void read(Readable _readable, short _version) {
        if (_version >= 1) {
            this.processId = _readable.readUUID();
        } else {
            this.processId = MessageUtil.ZERO_UUID;
        }
        {
            this.myTaggedIntArray = new ArrayList<Integer>();
        }
        {
            this.myNullableString = null;
        }
        this.myInt16 = (short) 123;
        this.myFloat64 = Double.parseDouble("12.34");
        {
            this.myString = "";
        }
        {
            this.myBytes = Bytes.EMPTY;
        }
        this.taggedUuid = UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367");
        this.taggedLong = 0xcafcacafcacafcaL;
        if (_version >= 1) {
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                throw new RuntimeException("non-nullable field zeroCopyByteBuffer was serialized as null");
            } else {
                this.zeroCopyByteBuffer = _readable.readByteBuffer(length);
            }
        } else {
            this.zeroCopyByteBuffer = ByteUtils.EMPTY_BUF;
        }
        if (_version >= 1) {
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                this.nullableZeroCopyByteBuffer = null;
            } else {
                this.nullableZeroCopyByteBuffer = _readable.readByteBuffer(length);
            }
        } else {
            this.nullableZeroCopyByteBuffer = ByteUtils.EMPTY_BUF;
        }
        this._unknownTaggedFields = null;
        if (_version >= 1) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    case 0: {
                        int arrayLength;
                        arrayLength = _readable.readUnsignedVarint() - 1;
                        if (arrayLength < 0) {
                            throw new RuntimeException("non-nullable field myTaggedIntArray was serialized as null");
                        } else {
                            ArrayList<Integer> newCollection = new ArrayList<Integer>(arrayLength);
                            for (int i = 0; i < arrayLength; i++) {
                                newCollection.add(_readable.readInt());
                            }
                            this.myTaggedIntArray = newCollection;
                        }
                        break;
                    }
                    case 1: {
                        int length;
                        length = _readable.readUnsignedVarint() - 1;
                        if (length < 0) {
                            this.myNullableString = null;
                        } else if (length > 0x7fff) {
                            throw new RuntimeException("string field myNullableString had invalid length " + length);
                        } else {
                            this.myNullableString = _readable.readString(length);
                        }
                        break;
                    }
                    case 2: {
                        this.myInt16 = _readable.readShort();
                        break;
                    }
                    case 3: {
                        this.myFloat64 = _readable.readDouble();
                        break;
                    }
                    case 4: {
                        int length;
                        length = _readable.readUnsignedVarint() - 1;
                        if (length < 0) {
                            throw new RuntimeException("non-nullable field myString was serialized as null");
                        } else if (length > 0x7fff) {
                            throw new RuntimeException("string field myString had invalid length " + length);
                        } else {
                            this.myString = _readable.readString(length);
                        }
                        break;
                    }
                    case 5: {
                        int length;
                        length = _readable.readUnsignedVarint() - 1;
                        if (length < 0) {
                            this.myBytes = null;
                        } else {
                            byte[] newBytes = new byte[length];
                            _readable.readArray(newBytes);
                            this.myBytes = newBytes;
                        }
                        break;
                    }
                    case 6: {
                        this.taggedUuid = _readable.readUUID();
                        break;
                    }
                    case 7: {
                        this.taggedLong = _readable.readLong();
                        break;
                    }
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version >= 1) {
            _writable.writeUUID(processId);
        } else {
            if (processId != MessageUtil.ZERO_UUID) {
                throw new UnsupportedVersionException("Attempted to write a non-default processId at version " + _version);
            }
        }
        if (_version >= 1) {
            if (!myTaggedIntArray.isEmpty()) {
                _numTaggedFields++;
            }
        } else {
            if (!myTaggedIntArray.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default myTaggedIntArray at version " + _version);
            }
        }
        if (_version >= 1) {
            if (myNullableString != null) {
                _numTaggedFields++;
            }
        } else {
            if (myNullableString != null) {
                throw new UnsupportedVersionException("Attempted to write a non-default myNullableString at version " + _version);
            }
        }
        if (_version >= 1) {
            if (myInt16 != (short) 123) {
                _numTaggedFields++;
            }
        } else {
            if (myInt16 != (short) 123) {
                throw new UnsupportedVersionException("Attempted to write a non-default myInt16 at version " + _version);
            }
        }
        if (_version >= 1) {
            if (myFloat64 != Double.parseDouble("12.34")) {
                _numTaggedFields++;
            }
        } else {
            if (myFloat64 != Double.parseDouble("12.34")) {
                throw new UnsupportedVersionException("Attempted to write a non-default myFloat64 at version " + _version);
            }
        }
        if (_version >= 1) {
            if (!myString.equals("")) {
                _numTaggedFields++;
            }
        } else {
            if (!myString.equals("")) {
                throw new UnsupportedVersionException("Attempted to write a non-default myString at version " + _version);
            }
        }
        if (_version >= 1) {
            if (myBytes == null || myBytes.length != 0) {
                _numTaggedFields++;
            }
        } else {
            if (myBytes == null || myBytes.length != 0) {
                throw new UnsupportedVersionException("Attempted to write a non-default myBytes at version " + _version);
            }
        }
        if (_version >= 1) {
            if (taggedUuid != UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367")) {
                _numTaggedFields++;
            }
        } else {
            if (taggedUuid != UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367")) {
                throw new UnsupportedVersionException("Attempted to write a non-default taggedUuid at version " + _version);
            }
        }
        if (_version >= 1) {
            if (taggedLong != 0xcafcacafcacafcaL) {
                _numTaggedFields++;
            }
        } else {
            if (taggedLong != 0xcafcacafcacafcaL) {
                throw new UnsupportedVersionException("Attempted to write a non-default taggedLong at version " + _version);
            }
        }
        if (_version >= 1) {
            _writable.writeUnsignedVarint(zeroCopyByteBuffer.remaining() + 1);
            _writable.writeByteBuffer(zeroCopyByteBuffer);
        } else {
            if (zeroCopyByteBuffer.hasRemaining()) {
                throw new UnsupportedVersionException("Attempted to write a non-default zeroCopyByteBuffer at version " + _version);
            }
        }
        if (_version >= 1) {
            if (nullableZeroCopyByteBuffer == null) {
                _writable.writeUnsignedVarint(0);
            } else {
                _writable.writeUnsignedVarint(nullableZeroCopyByteBuffer.remaining() + 1);
                _writable.writeByteBuffer(nullableZeroCopyByteBuffer);
            }
        } else {
            if (nullableZeroCopyByteBuffer == null || nullableZeroCopyByteBuffer.remaining() > 0) {
                throw new UnsupportedVersionException("Attempted to write a non-default nullableZeroCopyByteBuffer at version " + _version);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 1) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            {
                if (!myTaggedIntArray.isEmpty()) {
                    _writable.writeUnsignedVarint(0);
                    _writable.writeUnsignedVarint(_cache.getArraySizeInBytes(this.myTaggedIntArray));
                    _writable.writeUnsignedVarint(myTaggedIntArray.size() + 1);
                    for (Integer myTaggedIntArrayElement : myTaggedIntArray) {
                        _writable.writeInt(myTaggedIntArrayElement);
                    }
                }
            }
            if (myNullableString != null) {
                _writable.writeUnsignedVarint(1);
                byte[] _stringBytes = _cache.getSerializedValue(this.myNullableString);
                _writable.writeUnsignedVarint(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
            {
                if (myInt16 != (short) 123) {
                    _writable.writeUnsignedVarint(2);
                    _writable.writeUnsignedVarint(2);
                    _writable.writeShort(myInt16);
                }
            }
            {
                if (myFloat64 != Double.parseDouble("12.34")) {
                    _writable.writeUnsignedVarint(3);
                    _writable.writeUnsignedVarint(8);
                    _writable.writeDouble(myFloat64);
                }
            }
            {
                if (!myString.equals("")) {
                    _writable.writeUnsignedVarint(4);
                    byte[] _stringBytes = _cache.getSerializedValue(this.myString);
                    _writable.writeUnsignedVarint(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                    _writable.writeUnsignedVarint(_stringBytes.length + 1);
                    _writable.writeByteArray(_stringBytes);
                }
            }
            if (myBytes == null) {
                _writable.writeUnsignedVarint(5);
                _writable.writeUnsignedVarint(1);
                _writable.writeUnsignedVarint(0);
            } else {
                if (myBytes.length != 0) {
                    _writable.writeUnsignedVarint(5);
                    _writable.writeUnsignedVarint(this.myBytes.length + ByteUtils.sizeOfUnsignedVarint(this.myBytes.length + 1));
                    _writable.writeUnsignedVarint(this.myBytes.length + 1);
                    _writable.writeByteArray(this.myBytes);
                }
            }
            {
                if (taggedUuid != UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367")) {
                    _writable.writeUnsignedVarint(6);
                    _writable.writeUnsignedVarint(16);
                    _writable.writeUUID(taggedUuid);
                }
            }
            {
                if (taggedLong != 0xcafcacafcacafcaL) {
                    _writable.writeUnsignedVarint(7);
                    _writable.writeUnsignedVarint(8);
                    _writable.writeLong(taggedLong);
                }
            }
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public void fromStruct(Struct struct, short _version) {
        NavigableMap<Integer, Object> _taggedFields = null;
        this._unknownTaggedFields = null;
        if (_version >= 1) {
            _taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
        }
        if (_version >= 1) {
            this.processId = struct.getUUID("process_id");
        } else {
            this.processId = MessageUtil.ZERO_UUID;
        }
        if (_version >= 1) {
            if (_taggedFields.containsKey(0)) {
                Object[] _nestedObjects = (Object[]) _taggedFields.remove(0);
                this.myTaggedIntArray = new ArrayList<Integer>(_nestedObjects.length);
                for (Object nestedObject : _nestedObjects) {
                    this.myTaggedIntArray.add((Integer) nestedObject);
                }
            } else {
                this.myTaggedIntArray = new ArrayList<Integer>();
            }
        } else {
            this.myTaggedIntArray = new ArrayList<Integer>();
        }
        if (_version >= 1) {
            if (_taggedFields.containsKey(1)) {
                this.myNullableString = (String) _taggedFields.remove(1);
            } else {
                this.myNullableString = null;
            }
        } else {
            this.myNullableString = null;
        }
        if (_version >= 1) {
            if (_taggedFields.containsKey(2)) {
                this.myInt16 = (Short) _taggedFields.remove(2);
            } else {
                this.myInt16 = (short) 123;
            }
        } else {
            this.myInt16 = (short) 123;
        }
        if (_version >= 1) {
            if (_taggedFields.containsKey(3)) {
                this.myFloat64 = (Double) _taggedFields.remove(3);
            } else {
                this.myFloat64 = Double.parseDouble("12.34");
            }
        } else {
            this.myFloat64 = Double.parseDouble("12.34");
        }
        if (_version >= 1) {
            if (_taggedFields.containsKey(4)) {
                this.myString = (String) _taggedFields.remove(4);
            } else {
                this.myString = "";
            }
        } else {
            this.myString = "";
        }
        if (_version >= 1) {
            if (_taggedFields.containsKey(5)) {
                this.myBytes = MessageUtil.byteBufferToArray((ByteBuffer) _taggedFields.remove(5));
            } else {
                this.myBytes = Bytes.EMPTY;
            }
        } else {
            this.myBytes = Bytes.EMPTY;
        }
        if (_version >= 1) {
            if (_taggedFields.containsKey(6)) {
                this.taggedUuid = (UUID) _taggedFields.remove(6);
            } else {
                this.taggedUuid = UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367");
            }
        } else {
            this.taggedUuid = UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367");
        }
        if (_version >= 1) {
            if (_taggedFields.containsKey(7)) {
                this.taggedLong = (Long) _taggedFields.remove(7);
            } else {
                this.taggedLong = 0xcafcacafcacafcaL;
            }
        } else {
            this.taggedLong = 0xcafcacafcacafcaL;
        }
        if (_version >= 1) {
            this.zeroCopyByteBuffer = struct.getBytes("zero_copy_byte_buffer");
        } else {
            this.zeroCopyByteBuffer = ByteUtils.EMPTY_BUF;
        }
        if (_version >= 1) {
            this.nullableZeroCopyByteBuffer = struct.getBytes("nullable_zero_copy_byte_buffer");
        } else {
            this.nullableZeroCopyByteBuffer = ByteUtils.EMPTY_BUF;
        }
        if (_version >= 1) {
            if (!_taggedFields.isEmpty()) {
                this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
                for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
                    this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
                }
            }
        }
    }

    @Override
    public Struct toStruct(short _version) {
        TreeMap<Integer, Object> _taggedFields = null;
        if (_version >= 1) {
            _taggedFields = new TreeMap<>();
        }
        Struct struct = new Struct(SCHEMAS[_version]);
        if (_version >= 1) {
            struct.set("process_id", this.processId);
        } else {
            if (processId != MessageUtil.ZERO_UUID) {
                throw new UnsupportedVersionException("Attempted to write a non-default processId at version " + _version);
            }
        }
        if (_version >= 1) {
            if (!myTaggedIntArray.isEmpty()) {
                Integer[] _nestedObjects = new Integer[myTaggedIntArray.size()];
                int i = 0;
                for (Integer element : this.myTaggedIntArray) {
                    _nestedObjects[i++] = element;
                }
                _taggedFields.put(0, _nestedObjects);
            }
        } else {
            if (!myTaggedIntArray.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default myTaggedIntArray at version " + _version);
            }
        }
        if (_version >= 1) {
            if (myNullableString != null) {
                _taggedFields.put(1, myNullableString);
            }
        } else {
            if (myNullableString != null) {
                throw new UnsupportedVersionException("Attempted to write a non-default myNullableString at version " + _version);
            }
        }
        if (_version >= 1) {
            if (myInt16 != (short) 123) {
                _taggedFields.put(2, myInt16);
            }
        } else {
            if (myInt16 != (short) 123) {
                throw new UnsupportedVersionException("Attempted to write a non-default myInt16 at version " + _version);
            }
        }
        if (_version >= 1) {
            if (myFloat64 != Double.parseDouble("12.34")) {
                _taggedFields.put(3, myFloat64);
            }
        } else {
            if (myFloat64 != Double.parseDouble("12.34")) {
                throw new UnsupportedVersionException("Attempted to write a non-default myFloat64 at version " + _version);
            }
        }
        if (_version >= 1) {
            if (!myString.equals("")) {
                _taggedFields.put(4, myString);
            }
        } else {
            if (!myString.equals("")) {
                throw new UnsupportedVersionException("Attempted to write a non-default myString at version " + _version);
            }
        }
        if (_version >= 1) {
            if (myBytes == null || myBytes.length != 0) {
                _taggedFields.put(5, (myBytes == null) ? null : ByteBuffer.wrap(myBytes));
            }
        } else {
            if (myBytes == null || myBytes.length != 0) {
                throw new UnsupportedVersionException("Attempted to write a non-default myBytes at version " + _version);
            }
        }
        if (_version >= 1) {
            if (taggedUuid != UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367")) {
                _taggedFields.put(6, taggedUuid);
            }
        } else {
            if (taggedUuid != UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367")) {
                throw new UnsupportedVersionException("Attempted to write a non-default taggedUuid at version " + _version);
            }
        }
        if (_version >= 1) {
            if (taggedLong != 0xcafcacafcacafcaL) {
                _taggedFields.put(7, taggedLong);
            }
        } else {
            if (taggedLong != 0xcafcacafcacafcaL) {
                throw new UnsupportedVersionException("Attempted to write a non-default taggedLong at version " + _version);
            }
        }
        if (_version >= 1) {
            struct.set("zero_copy_byte_buffer", this.zeroCopyByteBuffer);
        } else {
            if (zeroCopyByteBuffer.hasRemaining()) {
                throw new UnsupportedVersionException("Attempted to write a non-default zeroCopyByteBuffer at version " + _version);
            }
        }
        if (_version >= 1) {
            struct.set("nullable_zero_copy_byte_buffer", this.nullableZeroCopyByteBuffer);
        } else {
            if (nullableZeroCopyByteBuffer == null || nullableZeroCopyByteBuffer.remaining() > 0) {
                throw new UnsupportedVersionException("Attempted to write a non-default nullableZeroCopyByteBuffer at version " + _version);
            }
        }
        if (_version >= 1) {
            struct.set("_tagged_fields", _taggedFields);
        }
        return struct;
    }

    @Override
    public int size(ObjectSerializationCache _cache, short _version) {
        int _size = 0, _numTaggedFields = 0;
        if (_version >= 1) {
            _size += 16;
        }
        if (_version >= 1) {
            {
                if (!myTaggedIntArray.isEmpty()) {
                    _numTaggedFields++;
                    _size += 1;
                    int _arraySize = 0;
                    _arraySize += ByteUtils.sizeOfUnsignedVarint(myTaggedIntArray.size() + 1);
                    _arraySize += myTaggedIntArray.size() * 4;
                    _cache.setArraySizeInBytes(myTaggedIntArray, _arraySize);
                    _size += _arraySize + ByteUtils.sizeOfUnsignedVarint(_arraySize);
                }
            }
        }
        if (_version >= 1) {
            if (myNullableString == null) {
            } else {
                _numTaggedFields++;
                _size += 1;
                byte[] _stringBytes = myNullableString.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'myNullableString' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(myNullableString, _stringBytes);
                int _stringPrefixSize = ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1);
                _size += _stringBytes.length + _stringPrefixSize + ByteUtils.sizeOfUnsignedVarint(_stringPrefixSize);
            }
        }
        if (_version >= 1) {
            if (myInt16 != (short) 123) {
                _numTaggedFields++;
                _size += 1;
                _size += 1;
                _size += 2;
            }
        }
        if (_version >= 1) {
            if (myFloat64 != Double.parseDouble("12.34")) {
                _numTaggedFields++;
                _size += 1;
                _size += 1;
                _size += 8;
            }
        }
        if (_version >= 1) {
            {
                if (!myString.equals("")) {
                    _numTaggedFields++;
                    _size += 1;
                    byte[] _stringBytes = myString.getBytes(StandardCharsets.UTF_8);
                    if (_stringBytes.length > 0x7fff) {
                        throw new RuntimeException("'myString' field is too long to be serialized");
                    }
                    _cache.cacheSerializedValue(myString, _stringBytes);
                    int _stringPrefixSize = ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1);
                    _size += _stringBytes.length + _stringPrefixSize + ByteUtils.sizeOfUnsignedVarint(_stringPrefixSize);
                }
            }
        }
        if (_version >= 1) {
            if (myBytes == null) {
                _numTaggedFields++;
                _size += 1;
                _size += 1;
                _size += 1;
            } else {
                if (myBytes.length != 0) {
                    _numTaggedFields++;
                    _size += 1;
                    int _bytesSize = myBytes.length;
                    _bytesSize += ByteUtils.sizeOfUnsignedVarint(myBytes.length + 1);
                    _size += _bytesSize + ByteUtils.sizeOfUnsignedVarint(_bytesSize);
                }
            }
        }
        if (_version >= 1) {
            if (taggedUuid != UUID.fromString("212d5494-4a8b-4fdf-94b3-88b470beb367")) {
                _numTaggedFields++;
                _size += 1;
                _size += 1;
                _size += 16;
            }
        }
        if (_version >= 1) {
            if (taggedLong != 0xcafcacafcacafcaL) {
                _numTaggedFields++;
                _size += 1;
                _size += 1;
                _size += 8;
            }
        }
        if (_version >= 1) {
            {
                int _bytesSize = zeroCopyByteBuffer.remaining();
                _bytesSize += ByteUtils.sizeOfUnsignedVarint(zeroCopyByteBuffer.remaining() + 1);
                _size += _bytesSize;
            }
        }
        if (_version >= 1) {
            if (nullableZeroCopyByteBuffer == null) {
                _size += 1;
            } else {
                int _bytesSize = nullableZeroCopyByteBuffer.remaining();
                _bytesSize += ByteUtils.sizeOfUnsignedVarint(nullableZeroCopyByteBuffer.remaining() + 1);
                _size += _bytesSize;
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                _size += _field.size();
            }
        }
        if (_version >= 1) {
            _size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
        return _size;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof SimpleExampleMessageData)) return false;
        SimpleExampleMessageData other = (SimpleExampleMessageData) obj;
        if (!this.processId.equals(other.processId)) return false;
        if (this.myTaggedIntArray == null) {
            if (other.myTaggedIntArray != null) return false;
        } else {
            if (!this.myTaggedIntArray.equals(other.myTaggedIntArray)) return false;
        }
        if (this.myNullableString == null) {
            if (other.myNullableString != null) return false;
        } else {
            if (!this.myNullableString.equals(other.myNullableString)) return false;
        }
        if (myInt16 != other.myInt16) return false;
        if (myFloat64 != other.myFloat64) return false;
        if (this.myString == null) {
            if (other.myString != null) return false;
        } else {
            if (!this.myString.equals(other.myString)) return false;
        }
        if (!Arrays.equals(this.myBytes, other.myBytes)) return false;
        if (!this.taggedUuid.equals(other.taggedUuid)) return false;
        if (taggedLong != other.taggedLong) return false;
        if (!Objects.equals(this.zeroCopyByteBuffer, other.zeroCopyByteBuffer)) return false;
        if (!Objects.equals(this.nullableZeroCopyByteBuffer, other.nullableZeroCopyByteBuffer)) return false;
        return true;
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + processId.hashCode();
        hashCode = 31 * hashCode + (myTaggedIntArray == null ? 0 : myTaggedIntArray.hashCode());
        hashCode = 31 * hashCode + (myNullableString == null ? 0 : myNullableString.hashCode());
        hashCode = 31 * hashCode + myInt16;
        hashCode = 31 * hashCode + Double.hashCode(myFloat64);
        hashCode = 31 * hashCode + (myString == null ? 0 : myString.hashCode());
        hashCode = 31 * hashCode + Arrays.hashCode(myBytes);
        hashCode = 31 * hashCode + taggedUuid.hashCode();
        hashCode = 31 * hashCode + ((int) (taggedLong >> 32) ^ (int) taggedLong);
        hashCode = 31 * hashCode + Objects.hashCode(zeroCopyByteBuffer);
        hashCode = 31 * hashCode + Objects.hashCode(nullableZeroCopyByteBuffer);
        return hashCode;
    }

    @Override
    public String toString() {
        return "SimpleExampleMessageData("
            + ", myTaggedIntArray=" + MessageUtil.deepToString(myTaggedIntArray.iterator())
            + ", myNullableString=" + ((myNullableString == null) ? "null" : "'" + myNullableString.toString() + "'")
            + ", myInt16=" + myInt16
            + ", myFloat64=" + myFloat64
            + ", myString=" + ((myString == null) ? "null" : "'" + myString.toString() + "'")
            + ", myBytes=" + Arrays.toString(myBytes)
            + ", taggedLong=" + taggedLong
            + ", zeroCopyByteBuffer=" + zeroCopyByteBuffer
            + ", nullableZeroCopyByteBuffer=" + nullableZeroCopyByteBuffer
            + ")";
    }

    public UUID processId() {
        return this.processId;
    }

    public List<Integer> myTaggedIntArray() {
        return this.myTaggedIntArray;
    }

    public String myNullableString() {
        return this.myNullableString;
    }

    public short myInt16() {
        return this.myInt16;
    }

    public double myFloat64() {
        return this.myFloat64;
    }

    public String myString() {
        return this.myString;
    }

    public byte[] myBytes() {
        return this.myBytes;
    }

    public UUID taggedUuid() {
        return this.taggedUuid;
    }

    public long taggedLong() {
        return this.taggedLong;
    }

    public ByteBuffer zeroCopyByteBuffer() {
        return this.zeroCopyByteBuffer;
    }

    public ByteBuffer nullableZeroCopyByteBuffer() {
        return this.nullableZeroCopyByteBuffer;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public SimpleExampleMessageData setProcessId(UUID v) {
        this.processId = v;
        return this;
    }

    public SimpleExampleMessageData setMyTaggedIntArray(List<Integer> v) {
        this.myTaggedIntArray = v;
        return this;
    }

    public SimpleExampleMessageData setMyNullableString(String v) {
        this.myNullableString = v;
        return this;
    }

    public SimpleExampleMessageData setMyInt16(short v) {
        this.myInt16 = v;
        return this;
    }

    public SimpleExampleMessageData setMyFloat64(double v) {
        this.myFloat64 = v;
        return this;
    }

    public SimpleExampleMessageData setMyString(String v) {
        this.myString = v;
        return this;
    }

    public SimpleExampleMessageData setMyBytes(byte[] v) {
        this.myBytes = v;
        return this;
    }

    public SimpleExampleMessageData setTaggedUuid(UUID v) {
        this.taggedUuid = v;
        return this;
    }

    public SimpleExampleMessageData setTaggedLong(long v) {
        this.taggedLong = v;
        return this;
    }

    public SimpleExampleMessageData setZeroCopyByteBuffer(ByteBuffer v) {
        this.zeroCopyByteBuffer = v;
        return this;
    }

    public SimpleExampleMessageData setNullableZeroCopyByteBuffer(ByteBuffer v) {
        this.nullableZeroCopyByteBuffer = v;
        return this;
    }
}
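For reference, the generated read/write paths above frame every tagged field and compact value with "unsigned varint" prefixes, and nullable lengths are shifted by one so that a raw 0 can stand for null (hence the readUnsignedVarint() - 1 pattern). The sketch below illustrates that varint encoding only; it is not part of this change, and VarintSketch and its methods are hypothetical names.

    import java.io.ByteArrayOutputStream;

    public class VarintSketch {
        // Emit seven bits per byte, low-order group first; the high bit of
        // each byte flags that another byte follows.
        static void writeUnsignedVarint(int value, ByteArrayOutputStream out) {
            while ((value & 0xffffff80) != 0) {
                out.write((value & 0x7f) | 0x80); // low 7 bits, continuation set
                value >>>= 7;
            }
            out.write(value); // final group, continuation bit clear
        }

        public static void main(String[] args) {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            writeUnsignedVarint(300, out); // 300 encodes as 0xAC 0x02
            for (byte b : out.toByteArray()) {
                System.out.printf("0x%02X ", b & 0xff);
            }
        }
    }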
@@ -0,0 +1,281 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;


public class AddOffsetsToTxnRequestData implements ApiMessage {
    private String transactionalId;
    private long producerId;
    private short producerEpoch;
    private String groupId;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("transactional_id", Type.STRING, "The transactional id corresponding to the transaction."),
            new Field("producer_id", Type.INT64, "Current producer id in use by the transactional id."),
            new Field("producer_epoch", Type.INT16, "Current epoch associated with the producer id."),
            new Field("group_id", Type.STRING, "The unique group identifier.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1
    };

    public AddOffsetsToTxnRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public AddOffsetsToTxnRequestData(Struct struct, short _version) {
        fromStruct(struct, _version);
    }

    public AddOffsetsToTxnRequestData() {
        this.transactionalId = "";
        this.producerId = 0L;
        this.producerEpoch = (short) 0;
        this.groupId = "";
    }

    @Override
    public short apiKey() {
        return 25;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 1;
    }

    @Override
    public void read(Readable _readable, short _version) {
        {
            int length;
            length = _readable.readShort();
            if (length < 0) {
                throw new RuntimeException("non-nullable field transactionalId was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field transactionalId had invalid length " + length);
            } else {
                this.transactionalId = _readable.readString(length);
            }
        }
        this.producerId = _readable.readLong();
        this.producerEpoch = _readable.readShort();
        {
            int length;
            length = _readable.readShort();
            if (length < 0) {
                throw new RuntimeException("non-nullable field groupId was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field groupId had invalid length " + length);
            } else {
                this.groupId = _readable.readString(length);
            }
        }
        this._unknownTaggedFields = null;
    }

    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = _cache.getSerializedValue(transactionalId);
            _writable.writeShort((short) _stringBytes.length);
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeLong(producerId);
        _writable.writeShort(producerEpoch);
        {
            byte[] _stringBytes = _cache.getSerializedValue(groupId);
            _writable.writeShort((short) _stringBytes.length);
            _writable.writeByteArray(_stringBytes);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public void fromStruct(Struct struct, short _version) {
        this._unknownTaggedFields = null;
        this.transactionalId = struct.getString("transactional_id");
        this.producerId = struct.getLong("producer_id");
        this.producerEpoch = struct.getShort("producer_epoch");
        this.groupId = struct.getString("group_id");
    }

    @Override
    public Struct toStruct(short _version) {
        TreeMap<Integer, Object> _taggedFields = null;
        Struct struct = new Struct(SCHEMAS[_version]);
        struct.set("transactional_id", this.transactionalId);
        struct.set("producer_id", this.producerId);
        struct.set("producer_epoch", this.producerEpoch);
        struct.set("group_id", this.groupId);
        return struct;
    }

    @Override
    public int size(ObjectSerializationCache _cache, short _version) {
        int _size = 0, _numTaggedFields = 0;
        {
            byte[] _stringBytes = transactionalId.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'transactionalId' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(transactionalId, _stringBytes);
            _size += _stringBytes.length + 2;
        }
        _size += 8;
        _size += 2;
        {
            byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'groupId' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(groupId, _stringBytes);
            _size += _stringBytes.length + 2;
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                _size += _field.size();
            }
        }
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
        return _size;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof AddOffsetsToTxnRequestData)) return false;
        AddOffsetsToTxnRequestData other = (AddOffsetsToTxnRequestData) obj;
        if (this.transactionalId == null) {
            if (other.transactionalId != null) return false;
        } else {
            if (!this.transactionalId.equals(other.transactionalId)) return false;
        }
        if (producerId != other.producerId) return false;
        if (producerEpoch != other.producerEpoch) return false;
        if (this.groupId == null) {
            if (other.groupId != null) return false;
        } else {
            if (!this.groupId.equals(other.groupId)) return false;
        }
        return true;
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (transactionalId == null ? 0 : transactionalId.hashCode());
        hashCode = 31 * hashCode + ((int) (producerId >> 32) ^ (int) producerId);
        hashCode = 31 * hashCode + producerEpoch;
        hashCode = 31 * hashCode + (groupId == null ? 0 : groupId.hashCode());
        return hashCode;
    }

    @Override
    public String toString() {
        return "AddOffsetsToTxnRequestData("
            + "transactionalId=" + ((transactionalId == null) ? "null" : "'" + transactionalId.toString() + "'")
            + ", producerId=" + producerId
            + ", producerEpoch=" + producerEpoch
            + ", groupId=" + ((groupId == null) ? "null" : "'" + groupId.toString() + "'")
            + ")";
    }

    public String transactionalId() {
        return this.transactionalId;
    }

    public long producerId() {
        return this.producerId;
    }

    public short producerEpoch() {
        return this.producerEpoch;
    }

    public String groupId() {
        return this.groupId;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public AddOffsetsToTxnRequestData setTransactionalId(String v) {
        this.transactionalId = v;
        return this;
    }

    public AddOffsetsToTxnRequestData setProducerId(long v) {
        this.producerId = v;
        return this;
    }

    public AddOffsetsToTxnRequestData setProducerEpoch(short v) {
        this.producerEpoch = v;
        return this;
    }

    public AddOffsetsToTxnRequestData setGroupId(String v) {
        this.groupId = v;
        return this;
    }
}
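The chained setters above all return this, so request construction composes fluently. A minimal usage sketch (all values hypothetical):

    AddOffsetsToTxnRequestData request = new AddOffsetsToTxnRequestData()
        .setTransactionalId("example-txn")
        .setProducerId(4000L)
        .setProducerEpoch((short) 1)
        .setGroupId("example-group");
    short apiKey = request.apiKey(); // 25, the AddOffsetsToTxn API key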
@@ -0,0 +1,190 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;


public class AddOffsetsToTxnResponseData implements ApiMessage {
    private int throttleTimeMs;
    private short errorCode;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The response error code, or 0 if there was no error.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1
    };

    public AddOffsetsToTxnResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public AddOffsetsToTxnResponseData(Struct struct, short _version) {
        fromStruct(struct, _version);
    }

    public AddOffsetsToTxnResponseData() {
        this.throttleTimeMs = 0;
        this.errorCode = (short) 0;
    }

    @Override
    public short apiKey() {
        return 25;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 1;
    }

    @Override
    public void read(Readable _readable, short _version) {
        this.throttleTimeMs = _readable.readInt();
        this.errorCode = _readable.readShort();
        this._unknownTaggedFields = null;
    }

    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(throttleTimeMs);
        _writable.writeShort(errorCode);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public void fromStruct(Struct struct, short _version) {
        this._unknownTaggedFields = null;
        this.throttleTimeMs = struct.getInt("throttle_time_ms");
        this.errorCode = struct.getShort("error_code");
    }

    @Override
    public Struct toStruct(short _version) {
        TreeMap<Integer, Object> _taggedFields = null;
        Struct struct = new Struct(SCHEMAS[_version]);
        struct.set("throttle_time_ms", this.throttleTimeMs);
        struct.set("error_code", this.errorCode);
        return struct;
    }

    @Override
    public int size(ObjectSerializationCache _cache, short _version) {
        int _size = 0, _numTaggedFields = 0;
        _size += 4;
        _size += 2;
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                _size += _field.size();
            }
        }
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
        return _size;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof AddOffsetsToTxnResponseData)) return false;
        AddOffsetsToTxnResponseData other = (AddOffsetsToTxnResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (errorCode != other.errorCode) return false;
        return true;
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + errorCode;
        return hashCode;
    }

    @Override
    public String toString() {
        return "AddOffsetsToTxnResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", errorCode=" + errorCode
            + ")";
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    public short errorCode() {
        return this.errorCode;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public AddOffsetsToTxnResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    public AddOffsetsToTxnResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }
}
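Note that in these generated messages size() is meant to run before write() against the same ObjectSerializationCache, since size() caches any serialized string bytes that write() later reuses. A round-trip sketch, assuming Kafka's ByteBufferAccessor (a ByteBuffer-backed Readable/Writable); the variable names are illustrative:

    AddOffsetsToTxnResponseData response = new AddOffsetsToTxnResponseData()
        .setThrottleTimeMs(100)
        .setErrorCode((short) 0);
    ObjectSerializationCache cache = new ObjectSerializationCache();
    short version = response.highestSupportedVersion();
    ByteBuffer buffer = ByteBuffer.allocate(response.size(cache, version));
    response.write(new ByteBufferAccessor(buffer), cache, version);
    buffer.flip();
    AddOffsetsToTxnResponseData decoded =
        new AddOffsetsToTxnResponseData(new ByteBufferAccessor(buffer), version);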
@@ -0,0 +1,576 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.ImplicitLinkedHashCollection;
import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection;


public class AddPartitionsToTxnRequestData implements ApiMessage {
    private String transactionalId;
    private long producerId;
    private short producerEpoch;
    private AddPartitionsToTxnTopicCollection topics;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("transactional_id", Type.STRING, "The transactional id corresponding to the transaction."),
            new Field("producer_id", Type.INT64, "Current producer id in use by the transactional id."),
            new Field("producer_epoch", Type.INT16, "Current epoch associated with the producer id."),
new Field("topics", new ArrayOf(AddPartitionsToTxnTopic.SCHEMA_0), "The partitions to add to the transation.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1
    };

    public AddPartitionsToTxnRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public AddPartitionsToTxnRequestData(Struct struct, short _version) {
        fromStruct(struct, _version);
    }

    public AddPartitionsToTxnRequestData() {
        this.transactionalId = "";
        this.producerId = 0L;
        this.producerEpoch = (short) 0;
        this.topics = new AddPartitionsToTxnTopicCollection(0);
    }

    @Override
    public short apiKey() {
        return 24;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 1;
    }

    @Override
    public void read(Readable _readable, short _version) {
        {
            int length;
            length = _readable.readShort();
            if (length < 0) {
                throw new RuntimeException("non-nullable field transactionalId was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field transactionalId had invalid length " + length);
            } else {
                this.transactionalId = _readable.readString(length);
            }
        }
        this.producerId = _readable.readLong();
        this.producerEpoch = _readable.readShort();
        {
            int arrayLength;
            arrayLength = _readable.readInt();
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field topics was serialized as null");
            } else {
                AddPartitionsToTxnTopicCollection newCollection = new AddPartitionsToTxnTopicCollection(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new AddPartitionsToTxnTopic(_readable, _version));
                }
                this.topics = newCollection;
            }
        }
        this._unknownTaggedFields = null;
    }

    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = _cache.getSerializedValue(transactionalId);
            _writable.writeShort((short) _stringBytes.length);
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeLong(producerId);
        _writable.writeShort(producerEpoch);
        _writable.writeInt(topics.size());
        for (AddPartitionsToTxnTopic topicsElement : topics) {
            topicsElement.write(_writable, _cache, _version);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public void fromStruct(Struct struct, short _version) {
        this._unknownTaggedFields = null;
        this.transactionalId = struct.getString("transactional_id");
        this.producerId = struct.getLong("producer_id");
        this.producerEpoch = struct.getShort("producer_epoch");
        {
            Object[] _nestedObjects = struct.getArray("topics");
            this.topics = new AddPartitionsToTxnTopicCollection(_nestedObjects.length);
            for (Object nestedObject : _nestedObjects) {
                this.topics.add(new AddPartitionsToTxnTopic((Struct) nestedObject, _version));
            }
        }
    }

    @Override
    public Struct toStruct(short _version) {
        TreeMap<Integer, Object> _taggedFields = null;
        Struct struct = new Struct(SCHEMAS[_version]);
        struct.set("transactional_id", this.transactionalId);
        struct.set("producer_id", this.producerId);
        struct.set("producer_epoch", this.producerEpoch);
        {
            Struct[] _nestedObjects = new Struct[topics.size()];
            int i = 0;
            for (AddPartitionsToTxnTopic element : this.topics) {
                _nestedObjects[i++] = element.toStruct(_version);
            }
            struct.set("topics", (Object[]) _nestedObjects);
        }
        return struct;
    }

    @Override
    public int size(ObjectSerializationCache _cache, short _version) {
        int _size = 0, _numTaggedFields = 0;
        {
            byte[] _stringBytes = transactionalId.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'transactionalId' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(transactionalId, _stringBytes);
            _size += _stringBytes.length + 2;
        }
        _size += 8;
        _size += 2;
        {
            int _arraySize = 0;
            _arraySize += 4;
            for (AddPartitionsToTxnTopic topicsElement : topics) {
                _arraySize += topicsElement.size(_cache, _version);
            }
            _size += _arraySize;
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                _size += _field.size();
            }
        }
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
        return _size;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof AddPartitionsToTxnRequestData)) return false;
        AddPartitionsToTxnRequestData other = (AddPartitionsToTxnRequestData) obj;
        if (this.transactionalId == null) {
            if (other.transactionalId != null) return false;
        } else {
            if (!this.transactionalId.equals(other.transactionalId)) return false;
        }
        if (producerId != other.producerId) return false;
        if (producerEpoch != other.producerEpoch) return false;
        if (this.topics == null) {
            if (other.topics != null) return false;
        } else {
            if (!this.topics.equals(other.topics)) return false;
        }
        return true;
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (transactionalId == null ? 0 : transactionalId.hashCode());
        hashCode = 31 * hashCode + ((int) (producerId >> 32) ^ (int) producerId);
        hashCode = 31 * hashCode + producerEpoch;
        hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
        return hashCode;
    }

    @Override
    public String toString() {
        return "AddPartitionsToTxnRequestData("
            + "transactionalId=" + ((transactionalId == null) ? "null" : "'" + transactionalId.toString() + "'")
            + ", producerId=" + producerId
            + ", producerEpoch=" + producerEpoch
            + ", topics=" + MessageUtil.deepToString(topics.iterator())
            + ")";
    }

    public String transactionalId() {
        return this.transactionalId;
    }

    public long producerId() {
        return this.producerId;
    }

    public short producerEpoch() {
        return this.producerEpoch;
    }

    public AddPartitionsToTxnTopicCollection topics() {
        return this.topics;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public AddPartitionsToTxnRequestData setTransactionalId(String v) {
        this.transactionalId = v;
        return this;
    }

    public AddPartitionsToTxnRequestData setProducerId(long v) {
        this.producerId = v;
        return this;
    }

    public AddPartitionsToTxnRequestData setProducerEpoch(short v) {
        this.producerEpoch = v;
        return this;
    }

    public AddPartitionsToTxnRequestData setTopics(AddPartitionsToTxnTopicCollection v) {
        this.topics = v;
        return this;
    }

    static public class AddPartitionsToTxnTopic implements Message, ImplicitLinkedHashMultiCollection.Element {
        private String name;
        private List<Integer> partitions;
        private List<RawTaggedField> _unknownTaggedFields;
        private int next;
        private int prev;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("name", Type.STRING, "The name of the topic."),
                new Field("partitions", new ArrayOf(Type.INT32), "The partition indexes to add to the transaction")
            );

        public static final Schema SCHEMA_1 = SCHEMA_0;

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public AddPartitionsToTxnTopic(Readable _readable, short _version) {
            read(_readable, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public AddPartitionsToTxnTopic(Struct struct, short _version) {
            fromStruct(struct, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public AddPartitionsToTxnTopic() {
            this.name = "";
            this.partitions = new ArrayList<Integer>();
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }


        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnTopic");
            }
            {
                int length;
                length = _readable.readShort();
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field partitions was serialized as null");
                } else {
                    ArrayList<Integer> newCollection = new ArrayList<Integer>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(_readable.readInt());
                    }
                    this.partitions = newCollection;
                }
            }
            this._unknownTaggedFields = null;
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnTopic");
            }
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(name);
                _writable.writeShort((short) _stringBytes.length);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeInt(partitions.size());
            for (Integer partitionsElement : partitions) {
                _writable.writeInt(partitionsElement);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }

        @SuppressWarnings("unchecked")
        @Override
        public void fromStruct(Struct struct, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnTopic");
            }
            this._unknownTaggedFields = null;
            this.name = struct.getString("name");
            {
                Object[] _nestedObjects = struct.getArray("partitions");
                this.partitions = new ArrayList<Integer>(_nestedObjects.length);
                for (Object nestedObject : _nestedObjects) {
                    this.partitions.add((Integer) nestedObject);
                }
            }
        }

        @Override
        public Struct toStruct(short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnTopic");
            }
            TreeMap<Integer, Object> _taggedFields = null;
            Struct struct = new Struct(SCHEMAS[_version]);
            struct.set("name", this.name);
            {
                Integer[] _nestedObjects = new Integer[partitions.size()];
                int i = 0;
                for (Integer element : this.partitions) {
                    _nestedObjects[i++] = element;
                }
                struct.set("partitions", (Object[]) _nestedObjects);
            }
            return struct;
        }

        @Override
        public int size(ObjectSerializationCache _cache, short _version) {
            int _size = 0, _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of AddPartitionsToTxnTopic");
            }
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(name, _stringBytes);
                _size += _stringBytes.length + 2;
            }
            {
                int _arraySize = 0;
                _arraySize += 4;
                _arraySize += partitions.size() * 4;
                _size += _arraySize;
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                    _size += _field.size();
                }
            }
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
            return _size;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof AddPartitionsToTxnTopic)) return false;
            AddPartitionsToTxnTopic other = (AddPartitionsToTxnTopic) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            return true;
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            return hashCode;
        }

        @Override
        public String toString() {
            return "AddPartitionsToTxnTopic("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", partitions=" + MessageUtil.deepToString(partitions.iterator())
                + ")";
        }

        public String name() {
            return this.name;
        }

        public List<Integer> partitions() {
            return this.partitions;
        }

        @Override
        public int next() {
            return this.next;
        }

        @Override
        public int prev() {
            return this.prev;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public AddPartitionsToTxnTopic setName(String v) {
            this.name = v;
            return this;
        }

        public AddPartitionsToTxnTopic setPartitions(List<Integer> v) {
            this.partitions = v;
            return this;
        }

        @Override
        public void setNext(int v) {
            this.next = v;
        }

        @Override
        public void setPrev(int v) {
            this.prev = v;
        }
    }

    public static class AddPartitionsToTxnTopicCollection extends ImplicitLinkedHashMultiCollection<AddPartitionsToTxnTopic> {
        public AddPartitionsToTxnTopicCollection() {
            super();
        }

        public AddPartitionsToTxnTopicCollection(int expectedNumElements) {
            super(expectedNumElements);
        }

        public AddPartitionsToTxnTopicCollection(Iterator<AddPartitionsToTxnTopic> iterator) {
            super(iterator);
        }

        public AddPartitionsToTxnTopic find(String name) {
            AddPartitionsToTxnTopic _key = new AddPartitionsToTxnTopic();
            _key.setName(name);
            return find(_key);
        }

        public List<AddPartitionsToTxnTopic> findAll(String name) {
            AddPartitionsToTxnTopic _key = new AddPartitionsToTxnTopic();
            _key.setName(name);
            return findAll(_key);
        }

    }
}
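The same pattern round-trips the request class above. A hypothetical sketch (the topic name, partition list, and ids are invented; `ByteBufferAccessor` implements both the `Readable` and `Writable` interfaces used by the generated code):

    AddPartitionsToTxnRequestData.AddPartitionsToTxnTopicCollection topics =
        new AddPartitionsToTxnRequestData.AddPartitionsToTxnTopicCollection(1);
    topics.add(new AddPartitionsToTxnRequestData.AddPartitionsToTxnTopic()
        .setName("payments")
        .setPartitions(java.util.Arrays.asList(0, 1, 2)));
    AddPartitionsToTxnRequestData request = new AddPartitionsToTxnRequestData()
        .setTransactionalId("txn-1")
        .setProducerId(4000L)
        .setProducerEpoch((short) 0)
        .setTopics(topics);
    ObjectSerializationCache cache = new ObjectSerializationCache();
    // size() caches the UTF-8 bytes of transactionalId; write() then fetches them
    // back via getSerializedValue(), so the two calls must share the same cache.
    java.nio.ByteBuffer buffer = java.nio.ByteBuffer.allocate(request.size(cache, (short) 1));
    request.write(new org.apache.kafka.common.protocol.ByteBufferAccessor(buffer), cache, (short) 1);
    buffer.flip();
    AddPartitionsToTxnRequestData copy = new AddPartitionsToTxnRequestData(
        new org.apache.kafka.common.protocol.ByteBufferAccessor(buffer), (short) 1);
    assert copy.equals(request);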
@@ -0,0 +1,730 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.ImplicitLinkedHashCollection;
import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection;


public class AddPartitionsToTxnResponseData implements ApiMessage {
    private int throttleTimeMs;
    private AddPartitionsToTxnTopicResultCollection results;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("results", new ArrayOf(AddPartitionsToTxnTopicResult.SCHEMA_0), "The results for each topic.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1
    };

    public AddPartitionsToTxnResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public AddPartitionsToTxnResponseData(Struct struct, short _version) {
        fromStruct(struct, _version);
    }

    public AddPartitionsToTxnResponseData() {
        this.throttleTimeMs = 0;
        this.results = new AddPartitionsToTxnTopicResultCollection(0);
    }

    @Override
    public short apiKey() {
        return 24;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 1;
    }

    @Override
    public void read(Readable _readable, short _version) {
        this.throttleTimeMs = _readable.readInt();
        {
            int arrayLength;
            arrayLength = _readable.readInt();
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field results was serialized as null");
            } else {
                AddPartitionsToTxnTopicResultCollection newCollection = new AddPartitionsToTxnTopicResultCollection(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new AddPartitionsToTxnTopicResult(_readable, _version));
                }
                this.results = newCollection;
            }
        }
        this._unknownTaggedFields = null;
    }

    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(throttleTimeMs);
        _writable.writeInt(results.size());
        for (AddPartitionsToTxnTopicResult resultsElement : results) {
            resultsElement.write(_writable, _cache, _version);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public void fromStruct(Struct struct, short _version) {
        this._unknownTaggedFields = null;
        this.throttleTimeMs = struct.getInt("throttle_time_ms");
        {
            Object[] _nestedObjects = struct.getArray("results");
            this.results = new AddPartitionsToTxnTopicResultCollection(_nestedObjects.length);
            for (Object nestedObject : _nestedObjects) {
                this.results.add(new AddPartitionsToTxnTopicResult((Struct) nestedObject, _version));
            }
        }
    }

    @Override
    public Struct toStruct(short _version) {
        TreeMap<Integer, Object> _taggedFields = null;
        Struct struct = new Struct(SCHEMAS[_version]);
        struct.set("throttle_time_ms", this.throttleTimeMs);
        {
            Struct[] _nestedObjects = new Struct[results.size()];
            int i = 0;
            for (AddPartitionsToTxnTopicResult element : this.results) {
                _nestedObjects[i++] = element.toStruct(_version);
            }
            struct.set("results", (Object[]) _nestedObjects);
        }
        return struct;
    }

    @Override
    public int size(ObjectSerializationCache _cache, short _version) {
        int _size = 0, _numTaggedFields = 0;
        _size += 4;
        {
            int _arraySize = 0;
            _arraySize += 4;
            for (AddPartitionsToTxnTopicResult resultsElement : results) {
                _arraySize += resultsElement.size(_cache, _version);
            }
            _size += _arraySize;
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                _size += _field.size();
            }
        }
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
        return _size;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof AddPartitionsToTxnResponseData)) return false;
        AddPartitionsToTxnResponseData other = (AddPartitionsToTxnResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (this.results == null) {
            if (other.results != null) return false;
        } else {
            if (!this.results.equals(other.results)) return false;
        }
        return true;
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + (results == null ? 0 : results.hashCode());
        return hashCode;
    }

    @Override
    public String toString() {
        return "AddPartitionsToTxnResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", results=" + MessageUtil.deepToString(results.iterator())
            + ")";
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    public AddPartitionsToTxnTopicResultCollection results() {
        return this.results;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public AddPartitionsToTxnResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    public AddPartitionsToTxnResponseData setResults(AddPartitionsToTxnTopicResultCollection v) {
        this.results = v;
        return this;
    }

    static public class AddPartitionsToTxnTopicResult implements Message, ImplicitLinkedHashMultiCollection.Element {
        private String name;
        private AddPartitionsToTxnPartitionResultCollection results;
        private List<RawTaggedField> _unknownTaggedFields;
        private int next;
        private int prev;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("name", Type.STRING, "The topic name."),
                new Field("results", new ArrayOf(AddPartitionsToTxnPartitionResult.SCHEMA_0), "The results for each partition")
            );

        public static final Schema SCHEMA_1 = SCHEMA_0;

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public AddPartitionsToTxnTopicResult(Readable _readable, short _version) {
            read(_readable, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public AddPartitionsToTxnTopicResult(Struct struct, short _version) {
            fromStruct(struct, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public AddPartitionsToTxnTopicResult() {
            this.name = "";
            this.results = new AddPartitionsToTxnPartitionResultCollection(0);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }


        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnTopicResult");
            }
            {
                int length;
                length = _readable.readShort();
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field results was serialized as null");
                } else {
                    AddPartitionsToTxnPartitionResultCollection newCollection = new AddPartitionsToTxnPartitionResultCollection(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new AddPartitionsToTxnPartitionResult(_readable, _version));
                    }
                    this.results = newCollection;
                }
            }
            this._unknownTaggedFields = null;
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnTopicResult");
            }
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(name);
                _writable.writeShort((short) _stringBytes.length);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeInt(results.size());
            for (AddPartitionsToTxnPartitionResult resultsElement : results) {
                resultsElement.write(_writable, _cache, _version);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }

        @SuppressWarnings("unchecked")
        @Override
        public void fromStruct(Struct struct, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnTopicResult");
            }
            this._unknownTaggedFields = null;
            this.name = struct.getString("name");
            {
                Object[] _nestedObjects = struct.getArray("results");
                this.results = new AddPartitionsToTxnPartitionResultCollection(_nestedObjects.length);
                for (Object nestedObject : _nestedObjects) {
                    this.results.add(new AddPartitionsToTxnPartitionResult((Struct) nestedObject, _version));
                }
            }
        }

        @Override
        public Struct toStruct(short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnTopicResult");
            }
            TreeMap<Integer, Object> _taggedFields = null;
            Struct struct = new Struct(SCHEMAS[_version]);
            struct.set("name", this.name);
            {
                Struct[] _nestedObjects = new Struct[results.size()];
                int i = 0;
                for (AddPartitionsToTxnPartitionResult element : this.results) {
                    _nestedObjects[i++] = element.toStruct(_version);
                }
                struct.set("results", (Object[]) _nestedObjects);
            }
            return struct;
        }

        @Override
        public int size(ObjectSerializationCache _cache, short _version) {
            int _size = 0, _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of AddPartitionsToTxnTopicResult");
            }
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(name, _stringBytes);
                _size += _stringBytes.length + 2;
            }
            {
                int _arraySize = 0;
                _arraySize += 4;
                for (AddPartitionsToTxnPartitionResult resultsElement : results) {
                    _arraySize += resultsElement.size(_cache, _version);
                }
                _size += _arraySize;
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                    _size += _field.size();
                }
            }
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
            return _size;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof AddPartitionsToTxnTopicResult)) return false;
            AddPartitionsToTxnTopicResult other = (AddPartitionsToTxnTopicResult) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            return true;
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            return hashCode;
        }

        @Override
        public String toString() {
            return "AddPartitionsToTxnTopicResult("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", results=" + MessageUtil.deepToString(results.iterator())
                + ")";
        }

        public String name() {
            return this.name;
        }

        public AddPartitionsToTxnPartitionResultCollection results() {
            return this.results;
        }

        @Override
        public int next() {
            return this.next;
        }

        @Override
        public int prev() {
            return this.prev;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public AddPartitionsToTxnTopicResult setName(String v) {
            this.name = v;
            return this;
        }

        public AddPartitionsToTxnTopicResult setResults(AddPartitionsToTxnPartitionResultCollection v) {
            this.results = v;
            return this;
        }

        @Override
        public void setNext(int v) {
            this.next = v;
        }

        @Override
        public void setPrev(int v) {
            this.prev = v;
        }
    }

    static public class AddPartitionsToTxnPartitionResult implements Message, ImplicitLinkedHashMultiCollection.Element {
        private int partitionIndex;
        private short errorCode;
        private List<RawTaggedField> _unknownTaggedFields;
        private int next;
        private int prev;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("partition_index", Type.INT32, "The partition indexes."),
                new Field("error_code", Type.INT16, "The response error code.")
            );

        public static final Schema SCHEMA_1 = SCHEMA_0;

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public AddPartitionsToTxnPartitionResult(Readable _readable, short _version) {
            read(_readable, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public AddPartitionsToTxnPartitionResult(Struct struct, short _version) {
            fromStruct(struct, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public AddPartitionsToTxnPartitionResult() {
            this.partitionIndex = 0;
            this.errorCode = (short) 0;
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }


        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnPartitionResult");
            }
            this.partitionIndex = _readable.readInt();
            this.errorCode = _readable.readShort();
            this._unknownTaggedFields = null;
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnPartitionResult");
            }
            int _numTaggedFields = 0;
            _writable.writeInt(partitionIndex);
            _writable.writeShort(errorCode);
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }

        @SuppressWarnings("unchecked")
        @Override
        public void fromStruct(Struct struct, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnPartitionResult");
            }
            this._unknownTaggedFields = null;
            this.partitionIndex = struct.getInt("partition_index");
            this.errorCode = struct.getShort("error_code");
        }

        @Override
        public Struct toStruct(short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnPartitionResult");
            }
            TreeMap<Integer, Object> _taggedFields = null;
            Struct struct = new Struct(SCHEMAS[_version]);
            struct.set("partition_index", this.partitionIndex);
            struct.set("error_code", this.errorCode);
            return struct;
        }

        @Override
        public int size(ObjectSerializationCache _cache, short _version) {
            int _size = 0, _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of AddPartitionsToTxnPartitionResult");
            }
            _size += 4;
            _size += 2;
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                    _size += _field.size();
                }
            }
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
            return _size;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof AddPartitionsToTxnPartitionResult)) return false;
            AddPartitionsToTxnPartitionResult other = (AddPartitionsToTxnPartitionResult) obj;
            if (partitionIndex != other.partitionIndex) return false;
            return true;
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + partitionIndex;
            return hashCode;
        }

        @Override
        public String toString() {
            return "AddPartitionsToTxnPartitionResult("
                + "partitionIndex=" + partitionIndex
                + ", errorCode=" + errorCode
                + ")";
        }

        public int partitionIndex() {
            return this.partitionIndex;
        }

        public short errorCode() {
            return this.errorCode;
        }

        @Override
        public int next() {
            return this.next;
        }

        @Override
        public int prev() {
            return this.prev;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public AddPartitionsToTxnPartitionResult setPartitionIndex(int v) {
            this.partitionIndex = v;
            return this;
        }

        public AddPartitionsToTxnPartitionResult setErrorCode(short v) {
            this.errorCode = v;
            return this;
        }

        @Override
        public void setNext(int v) {
            this.next = v;
        }

        @Override
        public void setPrev(int v) {
            this.prev = v;
        }
    }

    public static class AddPartitionsToTxnPartitionResultCollection extends ImplicitLinkedHashMultiCollection<AddPartitionsToTxnPartitionResult> {
        public AddPartitionsToTxnPartitionResultCollection() {
            super();
        }

        public AddPartitionsToTxnPartitionResultCollection(int expectedNumElements) {
            super(expectedNumElements);
        }

        public AddPartitionsToTxnPartitionResultCollection(Iterator<AddPartitionsToTxnPartitionResult> iterator) {
            super(iterator);
        }

        public AddPartitionsToTxnPartitionResult find(int partitionIndex) {
            AddPartitionsToTxnPartitionResult _key = new AddPartitionsToTxnPartitionResult();
            _key.setPartitionIndex(partitionIndex);
            return find(_key);
        }

        public List<AddPartitionsToTxnPartitionResult> findAll(int partitionIndex) {
            AddPartitionsToTxnPartitionResult _key = new AddPartitionsToTxnPartitionResult();
            _key.setPartitionIndex(partitionIndex);
            return findAll(_key);
        }

    }

    public static class AddPartitionsToTxnTopicResultCollection extends ImplicitLinkedHashMultiCollection<AddPartitionsToTxnTopicResult> {
        public AddPartitionsToTxnTopicResultCollection() {
            super();
        }

        public AddPartitionsToTxnTopicResultCollection(int expectedNumElements) {
            super(expectedNumElements);
        }

        public AddPartitionsToTxnTopicResultCollection(Iterator<AddPartitionsToTxnTopicResult> iterator) {
            super(iterator);
        }

        public AddPartitionsToTxnTopicResult find(String name) {
            AddPartitionsToTxnTopicResult _key = new AddPartitionsToTxnTopicResult();
            _key.setName(name);
            return find(_key);
        }

        public List<AddPartitionsToTxnTopicResult> findAll(String name) {
            AddPartitionsToTxnTopicResult _key = new AddPartitionsToTxnTopicResult();
            _key.setName(name);
            return findAll(_key);
        }

    }
}
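Note that the nested result types implement `ImplicitLinkedHashMultiCollection.Element`, with `equals()`/`hashCode()` keyed on the topic name and the partition index respectively, so the generated collections can locate an element by key without a linear scan. A hypothetical lookup sketch, assuming `response` is a deserialized `AddPartitionsToTxnResponseData` and the topic name is invented:

    AddPartitionsToTxnResponseData.AddPartitionsToTxnTopicResult topicResult =
        response.results().find("payments");  // hash lookup keyed by topic name
    if (topicResult != null) {
        AddPartitionsToTxnResponseData.AddPartitionsToTxnPartitionResult partitionResult =
            topicResult.results().find(0);    // keyed by partition index
        if (partitionResult != null && partitionResult.errorCode() != 0) {
            // react to the per-partition error code here
        }
    }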
@@ -0,0 +1,802 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.ImplicitLinkedHashCollection;
import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection;


public class AlterConfigsRequestData implements ApiMessage {
    private AlterConfigsResourceCollection resources;
    private boolean validateOnly;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("resources", new ArrayOf(AlterConfigsResource.SCHEMA_0), "The updates for each resource."),
            new Field("validate_only", Type.BOOLEAN, "True if we should validate the request, but not change the configurations.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1
    };

    public AlterConfigsRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public AlterConfigsRequestData(Struct struct, short _version) {
        fromStruct(struct, _version);
    }

    public AlterConfigsRequestData() {
        this.resources = new AlterConfigsResourceCollection(0);
        this.validateOnly = false;
    }

    @Override
    public short apiKey() {
        return 33;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 1;
    }

    @Override
    public void read(Readable _readable, short _version) {
        {
            int arrayLength;
            arrayLength = _readable.readInt();
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field resources was serialized as null");
            } else {
                AlterConfigsResourceCollection newCollection = new AlterConfigsResourceCollection(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new AlterConfigsResource(_readable, _version));
                }
                this.resources = newCollection;
            }
        }
        this.validateOnly = _readable.readByte() != 0;
        this._unknownTaggedFields = null;
    }

    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(resources.size());
        for (AlterConfigsResource resourcesElement : resources) {
            resourcesElement.write(_writable, _cache, _version);
        }
        _writable.writeByte(validateOnly ? (byte) 1 : (byte) 0);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public void fromStruct(Struct struct, short _version) {
        this._unknownTaggedFields = null;
        {
            Object[] _nestedObjects = struct.getArray("resources");
            this.resources = new AlterConfigsResourceCollection(_nestedObjects.length);
            for (Object nestedObject : _nestedObjects) {
                this.resources.add(new AlterConfigsResource((Struct) nestedObject, _version));
            }
        }
        this.validateOnly = struct.getBoolean("validate_only");
    }

    @Override
    public Struct toStruct(short _version) {
        TreeMap<Integer, Object> _taggedFields = null;
        Struct struct = new Struct(SCHEMAS[_version]);
        {
            Struct[] _nestedObjects = new Struct[resources.size()];
            int i = 0;
            for (AlterConfigsResource element : this.resources) {
                _nestedObjects[i++] = element.toStruct(_version);
            }
            struct.set("resources", (Object[]) _nestedObjects);
        }
        struct.set("validate_only", this.validateOnly);
        return struct;
    }

    @Override
    public int size(ObjectSerializationCache _cache, short _version) {
        int _size = 0, _numTaggedFields = 0;
        {
            int _arraySize = 0;
            _arraySize += 4;
            for (AlterConfigsResource resourcesElement : resources) {
                _arraySize += resourcesElement.size(_cache, _version);
            }
            _size += _arraySize;
        }
        _size += 1;
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                _size += _field.size();
            }
        }
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
        return _size;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof AlterConfigsRequestData)) return false;
        AlterConfigsRequestData other = (AlterConfigsRequestData) obj;
        if (this.resources == null) {
            if (other.resources != null) return false;
        } else {
            if (!this.resources.equals(other.resources)) return false;
        }
        if (validateOnly != other.validateOnly) return false;
        return true;
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (resources == null ? 0 : resources.hashCode());
        hashCode = 31 * hashCode + (validateOnly ? 1231 : 1237);
        return hashCode;
    }

    @Override
    public String toString() {
        return "AlterConfigsRequestData("
            + "resources=" + MessageUtil.deepToString(resources.iterator())
            + ", validateOnly=" + (validateOnly ? "true" : "false")
            + ")";
    }

    public AlterConfigsResourceCollection resources() {
        return this.resources;
    }

    public boolean validateOnly() {
        return this.validateOnly;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public AlterConfigsRequestData setResources(AlterConfigsResourceCollection v) {
        this.resources = v;
        return this;
    }

    public AlterConfigsRequestData setValidateOnly(boolean v) {
        this.validateOnly = v;
        return this;
    }

    static public class AlterConfigsResource implements Message, ImplicitLinkedHashMultiCollection.Element {
        private byte resourceType;
        private String resourceName;
        private AlterableConfigCollection configs;
        private List<RawTaggedField> _unknownTaggedFields;
        private int next;
        private int prev;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("resource_type", Type.INT8, "The resource type."),
                new Field("resource_name", Type.STRING, "The resource name."),
                new Field("configs", new ArrayOf(AlterableConfig.SCHEMA_0), "The configurations.")
            );

        public static final Schema SCHEMA_1 = SCHEMA_0;

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public AlterConfigsResource(Readable _readable, short _version) {
            read(_readable, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public AlterConfigsResource(Struct struct, short _version) {
            fromStruct(struct, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public AlterConfigsResource() {
            this.resourceType = (byte) 0;
            this.resourceName = "";
            this.configs = new AlterableConfigCollection(0);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }


        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AlterConfigsResource");
            }
            this.resourceType = _readable.readByte();
            {
                int length;
                length = _readable.readShort();
                if (length < 0) {
                    throw new RuntimeException("non-nullable field resourceName was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field resourceName had invalid length " + length);
                } else {
                    this.resourceName = _readable.readString(length);
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field configs was serialized as null");
                } else {
                    AlterableConfigCollection newCollection = new AlterableConfigCollection(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new AlterableConfig(_readable, _version));
                    }
                    this.configs = newCollection;
                }
            }
            this._unknownTaggedFields = null;
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AlterConfigsResource");
            }
            int _numTaggedFields = 0;
            _writable.writeByte(resourceType);
            {
                byte[] _stringBytes = _cache.getSerializedValue(resourceName);
                _writable.writeShort((short) _stringBytes.length);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeInt(configs.size());
            for (AlterableConfig configsElement : configs) {
                configsElement.write(_writable, _cache, _version);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }

        @SuppressWarnings("unchecked")
        @Override
        public void fromStruct(Struct struct, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AlterConfigsResource");
            }
            this._unknownTaggedFields = null;
            this.resourceType = struct.getByte("resource_type");
            this.resourceName = struct.getString("resource_name");
            {
                Object[] _nestedObjects = struct.getArray("configs");
                this.configs = new AlterableConfigCollection(_nestedObjects.length);
                for (Object nestedObject : _nestedObjects) {
                    this.configs.add(new AlterableConfig((Struct) nestedObject, _version));
                }
            }
        }

        @Override
        public Struct toStruct(short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AlterConfigsResource");
            }
            TreeMap<Integer, Object> _taggedFields = null;
            Struct struct = new Struct(SCHEMAS[_version]);
            struct.set("resource_type", this.resourceType);
            struct.set("resource_name", this.resourceName);
            {
                Struct[] _nestedObjects = new Struct[configs.size()];
                int i = 0;
                for (AlterableConfig element : this.configs) {
                    _nestedObjects[i++] = element.toStruct(_version);
                }
                struct.set("configs", (Object[]) _nestedObjects);
            }
            return struct;
        }

        @Override
        public int size(ObjectSerializationCache _cache, short _version) {
            int _size = 0, _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of AlterConfigsResource");
            }
            _size += 1;
            {
                byte[] _stringBytes = resourceName.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'resourceName' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(resourceName, _stringBytes);
                _size += _stringBytes.length + 2;
            }
            {
                int _arraySize = 0;
                _arraySize += 4;
                for (AlterableConfig configsElement : configs) {
                    _arraySize += configsElement.size(_cache, _version);
                }
                _size += _arraySize;
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                    _size += _field.size();
                }
            }
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
            return _size;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof AlterConfigsResource)) return false;
            AlterConfigsResource other = (AlterConfigsResource) obj;
            if (resourceType != other.resourceType) return false;
            if (this.resourceName == null) {
                if (other.resourceName != null) return false;
            } else {
                if (!this.resourceName.equals(other.resourceName)) return false;
            }
            return true;
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + resourceType;
            hashCode = 31 * hashCode + (resourceName == null ? 0 : resourceName.hashCode());
            return hashCode;
        }

        @Override
        public String toString() {
            return "AlterConfigsResource("
|
||||
+ "resourceType=" + resourceType
|
||||
+ ", resourceName=" + ((resourceName == null) ? "null" : "'" + resourceName.toString() + "'")
|
||||
+ ", configs=" + MessageUtil.deepToString(configs.iterator())
|
||||
+ ")";
|
||||
}
|
||||
|
||||
public byte resourceType() {
|
||||
return this.resourceType;
|
||||
}
|
||||
|
||||
public String resourceName() {
|
||||
return this.resourceName;
|
||||
}
|
||||
|
||||
public AlterableConfigCollection configs() {
|
||||
return this.configs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int next() {
|
||||
return this.next;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int prev() {
|
||||
return this.prev;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<RawTaggedField> unknownTaggedFields() {
|
||||
if (_unknownTaggedFields == null) {
|
||||
_unknownTaggedFields = new ArrayList<>(0);
|
||||
}
|
||||
return _unknownTaggedFields;
|
||||
}
|
||||
|
||||
public AlterConfigsResource setResourceType(byte v) {
|
||||
this.resourceType = v;
|
||||
return this;
|
||||
}
|
||||
|
||||
public AlterConfigsResource setResourceName(String v) {
|
||||
this.resourceName = v;
|
||||
return this;
|
||||
}
|
||||
|
||||
public AlterConfigsResource setConfigs(AlterableConfigCollection v) {
|
||||
this.configs = v;
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setNext(int v) {
|
||||
this.next = v;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setPrev(int v) {
|
||||
this.prev = v;
|
||||
}
|
||||
}
|
||||
|
    static public class AlterableConfig implements Message, ImplicitLinkedHashMultiCollection.Element {
        private String name;
        private String value;
        private List<RawTaggedField> _unknownTaggedFields;
        private int next;
        private int prev;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("name", Type.STRING, "The configuration key name."),
                new Field("value", Type.NULLABLE_STRING, "The value to set for the configuration key.")
            );

        public static final Schema SCHEMA_1 = SCHEMA_0;

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public AlterableConfig(Readable _readable, short _version) {
            read(_readable, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public AlterableConfig(Struct struct, short _version) {
            fromStruct(struct, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public AlterableConfig() {
            this.name = "";
            this.value = "";
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AlterableConfig");
            }
            {
                int length;
                length = _readable.readShort();
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            {
                int length;
                length = _readable.readShort();
                if (length < 0) {
                    this.value = null;
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field value had invalid length " + length);
                } else {
                    this.value = _readable.readString(length);
                }
            }
            this._unknownTaggedFields = null;
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AlterableConfig");
            }
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(name);
                _writable.writeShort((short) _stringBytes.length);
                _writable.writeByteArray(_stringBytes);
            }
            if (value == null) {
                _writable.writeShort((short) -1);
            } else {
                byte[] _stringBytes = _cache.getSerializedValue(value);
                _writable.writeShort((short) _stringBytes.length);
                _writable.writeByteArray(_stringBytes);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }

        @SuppressWarnings("unchecked")
        @Override
        public void fromStruct(Struct struct, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AlterableConfig");
            }
            this._unknownTaggedFields = null;
            this.name = struct.getString("name");
            this.value = struct.getString("value");
        }

        @Override
        public Struct toStruct(short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AlterableConfig");
            }
            TreeMap<Integer, Object> _taggedFields = null;
            Struct struct = new Struct(SCHEMAS[_version]);
            struct.set("name", this.name);
            struct.set("value", this.value);
            return struct;
        }

        @Override
        public int size(ObjectSerializationCache _cache, short _version) {
            int _size = 0, _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of AlterableConfig");
            }
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(name, _stringBytes);
                _size += _stringBytes.length + 2;
            }
            if (value == null) {
                _size += 2;
            } else {
                byte[] _stringBytes = value.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'value' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(value, _stringBytes);
                _size += _stringBytes.length + 2;
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                    _size += _field.size();
                }
            }
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
            return _size;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof AlterableConfig)) return false;
            AlterableConfig other = (AlterableConfig) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            return true;
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            return hashCode;
        }

        @Override
        public String toString() {
            return "AlterableConfig("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", value=" + ((value == null) ? "null" : "'" + value.toString() + "'")
                + ")";
        }

        public String name() {
            return this.name;
        }

        public String value() {
            return this.value;
        }

        @Override
        public int next() {
            return this.next;
        }

        @Override
        public int prev() {
            return this.prev;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public AlterableConfig setName(String v) {
            this.name = v;
            return this;
        }

        public AlterableConfig setValue(String v) {
            this.value = v;
            return this;
        }

        @Override
        public void setNext(int v) {
            this.next = v;
        }

        @Override
        public void setPrev(int v) {
            this.prev = v;
        }
    }
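    // Note on the NULLABLE_STRING encoding used by the "value" field above:
    // in the non-flexible versions 0-1 every string is prefixed by an INT16
    // length, and a length of -1 stands for null. A minimal standalone sketch
    // of that rule, assuming a plain java.nio.ByteBuffer (the helper name is
    // illustrative, not part of the generated API):
    //
    //     static void writeNullableString(java.nio.ByteBuffer buf, String s) {
    //         if (s == null) {
    //             buf.putShort((short) -1);       // null marker
    //             return;
    //         }
    //         byte[] b = s.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    //         buf.putShort((short) b.length);     // INT16 length prefix
    //         buf.put(b);                         // UTF-8 payload
    //     }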
    public static class AlterableConfigCollection extends ImplicitLinkedHashMultiCollection<AlterableConfig> {
        public AlterableConfigCollection() {
            super();
        }

        public AlterableConfigCollection(int expectedNumElements) {
            super(expectedNumElements);
        }

        public AlterableConfigCollection(Iterator<AlterableConfig> iterator) {
            super(iterator);
        }

        public AlterableConfig find(String name) {
            AlterableConfig _key = new AlterableConfig();
            _key.setName(name);
            return find(_key);
        }

        public List<AlterableConfig> findAll(String name) {
            AlterableConfig _key = new AlterableConfig();
            _key.setName(name);
            return findAll(_key);
        }
    }
    public static class AlterConfigsResourceCollection extends ImplicitLinkedHashMultiCollection<AlterConfigsResource> {
        public AlterConfigsResourceCollection() {
            super();
        }

        public AlterConfigsResourceCollection(int expectedNumElements) {
            super(expectedNumElements);
        }

        public AlterConfigsResourceCollection(Iterator<AlterConfigsResource> iterator) {
            super(iterator);
        }

        public AlterConfigsResource find(byte resourceType, String resourceName) {
            AlterConfigsResource _key = new AlterConfigsResource();
            _key.setResourceType(resourceType);
            _key.setResourceName(resourceName);
            return find(_key);
        }

        public List<AlterConfigsResource> findAll(byte resourceType, String resourceName) {
            AlterConfigsResource _key = new AlterConfigsResource();
            _key.setResourceType(resourceType);
            _key.setResourceName(resourceName);
            return findAll(_key);
        }
    }
}
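The generated setters above return this, so resources and configs chain fluently, and the ImplicitLinkedHashMultiCollection subclasses add keyed find/findAll lookups over their elements. A minimal usage sketch, assuming these inner classes live in the generated AlterConfigsRequestData wrapper this diff adds, with the TOPIC resource-type value (byte) 2 quoted from Kafka's ResourceType enum for illustration:

    AlterConfigsRequestData.AlterConfigsResource resource =
        new AlterConfigsRequestData.AlterConfigsResource()
            .setResourceType((byte) 2)            // ResourceType.TOPIC
            .setResourceName("payments-events");
    resource.configs().add(
        new AlterConfigsRequestData.AlterableConfig()
            .setName("retention.ms")
            .setValue("86400000"));

    // Keyed lookup comes from ImplicitLinkedHashMultiCollection via find():
    AlterConfigsRequestData.AlterableConfig retention =
        resource.configs().find("retention.ms");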
@@ -0,0 +1,491 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;


public class AlterConfigsResponseData implements ApiMessage {
    private int throttleTimeMs;
    private List<AlterConfigsResourceResponse> responses;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("responses", new ArrayOf(AlterConfigsResourceResponse.SCHEMA_0), "The responses for each resource.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1
    };

    public AlterConfigsResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public AlterConfigsResponseData(Struct struct, short _version) {
        fromStruct(struct, _version);
    }

    public AlterConfigsResponseData() {
        this.throttleTimeMs = 0;
        this.responses = new ArrayList<AlterConfigsResourceResponse>();
    }

    @Override
    public short apiKey() {
        return 33;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 1;
    }

    @Override
    public void read(Readable _readable, short _version) {
        this.throttleTimeMs = _readable.readInt();
        {
            int arrayLength;
            arrayLength = _readable.readInt();
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field responses was serialized as null");
            } else {
                ArrayList<AlterConfigsResourceResponse> newCollection = new ArrayList<AlterConfigsResourceResponse>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new AlterConfigsResourceResponse(_readable, _version));
                }
                this.responses = newCollection;
            }
        }
        this._unknownTaggedFields = null;
    }

    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(throttleTimeMs);
        _writable.writeInt(responses.size());
        for (AlterConfigsResourceResponse responsesElement : responses) {
            responsesElement.write(_writable, _cache, _version);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public void fromStruct(Struct struct, short _version) {
        this._unknownTaggedFields = null;
        this.throttleTimeMs = struct.getInt("throttle_time_ms");
        {
            Object[] _nestedObjects = struct.getArray("responses");
            this.responses = new ArrayList<AlterConfigsResourceResponse>(_nestedObjects.length);
            for (Object nestedObject : _nestedObjects) {
                this.responses.add(new AlterConfigsResourceResponse((Struct) nestedObject, _version));
            }
        }
    }

    @Override
    public Struct toStruct(short _version) {
        TreeMap<Integer, Object> _taggedFields = null;
        Struct struct = new Struct(SCHEMAS[_version]);
        struct.set("throttle_time_ms", this.throttleTimeMs);
        {
            Struct[] _nestedObjects = new Struct[responses.size()];
            int i = 0;
            for (AlterConfigsResourceResponse element : this.responses) {
                _nestedObjects[i++] = element.toStruct(_version);
            }
            struct.set("responses", (Object[]) _nestedObjects);
        }
        return struct;
    }

    @Override
    public int size(ObjectSerializationCache _cache, short _version) {
        int _size = 0, _numTaggedFields = 0;
        _size += 4;
        {
            int _arraySize = 0;
            _arraySize += 4;
            for (AlterConfigsResourceResponse responsesElement : responses) {
                _arraySize += responsesElement.size(_cache, _version);
            }
            _size += _arraySize;
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                _size += _field.size();
            }
        }
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
        return _size;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof AlterConfigsResponseData)) return false;
        AlterConfigsResponseData other = (AlterConfigsResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (this.responses == null) {
            if (other.responses != null) return false;
        } else {
            if (!this.responses.equals(other.responses)) return false;
        }
        return true;
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + (responses == null ? 0 : responses.hashCode());
        return hashCode;
    }

    @Override
    public String toString() {
        return "AlterConfigsResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", responses=" + MessageUtil.deepToString(responses.iterator())
            + ")";
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    public List<AlterConfigsResourceResponse> responses() {
        return this.responses;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public AlterConfigsResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    public AlterConfigsResponseData setResponses(List<AlterConfigsResourceResponse> v) {
        this.responses = v;
        return this;
    }

    static public class AlterConfigsResourceResponse implements Message {
        private short errorCode;
        private String errorMessage;
        private byte resourceType;
        private String resourceName;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("error_code", Type.INT16, "The resource error code."),
                new Field("error_message", Type.NULLABLE_STRING, "The resource error message, or null if there was no error."),
                new Field("resource_type", Type.INT8, "The resource type."),
                new Field("resource_name", Type.STRING, "The resource name.")
            );

        public static final Schema SCHEMA_1 = SCHEMA_0;

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public AlterConfigsResourceResponse(Readable _readable, short _version) {
            read(_readable, _version);
        }

        public AlterConfigsResourceResponse(Struct struct, short _version) {
            fromStruct(struct, _version);
        }

        public AlterConfigsResourceResponse() {
            this.errorCode = (short) 0;
            this.errorMessage = "";
            this.resourceType = (byte) 0;
            this.resourceName = "";
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AlterConfigsResourceResponse");
            }
            this.errorCode = _readable.readShort();
            {
                int length;
                length = _readable.readShort();
                if (length < 0) {
                    this.errorMessage = null;
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field errorMessage had invalid length " + length);
                } else {
                    this.errorMessage = _readable.readString(length);
                }
            }
            this.resourceType = _readable.readByte();
            {
                int length;
                length = _readable.readShort();
                if (length < 0) {
                    throw new RuntimeException("non-nullable field resourceName was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field resourceName had invalid length " + length);
                } else {
                    this.resourceName = _readable.readString(length);
                }
            }
            this._unknownTaggedFields = null;
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AlterConfigsResourceResponse");
            }
            int _numTaggedFields = 0;
            _writable.writeShort(errorCode);
            if (errorMessage == null) {
                _writable.writeShort((short) -1);
            } else {
                byte[] _stringBytes = _cache.getSerializedValue(errorMessage);
                _writable.writeShort((short) _stringBytes.length);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeByte(resourceType);
            {
                byte[] _stringBytes = _cache.getSerializedValue(resourceName);
                _writable.writeShort((short) _stringBytes.length);
                _writable.writeByteArray(_stringBytes);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }

        @SuppressWarnings("unchecked")
        @Override
        public void fromStruct(Struct struct, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AlterConfigsResourceResponse");
            }
            this._unknownTaggedFields = null;
            this.errorCode = struct.getShort("error_code");
            this.errorMessage = struct.getString("error_message");
            this.resourceType = struct.getByte("resource_type");
            this.resourceName = struct.getString("resource_name");
        }

        @Override
        public Struct toStruct(short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AlterConfigsResourceResponse");
            }
            TreeMap<Integer, Object> _taggedFields = null;
            Struct struct = new Struct(SCHEMAS[_version]);
            struct.set("error_code", this.errorCode);
            struct.set("error_message", this.errorMessage);
            struct.set("resource_type", this.resourceType);
            struct.set("resource_name", this.resourceName);
            return struct;
        }

        @Override
        public int size(ObjectSerializationCache _cache, short _version) {
            int _size = 0, _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of AlterConfigsResourceResponse");
            }
            _size += 2;
            if (errorMessage == null) {
                _size += 2;
            } else {
                byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'errorMessage' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(errorMessage, _stringBytes);
                _size += _stringBytes.length + 2;
            }
            _size += 1;
            {
                byte[] _stringBytes = resourceName.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'resourceName' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(resourceName, _stringBytes);
                _size += _stringBytes.length + 2;
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                    _size += _field.size();
                }
            }
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
            return _size;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof AlterConfigsResourceResponse)) return false;
            AlterConfigsResourceResponse other = (AlterConfigsResourceResponse) obj;
            if (errorCode != other.errorCode) return false;
            if (this.errorMessage == null) {
                if (other.errorMessage != null) return false;
            } else {
                if (!this.errorMessage.equals(other.errorMessage)) return false;
            }
            if (resourceType != other.resourceType) return false;
            if (this.resourceName == null) {
                if (other.resourceName != null) return false;
            } else {
                if (!this.resourceName.equals(other.resourceName)) return false;
            }
            return true;
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + errorCode;
            hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode());
            hashCode = 31 * hashCode + resourceType;
            hashCode = 31 * hashCode + (resourceName == null ? 0 : resourceName.hashCode());
            return hashCode;
        }

        @Override
        public String toString() {
            return "AlterConfigsResourceResponse("
                + "errorCode=" + errorCode
                + ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'")
                + ", resourceType=" + resourceType
                + ", resourceName=" + ((resourceName == null) ? "null" : "'" + resourceName.toString() + "'")
                + ")";
        }

        public short errorCode() {
            return this.errorCode;
        }

        public String errorMessage() {
            return this.errorMessage;
        }

        public byte resourceType() {
            return this.resourceType;
        }

        public String resourceName() {
            return this.resourceName;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public AlterConfigsResourceResponse setErrorCode(short v) {
            this.errorCode = v;
            return this;
        }

        public AlterConfigsResourceResponse setErrorMessage(String v) {
            this.errorMessage = v;
            return this;
        }

        public AlterConfigsResourceResponse setResourceType(byte v) {
            this.resourceType = v;
            return this;
        }

        public AlterConfigsResourceResponse setResourceName(String v) {
            this.resourceName = v;
            return this;
        }
    }
}
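On the response side, each altered resource comes back with its own error code and an optional message. A hedged sketch of how a client might surface per-resource failures; the Readable and version arguments are assumed to come from the surrounding network layer, and error code 0 corresponds to Kafka's Errors.NONE:

    AlterConfigsResponseData response =
        new AlterConfigsResponseData(readable, version); // decoded elsewhere; shown for illustration
    for (AlterConfigsResponseData.AlterConfigsResourceResponse r : response.responses()) {
        if (r.errorCode() != (short) 0) { // 0 == Errors.NONE
            System.err.printf("alterConfigs failed for %s (type %d): %s%n",
                r.resourceName(), r.resourceType(), r.errorMessage());
        }
    }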
@@ -0,0 +1,728 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
|
||||
|
||||
package org.apache.kafka.common.message;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.NavigableMap;
|
||||
import java.util.TreeMap;
|
||||
import org.apache.kafka.common.errors.UnsupportedVersionException;
|
||||
import org.apache.kafka.common.protocol.ApiMessage;
|
||||
import org.apache.kafka.common.protocol.Message;
|
||||
import org.apache.kafka.common.protocol.MessageUtil;
|
||||
import org.apache.kafka.common.protocol.ObjectSerializationCache;
|
||||
import org.apache.kafka.common.protocol.Readable;
|
||||
import org.apache.kafka.common.protocol.Writable;
|
||||
import org.apache.kafka.common.protocol.types.CompactArrayOf;
|
||||
import org.apache.kafka.common.protocol.types.Field;
|
||||
import org.apache.kafka.common.protocol.types.RawTaggedField;
|
||||
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
|
||||
import org.apache.kafka.common.protocol.types.Schema;
|
||||
import org.apache.kafka.common.protocol.types.Struct;
|
||||
import org.apache.kafka.common.protocol.types.Type;
|
||||
import org.apache.kafka.common.utils.ByteUtils;
|
||||
|
||||
import static java.util.Map.Entry;
|
||||
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
|
||||
|
||||
|
||||
public class AlterPartitionReassignmentsRequestData implements ApiMessage {
|
||||
private int timeoutMs;
|
||||
private List<ReassignableTopic> topics;
|
||||
private List<RawTaggedField> _unknownTaggedFields;
|
||||
|
||||
public static final Schema SCHEMA_0 =
|
||||
new Schema(
|
||||
new Field("timeout_ms", Type.INT32, "The time in ms to wait for the request to complete."),
|
||||
new Field("topics", new CompactArrayOf(ReassignableTopic.SCHEMA_0), "The topics to reassign."),
|
||||
TaggedFieldsSection.of(
|
||||
)
|
||||
);
|
||||
|
||||
public static final Schema[] SCHEMAS = new Schema[] {
|
||||
SCHEMA_0
|
||||
};
|
||||
|
||||
public AlterPartitionReassignmentsRequestData(Readable _readable, short _version) {
|
||||
read(_readable, _version);
|
||||
}
|
||||
|
||||
public AlterPartitionReassignmentsRequestData(Struct struct, short _version) {
|
||||
fromStruct(struct, _version);
|
||||
}
|
||||
|
||||
public AlterPartitionReassignmentsRequestData() {
|
||||
this.timeoutMs = 60000;
|
||||
this.topics = new ArrayList<ReassignableTopic>();
|
||||
}
|
||||
|
||||
@Override
|
||||
public short apiKey() {
|
||||
return 45;
|
||||
}
|
||||
|
||||
@Override
|
||||
public short lowestSupportedVersion() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public short highestSupportedVersion() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void read(Readable _readable, short _version) {
|
||||
this.timeoutMs = _readable.readInt();
|
||||
{
|
||||
int arrayLength;
|
||||
arrayLength = _readable.readUnsignedVarint() - 1;
|
||||
if (arrayLength < 0) {
|
||||
throw new RuntimeException("non-nullable field topics was serialized as null");
|
||||
} else {
|
||||
ArrayList<ReassignableTopic> newCollection = new ArrayList<ReassignableTopic>(arrayLength);
|
||||
for (int i = 0; i < arrayLength; i++) {
|
||||
newCollection.add(new ReassignableTopic(_readable, _version));
|
||||
}
|
||||
this.topics = newCollection;
|
||||
}
|
||||
}
|
||||
this._unknownTaggedFields = null;
|
||||
int _numTaggedFields = _readable.readUnsignedVarint();
|
||||
for (int _i = 0; _i < _numTaggedFields; _i++) {
|
||||
int _tag = _readable.readUnsignedVarint();
|
||||
int _size = _readable.readUnsignedVarint();
|
||||
switch (_tag) {
|
||||
default:
|
||||
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
|
||||
int _numTaggedFields = 0;
|
||||
_writable.writeInt(timeoutMs);
|
||||
_writable.writeUnsignedVarint(topics.size() + 1);
|
||||
for (ReassignableTopic topicsElement : topics) {
|
||||
topicsElement.write(_writable, _cache, _version);
|
||||
}
|
||||
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
|
||||
_numTaggedFields += _rawWriter.numFields();
|
||||
_writable.writeUnsignedVarint(_numTaggedFields);
|
||||
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
public void fromStruct(Struct struct, short _version) {
|
||||
NavigableMap<Integer, Object> _taggedFields = null;
|
||||
this._unknownTaggedFields = null;
|
||||
_taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
|
||||
this.timeoutMs = struct.getInt("timeout_ms");
|
||||
{
|
||||
Object[] _nestedObjects = struct.getArray("topics");
|
||||
this.topics = new ArrayList<ReassignableTopic>(_nestedObjects.length);
|
||||
for (Object nestedObject : _nestedObjects) {
|
||||
this.topics.add(new ReassignableTopic((Struct) nestedObject, _version));
|
||||
}
|
||||
}
|
||||
if (!_taggedFields.isEmpty()) {
|
||||
this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
|
||||
for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
|
||||
this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Struct toStruct(short _version) {
|
||||
TreeMap<Integer, Object> _taggedFields = null;
|
||||
_taggedFields = new TreeMap<>();
|
||||
Struct struct = new Struct(SCHEMAS[_version]);
|
||||
struct.set("timeout_ms", this.timeoutMs);
|
||||
{
|
||||
Struct[] _nestedObjects = new Struct[topics.size()];
|
||||
int i = 0;
|
||||
for (ReassignableTopic element : this.topics) {
|
||||
_nestedObjects[i++] = element.toStruct(_version);
|
||||
}
|
||||
struct.set("topics", (Object[]) _nestedObjects);
|
||||
}
|
||||
struct.set("_tagged_fields", _taggedFields);
|
||||
return struct;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int size(ObjectSerializationCache _cache, short _version) {
|
||||
int _size = 0, _numTaggedFields = 0;
|
||||
_size += 4;
|
||||
{
|
||||
int _arraySize = 0;
|
||||
_arraySize += ByteUtils.sizeOfUnsignedVarint(topics.size() + 1);
|
||||
for (ReassignableTopic topicsElement : topics) {
|
||||
_arraySize += topicsElement.size(_cache, _version);
|
||||
}
|
||||
_size += _arraySize;
|
||||
}
|
||||
if (_unknownTaggedFields != null) {
|
||||
_numTaggedFields += _unknownTaggedFields.size();
|
||||
for (RawTaggedField _field : _unknownTaggedFields) {
|
||||
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
|
||||
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
|
||||
_size += _field.size();
|
||||
}
|
||||
}
|
||||
_size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
|
||||
return _size;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (!(obj instanceof AlterPartitionReassignmentsRequestData)) return false;
|
||||
AlterPartitionReassignmentsRequestData other = (AlterPartitionReassignmentsRequestData) obj;
|
||||
if (timeoutMs != other.timeoutMs) return false;
|
||||
if (this.topics == null) {
|
||||
if (other.topics != null) return false;
|
||||
} else {
|
||||
if (!this.topics.equals(other.topics)) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int hashCode = 0;
|
||||
hashCode = 31 * hashCode + timeoutMs;
|
||||
hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
|
||||
return hashCode;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "AlterPartitionReassignmentsRequestData("
|
||||
+ "timeoutMs=" + timeoutMs
|
||||
+ ", topics=" + MessageUtil.deepToString(topics.iterator())
|
||||
+ ")";
|
||||
}
|
||||
|
||||
public int timeoutMs() {
|
||||
return this.timeoutMs;
|
||||
}
|
||||
|
||||
public List<ReassignableTopic> topics() {
|
||||
return this.topics;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<RawTaggedField> unknownTaggedFields() {
|
||||
if (_unknownTaggedFields == null) {
|
||||
_unknownTaggedFields = new ArrayList<>(0);
|
||||
}
|
||||
return _unknownTaggedFields;
|
||||
}
|
||||
|
||||
public AlterPartitionReassignmentsRequestData setTimeoutMs(int v) {
|
||||
this.timeoutMs = v;
|
||||
return this;
|
||||
}
|
||||
|
||||
public AlterPartitionReassignmentsRequestData setTopics(List<ReassignableTopic> v) {
|
||||
this.topics = v;
|
||||
return this;
|
||||
}
|
||||
|
||||
static public class ReassignableTopic implements Message {
|
||||
private String name;
|
||||
private List<ReassignablePartition> partitions;
|
||||
private List<RawTaggedField> _unknownTaggedFields;
|
||||
|
||||
public static final Schema SCHEMA_0 =
|
||||
new Schema(
|
||||
new Field("name", Type.COMPACT_STRING, "The topic name."),
|
||||
new Field("partitions", new CompactArrayOf(ReassignablePartition.SCHEMA_0), "The partitions to reassign."),
|
||||
TaggedFieldsSection.of(
|
||||
)
|
||||
);
|
||||
|
||||
public static final Schema[] SCHEMAS = new Schema[] {
|
||||
SCHEMA_0
|
||||
};
|
||||
|
||||
public ReassignableTopic(Readable _readable, short _version) {
|
||||
read(_readable, _version);
|
||||
}
|
||||
|
||||
public ReassignableTopic(Struct struct, short _version) {
|
||||
fromStruct(struct, _version);
|
||||
}
|
||||
|
||||
public ReassignableTopic() {
|
||||
this.name = "";
|
||||
this.partitions = new ArrayList<ReassignablePartition>();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public short lowestSupportedVersion() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public short highestSupportedVersion() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void read(Readable _readable, short _version) {
|
||||
if (_version > 0) {
|
||||
throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignableTopic");
|
||||
}
|
||||
{
|
||||
int length;
|
||||
length = _readable.readUnsignedVarint() - 1;
|
||||
if (length < 0) {
|
||||
throw new RuntimeException("non-nullable field name was serialized as null");
|
||||
} else if (length > 0x7fff) {
|
||||
throw new RuntimeException("string field name had invalid length " + length);
|
||||
} else {
|
||||
this.name = _readable.readString(length);
|
||||
}
|
||||
}
|
||||
{
|
||||
int arrayLength;
|
||||
arrayLength = _readable.readUnsignedVarint() - 1;
|
||||
if (arrayLength < 0) {
|
||||
throw new RuntimeException("non-nullable field partitions was serialized as null");
|
||||
} else {
|
||||
ArrayList<ReassignablePartition> newCollection = new ArrayList<ReassignablePartition>(arrayLength);
|
||||
for (int i = 0; i < arrayLength; i++) {
|
||||
newCollection.add(new ReassignablePartition(_readable, _version));
|
||||
}
|
||||
this.partitions = newCollection;
|
||||
}
|
||||
}
|
||||
this._unknownTaggedFields = null;
|
||||
int _numTaggedFields = _readable.readUnsignedVarint();
|
||||
for (int _i = 0; _i < _numTaggedFields; _i++) {
|
||||
int _tag = _readable.readUnsignedVarint();
|
||||
int _size = _readable.readUnsignedVarint();
|
||||
switch (_tag) {
|
||||
default:
|
||||
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
|
||||
if (_version > 0) {
|
||||
throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignableTopic");
|
||||
}
|
||||
int _numTaggedFields = 0;
|
||||
{
|
||||
byte[] _stringBytes = _cache.getSerializedValue(name);
|
||||
_writable.writeUnsignedVarint(_stringBytes.length + 1);
|
||||
_writable.writeByteArray(_stringBytes);
|
||||
}
|
||||
_writable.writeUnsignedVarint(partitions.size() + 1);
|
||||
for (ReassignablePartition partitionsElement : partitions) {
|
||||
partitionsElement.write(_writable, _cache, _version);
|
||||
}
|
||||
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
|
||||
_numTaggedFields += _rawWriter.numFields();
|
||||
_writable.writeUnsignedVarint(_numTaggedFields);
|
||||
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
public void fromStruct(Struct struct, short _version) {
|
||||
if (_version > 0) {
|
||||
throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignableTopic");
|
||||
}
|
||||
NavigableMap<Integer, Object> _taggedFields = null;
|
||||
this._unknownTaggedFields = null;
|
||||
_taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
|
||||
this.name = struct.getString("name");
|
||||
{
|
||||
Object[] _nestedObjects = struct.getArray("partitions");
|
||||
this.partitions = new ArrayList<ReassignablePartition>(_nestedObjects.length);
|
||||
for (Object nestedObject : _nestedObjects) {
|
||||
this.partitions.add(new ReassignablePartition((Struct) nestedObject, _version));
|
||||
}
|
||||
}
|
||||
if (!_taggedFields.isEmpty()) {
|
||||
this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
|
||||
for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
|
||||
this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Struct toStruct(short _version) {
|
||||
if (_version > 0) {
|
||||
throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignableTopic");
|
||||
}
|
||||
TreeMap<Integer, Object> _taggedFields = null;
|
||||
_taggedFields = new TreeMap<>();
|
||||
Struct struct = new Struct(SCHEMAS[_version]);
|
||||
struct.set("name", this.name);
|
||||
{
|
||||
Struct[] _nestedObjects = new Struct[partitions.size()];
|
||||
int i = 0;
|
||||
for (ReassignablePartition element : this.partitions) {
|
||||
_nestedObjects[i++] = element.toStruct(_version);
|
||||
}
|
||||
struct.set("partitions", (Object[]) _nestedObjects);
|
||||
}
|
||||
struct.set("_tagged_fields", _taggedFields);
|
||||
return struct;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int size(ObjectSerializationCache _cache, short _version) {
|
||||
int _size = 0, _numTaggedFields = 0;
|
||||
if (_version > 0) {
|
||||
throw new UnsupportedVersionException("Can't size version " + _version + " of ReassignableTopic");
|
||||
}
|
||||
{
|
||||
byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
|
||||
if (_stringBytes.length > 0x7fff) {
|
||||
throw new RuntimeException("'name' field is too long to be serialized");
|
||||
}
|
||||
_cache.cacheSerializedValue(name, _stringBytes);
|
||||
_size += _stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1);
|
||||
}
|
||||
{
|
||||
int _arraySize = 0;
|
||||
_arraySize += ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1);
|
||||
for (ReassignablePartition partitionsElement : partitions) {
|
||||
_arraySize += partitionsElement.size(_cache, _version);
|
||||
}
|
||||
_size += _arraySize;
|
||||
}
|
||||
if (_unknownTaggedFields != null) {
|
||||
_numTaggedFields += _unknownTaggedFields.size();
|
||||
for (RawTaggedField _field : _unknownTaggedFields) {
|
||||
_size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
|
||||
_size += ByteUtils.sizeOfUnsignedVarint(_field.size());
|
||||
_size += _field.size();
|
||||
}
|
||||
}
|
||||
_size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
|
||||
return _size;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (!(obj instanceof ReassignableTopic)) return false;
|
||||
ReassignableTopic other = (ReassignableTopic) obj;
|
||||
if (this.name == null) {
|
||||
if (other.name != null) return false;
|
||||
} else {
|
||||
if (!this.name.equals(other.name)) return false;
|
||||
}
|
||||
if (this.partitions == null) {
|
||||
if (other.partitions != null) return false;
|
||||
} else {
|
||||
if (!this.partitions.equals(other.partitions)) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int hashCode = 0;
|
||||
hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
|
||||
hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode());
|
||||
return hashCode;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ReassignableTopic("
|
||||
+ "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
|
||||
+ ", partitions=" + MessageUtil.deepToString(partitions.iterator())
|
||||
+ ")";
|
||||
}
|
||||
|
||||
public String name() {
|
||||
return this.name;
|
||||
}
|
||||
|
||||
public List<ReassignablePartition> partitions() {
|
||||
return this.partitions;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<RawTaggedField> unknownTaggedFields() {
|
||||
if (_unknownTaggedFields == null) {
|
||||
_unknownTaggedFields = new ArrayList<>(0);
|
||||
}
|
||||
return _unknownTaggedFields;
|
||||
}
|
||||
|
||||
public ReassignableTopic setName(String v) {
|
||||
this.name = v;
|
||||
return this;
|
||||
}
|
||||
|
||||
public ReassignableTopic setPartitions(List<ReassignablePartition> v) {
|
||||
this.partitions = v;
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
||||
static public class ReassignablePartition implements Message {
|
||||
private int partitionIndex;
|
||||
private List<Integer> replicas;
|
||||
private List<RawTaggedField> _unknownTaggedFields;
|
||||
|
||||
public static final Schema SCHEMA_0 =
|
||||
new Schema(
|
||||
new Field("partition_index", Type.INT32, "The partition index."),
|
||||
new Field("replicas", CompactArrayOf.nullable(Type.INT32), "The replicas to place the partitions on, or null to cancel a pending reassignment for this partition."),
|
||||
TaggedFieldsSection.of(
|
||||
)
|
||||
);
|
||||
|
||||
public static final Schema[] SCHEMAS = new Schema[] {
|
||||
SCHEMA_0
|
||||
};
|
||||
|
||||
public ReassignablePartition(Readable _readable, short _version) {
|
||||
read(_readable, _version);
|
||||
}
|
||||
|
||||
public ReassignablePartition(Struct struct, short _version) {
|
||||
fromStruct(struct, _version);
|
||||
}
|
||||
|
||||
public ReassignablePartition() {
|
||||
this.partitionIndex = 0;
|
||||
this.replicas = null;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public short lowestSupportedVersion() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public short highestSupportedVersion() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void read(Readable _readable, short _version) {
|
||||
if (_version > 0) {
|
||||
throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignablePartition");
|
||||
}
|
||||
this.partitionIndex = _readable.readInt();
|
||||
{
|
||||
int arrayLength;
|
||||
arrayLength = _readable.readUnsignedVarint() - 1;
|
||||
if (arrayLength < 0) {
|
||||
this.replicas = null;
|
||||
} else {
|
||||
ArrayList<Integer> newCollection = new ArrayList<Integer>(arrayLength);
|
||||
for (int i = 0; i < arrayLength; i++) {
|
||||
newCollection.add(_readable.readInt());
|
||||
}
|
||||
this.replicas = newCollection;
|
||||
}
|
||||
}
|
||||
this._unknownTaggedFields = null;
|
||||
int _numTaggedFields = _readable.readUnsignedVarint();
|
||||
for (int _i = 0; _i < _numTaggedFields; _i++) {
|
||||
int _tag = _readable.readUnsignedVarint();
|
||||
int _size = _readable.readUnsignedVarint();
|
||||
switch (_tag) {
|
||||
default:
|
||||
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignablePartition");
            }
            int _numTaggedFields = 0;
            _writable.writeInt(partitionIndex);
            if (replicas == null) {
                _writable.writeUnsignedVarint(0);
            } else {
                _writable.writeUnsignedVarint(replicas.size() + 1);
                for (Integer replicasElement : replicas) {
                    _writable.writeInt(replicasElement);
                }
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }

        @SuppressWarnings("unchecked")
        @Override
        public void fromStruct(Struct struct, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignablePartition");
            }
            NavigableMap<Integer, Object> _taggedFields = null;
            this._unknownTaggedFields = null;
            _taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
            this.partitionIndex = struct.getInt("partition_index");
            {
                Object[] _nestedObjects = struct.getArray("replicas");
                if (_nestedObjects == null) {
                    this.replicas = null;
                } else {
                    this.replicas = new ArrayList<Integer>(_nestedObjects.length);
                    for (Object nestedObject : _nestedObjects) {
                        this.replicas.add((Integer) nestedObject);
                    }
                }
            }
            if (!_taggedFields.isEmpty()) {
                this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
                for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
                    this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
                }
            }
        }

        @Override
        public Struct toStruct(short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignablePartition");
            }
            TreeMap<Integer, Object> _taggedFields = null;
            _taggedFields = new TreeMap<>();
            Struct struct = new Struct(SCHEMAS[_version]);
            struct.set("partition_index", this.partitionIndex);
            {
                if (replicas == null) {
                    struct.set("replicas", null);
                } else {
                    Integer[] _nestedObjects = new Integer[replicas.size()];
                    int i = 0;
                    for (Integer element : this.replicas) {
                        _nestedObjects[i++] = element;
                    }
                    struct.set("replicas", (Object[]) _nestedObjects);
                }
            }
            struct.set("_tagged_fields", _taggedFields);
            return struct;
        }

        @Override
        public int size(ObjectSerializationCache _cache, short _version) {
            int _size = 0, _numTaggedFields = 0;
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of ReassignablePartition");
            }
            _size += 4;
            if (replicas == null) {
                _size += 1;
            } else {
                int _arraySize = 0;
                _arraySize += ByteUtils.sizeOfUnsignedVarint(replicas.size() + 1);
                _arraySize += replicas.size() * 4;
                _size += _arraySize;
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                    _size += _field.size();
                }
            }
            _size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
            return _size;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof ReassignablePartition)) return false;
            ReassignablePartition other = (ReassignablePartition) obj;
            if (partitionIndex != other.partitionIndex) return false;
            if (this.replicas == null) {
                if (other.replicas != null) return false;
            } else {
                if (!this.replicas.equals(other.replicas)) return false;
            }
            return true;
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + partitionIndex;
            hashCode = 31 * hashCode + (replicas == null ? 0 : replicas.hashCode());
            return hashCode;
        }

        @Override
        public String toString() {
            return "ReassignablePartition("
                + "partitionIndex=" + partitionIndex
                + ", replicas=" + ((replicas == null) ? "null" : MessageUtil.deepToString(replicas.iterator()))
                + ")";
        }

        public int partitionIndex() {
            return this.partitionIndex;
        }

        public List<Integer> replicas() {
            return this.replicas;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public ReassignablePartition setPartitionIndex(int v) {
            this.partitionIndex = v;
            return this;
        }

        public ReassignablePartition setReplicas(List<Integer> v) {
            this.replicas = v;
            return this;
        }
    }
}
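The write()/read() pair above relies on the compact nullable-array wire format used by flexible protocol versions: the array length is serialized as an unsigned varint holding N + 1, so 0 denotes a null array and N + 1 denotes N elements. The stand-alone sketch below mirrors that convention; it assumes Kafka's org.apache.kafka.common.utils.ByteUtils varint helpers are on the classpath and is illustrative only, not part of the generated file.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.utils.ByteUtils;

class CompactNullableArraySketch {
    // Encode: null -> varint 0; otherwise varint (size + 1) followed by the elements.
    static void encode(ByteBuffer buf, List<Integer> values) {
        if (values == null) {
            ByteUtils.writeUnsignedVarint(0, buf);
            return;
        }
        ByteUtils.writeUnsignedVarint(values.size() + 1, buf);
        for (int v : values) {
            buf.putInt(v);
        }
    }

    // Decode: read the varint and subtract one; a negative result means null.
    static List<Integer> decode(ByteBuffer buf) {
        int length = ByteUtils.readUnsignedVarint(buf) - 1;
        if (length < 0) {
            return null;
        }
        List<Integer> values = new ArrayList<>(length);
        for (int i = 0; i < length; i++) {
            values.add(buf.getInt());
        }
        return values;
    }
}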
@@ -0,0 +1,795 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static java.util.Map.Entry;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;


public class AlterPartitionReassignmentsResponseData implements ApiMessage {
    private int throttleTimeMs;
    private short errorCode;
    private String errorMessage;
    private List<ReassignableTopicResponse> responses;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The top-level error code, or 0 if there was no error."),
            new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The top-level error message, or null if there was no error."),
            new Field("responses", new CompactArrayOf(ReassignableTopicResponse.SCHEMA_0), "The responses to topics to reassign."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };

    public AlterPartitionReassignmentsResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public AlterPartitionReassignmentsResponseData(Struct struct, short _version) {
        fromStruct(struct, _version);
    }

    public AlterPartitionReassignmentsResponseData() {
        this.throttleTimeMs = 0;
        this.errorCode = (short) 0;
        this.errorMessage = "";
        this.responses = new ArrayList<ReassignableTopicResponse>();
    }

    @Override
    public short apiKey() {
        return 45;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    @Override
    public void read(Readable _readable, short _version) {
        this.throttleTimeMs = _readable.readInt();
        this.errorCode = _readable.readShort();
        {
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                this.errorMessage = null;
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field errorMessage had invalid length " + length);
            } else {
                this.errorMessage = _readable.readString(length);
            }
        }
        {
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field responses was serialized as null");
            } else {
                ArrayList<ReassignableTopicResponse> newCollection = new ArrayList<ReassignableTopicResponse>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new ReassignableTopicResponse(_readable, _version));
                }
                this.responses = newCollection;
            }
        }
        this._unknownTaggedFields = null;
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }
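    // Note (illustrative comment, not generated code): the loop above implements
    // KIP-482 tagged fields. Each tagged field arrives as a (tag, size, bytes)
    // triple; tags this version does not recognize are preserved in
    // _unknownTaggedFields so that write() below can round-trip them unchanged.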

    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(throttleTimeMs);
        _writable.writeShort(errorCode);
        if (errorMessage == null) {
            _writable.writeUnsignedVarint(0);
        } else {
            byte[] _stringBytes = _cache.getSerializedValue(errorMessage);
            _writable.writeUnsignedVarint(_stringBytes.length + 1);
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeUnsignedVarint(responses.size() + 1);
        for (ReassignableTopicResponse responsesElement : responses) {
            responsesElement.write(_writable, _cache, _version);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    @SuppressWarnings("unchecked")
    @Override
    public void fromStruct(Struct struct, short _version) {
        NavigableMap<Integer, Object> _taggedFields = null;
        this._unknownTaggedFields = null;
        _taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
        this.throttleTimeMs = struct.getInt("throttle_time_ms");
        this.errorCode = struct.getShort("error_code");
        this.errorMessage = struct.getString("error_message");
        {
            Object[] _nestedObjects = struct.getArray("responses");
            this.responses = new ArrayList<ReassignableTopicResponse>(_nestedObjects.length);
            for (Object nestedObject : _nestedObjects) {
                this.responses.add(new ReassignableTopicResponse((Struct) nestedObject, _version));
            }
        }
        if (!_taggedFields.isEmpty()) {
            this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
            for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
                this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
            }
        }
    }

    @Override
    public Struct toStruct(short _version) {
        TreeMap<Integer, Object> _taggedFields = null;
        _taggedFields = new TreeMap<>();
        Struct struct = new Struct(SCHEMAS[_version]);
        struct.set("throttle_time_ms", this.throttleTimeMs);
        struct.set("error_code", this.errorCode);
        struct.set("error_message", this.errorMessage);
        {
            Struct[] _nestedObjects = new Struct[responses.size()];
            int i = 0;
            for (ReassignableTopicResponse element : this.responses) {
                _nestedObjects[i++] = element.toStruct(_version);
            }
            struct.set("responses", (Object[]) _nestedObjects);
        }
        struct.set("_tagged_fields", _taggedFields);
        return struct;
    }

    @Override
    public int size(ObjectSerializationCache _cache, short _version) {
        int _size = 0, _numTaggedFields = 0;
        _size += 4;
        _size += 2;
        if (errorMessage == null) {
            _size += 1;
        } else {
            byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'errorMessage' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(errorMessage, _stringBytes);
            _size += _stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1);
        }
        {
            int _arraySize = 0;
            _arraySize += ByteUtils.sizeOfUnsignedVarint(responses.size() + 1);
            for (ReassignableTopicResponse responsesElement : responses) {
                _arraySize += responsesElement.size(_cache, _version);
            }
            _size += _arraySize;
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                _size += _field.size();
            }
        }
        _size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
        return _size;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof AlterPartitionReassignmentsResponseData)) return false;
        AlterPartitionReassignmentsResponseData other = (AlterPartitionReassignmentsResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (errorCode != other.errorCode) return false;
        if (this.errorMessage == null) {
            if (other.errorMessage != null) return false;
        } else {
            if (!this.errorMessage.equals(other.errorMessage)) return false;
        }
        if (this.responses == null) {
            if (other.responses != null) return false;
        } else {
            if (!this.responses.equals(other.responses)) return false;
        }
        return true;
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + errorCode;
        hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode());
        hashCode = 31 * hashCode + (responses == null ? 0 : responses.hashCode());
        return hashCode;
    }

    @Override
    public String toString() {
        return "AlterPartitionReassignmentsResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", errorCode=" + errorCode
            + ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'")
            + ", responses=" + MessageUtil.deepToString(responses.iterator())
            + ")";
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    public short errorCode() {
        return this.errorCode;
    }

    public String errorMessage() {
        return this.errorMessage;
    }

    public List<ReassignableTopicResponse> responses() {
        return this.responses;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public AlterPartitionReassignmentsResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    public AlterPartitionReassignmentsResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }

    public AlterPartitionReassignmentsResponseData setErrorMessage(String v) {
        this.errorMessage = v;
        return this;
    }

    public AlterPartitionReassignmentsResponseData setResponses(List<ReassignableTopicResponse> v) {
        this.responses = v;
        return this;
    }

    static public class ReassignableTopicResponse implements Message {
        private String name;
        private List<ReassignablePartitionResponse> partitions;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("name", Type.COMPACT_STRING, "The topic name"),
                new Field("partitions", new CompactArrayOf(ReassignablePartitionResponse.SCHEMA_0), "The responses to partitions to reassign"),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0
        };

        public ReassignableTopicResponse(Readable _readable, short _version) {
            read(_readable, _version);
        }

        public ReassignableTopicResponse(Struct struct, short _version) {
            fromStruct(struct, _version);
        }

        public ReassignableTopicResponse() {
            this.name = "";
            this.partitions = new ArrayList<ReassignablePartitionResponse>();
        }


        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 0;
        }

        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignableTopicResponse");
            }
            {
                int length;
                length = _readable.readUnsignedVarint() - 1;
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field partitions was serialized as null");
                } else {
                    ArrayList<ReassignablePartitionResponse> newCollection = new ArrayList<ReassignablePartitionResponse>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new ReassignablePartitionResponse(_readable, _version));
                    }
                    this.partitions = newCollection;
                }
            }
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignableTopicResponse");
            }
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(name);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeUnsignedVarint(partitions.size() + 1);
            for (ReassignablePartitionResponse partitionsElement : partitions) {
                partitionsElement.write(_writable, _cache, _version);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }

        @SuppressWarnings("unchecked")
        @Override
        public void fromStruct(Struct struct, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignableTopicResponse");
            }
            NavigableMap<Integer, Object> _taggedFields = null;
            this._unknownTaggedFields = null;
            _taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
            this.name = struct.getString("name");
            {
                Object[] _nestedObjects = struct.getArray("partitions");
                this.partitions = new ArrayList<ReassignablePartitionResponse>(_nestedObjects.length);
                for (Object nestedObject : _nestedObjects) {
                    this.partitions.add(new ReassignablePartitionResponse((Struct) nestedObject, _version));
                }
            }
            if (!_taggedFields.isEmpty()) {
                this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
                for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
                    this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
                }
            }
        }

        @Override
        public Struct toStruct(short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignableTopicResponse");
            }
            TreeMap<Integer, Object> _taggedFields = null;
            _taggedFields = new TreeMap<>();
            Struct struct = new Struct(SCHEMAS[_version]);
            struct.set("name", this.name);
            {
                Struct[] _nestedObjects = new Struct[partitions.size()];
                int i = 0;
                for (ReassignablePartitionResponse element : this.partitions) {
                    _nestedObjects[i++] = element.toStruct(_version);
                }
                struct.set("partitions", (Object[]) _nestedObjects);
            }
            struct.set("_tagged_fields", _taggedFields);
            return struct;
        }

        @Override
        public int size(ObjectSerializationCache _cache, short _version) {
            int _size = 0, _numTaggedFields = 0;
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of ReassignableTopicResponse");
            }
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(name, _stringBytes);
                _size += _stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1);
            }
            {
                int _arraySize = 0;
                _arraySize += ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1);
                for (ReassignablePartitionResponse partitionsElement : partitions) {
                    _arraySize += partitionsElement.size(_cache, _version);
                }
                _size += _arraySize;
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                    _size += _field.size();
                }
            }
            _size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
            return _size;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof ReassignableTopicResponse)) return false;
            ReassignableTopicResponse other = (ReassignableTopicResponse) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            if (this.partitions == null) {
                if (other.partitions != null) return false;
            } else {
                if (!this.partitions.equals(other.partitions)) return false;
            }
            return true;
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode());
            return hashCode;
        }

        @Override
        public String toString() {
            return "ReassignableTopicResponse("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", partitions=" + MessageUtil.deepToString(partitions.iterator())
                + ")";
        }

        public String name() {
            return this.name;
        }

        public List<ReassignablePartitionResponse> partitions() {
            return this.partitions;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public ReassignableTopicResponse setName(String v) {
            this.name = v;
            return this;
        }

        public ReassignableTopicResponse setPartitions(List<ReassignablePartitionResponse> v) {
            this.partitions = v;
            return this;
        }
    }

    static public class ReassignablePartitionResponse implements Message {
        private int partitionIndex;
        private short errorCode;
        private String errorMessage;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("partition_index", Type.INT32, "The partition index."),
                new Field("error_code", Type.INT16, "The error code for this partition, or 0 if there was no error."),
                new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The error message for this partition, or null if there was no error."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0
        };

        public ReassignablePartitionResponse(Readable _readable, short _version) {
            read(_readable, _version);
        }

        public ReassignablePartitionResponse(Struct struct, short _version) {
            fromStruct(struct, _version);
        }

        public ReassignablePartitionResponse() {
            this.partitionIndex = 0;
            this.errorCode = (short) 0;
            this.errorMessage = "";
        }


        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 0;
        }

        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignablePartitionResponse");
            }
            this.partitionIndex = _readable.readInt();
            this.errorCode = _readable.readShort();
            {
                int length;
                length = _readable.readUnsignedVarint() - 1;
                if (length < 0) {
                    this.errorMessage = null;
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field errorMessage had invalid length " + length);
                } else {
                    this.errorMessage = _readable.readString(length);
                }
            }
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignablePartitionResponse");
            }
            int _numTaggedFields = 0;
            _writable.writeInt(partitionIndex);
            _writable.writeShort(errorCode);
            if (errorMessage == null) {
                _writable.writeUnsignedVarint(0);
            } else {
                byte[] _stringBytes = _cache.getSerializedValue(errorMessage);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }

        @SuppressWarnings("unchecked")
        @Override
        public void fromStruct(Struct struct, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignablePartitionResponse");
            }
            NavigableMap<Integer, Object> _taggedFields = null;
            this._unknownTaggedFields = null;
            _taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
            this.partitionIndex = struct.getInt("partition_index");
            this.errorCode = struct.getShort("error_code");
            this.errorMessage = struct.getString("error_message");
            if (!_taggedFields.isEmpty()) {
                this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
                for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
                    this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
                }
            }
        }

        @Override
        public Struct toStruct(short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of ReassignablePartitionResponse");
            }
            TreeMap<Integer, Object> _taggedFields = null;
            _taggedFields = new TreeMap<>();
            Struct struct = new Struct(SCHEMAS[_version]);
            struct.set("partition_index", this.partitionIndex);
            struct.set("error_code", this.errorCode);
            struct.set("error_message", this.errorMessage);
            struct.set("_tagged_fields", _taggedFields);
            return struct;
        }

        @Override
        public int size(ObjectSerializationCache _cache, short _version) {
            int _size = 0, _numTaggedFields = 0;
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of ReassignablePartitionResponse");
            }
            _size += 4;
            _size += 2;
            if (errorMessage == null) {
                _size += 1;
            } else {
                byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'errorMessage' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(errorMessage, _stringBytes);
                _size += _stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1);
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                    _size += _field.size();
                }
            }
            _size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
            return _size;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof ReassignablePartitionResponse)) return false;
            ReassignablePartitionResponse other = (ReassignablePartitionResponse) obj;
            if (partitionIndex != other.partitionIndex) return false;
            if (errorCode != other.errorCode) return false;
            if (this.errorMessage == null) {
                if (other.errorMessage != null) return false;
            } else {
                if (!this.errorMessage.equals(other.errorMessage)) return false;
            }
            return true;
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + partitionIndex;
            hashCode = 31 * hashCode + errorCode;
            hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode());
            return hashCode;
        }

        @Override
        public String toString() {
            return "ReassignablePartitionResponse("
                + "partitionIndex=" + partitionIndex
                + ", errorCode=" + errorCode
                + ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'")
                + ")";
        }

        public int partitionIndex() {
            return this.partitionIndex;
        }

        public short errorCode() {
            return this.errorCode;
        }

        public String errorMessage() {
            return this.errorMessage;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public ReassignablePartitionResponse setPartitionIndex(int v) {
            this.partitionIndex = v;
            return this;
        }

        public ReassignablePartitionResponse setErrorCode(short v) {
            this.errorCode = v;
            return this;
        }

        public ReassignablePartitionResponse setErrorMessage(String v) {
            this.errorMessage = v;
            return this;
        }
    }
}
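A hedged usage sketch of the generated class above: size() is called first because it populates the ObjectSerializationCache that write() later reads back, and the Readable/Writable arguments can be backed by a ByteBuffer via Kafka's ByteBufferAccessor. The accessor and cache class names are assumptions drawn from the Kafka protocol package of this era, not shown in this diff; the field values are illustrative only.

import java.nio.ByteBuffer;
import java.util.Collections;
import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.ObjectSerializationCache;

class RoundTripSketch {
    public static void main(String[] args) {
        AlterPartitionReassignmentsResponseData data =
            new AlterPartitionReassignmentsResponseData()
                .setThrottleTimeMs(0)
                .setErrorCode((short) 0)
                .setErrorMessage(null)
                .setResponses(Collections.emptyList());
        short version = 0;
        ObjectSerializationCache cache = new ObjectSerializationCache();
        // size() must run before write(): it caches the serialized strings that
        // write() fetches via _cache.getSerializedValue(...).
        ByteBuffer buf = ByteBuffer.allocate(data.size(cache, version));
        data.write(new ByteBufferAccessor(buf), cache, version);
        buf.flip();
        AlterPartitionReassignmentsResponseData copy =
            new AlterPartitionReassignmentsResponseData(new ByteBufferAccessor(buf), version);
        System.out.println(data.equals(copy)); // expected: true
    }
}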
@@ -0,0 +1,768 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.ImplicitLinkedHashCollection;
import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection;


public class AlterReplicaLogDirsRequestData implements ApiMessage {
    private AlterReplicaLogDirCollection dirs;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("dirs", new ArrayOf(AlterReplicaLogDir.SCHEMA_0), "The alterations to make for each directory.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1
    };

    public AlterReplicaLogDirsRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public AlterReplicaLogDirsRequestData(Struct struct, short _version) {
        fromStruct(struct, _version);
    }

    public AlterReplicaLogDirsRequestData() {
        this.dirs = new AlterReplicaLogDirCollection(0);
    }

    @Override
    public short apiKey() {
        return 34;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 1;
    }

    @Override
    public void read(Readable _readable, short _version) {
        {
            int arrayLength;
            arrayLength = _readable.readInt();
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field dirs was serialized as null");
            } else {
                AlterReplicaLogDirCollection newCollection = new AlterReplicaLogDirCollection(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new AlterReplicaLogDir(_readable, _version));
                }
                this.dirs = newCollection;
            }
        }
        this._unknownTaggedFields = null;
    }
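    // Note (illustrative comment, not generated code): versions 0-1 of this
    // message are non-flexible, unlike the compact-encoded messages earlier in
    // this diff. read() above therefore uses a fixed INT32 array length (and the
    // nested classes use INT16 string lengths), and no tagged-field section is
    // read or written.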

    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(dirs.size());
        for (AlterReplicaLogDir dirsElement : dirs) {
            dirsElement.write(_writable, _cache, _version);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public void fromStruct(Struct struct, short _version) {
        this._unknownTaggedFields = null;
        {
            Object[] _nestedObjects = struct.getArray("dirs");
            this.dirs = new AlterReplicaLogDirCollection(_nestedObjects.length);
            for (Object nestedObject : _nestedObjects) {
                this.dirs.add(new AlterReplicaLogDir((Struct) nestedObject, _version));
            }
        }
    }

    @Override
    public Struct toStruct(short _version) {
        TreeMap<Integer, Object> _taggedFields = null;
        Struct struct = new Struct(SCHEMAS[_version]);
        {
            Struct[] _nestedObjects = new Struct[dirs.size()];
            int i = 0;
            for (AlterReplicaLogDir element : this.dirs) {
                _nestedObjects[i++] = element.toStruct(_version);
            }
            struct.set("dirs", (Object[]) _nestedObjects);
        }
        return struct;
    }

    @Override
    public int size(ObjectSerializationCache _cache, short _version) {
        int _size = 0, _numTaggedFields = 0;
        {
            int _arraySize = 0;
            _arraySize += 4;
            for (AlterReplicaLogDir dirsElement : dirs) {
                _arraySize += dirsElement.size(_cache, _version);
            }
            _size += _arraySize;
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                _size += _field.size();
            }
        }
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
        return _size;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof AlterReplicaLogDirsRequestData)) return false;
        AlterReplicaLogDirsRequestData other = (AlterReplicaLogDirsRequestData) obj;
        if (this.dirs == null) {
            if (other.dirs != null) return false;
        } else {
            if (!this.dirs.equals(other.dirs)) return false;
        }
        return true;
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (dirs == null ? 0 : dirs.hashCode());
        return hashCode;
    }

    @Override
    public String toString() {
        return "AlterReplicaLogDirsRequestData("
            + "dirs=" + MessageUtil.deepToString(dirs.iterator())
            + ")";
    }

    public AlterReplicaLogDirCollection dirs() {
        return this.dirs;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public AlterReplicaLogDirsRequestData setDirs(AlterReplicaLogDirCollection v) {
        this.dirs = v;
        return this;
    }

    static public class AlterReplicaLogDir implements Message, ImplicitLinkedHashMultiCollection.Element {
        private String path;
        private AlterReplicaLogDirTopicCollection topics;
        private List<RawTaggedField> _unknownTaggedFields;
        private int next;
        private int prev;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("path", Type.STRING, "The absolute directory path."),
                new Field("topics", new ArrayOf(AlterReplicaLogDirTopic.SCHEMA_0), "The topics to add to the directory.")
            );

        public static final Schema SCHEMA_1 = SCHEMA_0;

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public AlterReplicaLogDir(Readable _readable, short _version) {
            read(_readable, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public AlterReplicaLogDir(Struct struct, short _version) {
            fromStruct(struct, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public AlterReplicaLogDir() {
            this.path = "";
            this.topics = new AlterReplicaLogDirTopicCollection(0);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }


        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDir");
            }
            {
                int length;
                length = _readable.readShort();
                if (length < 0) {
                    throw new RuntimeException("non-nullable field path was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field path had invalid length " + length);
                } else {
                    this.path = _readable.readString(length);
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field topics was serialized as null");
                } else {
                    AlterReplicaLogDirTopicCollection newCollection = new AlterReplicaLogDirTopicCollection(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new AlterReplicaLogDirTopic(_readable, _version));
                    }
                    this.topics = newCollection;
                }
            }
            this._unknownTaggedFields = null;
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDir");
            }
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(path);
                _writable.writeShort((short) _stringBytes.length);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeInt(topics.size());
            for (AlterReplicaLogDirTopic topicsElement : topics) {
                topicsElement.write(_writable, _cache, _version);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }

        @SuppressWarnings("unchecked")
        @Override
        public void fromStruct(Struct struct, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDir");
            }
            this._unknownTaggedFields = null;
            this.path = struct.getString("path");
            {
                Object[] _nestedObjects = struct.getArray("topics");
                this.topics = new AlterReplicaLogDirTopicCollection(_nestedObjects.length);
                for (Object nestedObject : _nestedObjects) {
                    this.topics.add(new AlterReplicaLogDirTopic((Struct) nestedObject, _version));
                }
            }
        }

        @Override
        public Struct toStruct(short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDir");
            }
            TreeMap<Integer, Object> _taggedFields = null;
            Struct struct = new Struct(SCHEMAS[_version]);
            struct.set("path", this.path);
            {
                Struct[] _nestedObjects = new Struct[topics.size()];
                int i = 0;
                for (AlterReplicaLogDirTopic element : this.topics) {
                    _nestedObjects[i++] = element.toStruct(_version);
                }
                struct.set("topics", (Object[]) _nestedObjects);
            }
            return struct;
        }

        @Override
        public int size(ObjectSerializationCache _cache, short _version) {
            int _size = 0, _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of AlterReplicaLogDir");
            }
            {
                byte[] _stringBytes = path.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'path' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(path, _stringBytes);
                _size += _stringBytes.length + 2;
            }
            {
                int _arraySize = 0;
                _arraySize += 4;
                for (AlterReplicaLogDirTopic topicsElement : topics) {
                    _arraySize += topicsElement.size(_cache, _version);
                }
                _size += _arraySize;
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                    _size += _field.size();
                }
            }
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
            return _size;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof AlterReplicaLogDir)) return false;
            AlterReplicaLogDir other = (AlterReplicaLogDir) obj;
            if (this.path == null) {
                if (other.path != null) return false;
            } else {
                if (!this.path.equals(other.path)) return false;
            }
            return true;
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (path == null ? 0 : path.hashCode());
            return hashCode;
        }

        @Override
        public String toString() {
            return "AlterReplicaLogDir("
                + "path=" + ((path == null) ? "null" : "'" + path.toString() + "'")
                + ", topics=" + MessageUtil.deepToString(topics.iterator())
                + ")";
        }

        public String path() {
            return this.path;
        }

        public AlterReplicaLogDirTopicCollection topics() {
            return this.topics;
        }

        @Override
        public int next() {
            return this.next;
        }

        @Override
        public int prev() {
            return this.prev;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public AlterReplicaLogDir setPath(String v) {
            this.path = v;
            return this;
        }

        public AlterReplicaLogDir setTopics(AlterReplicaLogDirTopicCollection v) {
            this.topics = v;
            return this;
        }

        @Override
        public void setNext(int v) {
            this.next = v;
        }

        @Override
        public void setPrev(int v) {
            this.prev = v;
        }
    }

    static public class AlterReplicaLogDirTopic implements Message, ImplicitLinkedHashMultiCollection.Element {
        private String name;
        private List<Integer> partitions;
        private List<RawTaggedField> _unknownTaggedFields;
        private int next;
        private int prev;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("name", Type.STRING, "The topic name."),
                new Field("partitions", new ArrayOf(Type.INT32), "The partition indexes.")
            );

        public static final Schema SCHEMA_1 = SCHEMA_0;

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public AlterReplicaLogDirTopic(Readable _readable, short _version) {
            read(_readable, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public AlterReplicaLogDirTopic(Struct struct, short _version) {
            fromStruct(struct, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public AlterReplicaLogDirTopic() {
            this.name = "";
            this.partitions = new ArrayList<Integer>();
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }


        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDirTopic");
            }
            {
                int length;
                length = _readable.readShort();
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field partitions was serialized as null");
                } else {
                    ArrayList<Integer> newCollection = new ArrayList<Integer>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(_readable.readInt());
                    }
                    this.partitions = newCollection;
                }
            }
            this._unknownTaggedFields = null;
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDirTopic");
            }
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(name);
                _writable.writeShort((short) _stringBytes.length);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeInt(partitions.size());
            for (Integer partitionsElement : partitions) {
                _writable.writeInt(partitionsElement);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }

        @SuppressWarnings("unchecked")
        @Override
        public void fromStruct(Struct struct, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDirTopic");
            }
            this._unknownTaggedFields = null;
            this.name = struct.getString("name");
            {
                Object[] _nestedObjects = struct.getArray("partitions");
                this.partitions = new ArrayList<Integer>(_nestedObjects.length);
                for (Object nestedObject : _nestedObjects) {
                    this.partitions.add((Integer) nestedObject);
                }
            }
        }

        @Override
        public Struct toStruct(short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDirTopic");
            }
            TreeMap<Integer, Object> _taggedFields = null;
            Struct struct = new Struct(SCHEMAS[_version]);
            struct.set("name", this.name);
            {
                Integer[] _nestedObjects = new Integer[partitions.size()];
                int i = 0;
                for (Integer element : this.partitions) {
                    _nestedObjects[i++] = element;
                }
                struct.set("partitions", (Object[]) _nestedObjects);
            }
            return struct;
        }

        @Override
        public int size(ObjectSerializationCache _cache, short _version) {
            int _size = 0, _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of AlterReplicaLogDirTopic");
            }
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(name, _stringBytes);
                _size += _stringBytes.length + 2;
            }
            {
                int _arraySize = 0;
                _arraySize += 4;
                _arraySize += partitions.size() * 4;
                _size += _arraySize;
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                    _size += _field.size();
                }
            }
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
            return _size;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof AlterReplicaLogDirTopic)) return false;
            AlterReplicaLogDirTopic other = (AlterReplicaLogDirTopic) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            return true;
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            return hashCode;
        }

        @Override
        public String toString() {
            return "AlterReplicaLogDirTopic("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", partitions=" + MessageUtil.deepToString(partitions.iterator())
                + ")";
        }

        public String name() {
            return this.name;
        }

        public List<Integer> partitions() {
            return this.partitions;
        }

        @Override
        public int next() {
            return this.next;
        }

        @Override
        public int prev() {
            return this.prev;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public AlterReplicaLogDirTopic setName(String v) {
            this.name = v;
            return this;
        }

        public AlterReplicaLogDirTopic setPartitions(List<Integer> v) {
            this.partitions = v;
            return this;
        }

        @Override
        public void setNext(int v) {
            this.next = v;
        }

        @Override
        public void setPrev(int v) {
            this.prev = v;
        }
    }

    public static class AlterReplicaLogDirTopicCollection extends ImplicitLinkedHashMultiCollection<AlterReplicaLogDirTopic> {
        public AlterReplicaLogDirTopicCollection() {
            super();
        }

        public AlterReplicaLogDirTopicCollection(int expectedNumElements) {
            super(expectedNumElements);
        }

        public AlterReplicaLogDirTopicCollection(Iterator<AlterReplicaLogDirTopic> iterator) {
            super(iterator);
        }

        public AlterReplicaLogDirTopic find(String name) {
            AlterReplicaLogDirTopic _key = new AlterReplicaLogDirTopic();
            _key.setName(name);
            return find(_key);
        }

        public List<AlterReplicaLogDirTopic> findAll(String name) {
            AlterReplicaLogDirTopic _key = new AlterReplicaLogDirTopic();
            _key.setName(name);
            return findAll(_key);
        }

    }

    public static class AlterReplicaLogDirCollection extends ImplicitLinkedHashMultiCollection<AlterReplicaLogDir> {
        public AlterReplicaLogDirCollection() {
            super();
        }

        public AlterReplicaLogDirCollection(int expectedNumElements) {
            super(expectedNumElements);
        }

        public AlterReplicaLogDirCollection(Iterator<AlterReplicaLogDir> iterator) {
            super(iterator);
        }

        public AlterReplicaLogDir find(String path) {
            AlterReplicaLogDir _key = new AlterReplicaLogDir();
            _key.setPath(path);
            return find(_key);
        }

        public List<AlterReplicaLogDir> findAll(String path) {
            AlterReplicaLogDir _key = new AlterReplicaLogDir();
            _key.setPath(path);
            return findAll(_key);
        }

    }
}
@@ -0,0 +1,625 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;


public class AlterReplicaLogDirsResponseData implements ApiMessage {
    private int throttleTimeMs;
    private List<AlterReplicaLogDirTopicResult> results;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("results", new ArrayOf(AlterReplicaLogDirTopicResult.SCHEMA_0), "The results for each topic.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1
    };

    public AlterReplicaLogDirsResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public AlterReplicaLogDirsResponseData(Struct struct, short _version) {
        fromStruct(struct, _version);
    }

    public AlterReplicaLogDirsResponseData() {
        this.throttleTimeMs = 0;
        this.results = new ArrayList<AlterReplicaLogDirTopicResult>();
    }

    @Override
    public short apiKey() {
        return 34;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 1;
    }

    @Override
    public void read(Readable _readable, short _version) {
        this.throttleTimeMs = _readable.readInt();
        {
            int arrayLength;
            arrayLength = _readable.readInt();
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field results was serialized as null");
            } else {
                ArrayList<AlterReplicaLogDirTopicResult> newCollection = new ArrayList<AlterReplicaLogDirTopicResult>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new AlterReplicaLogDirTopicResult(_readable, _version));
                }
                this.results = newCollection;
            }
        }
        this._unknownTaggedFields = null;
    }

    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(throttleTimeMs);
        _writable.writeInt(results.size());
        for (AlterReplicaLogDirTopicResult resultsElement : results) {
            resultsElement.write(_writable, _cache, _version);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public void fromStruct(Struct struct, short _version) {
        this._unknownTaggedFields = null;
        this.throttleTimeMs = struct.getInt("throttle_time_ms");
        {
            Object[] _nestedObjects = struct.getArray("results");
            this.results = new ArrayList<AlterReplicaLogDirTopicResult>(_nestedObjects.length);
            for (Object nestedObject : _nestedObjects) {
                this.results.add(new AlterReplicaLogDirTopicResult((Struct) nestedObject, _version));
            }
        }
    }

    @Override
    public Struct toStruct(short _version) {
        TreeMap<Integer, Object> _taggedFields = null;
        Struct struct = new Struct(SCHEMAS[_version]);
        struct.set("throttle_time_ms", this.throttleTimeMs);
        {
            Struct[] _nestedObjects = new Struct[results.size()];
            int i = 0;
            for (AlterReplicaLogDirTopicResult element : this.results) {
                _nestedObjects[i++] = element.toStruct(_version);
            }
            struct.set("results", (Object[]) _nestedObjects);
        }
        return struct;
    }

    @Override
    public int size(ObjectSerializationCache _cache, short _version) {
        int _size = 0, _numTaggedFields = 0;
        _size += 4;
        {
            int _arraySize = 0;
            _arraySize += 4;
            for (AlterReplicaLogDirTopicResult resultsElement : results) {
                _arraySize += resultsElement.size(_cache, _version);
            }
            _size += _arraySize;
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                _size += _field.size();
            }
        }
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
        return _size;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof AlterReplicaLogDirsResponseData)) return false;
        AlterReplicaLogDirsResponseData other = (AlterReplicaLogDirsResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (this.results == null) {
            if (other.results != null) return false;
        } else {
            if (!this.results.equals(other.results)) return false;
        }
        return true;
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + (results == null ? 0 : results.hashCode());
        return hashCode;
    }

    @Override
    public String toString() {
        return "AlterReplicaLogDirsResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", results=" + MessageUtil.deepToString(results.iterator())
            + ")";
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    public List<AlterReplicaLogDirTopicResult> results() {
        return this.results;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public AlterReplicaLogDirsResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    public AlterReplicaLogDirsResponseData setResults(List<AlterReplicaLogDirTopicResult> v) {
        this.results = v;
        return this;
    }

    static public class AlterReplicaLogDirTopicResult implements Message {
        private String topicName;
        private List<AlterReplicaLogDirPartitionResult> partitions;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("topic_name", Type.STRING, "The name of the topic."),
                new Field("partitions", new ArrayOf(AlterReplicaLogDirPartitionResult.SCHEMA_0), "The results for each partition.")
            );

        public static final Schema SCHEMA_1 = SCHEMA_0;

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public AlterReplicaLogDirTopicResult(Readable _readable, short _version) {
            read(_readable, _version);
        }

        public AlterReplicaLogDirTopicResult(Struct struct, short _version) {
            fromStruct(struct, _version);
        }

        public AlterReplicaLogDirTopicResult() {
            this.topicName = "";
            this.partitions = new ArrayList<AlterReplicaLogDirPartitionResult>();
        }


        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDirTopicResult");
            }
            {
                int length;
                length = _readable.readShort();
                if (length < 0) {
                    throw new RuntimeException("non-nullable field topicName was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field topicName had invalid length " + length);
                } else {
                    this.topicName = _readable.readString(length);
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field partitions was serialized as null");
                } else {
                    ArrayList<AlterReplicaLogDirPartitionResult> newCollection = new ArrayList<AlterReplicaLogDirPartitionResult>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new AlterReplicaLogDirPartitionResult(_readable, _version));
                    }
                    this.partitions = newCollection;
                }
            }
            this._unknownTaggedFields = null;
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDirTopicResult");
            }
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(topicName);
                _writable.writeShort((short) _stringBytes.length);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeInt(partitions.size());
            for (AlterReplicaLogDirPartitionResult partitionsElement : partitions) {
                partitionsElement.write(_writable, _cache, _version);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }

        @SuppressWarnings("unchecked")
        @Override
        public void fromStruct(Struct struct, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDirTopicResult");
            }
            this._unknownTaggedFields = null;
            this.topicName = struct.getString("topic_name");
            {
                Object[] _nestedObjects = struct.getArray("partitions");
                this.partitions = new ArrayList<AlterReplicaLogDirPartitionResult>(_nestedObjects.length);
                for (Object nestedObject : _nestedObjects) {
                    this.partitions.add(new AlterReplicaLogDirPartitionResult((Struct) nestedObject, _version));
                }
            }
        }

        @Override
        public Struct toStruct(short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDirTopicResult");
            }
            TreeMap<Integer, Object> _taggedFields = null;
            Struct struct = new Struct(SCHEMAS[_version]);
            struct.set("topic_name", this.topicName);
            {
                Struct[] _nestedObjects = new Struct[partitions.size()];
                int i = 0;
                for (AlterReplicaLogDirPartitionResult element : this.partitions) {
                    _nestedObjects[i++] = element.toStruct(_version);
                }
                struct.set("partitions", (Object[]) _nestedObjects);
            }
            return struct;
        }

        @Override
        public int size(ObjectSerializationCache _cache, short _version) {
            int _size = 0, _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of AlterReplicaLogDirTopicResult");
            }
            {
                byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'topicName' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(topicName, _stringBytes);
                _size += _stringBytes.length + 2;
            }
            {
                int _arraySize = 0;
                _arraySize += 4;
                for (AlterReplicaLogDirPartitionResult partitionsElement : partitions) {
                    _arraySize += partitionsElement.size(_cache, _version);
                }
                _size += _arraySize;
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                    _size += _field.size();
                }
            }
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
            return _size;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof AlterReplicaLogDirTopicResult)) return false;
            AlterReplicaLogDirTopicResult other = (AlterReplicaLogDirTopicResult) obj;
            if (this.topicName == null) {
                if (other.topicName != null) return false;
            } else {
                if (!this.topicName.equals(other.topicName)) return false;
            }
            if (this.partitions == null) {
                if (other.partitions != null) return false;
            } else {
                if (!this.partitions.equals(other.partitions)) return false;
            }
            return true;
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (topicName == null ? 0 : topicName.hashCode());
            hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode());
            return hashCode;
        }

        @Override
        public String toString() {
            return "AlterReplicaLogDirTopicResult("
                + "topicName=" + ((topicName == null) ? "null" : "'" + topicName.toString() + "'")
                + ", partitions=" + MessageUtil.deepToString(partitions.iterator())
                + ")";
        }

        public String topicName() {
            return this.topicName;
        }

        public List<AlterReplicaLogDirPartitionResult> partitions() {
            return this.partitions;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public AlterReplicaLogDirTopicResult setTopicName(String v) {
            this.topicName = v;
            return this;
        }

        public AlterReplicaLogDirTopicResult setPartitions(List<AlterReplicaLogDirPartitionResult> v) {
            this.partitions = v;
            return this;
        }
    }

    static public class AlterReplicaLogDirPartitionResult implements Message {
        private int partitionIndex;
        private short errorCode;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("partition_index", Type.INT32, "The partition index."),
                new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.")
            );

        public static final Schema SCHEMA_1 = SCHEMA_0;

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public AlterReplicaLogDirPartitionResult(Readable _readable, short _version) {
            read(_readable, _version);
        }

        public AlterReplicaLogDirPartitionResult(Struct struct, short _version) {
            fromStruct(struct, _version);
        }

        public AlterReplicaLogDirPartitionResult() {
            this.partitionIndex = 0;
            this.errorCode = (short) 0;
        }


        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDirPartitionResult");
            }
            this.partitionIndex = _readable.readInt();
            this.errorCode = _readable.readShort();
            this._unknownTaggedFields = null;
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDirPartitionResult");
            }
            int _numTaggedFields = 0;
            _writable.writeInt(partitionIndex);
            _writable.writeShort(errorCode);
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }

        @SuppressWarnings("unchecked")
        @Override
        public void fromStruct(Struct struct, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDirPartitionResult");
            }
            this._unknownTaggedFields = null;
            this.partitionIndex = struct.getInt("partition_index");
            this.errorCode = struct.getShort("error_code");
        }

        @Override
        public Struct toStruct(short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AlterReplicaLogDirPartitionResult");
            }
            TreeMap<Integer, Object> _taggedFields = null;
            Struct struct = new Struct(SCHEMAS[_version]);
            struct.set("partition_index", this.partitionIndex);
            struct.set("error_code", this.errorCode);
            return struct;
        }

        @Override
        public int size(ObjectSerializationCache _cache, short _version) {
            int _size = 0, _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of AlterReplicaLogDirPartitionResult");
            }
            _size += 4;
            _size += 2;
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                    _size += _field.size();
                }
            }
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
            return _size;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof AlterReplicaLogDirPartitionResult)) return false;
            AlterReplicaLogDirPartitionResult other = (AlterReplicaLogDirPartitionResult) obj;
            if (partitionIndex != other.partitionIndex) return false;
            if (errorCode != other.errorCode) return false;
            return true;
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + partitionIndex;
            hashCode = 31 * hashCode + errorCode;
            return hashCode;
        }

        @Override
        public String toString() {
            return "AlterReplicaLogDirPartitionResult("
                + "partitionIndex=" + partitionIndex
                + ", errorCode=" + errorCode
                + ")";
        }

        public int partitionIndex() {
            return this.partitionIndex;
        }

        public short errorCode() {
            return this.errorCode;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public AlterReplicaLogDirPartitionResult setPartitionIndex(int v) {
            this.partitionIndex = v;
            return this;
        }

        public AlterReplicaLogDirPartitionResult setErrorCode(short v) {
            this.errorCode = v;
            return this;
        }
    }
}
@@ -0,0 +1,877 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.types.Schema;

public enum ApiMessageType {
    PRODUCE("Produce", (short) 0, ProduceRequestData.SCHEMAS, ProduceResponseData.SCHEMAS),
    FETCH("Fetch", (short) 1, FetchRequestData.SCHEMAS, FetchResponseData.SCHEMAS),
    LIST_OFFSET("ListOffset", (short) 2, ListOffsetRequestData.SCHEMAS, ListOffsetResponseData.SCHEMAS),
    METADATA("Metadata", (short) 3, MetadataRequestData.SCHEMAS, MetadataResponseData.SCHEMAS),
    LEADER_AND_ISR("LeaderAndIsr", (short) 4, LeaderAndIsrRequestData.SCHEMAS, LeaderAndIsrResponseData.SCHEMAS),
    STOP_REPLICA("StopReplica", (short) 5, StopReplicaRequestData.SCHEMAS, StopReplicaResponseData.SCHEMAS),
    UPDATE_METADATA("UpdateMetadata", (short) 6, UpdateMetadataRequestData.SCHEMAS, UpdateMetadataResponseData.SCHEMAS),
    CONTROLLED_SHUTDOWN("ControlledShutdown", (short) 7, ControlledShutdownRequestData.SCHEMAS, ControlledShutdownResponseData.SCHEMAS),
    OFFSET_COMMIT("OffsetCommit", (short) 8, OffsetCommitRequestData.SCHEMAS, OffsetCommitResponseData.SCHEMAS),
    OFFSET_FETCH("OffsetFetch", (short) 9, OffsetFetchRequestData.SCHEMAS, OffsetFetchResponseData.SCHEMAS),
    FIND_COORDINATOR("FindCoordinator", (short) 10, FindCoordinatorRequestData.SCHEMAS, FindCoordinatorResponseData.SCHEMAS),
    JOIN_GROUP("JoinGroup", (short) 11, JoinGroupRequestData.SCHEMAS, JoinGroupResponseData.SCHEMAS),
    HEARTBEAT("Heartbeat", (short) 12, HeartbeatRequestData.SCHEMAS, HeartbeatResponseData.SCHEMAS),
    LEAVE_GROUP("LeaveGroup", (short) 13, LeaveGroupRequestData.SCHEMAS, LeaveGroupResponseData.SCHEMAS),
    SYNC_GROUP("SyncGroup", (short) 14, SyncGroupRequestData.SCHEMAS, SyncGroupResponseData.SCHEMAS),
    DESCRIBE_GROUPS("DescribeGroups", (short) 15, DescribeGroupsRequestData.SCHEMAS, DescribeGroupsResponseData.SCHEMAS),
    LIST_GROUPS("ListGroups", (short) 16, ListGroupsRequestData.SCHEMAS, ListGroupsResponseData.SCHEMAS),
    SASL_HANDSHAKE("SaslHandshake", (short) 17, SaslHandshakeRequestData.SCHEMAS, SaslHandshakeResponseData.SCHEMAS),
    API_VERSIONS("ApiVersions", (short) 18, ApiVersionsRequestData.SCHEMAS, ApiVersionsResponseData.SCHEMAS),
    CREATE_TOPICS("CreateTopics", (short) 19, CreateTopicsRequestData.SCHEMAS, CreateTopicsResponseData.SCHEMAS),
    DELETE_TOPICS("DeleteTopics", (short) 20, DeleteTopicsRequestData.SCHEMAS, DeleteTopicsResponseData.SCHEMAS),
    DELETE_RECORDS("DeleteRecords", (short) 21, DeleteRecordsRequestData.SCHEMAS, DeleteRecordsResponseData.SCHEMAS),
    INIT_PRODUCER_ID("InitProducerId", (short) 22, InitProducerIdRequestData.SCHEMAS, InitProducerIdResponseData.SCHEMAS),
    OFFSET_FOR_LEADER_EPOCH("OffsetForLeaderEpoch", (short) 23, OffsetForLeaderEpochRequestData.SCHEMAS, OffsetForLeaderEpochResponseData.SCHEMAS),
    ADD_PARTITIONS_TO_TXN("AddPartitionsToTxn", (short) 24, AddPartitionsToTxnRequestData.SCHEMAS, AddPartitionsToTxnResponseData.SCHEMAS),
    ADD_OFFSETS_TO_TXN("AddOffsetsToTxn", (short) 25, AddOffsetsToTxnRequestData.SCHEMAS, AddOffsetsToTxnResponseData.SCHEMAS),
    END_TXN("EndTxn", (short) 26, EndTxnRequestData.SCHEMAS, EndTxnResponseData.SCHEMAS),
    WRITE_TXN_MARKERS("WriteTxnMarkers", (short) 27, WriteTxnMarkersRequestData.SCHEMAS, WriteTxnMarkersResponseData.SCHEMAS),
    TXN_OFFSET_COMMIT("TxnOffsetCommit", (short) 28, TxnOffsetCommitRequestData.SCHEMAS, TxnOffsetCommitResponseData.SCHEMAS),
    DESCRIBE_ACLS("DescribeAcls", (short) 29, DescribeAclsRequestData.SCHEMAS, DescribeAclsResponseData.SCHEMAS),
    CREATE_ACLS("CreateAcls", (short) 30, CreateAclsRequestData.SCHEMAS, CreateAclsResponseData.SCHEMAS),
    DELETE_ACLS("DeleteAcls", (short) 31, DeleteAclsRequestData.SCHEMAS, DeleteAclsResponseData.SCHEMAS),
    DESCRIBE_CONFIGS("DescribeConfigs", (short) 32, DescribeConfigsRequestData.SCHEMAS, DescribeConfigsResponseData.SCHEMAS),
    ALTER_CONFIGS("AlterConfigs", (short) 33, AlterConfigsRequestData.SCHEMAS, AlterConfigsResponseData.SCHEMAS),
    ALTER_REPLICA_LOG_DIRS("AlterReplicaLogDirs", (short) 34, AlterReplicaLogDirsRequestData.SCHEMAS, AlterReplicaLogDirsResponseData.SCHEMAS),
    DESCRIBE_LOG_DIRS("DescribeLogDirs", (short) 35, DescribeLogDirsRequestData.SCHEMAS, DescribeLogDirsResponseData.SCHEMAS),
    SASL_AUTHENTICATE("SaslAuthenticate", (short) 36, SaslAuthenticateRequestData.SCHEMAS, SaslAuthenticateResponseData.SCHEMAS),
    CREATE_PARTITIONS("CreatePartitions", (short) 37, CreatePartitionsRequestData.SCHEMAS, CreatePartitionsResponseData.SCHEMAS),
    CREATE_DELEGATION_TOKEN("CreateDelegationToken", (short) 38, CreateDelegationTokenRequestData.SCHEMAS, CreateDelegationTokenResponseData.SCHEMAS),
    RENEW_DELEGATION_TOKEN("RenewDelegationToken", (short) 39, RenewDelegationTokenRequestData.SCHEMAS, RenewDelegationTokenResponseData.SCHEMAS),
    EXPIRE_DELEGATION_TOKEN("ExpireDelegationToken", (short) 40, ExpireDelegationTokenRequestData.SCHEMAS, ExpireDelegationTokenResponseData.SCHEMAS),
    DESCRIBE_DELEGATION_TOKEN("DescribeDelegationToken", (short) 41, DescribeDelegationTokenRequestData.SCHEMAS, DescribeDelegationTokenResponseData.SCHEMAS),
    DELETE_GROUPS("DeleteGroups", (short) 42, DeleteGroupsRequestData.SCHEMAS, DeleteGroupsResponseData.SCHEMAS),
    ELECT_LEADERS("ElectLeaders", (short) 43, ElectLeadersRequestData.SCHEMAS, ElectLeadersResponseData.SCHEMAS),
    INCREMENTAL_ALTER_CONFIGS("IncrementalAlterConfigs", (short) 44, IncrementalAlterConfigsRequestData.SCHEMAS, IncrementalAlterConfigsResponseData.SCHEMAS),
    ALTER_PARTITION_REASSIGNMENTS("AlterPartitionReassignments", (short) 45, AlterPartitionReassignmentsRequestData.SCHEMAS, AlterPartitionReassignmentsResponseData.SCHEMAS),
    LIST_PARTITION_REASSIGNMENTS("ListPartitionReassignments", (short) 46, ListPartitionReassignmentsRequestData.SCHEMAS, ListPartitionReassignmentsResponseData.SCHEMAS),
    OFFSET_DELETE("OffsetDelete", (short) 47, OffsetDeleteRequestData.SCHEMAS, OffsetDeleteResponseData.SCHEMAS),
    BUFFER("Buffer", (short) 1000, BufferRequestData.SCHEMAS, BufferResponseData.SCHEMAS);

    private final String name;
    private final short apiKey;
    private final Schema[] requestSchemas;
    private final Schema[] responseSchemas;

    ApiMessageType(String name, short apiKey, Schema[] requestSchemas, Schema[] responseSchemas) {
        this.name = name;
        this.apiKey = apiKey;
        this.requestSchemas = requestSchemas;
        this.responseSchemas = responseSchemas;
    }

    public static ApiMessageType fromApiKey(short apiKey) {
        switch (apiKey) {
            case 0:
                return PRODUCE;
            case 1:
                return FETCH;
            case 2:
                return LIST_OFFSET;
            case 3:
                return METADATA;
            case 4:
                return LEADER_AND_ISR;
            case 5:
                return STOP_REPLICA;
            case 6:
                return UPDATE_METADATA;
            case 7:
                return CONTROLLED_SHUTDOWN;
            case 8:
                return OFFSET_COMMIT;
            case 9:
                return OFFSET_FETCH;
            case 10:
                return FIND_COORDINATOR;
            case 11:
                return JOIN_GROUP;
            case 12:
                return HEARTBEAT;
            case 13:
                return LEAVE_GROUP;
            case 14:
                return SYNC_GROUP;
            case 15:
                return DESCRIBE_GROUPS;
            case 16:
                return LIST_GROUPS;
            case 17:
                return SASL_HANDSHAKE;
            case 18:
                return API_VERSIONS;
            case 19:
                return CREATE_TOPICS;
            case 20:
                return DELETE_TOPICS;
            case 21:
                return DELETE_RECORDS;
            case 22:
                return INIT_PRODUCER_ID;
            case 23:
                return OFFSET_FOR_LEADER_EPOCH;
            case 24:
                return ADD_PARTITIONS_TO_TXN;
            case 25:
                return ADD_OFFSETS_TO_TXN;
            case 26:
                return END_TXN;
            case 27:
                return WRITE_TXN_MARKERS;
            case 28:
                return TXN_OFFSET_COMMIT;
            case 29:
                return DESCRIBE_ACLS;
            case 30:
                return CREATE_ACLS;
            case 31:
                return DELETE_ACLS;
            case 32:
                return DESCRIBE_CONFIGS;
            case 33:
                return ALTER_CONFIGS;
            case 34:
                return ALTER_REPLICA_LOG_DIRS;
            case 35:
                return DESCRIBE_LOG_DIRS;
            case 36:
                return SASL_AUTHENTICATE;
            case 37:
                return CREATE_PARTITIONS;
            case 38:
                return CREATE_DELEGATION_TOKEN;
            case 39:
                return RENEW_DELEGATION_TOKEN;
            case 40:
                return EXPIRE_DELEGATION_TOKEN;
            case 41:
                return DESCRIBE_DELEGATION_TOKEN;
            case 42:
                return DELETE_GROUPS;
            case 43:
                return ELECT_LEADERS;
            case 44:
                return INCREMENTAL_ALTER_CONFIGS;
            case 45:
                return ALTER_PARTITION_REASSIGNMENTS;
            case 46:
                return LIST_PARTITION_REASSIGNMENTS;
            case 47:
                return OFFSET_DELETE;
            case 1000:
                return BUFFER;
            default:
                throw new UnsupportedVersionException("Unsupported API key " + apiKey);
        }
    }

    public ApiMessage newRequest() {
        switch (apiKey) {
            case 0:
                return new ProduceRequestData();
            case 1:
                return new FetchRequestData();
            case 2:
                return new ListOffsetRequestData();
            case 3:
                return new MetadataRequestData();
            case 4:
                return new LeaderAndIsrRequestData();
            case 5:
                return new StopReplicaRequestData();
            case 6:
                return new UpdateMetadataRequestData();
            case 7:
                return new ControlledShutdownRequestData();
            case 8:
                return new OffsetCommitRequestData();
            case 9:
                return new OffsetFetchRequestData();
            case 10:
                return new FindCoordinatorRequestData();
            case 11:
                return new JoinGroupRequestData();
            case 12:
                return new HeartbeatRequestData();
            case 13:
                return new LeaveGroupRequestData();
            case 14:
                return new SyncGroupRequestData();
            case 15:
                return new DescribeGroupsRequestData();
            case 16:
                return new ListGroupsRequestData();
            case 17:
                return new SaslHandshakeRequestData();
            case 18:
                return new ApiVersionsRequestData();
            case 19:
                return new CreateTopicsRequestData();
            case 20:
                return new DeleteTopicsRequestData();
            case 21:
                return new DeleteRecordsRequestData();
            case 22:
                return new InitProducerIdRequestData();
            case 23:
                return new OffsetForLeaderEpochRequestData();
            case 24:
                return new AddPartitionsToTxnRequestData();
            case 25:
                return new AddOffsetsToTxnRequestData();
            case 26:
                return new EndTxnRequestData();
            case 27:
                return new WriteTxnMarkersRequestData();
            case 28:
                return new TxnOffsetCommitRequestData();
            case 29:
                return new DescribeAclsRequestData();
            case 30:
                return new CreateAclsRequestData();
            case 31:
                return new DeleteAclsRequestData();
            case 32:
                return new DescribeConfigsRequestData();
            case 33:
                return new AlterConfigsRequestData();
            case 34:
                return new AlterReplicaLogDirsRequestData();
            case 35:
                return new DescribeLogDirsRequestData();
            case 36:
                return new SaslAuthenticateRequestData();
            case 37:
                return new CreatePartitionsRequestData();
            case 38:
                return new CreateDelegationTokenRequestData();
            case 39:
                return new RenewDelegationTokenRequestData();
            case 40:
                return new ExpireDelegationTokenRequestData();
            case 41:
                return new DescribeDelegationTokenRequestData();
            case 42:
                return new DeleteGroupsRequestData();
            case 43:
                return new ElectLeadersRequestData();
            case 44:
                return new IncrementalAlterConfigsRequestData();
            case 45:
                return new AlterPartitionReassignmentsRequestData();
            case 46:
                return new ListPartitionReassignmentsRequestData();
            case 47:
                return new OffsetDeleteRequestData();
            case 1000:
                return new BufferRequestData();
            default:
                throw new UnsupportedVersionException("Unsupported request API key " + apiKey);
        }
    }

    public ApiMessage newResponse() {
        switch (apiKey) {
            case 0:
                return new ProduceResponseData();
            case 1:
                return new FetchResponseData();
            case 2:
                return new ListOffsetResponseData();
            case 3:
                return new MetadataResponseData();
            case 4:
                return new LeaderAndIsrResponseData();
            case 5:
                return new StopReplicaResponseData();
            case 6:
                return new UpdateMetadataResponseData();
            case 7:
                return new ControlledShutdownResponseData();
            case 8:
                return new OffsetCommitResponseData();
            case 9:
                return new OffsetFetchResponseData();
            case 10:
                return new FindCoordinatorResponseData();
            case 11:
                return new JoinGroupResponseData();
            case 12:
                return new HeartbeatResponseData();
            case 13:
                return new LeaveGroupResponseData();
            case 14:
                return new SyncGroupResponseData();
            case 15:
                return new DescribeGroupsResponseData();
            case 16:
                return new ListGroupsResponseData();
            case 17:
                return new SaslHandshakeResponseData();
            case 18:
                return new ApiVersionsResponseData();
            case 19:
                return new CreateTopicsResponseData();
            case 20:
                return new DeleteTopicsResponseData();
            case 21:
                return new DeleteRecordsResponseData();
            case 22:
                return new InitProducerIdResponseData();
            case 23:
                return new OffsetForLeaderEpochResponseData();
            case 24:
                return new AddPartitionsToTxnResponseData();
            case 25:
                return new AddOffsetsToTxnResponseData();
            case 26:
                return new EndTxnResponseData();
            case 27:
                return new WriteTxnMarkersResponseData();
            case 28:
                return new TxnOffsetCommitResponseData();
            case 29:
                return new DescribeAclsResponseData();
            case 30:
                return new CreateAclsResponseData();
            case 31:
                return new DeleteAclsResponseData();
            case 32:
                return new DescribeConfigsResponseData();
            case 33:
                return new AlterConfigsResponseData();
            case 34:
                return new AlterReplicaLogDirsResponseData();
            case 35:
                return new DescribeLogDirsResponseData();
            case 36:
                return new SaslAuthenticateResponseData();
            case 37:
                return new CreatePartitionsResponseData();
            case 38:
                return new CreateDelegationTokenResponseData();
            case 39:
                return new RenewDelegationTokenResponseData();
            case 40:
                return new ExpireDelegationTokenResponseData();
            case 41:
                return new DescribeDelegationTokenResponseData();
            case 42:
                return new DeleteGroupsResponseData();
            case 43:
                return new ElectLeadersResponseData();
            case 44:
                return new IncrementalAlterConfigsResponseData();
            case 45:
                return new AlterPartitionReassignmentsResponseData();
            case 46:
                return new ListPartitionReassignmentsResponseData();
            case 47:
                return new OffsetDeleteResponseData();
            case 1000:
                return new BufferResponseData();
            default:
                throw new UnsupportedVersionException("Unsupported response API key " + apiKey);
        }
    }

    public short apiKey() {
        return this.apiKey;
    }

    public Schema[] requestSchemas() {
        return this.requestSchemas;
    }

    public Schema[] responseSchemas() {
        return this.responseSchemas;
    }

    @Override
    public String toString() {
        return this.name();
    }

    public short requestHeaderVersion(short _version) {
        switch (apiKey) {
            case 0:
                return (short) 1;
            case 1:
                return (short) 1;
            case 2:
                return (short) 1;
            case 3:
                if (_version >= 9) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 4:
                if (_version >= 4) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 5:
                if (_version >= 2) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 6:
                if (_version >= 6) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 7:
                if (_version == 0) {
                    return (short) 0;
                }
                if (_version >= 3) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 8:
                if (_version >= 8) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 9:
                if (_version >= 6) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 10:
                if (_version >= 3) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 11:
                if (_version >= 6) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 12:
                if (_version >= 4) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 13:
                if (_version >= 4) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 14:
                if (_version >= 4) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 15:
                if (_version >= 5) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 16:
                if (_version >= 3) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 17:
                return (short) 1;
            case 18:
                if (_version >= 3) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 19:
                if (_version >= 5) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 20:
                if (_version >= 4) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 21:
                return (short) 1;
            case 22:
                if (_version >= 2) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 23:
                return (short) 1;
            case 24:
                return (short) 1;
            case 25:
                return (short) 1;
            case 26:
                return (short) 1;
            case 27:
                return (short) 1;
            case 28:
                if (_version >= 3) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 29:
                if (_version >= 2) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 30:
                if (_version >= 2) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 31:
                if (_version >= 2) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 32:
                return (short) 1;
            case 33:
                return (short) 1;
            case 34:
                return (short) 1;
            case 35:
                return (short) 1;
            case 36:
                if (_version >= 2) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 37:
                if (_version >= 2) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 38:
                if (_version >= 2) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 39:
                if (_version >= 2) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 40:
                if (_version >= 2) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 41:
                if (_version >= 2) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 42:
                if (_version >= 2) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 43:
                if (_version >= 2) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 44:
                if (_version >= 1) {
                    return (short) 2;
                } else {
                    return (short) 1;
                }
            case 45:
                return (short) 2;
            case 46:
                return (short) 2;
            case 47:
                return (short) 1;
            case 1000:
                return (short) 1;
            default:
                throw new UnsupportedVersionException("Unsupported API key " + apiKey);
        }
    }

    public short responseHeaderVersion(short _version) {
        switch (apiKey) {
            case 0:
                return (short) 0;
            case 1:
                return (short) 0;
            case 2:
                return (short) 0;
            case 3:
                if (_version >= 9) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 4:
                if (_version >= 4) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 5:
                if (_version >= 2) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 6:
                if (_version >= 6) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 7:
                if (_version >= 3) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 8:
                if (_version >= 8) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 9:
                if (_version >= 6) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 10:
                if (_version >= 3) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 11:
                if (_version >= 6) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 12:
                if (_version >= 4) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 13:
                if (_version >= 4) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 14:
                if (_version >= 4) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 15:
                if (_version >= 5) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 16:
                if (_version >= 3) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 17:
                return (short) 0;
            case 18:
                return (short) 0;
            case 19:
                if (_version >= 5) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 20:
                if (_version >= 4) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 21:
                return (short) 0;
            case 22:
                if (_version >= 2) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 23:
                return (short) 0;
            case 24:
                return (short) 0;
            case 25:
                return (short) 0;
            case 26:
                return (short) 0;
            case 27:
                return (short) 0;
            case 28:
                if (_version >= 3) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 29:
                if (_version >= 2) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 30:
                if (_version >= 2) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 31:
                if (_version >= 2) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 32:
                return (short) 0;
            case 33:
                return (short) 0;
            case 34:
                return (short) 0;
            case 35:
                return (short) 0;
            case 36:
                if (_version >= 2) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 37:
                if (_version >= 2) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 38:
                if (_version >= 2) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 39:
                if (_version >= 2) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 40:
                if (_version >= 2) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 41:
                if (_version >= 2) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 42:
                if (_version >= 2) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 43:
                if (_version >= 2) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 44:
                if (_version >= 1) {
                    return (short) 1;
                } else {
                    return (short) 0;
                }
            case 45:
                return (short) 1;
            case 46:
                return (short) 1;
            case 47:
                return (short) 0;
            case 1000:
                return (short) 0;
            default:
                throw new UnsupportedVersionException("Unsupported API key " + apiKey);
        }
    }
}
@@ -0,0 +1,318 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static java.util.Map.Entry;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;


public class ApiVersionsRequestData implements ApiMessage {
    private String clientSoftwareName;
    private String clientSoftwareVersion;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema SCHEMA_2 = SCHEMA_1;

    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("client_software_name", Type.COMPACT_STRING, "The name of the client."),
            new Field("client_software_version", Type.COMPACT_STRING, "The version of the client."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3
    };

    public ApiVersionsRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public ApiVersionsRequestData(Struct struct, short _version) {
        fromStruct(struct, _version);
    }

    public ApiVersionsRequestData() {
        this.clientSoftwareName = "";
        this.clientSoftwareVersion = "";
    }

    @Override
    public short apiKey() {
        return 18;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 3;
    }

    @Override
    public void read(Readable _readable, short _version) {
        if (_version >= 3) {
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                throw new RuntimeException("non-nullable field clientSoftwareName was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field clientSoftwareName had invalid length " + length);
            } else {
                this.clientSoftwareName = _readable.readString(length);
            }
        } else {
            this.clientSoftwareName = "";
        }
        if (_version >= 3) {
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                throw new RuntimeException("non-nullable field clientSoftwareVersion was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field clientSoftwareVersion had invalid length " + length);
            } else {
                this.clientSoftwareVersion = _readable.readString(length);
            }
        } else {
            this.clientSoftwareVersion = "";
        }
        this._unknownTaggedFields = null;
        if (_version >= 3) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version >= 3) {
            {
                byte[] _stringBytes = _cache.getSerializedValue(clientSoftwareName);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
        }
        if (_version >= 3) {
            {
                byte[] _stringBytes = _cache.getSerializedValue(clientSoftwareVersion);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 3) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public void fromStruct(Struct struct, short _version) {
        NavigableMap<Integer, Object> _taggedFields = null;
        this._unknownTaggedFields = null;
        if (_version >= 3) {
            _taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
        }
        if (_version >= 3) {
            this.clientSoftwareName = struct.getString("client_software_name");
        } else {
            this.clientSoftwareName = "";
        }
        if (_version >= 3) {
            this.clientSoftwareVersion = struct.getString("client_software_version");
        } else {
            this.clientSoftwareVersion = "";
        }
        if (_version >= 3) {
            if (!_taggedFields.isEmpty()) {
                this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
                for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
                    this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
                }
            }
        }
    }

    @Override
    public Struct toStruct(short _version) {
        TreeMap<Integer, Object> _taggedFields = null;
        if (_version >= 3) {
            _taggedFields = new TreeMap<>();
        }
        Struct struct = new Struct(SCHEMAS[_version]);
        if (_version >= 3) {
            struct.set("client_software_name", this.clientSoftwareName);
        }
        if (_version >= 3) {
            struct.set("client_software_version", this.clientSoftwareVersion);
        }
        if (_version >= 3) {
            struct.set("_tagged_fields", _taggedFields);
        }
        return struct;
    }

    @Override
    public int size(ObjectSerializationCache _cache, short _version) {
        int _size = 0, _numTaggedFields = 0;
        if (_version >= 3) {
            {
                byte[] _stringBytes = clientSoftwareName.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'clientSoftwareName' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(clientSoftwareName, _stringBytes);
                _size += _stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1);
            }
        }
        if (_version >= 3) {
            {
                byte[] _stringBytes = clientSoftwareVersion.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'clientSoftwareVersion' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(clientSoftwareVersion, _stringBytes);
                _size += _stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                _size += _field.size();
            }
        }
        if (_version >= 3) {
            _size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
|
||||
}
|
||||
return _size;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (!(obj instanceof ApiVersionsRequestData)) return false;
|
||||
ApiVersionsRequestData other = (ApiVersionsRequestData) obj;
|
||||
if (this.clientSoftwareName == null) {
|
||||
if (other.clientSoftwareName != null) return false;
|
||||
} else {
|
||||
if (!this.clientSoftwareName.equals(other.clientSoftwareName)) return false;
|
||||
}
|
||||
if (this.clientSoftwareVersion == null) {
|
||||
if (other.clientSoftwareVersion != null) return false;
|
||||
} else {
|
||||
if (!this.clientSoftwareVersion.equals(other.clientSoftwareVersion)) return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int hashCode = 0;
|
||||
hashCode = 31 * hashCode + (clientSoftwareName == null ? 0 : clientSoftwareName.hashCode());
|
||||
hashCode = 31 * hashCode + (clientSoftwareVersion == null ? 0 : clientSoftwareVersion.hashCode());
|
||||
return hashCode;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ApiVersionsRequestData("
|
||||
+ "clientSoftwareName=" + ((clientSoftwareName == null) ? "null" : "'" + clientSoftwareName.toString() + "'")
|
||||
+ ", clientSoftwareVersion=" + ((clientSoftwareVersion == null) ? "null" : "'" + clientSoftwareVersion.toString() + "'")
|
||||
+ ")";
|
||||
}
|
||||
|
||||
public String clientSoftwareName() {
|
||||
return this.clientSoftwareName;
|
||||
}
|
||||
|
||||
public String clientSoftwareVersion() {
|
||||
return this.clientSoftwareVersion;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<RawTaggedField> unknownTaggedFields() {
|
||||
if (_unknownTaggedFields == null) {
|
||||
_unknownTaggedFields = new ArrayList<>(0);
|
||||
}
|
||||
return _unknownTaggedFields;
|
||||
}
|
||||
|
||||
public ApiVersionsRequestData setClientSoftwareName(String v) {
|
||||
this.clientSoftwareName = v;
|
||||
return this;
|
||||
}
|
||||
|
||||
public ApiVersionsRequestData setClientSoftwareVersion(String v) {
|
||||
this.clientSoftwareVersion = v;
|
||||
return this;
|
||||
}
|
||||
}
|
||||
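As a usage sketch of the generated request class above (an illustration under assumptions, not part of the diff: it assumes Kafka's ByteBufferAccessor, which implements both Readable and Writable in the same clients library), a version-3 request can be sized, written, and parsed back. Note that size() must run first, because it populates the ObjectSerializationCache with the UTF-8 bytes that write() later fetches via getSerializedValue():

    import java.nio.ByteBuffer;
    import org.apache.kafka.common.protocol.ByteBufferAccessor;
    import org.apache.kafka.common.protocol.ObjectSerializationCache;

    short version = 3;
    ApiVersionsRequestData request = new ApiVersionsRequestData()
        .setClientSoftwareName("apache-kafka-java")   // example values
        .setClientSoftwareVersion("2.4.0");
    ObjectSerializationCache cache = new ObjectSerializationCache();
    ByteBuffer buf = ByteBuffer.allocate(request.size(cache, version)); // size() also fills the cache
    request.write(new ByteBufferAccessor(buf), cache, version);
    buf.flip();
    ApiVersionsRequestData parsed =
        new ApiVersionsRequestData(new ByteBufferAccessor(buf), version);
    // parsed.equals(request) holds: both COMPACT_STRING fields survive the round trip.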
@@ -0,0 +1,642 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.ImplicitLinkedHashCollection;
import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection;

import static java.util.Map.Entry;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;


public class ApiVersionsResponseData implements ApiMessage {
    private short errorCode;
    private ApiVersionsResponseKeyCollection apiKeys;
    private int throttleTimeMs;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("error_code", Type.INT16, "The top-level error code."),
            new Field("api_keys", new ArrayOf(ApiVersionsResponseKey.SCHEMA_0), "The APIs supported by the broker.")
        );

    public static final Schema SCHEMA_1 =
        new Schema(
            new Field("error_code", Type.INT16, "The top-level error code."),
            new Field("api_keys", new ArrayOf(ApiVersionsResponseKey.SCHEMA_0), "The APIs supported by the broker."),
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.")
        );

    public static final Schema SCHEMA_2 = SCHEMA_1;

    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("error_code", Type.INT16, "The top-level error code."),
            new Field("api_keys", new CompactArrayOf(ApiVersionsResponseKey.SCHEMA_3), "The APIs supported by the broker."),
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3
    };

    public ApiVersionsResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public ApiVersionsResponseData(Struct struct, short _version) {
        fromStruct(struct, _version);
    }

    public ApiVersionsResponseData() {
        this.errorCode = (short) 0;
        this.apiKeys = new ApiVersionsResponseKeyCollection(0);
        this.throttleTimeMs = 0;
    }

    @Override
    public short apiKey() {
        return 18;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 3;
    }

    @Override
    public void read(Readable _readable, short _version) {
        this.errorCode = _readable.readShort();
        {
            if (_version >= 3) {
                int arrayLength;
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field apiKeys was serialized as null");
                } else {
                    ApiVersionsResponseKeyCollection newCollection = new ApiVersionsResponseKeyCollection(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new ApiVersionsResponseKey(_readable, _version));
                    }
                    this.apiKeys = newCollection;
                }
            } else {
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field apiKeys was serialized as null");
                } else {
                    ApiVersionsResponseKeyCollection newCollection = new ApiVersionsResponseKeyCollection(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new ApiVersionsResponseKey(_readable, _version));
                    }
                    this.apiKeys = newCollection;
                }
            }
        }
        if (_version >= 1) {
            this.throttleTimeMs = _readable.readInt();
        } else {
            this.throttleTimeMs = 0;
        }
        this._unknownTaggedFields = null;
        if (_version >= 3) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeShort(errorCode);
        if (_version >= 3) {
            _writable.writeUnsignedVarint(apiKeys.size() + 1);
            for (ApiVersionsResponseKey apiKeysElement : apiKeys) {
                apiKeysElement.write(_writable, _cache, _version);
            }
        } else {
            _writable.writeInt(apiKeys.size());
            for (ApiVersionsResponseKey apiKeysElement : apiKeys) {
                apiKeysElement.write(_writable, _cache, _version);
            }
        }
        if (_version >= 1) {
            _writable.writeInt(throttleTimeMs);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 3) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public void fromStruct(Struct struct, short _version) {
        NavigableMap<Integer, Object> _taggedFields = null;
        this._unknownTaggedFields = null;
        if (_version >= 3) {
            _taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
        }
        this.errorCode = struct.getShort("error_code");
        {
            Object[] _nestedObjects = struct.getArray("api_keys");
            this.apiKeys = new ApiVersionsResponseKeyCollection(_nestedObjects.length);
            for (Object nestedObject : _nestedObjects) {
                this.apiKeys.add(new ApiVersionsResponseKey((Struct) nestedObject, _version));
            }
        }
        if (_version >= 1) {
            this.throttleTimeMs = struct.getInt("throttle_time_ms");
        } else {
            this.throttleTimeMs = 0;
        }
        if (_version >= 3) {
            if (!_taggedFields.isEmpty()) {
                this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
                for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
                    this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
                }
            }
        }
    }

    @Override
    public Struct toStruct(short _version) {
        TreeMap<Integer, Object> _taggedFields = null;
        if (_version >= 3) {
            _taggedFields = new TreeMap<>();
        }
        Struct struct = new Struct(SCHEMAS[_version]);
        struct.set("error_code", this.errorCode);
        {
            Struct[] _nestedObjects = new Struct[apiKeys.size()];
            int i = 0;
            for (ApiVersionsResponseKey element : this.apiKeys) {
                _nestedObjects[i++] = element.toStruct(_version);
            }
            struct.set("api_keys", (Object[]) _nestedObjects);
        }
        if (_version >= 1) {
            struct.set("throttle_time_ms", this.throttleTimeMs);
        }
        if (_version >= 3) {
            struct.set("_tagged_fields", _taggedFields);
        }
        return struct;
    }

    @Override
    public int size(ObjectSerializationCache _cache, short _version) {
        int _size = 0, _numTaggedFields = 0;
        _size += 2;
        {
            int _arraySize = 0;
            if (_version >= 3) {
                _arraySize += ByteUtils.sizeOfUnsignedVarint(apiKeys.size() + 1);
            } else {
                _arraySize += 4;
            }
            for (ApiVersionsResponseKey apiKeysElement : apiKeys) {
                _arraySize += apiKeysElement.size(_cache, _version);
            }
            _size += _arraySize;
        }
        if (_version >= 1) {
            _size += 4;
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                _size += _field.size();
            }
        }
        if (_version >= 3) {
            _size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
        return _size;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof ApiVersionsResponseData)) return false;
        ApiVersionsResponseData other = (ApiVersionsResponseData) obj;
        if (errorCode != other.errorCode) return false;
        if (this.apiKeys == null) {
            if (other.apiKeys != null) return false;
        } else {
            if (!this.apiKeys.equals(other.apiKeys)) return false;
        }
        if (throttleTimeMs != other.throttleTimeMs) return false;
        return true;
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + errorCode;
        hashCode = 31 * hashCode + (apiKeys == null ? 0 : apiKeys.hashCode());
        hashCode = 31 * hashCode + throttleTimeMs;
        return hashCode;
    }

    @Override
    public String toString() {
        return "ApiVersionsResponseData("
            + "errorCode=" + errorCode
            + ", apiKeys=" + MessageUtil.deepToString(apiKeys.iterator())
            + ", throttleTimeMs=" + throttleTimeMs
            + ")";
    }

    public short errorCode() {
        return this.errorCode;
    }

    public ApiVersionsResponseKeyCollection apiKeys() {
        return this.apiKeys;
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public ApiVersionsResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }

    public ApiVersionsResponseData setApiKeys(ApiVersionsResponseKeyCollection v) {
        this.apiKeys = v;
        return this;
    }

    public ApiVersionsResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    static public class ApiVersionsResponseKey implements Message, ImplicitLinkedHashMultiCollection.Element {
        private short apiKey;
        private short minVersion;
        private short maxVersion;
        private List<RawTaggedField> _unknownTaggedFields;
        private int next;
        private int prev;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("api_key", Type.INT16, "The API index."),
                new Field("min_version", Type.INT16, "The minimum supported version, inclusive."),
                new Field("max_version", Type.INT16, "The maximum supported version, inclusive.")
            );

        public static final Schema SCHEMA_1 = SCHEMA_0;

        public static final Schema SCHEMA_2 = SCHEMA_1;

        public static final Schema SCHEMA_3 =
            new Schema(
                new Field("api_key", Type.INT16, "The API index."),
                new Field("min_version", Type.INT16, "The minimum supported version, inclusive."),
                new Field("max_version", Type.INT16, "The maximum supported version, inclusive."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1,
            SCHEMA_2,
            SCHEMA_3
        };

        public ApiVersionsResponseKey(Readable _readable, short _version) {
            read(_readable, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public ApiVersionsResponseKey(Struct struct, short _version) {
            fromStruct(struct, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public ApiVersionsResponseKey() {
            this.apiKey = (short) 0;
            this.minVersion = (short) 0;
            this.maxVersion = (short) 0;
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }


        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 3;
        }

        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 3) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of ApiVersionsResponseKey");
            }
            this.apiKey = _readable.readShort();
            this.minVersion = _readable.readShort();
            this.maxVersion = _readable.readShort();
            this._unknownTaggedFields = null;
            if (_version >= 3) {
                int _numTaggedFields = _readable.readUnsignedVarint();
                for (int _i = 0; _i < _numTaggedFields; _i++) {
                    int _tag = _readable.readUnsignedVarint();
                    int _size = _readable.readUnsignedVarint();
                    switch (_tag) {
                        default:
                            this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                            break;
                    }
                }
            }
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version > 3) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of ApiVersionsResponseKey");
            }
            int _numTaggedFields = 0;
            _writable.writeShort(apiKey);
            _writable.writeShort(minVersion);
            _writable.writeShort(maxVersion);
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_version >= 3) {
                _writable.writeUnsignedVarint(_numTaggedFields);
                _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        @SuppressWarnings("unchecked")
        @Override
        public void fromStruct(Struct struct, short _version) {
            if (_version > 3) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of ApiVersionsResponseKey");
            }
            NavigableMap<Integer, Object> _taggedFields = null;
            this._unknownTaggedFields = null;
            if (_version >= 3) {
                _taggedFields = (NavigableMap<Integer, Object>) struct.get("_tagged_fields");
            }
            this.apiKey = struct.getShort("api_key");
            this.minVersion = struct.getShort("min_version");
            this.maxVersion = struct.getShort("max_version");
            if (_version >= 3) {
                if (!_taggedFields.isEmpty()) {
                    this._unknownTaggedFields = new ArrayList<>(_taggedFields.size());
                    for (Entry<Integer, Object> entry : _taggedFields.entrySet()) {
                        this._unknownTaggedFields.add((RawTaggedField) entry.getValue());
                    }
                }
            }
        }

        @Override
        public Struct toStruct(short _version) {
            if (_version > 3) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of ApiVersionsResponseKey");
            }
            TreeMap<Integer, Object> _taggedFields = null;
            if (_version >= 3) {
                _taggedFields = new TreeMap<>();
            }
            Struct struct = new Struct(SCHEMAS[_version]);
            struct.set("api_key", this.apiKey);
            struct.set("min_version", this.minVersion);
            struct.set("max_version", this.maxVersion);
            if (_version >= 3) {
                struct.set("_tagged_fields", _taggedFields);
            }
            return struct;
        }

        @Override
        public int size(ObjectSerializationCache _cache, short _version) {
            int _size = 0, _numTaggedFields = 0;
            if (_version > 3) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of ApiVersionsResponseKey");
            }
            _size += 2;
            _size += 2;
            _size += 2;
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                    _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                    _size += _field.size();
                }
            }
            if (_version >= 3) {
                _size += ByteUtils.sizeOfUnsignedVarint(_numTaggedFields);
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
            return _size;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof ApiVersionsResponseKey)) return false;
            ApiVersionsResponseKey other = (ApiVersionsResponseKey) obj;
            if (apiKey != other.apiKey) return false;
            return true;
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + apiKey;
            return hashCode;
        }

        @Override
        public String toString() {
            return "ApiVersionsResponseKey("
                + "apiKey=" + apiKey
                + ", minVersion=" + minVersion
                + ", maxVersion=" + maxVersion
                + ")";
        }

        public short apiKey() {
            return this.apiKey;
        }

        public short minVersion() {
            return this.minVersion;
        }

        public short maxVersion() {
            return this.maxVersion;
        }

        @Override
        public int next() {
            return this.next;
        }

        @Override
        public int prev() {
            return this.prev;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public ApiVersionsResponseKey setApiKey(short v) {
            this.apiKey = v;
            return this;
        }

        public ApiVersionsResponseKey setMinVersion(short v) {
            this.minVersion = v;
            return this;
        }

        public ApiVersionsResponseKey setMaxVersion(short v) {
            this.maxVersion = v;
            return this;
        }

        @Override
        public void setNext(int v) {
            this.next = v;
        }

        @Override
        public void setPrev(int v) {
            this.prev = v;
        }
    }

    public static class ApiVersionsResponseKeyCollection extends ImplicitLinkedHashMultiCollection<ApiVersionsResponseKey> {
        public ApiVersionsResponseKeyCollection() {
            super();
        }

        public ApiVersionsResponseKeyCollection(int expectedNumElements) {
            super(expectedNumElements);
        }

        public ApiVersionsResponseKeyCollection(Iterator<ApiVersionsResponseKey> iterator) {
            super(iterator);
        }

        public ApiVersionsResponseKey find(short apiKey) {
            ApiVersionsResponseKey _key = new ApiVersionsResponseKey();
            _key.setApiKey(apiKey);
            return find(_key);
        }

        public List<ApiVersionsResponseKey> findAll(short apiKey) {
            ApiVersionsResponseKey _key = new ApiVersionsResponseKey();
            _key.setApiKey(apiKey);
            return findAll(_key);
        }

    }
}
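Since ApiVersionsResponseKey compares and hashes on apiKey alone (see its equals() and hashCode() above), the collection's find(short) lookup only needs the key field set. A brief usage sketch of the generated response class, with illustrative values:

    ApiVersionsResponseData.ApiVersionsResponseKeyCollection keys =
        new ApiVersionsResponseData.ApiVersionsResponseKeyCollection();
    keys.add(new ApiVersionsResponseData.ApiVersionsResponseKey()
        .setApiKey((short) 18)      // ApiVersions itself
        .setMinVersion((short) 0)
        .setMaxVersion((short) 3));
    ApiVersionsResponseData response = new ApiVersionsResponseData()
        .setErrorCode((short) 0)
        .setApiKeys(keys)
        .setThrottleTimeMs(0);
    // Looks up the version range advertised for api key 18: min=0, max=3.
    ApiVersionsResponseData.ApiVersionsResponseKey range = keys.find((short) 18);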
@@ -0,0 +1,184 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.TreeMap;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.Bytes;


public class BufferRequestData implements ApiMessage {
    private byte[] buffer;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("buffer", Type.BYTES, "The data buffer.")
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };

    public BufferRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public BufferRequestData(Struct struct, short _version) {
        fromStruct(struct, _version);
    }

    public BufferRequestData() {
        this.buffer = Bytes.EMPTY;
    }

    @Override
    public short apiKey() {
        return 1000;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    @Override
    public void read(Readable _readable, short _version) {
        {
            int length;
            length = _readable.readInt();
            if (length < 0) {
                throw new RuntimeException("non-nullable field buffer was serialized as null");
            } else {
                byte[] newBytes = new byte[length];
                _readable.readArray(newBytes);
                this.buffer = newBytes;
            }
        }
        this._unknownTaggedFields = null;
    }

    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(buffer.length);
        _writable.writeByteArray(buffer);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }

    @SuppressWarnings("unchecked")
    @Override
    public void fromStruct(Struct struct, short _version) {
        this._unknownTaggedFields = null;
        this.buffer = struct.getByteArray("buffer");
    }

    @Override
    public Struct toStruct(short _version) {
        TreeMap<Integer, Object> _taggedFields = null;
        Struct struct = new Struct(SCHEMAS[_version]);
        struct.setByteArray("buffer", this.buffer);
        return struct;
    }

    @Override
    public int size(ObjectSerializationCache _cache, short _version) {
        int _size = 0, _numTaggedFields = 0;
        {
            int _bytesSize = buffer.length;
            _bytesSize += 4;
            _size += _bytesSize;
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size += ByteUtils.sizeOfUnsignedVarint(_field.tag());
                _size += ByteUtils.sizeOfUnsignedVarint(_field.size());
                _size += _field.size();
            }
        }
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
        return _size;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof BufferRequestData)) return false;
        BufferRequestData other = (BufferRequestData) obj;
        if (!Arrays.equals(this.buffer, other.buffer)) return false;
        return true;
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + Arrays.hashCode(buffer);
        return hashCode;
    }

    @Override
    public String toString() {
        return "BufferRequestData("
            + "buffer=" + Arrays.toString(buffer)
            + ")";
    }

    public byte[] buffer() {
        return this.buffer;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public BufferRequestData setBuffer(byte[] v) {
        this.buffer = v;
        return this;
    }
}
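BufferRequestData (the custom api key 1000 message added in this branch, version 0 only) frames its payload with a fixed INT32 length prefix, so the wire size is always 4 + buffer.length. A small sketch with illustrative values:

    BufferRequestData data = new BufferRequestData()
        .setBuffer(new byte[] {0x01, 0x02, 0x03});
    // size() = 4 (INT32 length prefix) + 3 (payload) = 7 bytes at version 0.
    int wireSize = data.size(new ObjectSerializationCache(), (short) 0);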
Some files were not shown because too many files have changed in this diff.