mirror of
https://github.com/didi/KnowStreaming.git
synced 2025-12-24 11:52:08 +08:00
Compare commits
2 Commits
ve_demo_3.
...
ve_kafka_d
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
da978cd25f | ||
|
|
0b8160a714 |
173
.gitignore
vendored
173
.gitignore
vendored
@@ -1,111 +1,62 @@
|
||||
### Intellij ###
|
||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
|
||||
|
||||
*.iml
|
||||
|
||||
## Directory-based project format:
|
||||
.idea/
|
||||
# if you remove the above rule, at least ignore the following:
|
||||
|
||||
# User-specific stuff:
|
||||
# .idea/workspace.xml
|
||||
# .idea/tasks.xml
|
||||
# .idea/dictionaries
|
||||
# .idea/shelf
|
||||
|
||||
# Sensitive or high-churn files:
|
||||
.idea/dataSources.ids
|
||||
.idea/dataSources.xml
|
||||
.idea/sqlDataSources.xml
|
||||
.idea/dynamic.xml
|
||||
.idea/uiDesigner.xml
|
||||
|
||||
|
||||
# Mongo Explorer plugin:
|
||||
.idea/mongoSettings.xml
|
||||
|
||||
## File-based project format:
|
||||
*.ipr
|
||||
*.iws
|
||||
|
||||
## Plugin-specific files:
|
||||
|
||||
# IntelliJ
|
||||
/out/
|
||||
|
||||
# mpeltonen/sbt-idea plugin
|
||||
.idea_modules/
|
||||
|
||||
# JIRA plugin
|
||||
atlassian-ide-plugin.xml
|
||||
|
||||
# Crashlytics plugin (for Android Studio and IntelliJ)
|
||||
com_crashlytics_export_strings.xml
|
||||
crashlytics.properties
|
||||
crashlytics-build.properties
|
||||
fabric.properties
|
||||
|
||||
|
||||
### Java ###
|
||||
*.class
|
||||
|
||||
# Mobile Tools for Java (J2ME)
|
||||
.mtj.tmp/
|
||||
|
||||
# Package Files #
|
||||
*.jar
|
||||
*.war
|
||||
*.ear
|
||||
|
||||
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
|
||||
hs_err_pid*
|
||||
|
||||
|
||||
### OSX ###
|
||||
.DS_Store
|
||||
.AppleDouble
|
||||
.LSOverride
|
||||
|
||||
# Icon must end with two \r
|
||||
Icon
|
||||
|
||||
|
||||
# Thumbnails
|
||||
._*
|
||||
|
||||
# Files that might appear in the root of a volume
|
||||
.DocumentRevisions-V100
|
||||
.fseventsd
|
||||
.Spotlight-V100
|
||||
.TemporaryItems
|
||||
.Trashes
|
||||
.VolumeIcon.icns
|
||||
|
||||
# Directories potentially created on remote AFP share
|
||||
.AppleDB
|
||||
.AppleDesktop
|
||||
Network Trash Folder
|
||||
Temporary Items
|
||||
.apdisk
|
||||
|
||||
/target
|
||||
target/
|
||||
*.log
|
||||
*.log.*
|
||||
*.bak
|
||||
*.vscode
|
||||
*/.vscode/*
|
||||
*/.vscode
|
||||
*/velocity.log*
|
||||
*/*.log
|
||||
*/*.log.*
|
||||
web/node_modules/
|
||||
web/node_modules/*
|
||||
workspace.xml
|
||||
/output/*
|
||||
.gitversion
|
||||
*/node_modules/*
|
||||
*/templates/*
|
||||
*/out/*
|
||||
*/dist/*
|
||||
.DS_Store
|
||||
|
||||
.gradle/
|
||||
dist
|
||||
*classes
|
||||
*.class
|
||||
target/
|
||||
build/
|
||||
build_eclipse/
|
||||
out/
|
||||
.gradle/
|
||||
lib_managed/
|
||||
src_managed/
|
||||
project/boot/
|
||||
project/plugins/project/
|
||||
patch-process/*
|
||||
.idea
|
||||
.svn
|
||||
.classpath
|
||||
/.metadata
|
||||
/.recommenders
|
||||
*~
|
||||
*#
|
||||
.#*
|
||||
rat.out
|
||||
TAGS
|
||||
*.iml
|
||||
.project
|
||||
.settings
|
||||
*.ipr
|
||||
*.iws
|
||||
.vagrant
|
||||
Vagrantfile.local
|
||||
/logs
|
||||
.DS_Store
|
||||
|
||||
config/server-*
|
||||
config/zookeeper-*
|
||||
core/data/*
|
||||
#gradle/wrapper/*.jar
|
||||
gradlew.bat
|
||||
|
||||
results
|
||||
tests/results
|
||||
.ducktape
|
||||
tests/.ducktape
|
||||
tests/venv
|
||||
.cache
|
||||
|
||||
docs/generated/
|
||||
|
||||
.release-settings.json
|
||||
|
||||
kafkatest.egg-info/
|
||||
systest/
|
||||
*.swp
|
||||
clients/src/generated
|
||||
clients/src/generated-test
|
||||
jmh-benchmarks/generated
|
||||
streams/src/generated
|
||||
kafka-logs
|
||||
clients/src/generated-test/
|
||||
clients/src/generated/
|
||||
|
||||
840
CONTRIBUTING.md
840
CONTRIBUTING.md
@@ -1,835 +1,11 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<link rel="dns-prefetch" href="https://assets-cdn.github.com">
|
||||
<link rel="dns-prefetch" href="https://avatars0.githubusercontent.com">
|
||||
<link rel="dns-prefetch" href="https://avatars1.githubusercontent.com">
|
||||
<link rel="dns-prefetch" href="https://avatars2.githubusercontent.com">
|
||||
<link rel="dns-prefetch" href="https://avatars3.githubusercontent.com">
|
||||
<link rel="dns-prefetch" href="https://github-cloud.s3.amazonaws.com">
|
||||
<link rel="dns-prefetch" href="https://user-images.githubusercontent.com/">
|
||||
## Contributing to Kafka
|
||||
|
||||
*Before opening a pull request*, review the [Contributing](https://kafka.apache.org/contributing.html) and [Contributing Code Changes](https://cwiki.apache.org/confluence/display/KAFKA/Contributing+Code+Changes) pages.
|
||||
|
||||
It lists steps that are required before creating a PR.
|
||||
|
||||
<link crossorigin="anonymous" href="https://assets-cdn.github.com/assets/frameworks-77c3b874f32e71b14cded5a120f42f5c7288fa52e0a37f2d5919fbd8bcfca63c.css" integrity="sha256-d8O4dPMucbFM3tWhIPQvXHKI+lLgo38tWRn72Lz8pjw=" media="all" rel="stylesheet" />
|
||||
<link crossorigin="anonymous" href="https://assets-cdn.github.com/assets/github-7e91a10736ac37b832677484a258d697fe931ba402ccccab1f893155686ad976.css" integrity="sha256-fpGhBzasN7gyZ3SEoljWl/6TG6QCzMyrH4kxVWhq2XY=" media="all" rel="stylesheet" />
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<meta name="viewport" content="width=device-width">
|
||||
|
||||
<title>VirtualAPK/CONTRIBUTING.md at master · didi/VirtualAPK</title>
|
||||
<link rel="search" type="application/opensearchdescription+xml" href="/opensearch.xml" title="GitHub">
|
||||
<link rel="fluid-icon" href="https://github.com/fluidicon.png" title="GitHub">
|
||||
<meta property="fb:app_id" content="1401488693436528">
|
||||
|
||||
|
||||
<meta content="https://avatars2.githubusercontent.com/u/27521938?v=4&s=400" property="og:image" /><meta content="GitHub" property="og:site_name" /><meta content="object" property="og:type" /><meta content="didi/VirtualAPK" property="og:title" /><meta content="https://github.com/didi/VirtualAPK" property="og:url" /><meta content="VirtualAPK - A powerful and lightweight plugin framework for Android" property="og:description" />
|
||||
|
||||
<link rel="assets" href="https://assets-cdn.github.com/">
|
||||
<link rel="web-socket" href="wss://live.github.com/_sockets/VjI6MTgwNzY5MzEwOmE5N2I2Yjk2MGRmYmU0ZjZhMTFiOTA5M2Y5MzU5OGEzMmExNjZkYzAzNjYzNTQwMzMzMzQzMGJkMDRmZjNmNmU=--c366cc4384d45c90027d65f977134da25c41a884">
|
||||
<meta name="pjax-timeout" content="1000">
|
||||
<link rel="sudo-modal" href="/sessions/sudo_modal">
|
||||
<meta name="request-id" content="4E29:2AD46:6CCCCFC:BBFD27E:5984674E" data-pjax-transient>
|
||||
|
||||
|
||||
<meta name="selected-link" value="repo_source" data-pjax-transient>
|
||||
|
||||
<meta name="google-site-verification" content="KT5gs8h0wvaagLKAVWq8bbeNwnZZK1r1XQysX3xurLU">
|
||||
<meta name="google-site-verification" content="ZzhVyEFwb7w3e0-uOTltm8Jsck2F5StVihD0exw2fsA">
|
||||
<meta name="google-analytics" content="UA-3769691-2">
|
||||
|
||||
<meta content="collector.githubapp.com" name="octolytics-host" /><meta content="github" name="octolytics-app-id" /><meta content="https://collector.githubapp.com/github-external/browser_event" name="octolytics-event-url" /><meta content="4E29:2AD46:6CCCCFC:BBFD27E:5984674E" name="octolytics-dimension-request_id" /><meta content="iad" name="octolytics-dimension-region_edge" /><meta content="iad" name="octolytics-dimension-region_render" /><meta content="4598761" name="octolytics-actor-id" /><meta content="wbtiger" name="octolytics-actor-login" /><meta content="275cc19fd75d0968078b4b33bd2ac5fac4ab12873218083aeaa8dbc2df60c39f" name="octolytics-actor-hash" />
|
||||
<meta content="/<user-name>/<repo-name>/blob/show" data-pjax-transient="true" name="analytics-location" />
|
||||
|
||||
|
||||
|
||||
|
||||
<meta class="js-ga-set" name="dimension1" content="Logged In">
|
||||
|
||||
|
||||
|
||||
|
||||
<meta name="hostname" content="github.com">
|
||||
<meta name="user-login" content="wbtiger">
|
||||
|
||||
<meta name="expected-hostname" content="github.com">
|
||||
<meta name="js-proxy-site-detection-payload" content="YTJlMTZlMWNkMzY4M2E1ZTVhMWNhODEyNDZmODkwODM5NzQwYjhhMGY5ODRlM2E1OTQyODA0YmZjNDdjZDQwMHx7InJlbW90ZV9hZGRyZXNzIjoiMjEwLjEzLjI0Mi4xIiwicmVxdWVzdF9pZCI6IjRFMjk6MkFENDY6NkNDQ0NGQzpCQkZEMjdFOjU5ODQ2NzRFIiwidGltZXN0YW1wIjoxNTAxODQ5NDI2LCJob3N0IjoiZ2l0aHViLmNvbSJ9">
|
||||
|
||||
<meta name="enabled-features" content="UNIVERSE_BANNER">
|
||||
|
||||
<meta name="html-safe-nonce" content="b2f561be57f18272195251bb7029b59e07e187fe">
|
||||
|
||||
<meta http-equiv="x-pjax-version" content="2a581bbaf84cb76d91db41147f2e3400">
|
||||
|
||||
|
||||
<link href="https://github.com/didi/VirtualAPK/commits/master.atom" rel="alternate" title="Recent Commits to VirtualAPK:master" type="application/atom+xml">
|
||||
|
||||
<meta name="description" content="VirtualAPK - A powerful and lightweight plugin framework for Android">
|
||||
<meta name="go-import" content="github.com/didi/VirtualAPK git https://github.com/didi/VirtualAPK.git">
|
||||
|
||||
<meta content="27521938" name="octolytics-dimension-user_id" /><meta content="didi" name="octolytics-dimension-user_login" /><meta content="95750391" name="octolytics-dimension-repository_id" /><meta content="didi/VirtualAPK" name="octolytics-dimension-repository_nwo" /><meta content="true" name="octolytics-dimension-repository_public" /><meta content="false" name="octolytics-dimension-repository_is_fork" /><meta content="95750391" name="octolytics-dimension-repository_network_root_id" /><meta content="didi/VirtualAPK" name="octolytics-dimension-repository_network_root_nwo" /><meta content="true" name="octolytics-dimension-repository_explore_github_marketplace_ci_cta_shown" />
|
||||
|
||||
|
||||
<link rel="canonical" href="https://github.com/didi/VirtualAPK/blob/master/CONTRIBUTING.md" data-pjax-transient>
|
||||
|
||||
|
||||
<meta name="browser-stats-url" content="https://api.github.com/_private/browser/stats">
|
||||
|
||||
<meta name="browser-errors-url" content="https://api.github.com/_private/browser/errors">
|
||||
|
||||
<link rel="mask-icon" href="https://assets-cdn.github.com/pinned-octocat.svg" color="#000000">
|
||||
<link rel="icon" type="image/x-icon" href="https://assets-cdn.github.com/favicon.ico">
|
||||
|
||||
<meta name="theme-color" content="#1e2327">
|
||||
|
||||
|
||||
<meta name="u2f-support" content="true">
|
||||
|
||||
</head>
|
||||
|
||||
<body class="logged-in env-production page-blob">
|
||||
|
||||
|
||||
<div class="position-relative js-header-wrapper ">
|
||||
<a href="#start-of-content" tabindex="1" class="bg-black text-white p-3 show-on-focus js-skip-to-content">Skip to content</a>
|
||||
<div id="js-pjax-loader-bar" class="pjax-loader-bar"><div class="progress"></div></div>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<div class="header" role="banner">
|
||||
<div class="container-lg px-3 clearfix">
|
||||
<div class="d-flex flex-justify-between">
|
||||
<div class="d-flex">
|
||||
<a class="header-logo-invertocat" href="https://github.com/" data-hotkey="g d" aria-label="Homepage" data-ga-click="Header, go to dashboard, icon:logo">
|
||||
<svg aria-hidden="true" class="octicon octicon-mark-github" height="32" version="1.1" viewBox="0 0 16 16" width="32"><path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"/></svg>
|
||||
</a>
|
||||
|
||||
|
||||
<div class="mr-2">
|
||||
<div class="header-search scoped-search site-scoped-search js-site-search" role="search">
|
||||
<!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="/didi/VirtualAPK/search" class="js-site-search-form" data-scoped-search-url="/didi/VirtualAPK/search" data-unscoped-search-url="/search" method="get"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /></div>
|
||||
<label class="form-control header-search-wrapper js-chromeless-input-container">
|
||||
<a href="/didi/VirtualAPK/blob/master/CONTRIBUTING.md" class="header-search-scope no-underline">This repository</a>
|
||||
<input type="text"
|
||||
class="form-control header-search-input js-site-search-focus js-site-search-field is-clearable"
|
||||
data-hotkey="s"
|
||||
name="q"
|
||||
value=""
|
||||
placeholder="Search"
|
||||
aria-label="Search this repository"
|
||||
data-unscoped-placeholder="Search GitHub"
|
||||
data-scoped-placeholder="Search"
|
||||
autocapitalize="off">
|
||||
<input type="hidden" class="js-site-search-type-field" name="type" >
|
||||
</label>
|
||||
</form></div>
|
||||
|
||||
</div>
|
||||
|
||||
<ul class="d-flex list-style-none" role="navigation">
|
||||
<li>
|
||||
<a href="/pulls" aria-label="Pull requests you created" class="js-selected-navigation-item header-navlink" data-ga-click="Header, click, Nav menu - item:pulls context:user" data-hotkey="g p" data-selected-links="/pulls /pulls/assigned /pulls/mentioned /pulls">
|
||||
Pull requests
|
||||
</a> </li>
|
||||
<li>
|
||||
<a href="/issues" aria-label="Issues you created" class="js-selected-navigation-item header-navlink" data-ga-click="Header, click, Nav menu - item:issues context:user" data-hotkey="g i" data-selected-links="/issues /issues/assigned /issues/mentioned /issues">
|
||||
Issues
|
||||
</a> </li>
|
||||
<li>
|
||||
<a href="/marketplace" class="js-selected-navigation-item header-navlink" data-ga-click="Header, click, Nav menu - item:marketplace context:user" data-selected-links=" /marketplace">
|
||||
Marketplace
|
||||
</a> </li>
|
||||
<li>
|
||||
<a class="header-navlink" href="https://gist.github.com/" data-ga-click="Header, go to gist, text:gist">Gist</a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
<div class="d-flex">
|
||||
|
||||
<ul class="d-flex user-nav list-style-none" id="user-links">
|
||||
<li class="dropdown js-menu-container">
|
||||
|
||||
<a href="/notifications" aria-label="You have unread notifications" class="header-navlink notification-indicator tooltipped tooltipped-s js-socket-channel js-notification-indicator" data-channel="notification-changed:4598761" data-ga-click="Header, go to notifications, icon:unread" data-hotkey="g n">
|
||||
<span class="mail-status unread"></span>
|
||||
<svg aria-hidden="true" class="octicon octicon-bell" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M14 12v1H0v-1l.73-.58c.77-.77.81-2.55 1.19-4.42C2.69 3.23 6 2 6 2c0-.55.45-1 1-1s1 .45 1 1c0 0 3.39 1.23 4.16 5 .38 1.88.42 3.66 1.19 4.42l.66.58H14zm-7 4c1.11 0 2-.89 2-2H5c0 1.11.89 2 2 2z"/></svg>
|
||||
</a>
|
||||
</li>
|
||||
|
||||
<li class="dropdown js-menu-container">
|
||||
<a class="header-navlink tooltipped tooltipped-s js-menu-target" href="/new"
|
||||
aria-label="Create new…"
|
||||
aria-expanded="false"
|
||||
aria-haspopup="true"
|
||||
data-ga-click="Header, create new, icon:add">
|
||||
<svg aria-hidden="true" class="octicon octicon-plus float-left" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M12 9H7v5H5V9H0V7h5V2h2v5h5z"/></svg>
|
||||
<span class="dropdown-caret"></span>
|
||||
</a>
|
||||
|
||||
<div class="dropdown-menu-content js-menu-content">
|
||||
<ul class="dropdown-menu dropdown-menu-sw">
|
||||
|
||||
<a class="dropdown-item" href="/new" data-ga-click="Header, create new repository">
|
||||
New repository
|
||||
</a>
|
||||
|
||||
<a class="dropdown-item" href="/new/import" data-ga-click="Header, import a repository">
|
||||
Import repository
|
||||
</a>
|
||||
|
||||
<a class="dropdown-item" href="https://gist.github.com/" data-ga-click="Header, create new gist">
|
||||
New gist
|
||||
</a>
|
||||
|
||||
<a class="dropdown-item" href="/organizations/new" data-ga-click="Header, create new organization">
|
||||
New organization
|
||||
</a>
|
||||
|
||||
|
||||
|
||||
<div class="dropdown-divider"></div>
|
||||
<div class="dropdown-header">
|
||||
<span title="didi/VirtualAPK">This repository</span>
|
||||
</div>
|
||||
<a class="dropdown-item" href="/didi/VirtualAPK/issues/new" data-ga-click="Header, create new issue">
|
||||
New issue
|
||||
</a>
|
||||
|
||||
</ul>
|
||||
</div>
|
||||
</li>
|
||||
|
||||
<li class="dropdown js-menu-container">
|
||||
<a class="header-navlink name tooltipped tooltipped-sw js-menu-target" href="/wbtiger"
|
||||
aria-label="View profile and more"
|
||||
aria-expanded="false"
|
||||
aria-haspopup="true"
|
||||
data-ga-click="Header, show menu, icon:avatar">
|
||||
<img alt="@wbtiger" class="avatar" src="https://avatars0.githubusercontent.com/u/4598761?v=4&s=40" height="20" width="20">
|
||||
<span class="dropdown-caret"></span>
|
||||
</a>
|
||||
|
||||
<div class="dropdown-menu-content js-menu-content">
|
||||
<div class="dropdown-menu dropdown-menu-sw">
|
||||
<div class="dropdown-header header-nav-current-user css-truncate">
|
||||
Signed in as <strong class="css-truncate-target">wbtiger</strong>
|
||||
</div>
|
||||
|
||||
<div class="dropdown-divider"></div>
|
||||
|
||||
<a class="dropdown-item" href="/wbtiger" data-ga-click="Header, go to profile, text:your profile">
|
||||
Your profile
|
||||
</a>
|
||||
<a class="dropdown-item" href="/wbtiger?tab=stars" data-ga-click="Header, go to starred repos, text:your stars">
|
||||
Your stars
|
||||
</a>
|
||||
<a class="dropdown-item" href="/explore" data-ga-click="Header, go to explore, text:explore">
|
||||
Explore
|
||||
</a>
|
||||
<a class="dropdown-item" href="https://help.github.com" data-ga-click="Header, go to help, text:help">
|
||||
Help
|
||||
</a>
|
||||
|
||||
<div class="dropdown-divider"></div>
|
||||
|
||||
<a class="dropdown-item" href="/settings/profile" data-ga-click="Header, go to settings, icon:settings">
|
||||
Settings
|
||||
</a>
|
||||
|
||||
<!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="/logout" class="logout-form" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /><input name="authenticity_token" type="hidden" value="iCUyFq9DsN8+PSAXXSFLLHUCaZibvNeiW95cHdbtTULm4wBjigzSEJhDeC8sdAk7tBKHgYY+dXaFjQr1jYhQqg==" /></div>
|
||||
<button type="submit" class="dropdown-item dropdown-signout" data-ga-click="Header, sign out, icon:logout">
|
||||
Sign out
|
||||
</button>
|
||||
</form> </div>
|
||||
</div>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
|
||||
<!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="/logout" class="sr-only right-0" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /><input name="authenticity_token" type="hidden" value="Flzhb/7RV6uQ+AzooTOZbPkQfbFDgFjQu64VxBLZMuh4mtMa2541ZDaGVNDQZtt7OACTqF4C+gRl/UMsSbwvAA==" /></div>
|
||||
<button type="submit" class="dropdown-item dropdown-signout" data-ga-click="Header, sign out, icon:logout">
|
||||
Sign out
|
||||
</button>
|
||||
</form> </div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
<div class="flash flash-full js-notice flash-warn">
|
||||
<div class="container">
|
||||
|
||||
|
||||
|
||||
<h4>
|
||||
We are having a problem billing the didichuxing organization.
|
||||
Please <a href="https://github.com/organizations/didichuxing/settings/billing/payment">update your payment method</a>
|
||||
or call your payment provider for details on why the transaction failed.
|
||||
If you don’t need access to your private repositories, you can <a href="http://github.com/organizations/didichuxing/settings/billing">downgrade to the Free plan</a>.
|
||||
</h4>
|
||||
You can always <a href="/contact">contact support</a> with any questions.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
<div id="start-of-content" class="show-on-focus"></div>
|
||||
|
||||
<div id="js-flash-container">
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
<div role="main">
|
||||
<div itemscope itemtype="http://schema.org/SoftwareSourceCode">
|
||||
<div id="js-repo-pjax-container" data-pjax-container>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
<div class="pagehead repohead instapaper_ignore readability-menu experiment-repo-nav">
|
||||
<div class="container repohead-details-container">
|
||||
|
||||
<ul class="pagehead-actions">
|
||||
<li>
|
||||
<!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="/notifications/subscribe" class="js-social-container" data-autosubmit="true" data-remote="true" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /><input name="authenticity_token" type="hidden" value="ilxSXS07ThXVEaOic735Ui/Z1PnKcDW+esvwfuYtshLhLnKxtLDr9v8bIilW+EP5XZQWYBwRfNG6uSd62UFOJQ==" /></div> <input class="form-control" id="repository_id" name="repository_id" type="hidden" value="95750391" />
|
||||
|
||||
<div class="select-menu js-menu-container js-select-menu">
|
||||
<a href="/didi/VirtualAPK/subscription"
|
||||
class="btn btn-sm btn-with-count select-menu-button js-menu-target"
|
||||
role="button"
|
||||
aria-haspopup="true"
|
||||
aria-expanded="false"
|
||||
aria-label="Toggle repository notifications menu"
|
||||
data-ga-click="Repository, click Watch settings, action:blob#show">
|
||||
<span class="js-select-button">
|
||||
<svg aria-hidden="true" class="octicon octicon-eye" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M8.06 2C3 2 0 8 0 8s3 6 8.06 6C13 14 16 8 16 8s-3-6-7.94-6zM8 12c-2.2 0-4-1.78-4-4 0-2.2 1.8-4 4-4 2.22 0 4 1.8 4 4 0 2.22-1.78 4-4 4zm2-4c0 1.11-.89 2-2 2-1.11 0-2-.89-2-2 0-1.11.89-2 2-2 1.11 0 2 .89 2 2z"/></svg>
|
||||
Unwatch
|
||||
</span>
|
||||
</a>
|
||||
<a class="social-count js-social-count"
|
||||
href="/didi/VirtualAPK/watchers"
|
||||
aria-label="201 users are watching this repository">
|
||||
201
|
||||
</a>
|
||||
|
||||
<div class="select-menu-modal-holder">
|
||||
<div class="select-menu-modal subscription-menu-modal js-menu-content">
|
||||
<div class="select-menu-header js-navigation-enable" tabindex="-1">
|
||||
<svg aria-label="Close" class="octicon octicon-x js-menu-close" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M7.48 8l3.75 3.75-1.48 1.48L6 9.48l-3.75 3.75-1.48-1.48L4.52 8 .77 4.25l1.48-1.48L6 6.52l3.75-3.75 1.48 1.48z"/></svg>
|
||||
<span class="select-menu-title">Notifications</span>
|
||||
</div>
|
||||
|
||||
<div class="select-menu-list js-navigation-container" role="menu">
|
||||
|
||||
<div class="select-menu-item js-navigation-item " role="menuitem" tabindex="0">
|
||||
<svg aria-hidden="true" class="octicon octicon-check select-menu-item-icon" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M12 5l-8 8-4-4 1.5-1.5L4 10l6.5-6.5z"/></svg>
|
||||
<div class="select-menu-item-text">
|
||||
<input id="do_included" name="do" type="radio" value="included" />
|
||||
<span class="select-menu-item-heading">Not watching</span>
|
||||
<span class="description">Be notified when participating or @mentioned.</span>
|
||||
<span class="js-select-button-text hidden-select-button-text">
|
||||
<svg aria-hidden="true" class="octicon octicon-eye" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M8.06 2C3 2 0 8 0 8s3 6 8.06 6C13 14 16 8 16 8s-3-6-7.94-6zM8 12c-2.2 0-4-1.78-4-4 0-2.2 1.8-4 4-4 2.22 0 4 1.8 4 4 0 2.22-1.78 4-4 4zm2-4c0 1.11-.89 2-2 2-1.11 0-2-.89-2-2 0-1.11.89-2 2-2 1.11 0 2 .89 2 2z"/></svg>
|
||||
Watch
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="select-menu-item js-navigation-item selected" role="menuitem" tabindex="0">
|
||||
<svg aria-hidden="true" class="octicon octicon-check select-menu-item-icon" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M12 5l-8 8-4-4 1.5-1.5L4 10l6.5-6.5z"/></svg>
|
||||
<div class="select-menu-item-text">
|
||||
<input checked="checked" id="do_subscribed" name="do" type="radio" value="subscribed" />
|
||||
<span class="select-menu-item-heading">Watching</span>
|
||||
<span class="description">Be notified of all conversations.</span>
|
||||
<span class="js-select-button-text hidden-select-button-text">
|
||||
<svg aria-hidden="true" class="octicon octicon-eye" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M8.06 2C3 2 0 8 0 8s3 6 8.06 6C13 14 16 8 16 8s-3-6-7.94-6zM8 12c-2.2 0-4-1.78-4-4 0-2.2 1.8-4 4-4 2.22 0 4 1.8 4 4 0 2.22-1.78 4-4 4zm2-4c0 1.11-.89 2-2 2-1.11 0-2-.89-2-2 0-1.11.89-2 2-2 1.11 0 2 .89 2 2z"/></svg>
|
||||
Unwatch
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="select-menu-item js-navigation-item " role="menuitem" tabindex="0">
|
||||
<svg aria-hidden="true" class="octicon octicon-check select-menu-item-icon" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M12 5l-8 8-4-4 1.5-1.5L4 10l6.5-6.5z"/></svg>
|
||||
<div class="select-menu-item-text">
|
||||
<input id="do_ignore" name="do" type="radio" value="ignore" />
|
||||
<span class="select-menu-item-heading">Ignoring</span>
|
||||
<span class="description">Never be notified.</span>
|
||||
<span class="js-select-button-text hidden-select-button-text">
|
||||
<svg aria-hidden="true" class="octicon octicon-mute" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M8 2.81v10.38c0 .67-.81 1-1.28.53L3 10H1c-.55 0-1-.45-1-1V7c0-.55.45-1 1-1h2l3.72-3.72C7.19 1.81 8 2.14 8 2.81zm7.53 3.22l-1.06-1.06-1.97 1.97-1.97-1.97-1.06 1.06L11.44 8 9.47 9.97l1.06 1.06 1.97-1.97 1.97 1.97 1.06-1.06L13.56 8l1.97-1.97z"/></svg>
|
||||
Stop ignoring
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</form>
|
||||
</li>
|
||||
|
||||
<li>
|
||||
|
||||
<div class="js-toggler-container js-social-container starring-container ">
|
||||
<!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="/didi/VirtualAPK/unstar" class="starred" data-remote="true" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /><input name="authenticity_token" type="hidden" value="Q+D8KWgxgtZRRzjMMMc9XHi7VS6G1YcZ06JQ7RcxZO8LHmloFHnOw2MryKAWIM4nirolh2QGKRJ1w9d2OhazBg==" /></div>
|
||||
<button
|
||||
type="submit"
|
||||
class="btn btn-sm btn-with-count js-toggler-target"
|
||||
aria-label="Unstar this repository" title="Unstar didi/VirtualAPK"
|
||||
data-ga-click="Repository, click unstar button, action:blob#show; text:Unstar">
|
||||
<svg aria-hidden="true" class="octicon octicon-star" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M14 6l-4.9-.64L7 1 4.9 5.36 0 6l3.6 3.26L2.67 14 7 11.67 11.33 14l-.93-4.74z"/></svg>
|
||||
Unstar
|
||||
</button>
|
||||
<a class="social-count js-social-count" href="/didi/VirtualAPK/stargazers"
|
||||
aria-label="4154 users starred this repository">
|
||||
4,154
|
||||
</a>
|
||||
</form>
|
||||
<!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="/didi/VirtualAPK/star" class="unstarred" data-remote="true" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /><input name="authenticity_token" type="hidden" value="5rEqD8GHiUII3nNr3fl0ZQis36UU6E62k7uQKTq8bRZr9grFDV61v7EICrRDkxCFz9ECivUVZfvPndKNUpOehA==" /></div>
|
||||
<button
|
||||
type="submit"
|
||||
class="btn btn-sm btn-with-count js-toggler-target"
|
||||
aria-label="Star this repository" title="Star didi/VirtualAPK"
|
||||
data-ga-click="Repository, click star button, action:blob#show; text:Star">
|
||||
<svg aria-hidden="true" class="octicon octicon-star" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M14 6l-4.9-.64L7 1 4.9 5.36 0 6l3.6 3.26L2.67 14 7 11.67 11.33 14l-.93-4.74z"/></svg>
|
||||
Star
|
||||
</button>
|
||||
<a class="social-count js-social-count" href="/didi/VirtualAPK/stargazers"
|
||||
aria-label="4154 users starred this repository">
|
||||
4,154
|
||||
</a>
|
||||
</form> </div>
|
||||
|
||||
</li>
|
||||
|
||||
<li>
|
||||
<a href="#fork-destination-box" class="btn btn-sm btn-with-count"
|
||||
title="Fork your own copy of didi/VirtualAPK to your account"
|
||||
aria-label="Fork your own copy of didi/VirtualAPK to your account"
|
||||
rel="facebox"
|
||||
data-ga-click="Repository, show fork modal, action:blob#show; text:Fork">
|
||||
<svg aria-hidden="true" class="octicon octicon-repo-forked" height="16" version="1.1" viewBox="0 0 10 16" width="10"><path fill-rule="evenodd" d="M8 1a1.993 1.993 0 0 0-1 3.72V6L5 8 3 6V4.72A1.993 1.993 0 0 0 2 1a1.993 1.993 0 0 0-1 3.72V6.5l3 3v1.78A1.993 1.993 0 0 0 5 15a1.993 1.993 0 0 0 1-3.72V9.5l3-3V4.72A1.993 1.993 0 0 0 8 1zM2 4.2C1.34 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3 10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3-10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"/></svg>
|
||||
Fork
|
||||
</a>
|
||||
|
||||
<div id="fork-destination-box" style="display: none;">
|
||||
<h2 class="facebox-header" data-facebox-id="facebox-header">Where should we fork this repository?</h2>
|
||||
<include-fragment src=""
|
||||
class="js-fork-select-fragment fork-select-fragment"
|
||||
data-url="/didi/VirtualAPK/fork?fragment=1">
|
||||
<img alt="Loading" height="64" src="https://assets-cdn.github.com/images/spinners/octocat-spinner-128.gif" width="64" />
|
||||
</include-fragment>
|
||||
</div>
|
||||
|
||||
<a href="/didi/VirtualAPK/network" class="social-count"
|
||||
aria-label="562 users forked this repository">
|
||||
562
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
<h1 class="public ">
|
||||
<svg aria-hidden="true" class="octicon octicon-repo" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M4 9H3V8h1v1zm0-3H3v1h1V6zm0-2H3v1h1V4zm0-2H3v1h1V2zm8-1v12c0 .55-.45 1-1 1H6v2l-1.5-1.5L3 16v-2H1c-.55 0-1-.45-1-1V1c0-.55.45-1 1-1h10c.55 0 1 .45 1 1zm-1 10H1v2h2v-1h3v1h5v-2zm0-10H2v9h9V1z"/></svg>
|
||||
<span class="author" itemprop="author"><a href="/didi" class="url fn" rel="author">didi</a></span><!--
|
||||
--><span class="path-divider">/</span><!--
|
||||
--><strong itemprop="name"><a href="/didi/VirtualAPK" data-pjax="#js-repo-pjax-container">VirtualAPK</a></strong>
|
||||
|
||||
</h1>
|
||||
|
||||
</div>
|
||||
<div class="container">
|
||||
|
||||
<nav class="reponav js-repo-nav js-sidenav-container-pjax"
|
||||
itemscope
|
||||
itemtype="http://schema.org/BreadcrumbList"
|
||||
role="navigation"
|
||||
data-pjax="#js-repo-pjax-container">
|
||||
|
||||
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
|
||||
<a href="/didi/VirtualAPK" class="js-selected-navigation-item selected reponav-item" data-hotkey="g c" data-selected-links="repo_source repo_downloads repo_commits repo_releases repo_tags repo_branches /didi/VirtualAPK" itemprop="url">
|
||||
<svg aria-hidden="true" class="octicon octicon-code" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M9.5 3L8 4.5 11.5 8 8 11.5 9.5 13 14 8 9.5 3zm-5 0L0 8l4.5 5L6 11.5 2.5 8 6 4.5 4.5 3z"/></svg>
|
||||
<span itemprop="name">Code</span>
|
||||
<meta itemprop="position" content="1">
|
||||
</a> </span>
|
||||
|
||||
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
|
||||
<a href="/didi/VirtualAPK/issues" class="js-selected-navigation-item reponav-item" data-hotkey="g i" data-selected-links="repo_issues repo_labels repo_milestones /didi/VirtualAPK/issues" itemprop="url">
|
||||
<svg aria-hidden="true" class="octicon octicon-issue-opened" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M7 2.3c3.14 0 5.7 2.56 5.7 5.7s-2.56 5.7-5.7 5.7A5.71 5.71 0 0 1 1.3 8c0-3.14 2.56-5.7 5.7-5.7zM7 1C3.14 1 0 4.14 0 8s3.14 7 7 7 7-3.14 7-7-3.14-7-7-7zm1 3H6v5h2V4zm0 6H6v2h2v-2z"/></svg>
|
||||
<span itemprop="name">Issues</span>
|
||||
<span class="Counter">44</span>
|
||||
<meta itemprop="position" content="2">
|
||||
</a> </span>
|
||||
|
||||
<span itemscope itemtype="http://schema.org/ListItem" itemprop="itemListElement">
|
||||
<a href="/didi/VirtualAPK/pulls" class="js-selected-navigation-item reponav-item" data-hotkey="g p" data-selected-links="repo_pulls /didi/VirtualAPK/pulls" itemprop="url">
|
||||
<svg aria-hidden="true" class="octicon octicon-git-pull-request" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M11 11.28V5c-.03-.78-.34-1.47-.94-2.06C9.46 2.35 8.78 2.03 8 2H7V0L4 3l3 3V4h1c.27.02.48.11.69.31.21.2.3.42.31.69v6.28A1.993 1.993 0 0 0 10 15a1.993 1.993 0 0 0 1-3.72zm-1 2.92c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zM4 3c0-1.11-.89-2-2-2a1.993 1.993 0 0 0-1 3.72v6.56A1.993 1.993 0 0 0 2 15a1.993 1.993 0 0 0 1-3.72V4.72c.59-.34 1-.98 1-1.72zm-.8 10c0 .66-.55 1.2-1.2 1.2-.65 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2zM2 4.2C1.34 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"/></svg>
|
||||
<span itemprop="name">Pull requests</span>
|
||||
<span class="Counter">3</span>
|
||||
<meta itemprop="position" content="3">
|
||||
</a> </span>
|
||||
|
||||
<a href="/didi/VirtualAPK/projects" class="js-selected-navigation-item reponav-item" data-selected-links="repo_projects new_repo_project repo_project /didi/VirtualAPK/projects">
|
||||
<svg aria-hidden="true" class="octicon octicon-project" height="16" version="1.1" viewBox="0 0 15 16" width="15"><path fill-rule="evenodd" d="M10 12h3V2h-3v10zm-4-2h3V2H6v8zm-4 4h3V2H2v12zm-1 1h13V1H1v14zM14 0H1a1 1 0 0 0-1 1v14a1 1 0 0 0 1 1h13a1 1 0 0 0 1-1V1a1 1 0 0 0-1-1z"/></svg>
|
||||
Projects
|
||||
<span class="Counter" >0</span>
|
||||
</a>
|
||||
<a href="/didi/VirtualAPK/wiki" class="js-selected-navigation-item reponav-item" data-hotkey="g w" data-selected-links="repo_wiki /didi/VirtualAPK/wiki">
|
||||
<svg aria-hidden="true" class="octicon octicon-book" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M3 5h4v1H3V5zm0 3h4V7H3v1zm0 2h4V9H3v1zm11-5h-4v1h4V5zm0 2h-4v1h4V7zm0 2h-4v1h4V9zm2-6v9c0 .55-.45 1-1 1H9.5l-1 1-1-1H2c-.55 0-1-.45-1-1V3c0-.55.45-1 1-1h5.5l1 1 1-1H15c.55 0 1 .45 1 1zm-8 .5L7.5 3H2v9h6V3.5zm7-.5H9.5l-.5.5V12h6V3z"/></svg>
|
||||
Wiki
|
||||
</a>
|
||||
<a href="/didi/VirtualAPK/settings" class="js-selected-navigation-item reponav-item" data-selected-links="repo_settings repo_branch_settings hooks integration_installations /didi/VirtualAPK/settings">
|
||||
<svg aria-hidden="true" class="octicon octicon-gear" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M14 8.77v-1.6l-1.94-.64-.45-1.09.88-1.84-1.13-1.13-1.81.91-1.09-.45-.69-1.92h-1.6l-.63 1.94-1.11.45-1.84-.88-1.13 1.13.91 1.81-.45 1.09L0 7.23v1.59l1.94.64.45 1.09-.88 1.84 1.13 1.13 1.81-.91 1.09.45.69 1.92h1.59l.63-1.94 1.11-.45 1.84.88 1.13-1.13-.92-1.81.47-1.09L14 8.75v.02zM7 11c-1.66 0-3-1.34-3-3s1.34-3 3-3 3 1.34 3 3-1.34 3-3 3z"/></svg>
|
||||
Settings
|
||||
</a>
|
||||
<div class="reponav-dropdown js-menu-container">
|
||||
<button type="button" class="btn-link reponav-item reponav-dropdown js-menu-target " data-no-toggle aria-expanded="false" aria-haspopup="true">
|
||||
Insights
|
||||
<svg aria-hidden="true" class="octicon octicon-triangle-down v-align-middle text-gray" height="11" version="1.1" viewBox="0 0 12 16" width="8"><path fill-rule="evenodd" d="M0 5l6 6 6-6z"/></svg>
|
||||
</button>
|
||||
<div class="dropdown-menu-content js-menu-content">
|
||||
<div class="dropdown-menu dropdown-menu-sw">
|
||||
<a class="dropdown-item" href="/didi/VirtualAPK/community" data-skip-pjax>
|
||||
<svg aria-hidden="true" class="octicon octicon-heart" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M11.2 3c-.52-.63-1.25-.95-2.2-1-.97 0-1.69.42-2.2 1-.51.58-.78.92-.8 1-.02-.08-.28-.42-.8-1-.52-.58-1.17-1-2.2-1-.95.05-1.69.38-2.2 1-.52.61-.78 1.28-.8 2 0 .52.09 1.52.67 2.67C1.25 8.82 3.01 10.61 6 13c2.98-2.39 4.77-4.17 5.34-5.33C11.91 6.51 12 5.5 12 5c-.02-.72-.28-1.39-.8-2.02V3z"/></svg>
|
||||
<span itemprop="name">Community</span>
|
||||
</a>
|
||||
<a class="dropdown-item" href="/didi/VirtualAPK/pulse" data-skip-pjax>
|
||||
<svg aria-hidden="true" class="octicon octicon-pulse" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M11.5 8L8.8 5.4 6.6 8.5 5.5 1.6 2.38 8H0v2h3.6l.9-1.8.9 5.4L9 8.5l1.6 1.5H14V8z"/></svg>
|
||||
Pulse
|
||||
</a>
|
||||
<a class="dropdown-item" href="/didi/VirtualAPK/graphs" data-skip-pjax>
|
||||
<svg aria-hidden="true" class="octicon octicon-graph" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M16 14v1H0V0h1v14h15zM5 13H3V8h2v5zm4 0H7V3h2v10zm4 0h-2V6h2v7z"/></svg>
|
||||
Graphs
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="container new-discussion-timeline experiment-repo-nav">
|
||||
<div class="repository-content">
|
||||
|
||||
|
||||
<a href="/didi/VirtualAPK/blob/0b0745edcd2b4adc411cb77de7c4099b898c23d8/CONTRIBUTING.md" class="d-none js-permalink-shortcut" data-hotkey="y">Permalink</a>
|
||||
|
||||
<!-- blob contrib key: blob_contributors:v21:25812bc73237cfe3552924ee8fa0aacd -->
|
||||
|
||||
<div class="file-navigation js-zeroclipboard-container">
|
||||
|
||||
<div class="select-menu branch-select-menu js-menu-container js-select-menu float-left">
|
||||
<button class=" btn btn-sm select-menu-button js-menu-target css-truncate" data-hotkey="w"
|
||||
|
||||
type="button" aria-label="Switch branches or tags" aria-expanded="false" aria-haspopup="true">
|
||||
<i>Branch:</i>
|
||||
<span class="js-select-button css-truncate-target">master</span>
|
||||
</button>
|
||||
|
||||
<div class="select-menu-modal-holder js-menu-content js-navigation-container" data-pjax>
|
||||
|
||||
<div class="select-menu-modal">
|
||||
<div class="select-menu-header">
|
||||
<svg aria-label="Close" class="octicon octicon-x js-menu-close" height="16" role="img" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M7.48 8l3.75 3.75-1.48 1.48L6 9.48l-3.75 3.75-1.48-1.48L4.52 8 .77 4.25l1.48-1.48L6 6.52l3.75-3.75 1.48 1.48z"/></svg>
|
||||
<span class="select-menu-title">Switch branches/tags</span>
|
||||
</div>
|
||||
|
||||
<div class="select-menu-filters">
|
||||
<div class="select-menu-text-filter">
|
||||
<input type="text" aria-label="Find or create a branch…" id="context-commitish-filter-field" class="form-control js-filterable-field js-navigation-enable" placeholder="Find or create a branch…">
|
||||
</div>
|
||||
<div class="select-menu-tabs">
|
||||
<ul>
|
||||
<li class="select-menu-tab">
|
||||
<a href="#" data-tab-filter="branches" data-filter-placeholder="Find or create a branch…" class="js-select-menu-tab" role="tab">Branches</a>
|
||||
</li>
|
||||
<li class="select-menu-tab">
|
||||
<a href="#" data-tab-filter="tags" data-filter-placeholder="Find a tag…" class="js-select-menu-tab" role="tab">Tags</a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="select-menu-list select-menu-tab-bucket js-select-menu-tab-bucket" data-tab-filter="branches" role="menu">
|
||||
|
||||
<div data-filterable-for="context-commitish-filter-field" data-filterable-type="substring">
|
||||
|
||||
|
||||
<a class="select-menu-item js-navigation-item js-navigation-open "
|
||||
href="/didi/VirtualAPK/blob/dev/CONTRIBUTING.md"
|
||||
data-name="dev"
|
||||
data-skip-pjax="true"
|
||||
rel="nofollow">
|
||||
<svg aria-hidden="true" class="octicon octicon-check select-menu-item-icon" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M12 5l-8 8-4-4 1.5-1.5L4 10l6.5-6.5z"/></svg>
|
||||
<span class="select-menu-item-text css-truncate-target js-select-menu-filter-text">
|
||||
dev
|
||||
</span>
|
||||
</a>
|
||||
<a class="select-menu-item js-navigation-item js-navigation-open selected"
|
||||
href="/didi/VirtualAPK/blob/master/CONTRIBUTING.md"
|
||||
data-name="master"
|
||||
data-skip-pjax="true"
|
||||
rel="nofollow">
|
||||
<svg aria-hidden="true" class="octicon octicon-check select-menu-item-icon" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M12 5l-8 8-4-4 1.5-1.5L4 10l6.5-6.5z"/></svg>
|
||||
<span class="select-menu-item-text css-truncate-target js-select-menu-filter-text">
|
||||
master
|
||||
</span>
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="/didi/VirtualAPK/branches" class="js-create-branch select-menu-item select-menu-new-item-form js-navigation-item js-new-item-form" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /><input name="authenticity_token" type="hidden" value="Biopo2g/9XzE6zpe5jezjUisIeUSshY7es+KSqhR3toFPxwGTd8OXBdAvA6uShHQw4OpTPEpg0GDfZY+S0b0Sg==" /></div>
|
||||
<svg aria-hidden="true" class="octicon octicon-git-branch select-menu-item-icon" height="16" version="1.1" viewBox="0 0 10 16" width="10"><path fill-rule="evenodd" d="M10 5c0-1.11-.89-2-2-2a1.993 1.993 0 0 0-1 3.72v.3c-.02.52-.23.98-.63 1.38-.4.4-.86.61-1.38.63-.83.02-1.48.16-2 .45V4.72a1.993 1.993 0 0 0-1-3.72C.88 1 0 1.89 0 3a2 2 0 0 0 1 1.72v6.56c-.59.35-1 .99-1 1.72 0 1.11.89 2 2 2 1.11 0 2-.89 2-2 0-.53-.2-1-.53-1.36.09-.06.48-.41.59-.47.25-.11.56-.17.94-.17 1.05-.05 1.95-.45 2.75-1.25S8.95 7.77 9 6.73h-.02C9.59 6.37 10 5.73 10 5zM2 1.8c.66 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2C1.35 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2zm0 12.41c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm6-8c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"/></svg>
|
||||
<div class="select-menu-item-text">
|
||||
<span class="select-menu-item-heading">Create branch: <span class="js-new-item-name"></span></span>
|
||||
<span class="description">from ‘master’</span>
|
||||
</div>
|
||||
<input type="hidden" name="name" id="name" class="js-new-item-value">
|
||||
<input type="hidden" name="branch" id="branch" value="master">
|
||||
<input type="hidden" name="path" id="path" value="CONTRIBUTING.md">
|
||||
</form>
|
||||
</div>
|
||||
|
||||
<div class="select-menu-list select-menu-tab-bucket js-select-menu-tab-bucket" data-tab-filter="tags">
|
||||
<div data-filterable-for="context-commitish-filter-field" data-filterable-type="substring">
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
<div class="select-menu-no-results">Nothing to show</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="BtnGroup float-right">
|
||||
<a href="/didi/VirtualAPK/find/master"
|
||||
class="js-pjax-capture-input btn btn-sm BtnGroup-item"
|
||||
data-pjax
|
||||
data-hotkey="t">
|
||||
Find file
|
||||
</a>
|
||||
<button aria-label="Copy file path to clipboard" class="js-zeroclipboard btn btn-sm BtnGroup-item tooltipped tooltipped-s" data-copied-hint="Copied!" type="button">Copy path</button>
|
||||
</div>
|
||||
<div class="breadcrumb js-zeroclipboard-target">
|
||||
<span class="repo-root js-repo-root"><span class="js-path-segment"><a href="/didi/VirtualAPK"><span>VirtualAPK</span></a></span></span><span class="separator">/</span><strong class="final-path">CONTRIBUTING.md</strong>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
<div class="commit-tease">
|
||||
<span class="float-right">
|
||||
<a class="commit-tease-sha" href="/didi/VirtualAPK/commit/2740ca13cbba694cf3f435f5a8d6716fa1aa1abc" data-pjax>
|
||||
2740ca1
|
||||
</a>
|
||||
<relative-time datetime="2017-06-29T14:54:01Z">Jun 29, 2017</relative-time>
|
||||
</span>
|
||||
<div>
|
||||
<img alt="@huandu" class="avatar" height="20" src="https://avatars2.githubusercontent.com/u/239739?v=4&s=40" width="20" />
|
||||
<a href="/huandu" class="user-mention" rel="contributor">huandu</a>
|
||||
<a href="/didi/VirtualAPK/commit/2740ca13cbba694cf3f435f5a8d6716fa1aa1abc" class="message" data-pjax="true" title="polish documents">polish documents</a>
|
||||
</div>
|
||||
|
||||
<div class="commit-tease-contributors">
|
||||
<button type="button" class="btn-link muted-link contributors-toggle" data-facebox="#blob_contributors_box">
|
||||
<strong>2</strong>
|
||||
contributors
|
||||
</button>
|
||||
<a class="avatar-link tooltipped tooltipped-s" aria-label="singwhatiwanna" href="/didi/VirtualAPK/commits/master/CONTRIBUTING.md?author=singwhatiwanna"><img alt="@singwhatiwanna" class="avatar" height="20" src="https://avatars3.githubusercontent.com/u/3346272?v=4&s=40" width="20" /> </a>
|
||||
<a class="avatar-link tooltipped tooltipped-s" aria-label="huandu" href="/didi/VirtualAPK/commits/master/CONTRIBUTING.md?author=huandu"><img alt="@huandu" class="avatar" height="20" src="https://avatars2.githubusercontent.com/u/239739?v=4&s=40" width="20" /> </a>
|
||||
|
||||
|
||||
</div>
|
||||
|
||||
<div id="blob_contributors_box" style="display:none">
|
||||
<h2 class="facebox-header" data-facebox-id="facebox-header">Users who have contributed to this file</h2>
|
||||
<ul class="facebox-user-list" data-facebox-id="facebox-description">
|
||||
<li class="facebox-user-list-item">
|
||||
<img alt="@singwhatiwanna" height="24" src="https://avatars1.githubusercontent.com/u/3346272?v=4&s=48" width="24" />
|
||||
<a href="/singwhatiwanna">singwhatiwanna</a>
|
||||
</li>
|
||||
<li class="facebox-user-list-item">
|
||||
<img alt="@huandu" height="24" src="https://avatars0.githubusercontent.com/u/239739?v=4&s=48" width="24" />
|
||||
<a href="/huandu">huandu</a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="file">
|
||||
<div class="file-header">
|
||||
<div class="file-actions">
|
||||
|
||||
<div class="BtnGroup">
|
||||
<a href="/didi/VirtualAPK/raw/master/CONTRIBUTING.md" class="btn btn-sm BtnGroup-item" id="raw-url">Raw</a>
|
||||
<a href="/didi/VirtualAPK/blame/master/CONTRIBUTING.md" class="btn btn-sm js-update-url-with-hash BtnGroup-item" data-hotkey="b">Blame</a>
|
||||
<a href="/didi/VirtualAPK/commits/master/CONTRIBUTING.md" class="btn btn-sm BtnGroup-item" rel="nofollow">History</a>
|
||||
</div>
|
||||
|
||||
<a class="btn-octicon tooltipped tooltipped-nw"
|
||||
href="https://desktop.github.com"
|
||||
aria-label="Open this file in GitHub Desktop"
|
||||
data-ga-click="Repository, open with desktop, type:windows">
|
||||
<svg aria-hidden="true" class="octicon octicon-device-desktop" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M15 2H1c-.55 0-1 .45-1 1v9c0 .55.45 1 1 1h5.34c-.25.61-.86 1.39-2.34 2h8c-1.48-.61-2.09-1.39-2.34-2H15c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1zm0 9H1V3h14v8z"/></svg>
|
||||
</a>
|
||||
|
||||
<!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="/didi/VirtualAPK/edit/master/CONTRIBUTING.md" class="inline-form js-update-url-with-hash" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /><input name="authenticity_token" type="hidden" value="QhTGbaFmDHoBMq25j4xu14cHKLdUX6jxgf2RJaWgKTBKa+wMA6PqLifa6IgZUUe41ZmGJxYr6eM37m0/vzfs6w==" /></div>
|
||||
<button class="btn-octicon tooltipped tooltipped-nw" type="submit"
|
||||
aria-label="Edit this file" data-hotkey="e" data-disable-with>
|
||||
<svg aria-hidden="true" class="octicon octicon-pencil" height="16" version="1.1" viewBox="0 0 14 16" width="14"><path fill-rule="evenodd" d="M0 12v3h3l8-8-3-3-8 8zm3 2H1v-2h1v1h1v1zm10.3-9.3L12 6 9 3l1.3-1.3a.996.996 0 0 1 1.41 0l1.59 1.59c.39.39.39 1.02 0 1.41z"/></svg>
|
||||
</button>
|
||||
</form> <!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="/didi/VirtualAPK/delete/master/CONTRIBUTING.md" class="inline-form" method="post"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /><input name="authenticity_token" type="hidden" value="ccXfryEyc1vQiyQxM36LxZBqUdDDFFxec9O+ylp0/H6Y2nFyHH1okgGAPkc47jJBd62nKEr0mFmdUIU+xSISFw==" /></div>
|
||||
<button class="btn-octicon btn-octicon-danger tooltipped tooltipped-nw" type="submit"
|
||||
aria-label="Delete this file" data-disable-with>
|
||||
<svg aria-hidden="true" class="octicon octicon-trashcan" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M11 2H9c0-.55-.45-1-1-1H5c-.55 0-1 .45-1 1H2c-.55 0-1 .45-1 1v1c0 .55.45 1 1 1v9c0 .55.45 1 1 1h7c.55 0 1-.45 1-1V5c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1zm-1 12H3V5h1v8h1V5h1v8h1V5h1v8h1V5h1v9zm1-10H2V3h9v1z"/></svg>
|
||||
</button>
|
||||
</form> </div>
|
||||
|
||||
<div class="file-info">
|
||||
29 lines (18 sloc)
|
||||
<span class="file-info-divider"></span>
|
||||
938 Bytes
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
<div id="readme" class="readme blob instapaper_body">
|
||||
<article class="markdown-body entry-content" itemprop="text"><h1><a id="user-content-contribution-guideline" class="anchor" href="#contribution-guideline" aria-hidden="true"><svg aria-hidden="true" class="octicon octicon-link" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"></path></svg></a>Contribution Guideline</h1>
|
||||
<p>Thanks for considering to contribute this project. All issues and pull requests are highly appreciated.</p>
|
||||
<h2><a id="user-content-pull-requests" class="anchor" href="#pull-requests" aria-hidden="true"><svg aria-hidden="true" class="octicon octicon-link" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"></path></svg></a>Pull Requests</h2>
|
||||
<p>Before sending pull request to this project, please read and follow guidelines below.</p>
|
||||
<ol>
|
||||
<li>Branch: We only accept pull request on <code>dev</code> branch.</li>
|
||||
<li>Coding style: Follow the coding style used in VirtualAPK.</li>
|
||||
<li>Commit message: Use English and be aware of your spell.</li>
|
||||
<li>Test: Make sure to test your code.</li>
|
||||
</ol>
|
||||
<p>Add device mode, API version, related log, screenshots and other related information in your pull request if possible.</p>
|
||||
<p>NOTE: We assume all your contribution can be licensed under the <a href="https://github.com/didi/VirtualAPK/blob/master/LICENSE">Apache License 2.0</a>.</p>
|
||||
<h2><a id="user-content-issues" class="anchor" href="#issues" aria-hidden="true"><svg aria-hidden="true" class="octicon octicon-link" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"></path></svg></a>Issues</h2>
|
||||
<p>We love clearly described issues. :)</p>
|
||||
<p>Following information can help us to resolve the issue faster.</p>
|
||||
<ul>
|
||||
<li>Device mode and hardware information.</li>
|
||||
<li>API version.</li>
|
||||
<li>Logs.</li>
|
||||
<li>Screenshots.</li>
|
||||
<li>Steps to reproduce the issue.</li>
|
||||
</ul>
|
||||
</article>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
<button type="button" data-facebox="#jump-to-line" data-facebox-class="linejump" data-hotkey="l" class="d-none">Jump to Line</button>
|
||||
<div id="jump-to-line" style="display:none">
|
||||
<!-- '"` --><!-- </textarea></xmp> --></option></form><form accept-charset="UTF-8" action="" class="js-jump-to-line-form" method="get"><div style="margin:0;padding:0;display:inline"><input name="utf8" type="hidden" value="✓" /></div>
|
||||
<input class="form-control linejump-input js-jump-to-line-field" type="text" placeholder="Jump to line…" aria-label="Jump to line" autofocus>
|
||||
<button type="submit" class="btn">Go</button>
|
||||
</form> </div>
|
||||
|
||||
</div>
|
||||
<div class="modal-backdrop js-touch-events"></div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
|
||||
|
||||
<div class="container-lg site-footer-container">
|
||||
<div class="site-footer " role="contentinfo">
|
||||
<ul class="site-footer-links float-right">
|
||||
<li><a href="https://github.com/contact" data-ga-click="Footer, go to contact, text:contact">Contact GitHub</a></li>
|
||||
<li><a href="https://developer.github.com" data-ga-click="Footer, go to api, text:api">API</a></li>
|
||||
<li><a href="https://training.github.com" data-ga-click="Footer, go to training, text:training">Training</a></li>
|
||||
<li><a href="https://shop.github.com" data-ga-click="Footer, go to shop, text:shop">Shop</a></li>
|
||||
<li><a href="https://github.com/blog" data-ga-click="Footer, go to blog, text:blog">Blog</a></li>
|
||||
<li><a href="https://github.com/about" data-ga-click="Footer, go to about, text:about">About</a></li>
|
||||
|
||||
</ul>
|
||||
|
||||
<a href="https://github.com" aria-label="Homepage" class="site-footer-mark" title="GitHub">
|
||||
<svg aria-hidden="true" class="octicon octicon-mark-github" height="24" version="1.1" viewBox="0 0 16 16" width="24"><path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"/></svg>
|
||||
</a>
|
||||
<ul class="site-footer-links">
|
||||
<li>© 2017 <span title="0.18126s from unicorn-2592397628-tnzmf">GitHub</span>, Inc.</li>
|
||||
<li><a href="https://github.com/site/terms" data-ga-click="Footer, go to terms, text:terms">Terms</a></li>
|
||||
<li><a href="https://github.com/site/privacy" data-ga-click="Footer, go to privacy, text:privacy">Privacy</a></li>
|
||||
<li><a href="https://github.com/security" data-ga-click="Footer, go to security, text:security">Security</a></li>
|
||||
<li><a href="https://status.github.com/" data-ga-click="Footer, go to status, text:status">Status</a></li>
|
||||
<li><a href="https://help.github.com" data-ga-click="Footer, go to help, text:help">Help</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
<div id="ajax-error-message" class="ajax-error-message flash flash-error">
|
||||
<svg aria-hidden="true" class="octicon octicon-alert" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M8.865 1.52c-.18-.31-.51-.5-.87-.5s-.69.19-.87.5L.275 13.5c-.18.31-.18.69 0 1 .19.31.52.5.87.5h13.7c.36 0 .69-.19.86-.5.17-.31.18-.69.01-1L8.865 1.52zM8.995 13h-2v-2h2v2zm0-3h-2V6h2v4z"/></svg>
|
||||
<button type="button" class="flash-close js-flash-close js-ajax-error-dismiss" aria-label="Dismiss error">
|
||||
<svg aria-hidden="true" class="octicon octicon-x" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M7.48 8l3.75 3.75-1.48 1.48L6 9.48l-3.75 3.75-1.48-1.48L4.52 8 .77 4.25l1.48-1.48L6 6.52l3.75-3.75 1.48 1.48z"/></svg>
|
||||
</button>
|
||||
You can't perform that action at this time.
|
||||
</div>
|
||||
|
||||
|
||||
|
||||
<script crossorigin="anonymous" integrity="sha256-OiTVyGEpRWTLRqro9+PHTHqtjluGGBa/2WknMA8gP1E=" src="https://assets-cdn.github.com/assets/frameworks-3a24d5c861294564cb46aae8f7e3c74c7aad8e5b861816bfd96927300f203f51.js"></script>
|
||||
|
||||
<script async="async" crossorigin="anonymous" integrity="sha256-0u/yC9TgTIqxZRfYQjZg/tLvDPt0wXD1XA2s6QOFTaY=" src="https://assets-cdn.github.com/assets/github-d2eff20bd4e04c8ab16517d8423660fed2ef0cfb74c170f55c0dace903854da6.js"></script>
|
||||
|
||||
|
||||
|
||||
|
||||
<div class="js-stale-session-flash stale-session-flash flash flash-warn flash-banner d-none">
|
||||
<svg aria-hidden="true" class="octicon octicon-alert" height="16" version="1.1" viewBox="0 0 16 16" width="16"><path fill-rule="evenodd" d="M8.865 1.52c-.18-.31-.51-.5-.87-.5s-.69.19-.87.5L.275 13.5c-.18.31-.18.69 0 1 .19.31.52.5.87.5h13.7c.36 0 .69-.19.86-.5.17-.31.18-.69.01-1L8.865 1.52zM8.995 13h-2v-2h2v2zm0-3h-2V6h2v4z"/></svg>
|
||||
<span class="signed-in-tab-flash">You signed in with another tab or window. <a href="">Reload</a> to refresh your session.</span>
|
||||
<span class="signed-out-tab-flash">You signed out in another tab or window. <a href="">Reload</a> to refresh your session.</span>
|
||||
</div>
|
||||
<div class="facebox" id="facebox" style="display:none;">
|
||||
<div class="facebox-popup">
|
||||
<div class="facebox-content" role="dialog" aria-labelledby="facebox-header" aria-describedby="facebox-description">
|
||||
</div>
|
||||
<button type="button" class="facebox-close js-facebox-close" aria-label="Close modal">
|
||||
<svg aria-hidden="true" class="octicon octicon-x" height="16" version="1.1" viewBox="0 0 12 16" width="12"><path fill-rule="evenodd" d="M7.48 8l3.75 3.75-1.48 1.48L6 9.48l-3.75 3.75-1.48-1.48L4.52 8 .77 4.25l1.48-1.48L6 6.52l3.75-3.75 1.48 1.48z"/></svg>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
</body>
|
||||
</html>
|
||||
|
||||
When you contribute code, you affirm that the contribution is your original work and that you
|
||||
license the work to the project under the project's open source license. Whether or not you
|
||||
state this explicitly, by submitting any copyrighted material via pull request, email, or
|
||||
other means you agree to license the material under the project's open source license and
|
||||
warrant that you have the legal authority to do so.
|
||||
|
||||
14
HEADER
Normal file
14
HEADER
Normal file
@@ -0,0 +1,14 @@
|
||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
contributor license agreements. See the NOTICE file distributed with
|
||||
this work for additional information regarding copyright ownership.
|
||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
(the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
982
LICENSE
982
LICENSE
@@ -1,433 +1,549 @@
|
||||
Apache License
|
||||
|
||||
Version 2.0, January 2004
|
||||
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
|
||||
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
|
||||
|
||||
|
||||
1. Definitions.
|
||||
|
||||
|
||||
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
|
||||
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
|
||||
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
|
||||
other entities that control, are controlled by, or are under common
|
||||
|
||||
control with that entity. For the purposes of this definition,
|
||||
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
|
||||
direction or management of such entity, whether by contract or
|
||||
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
|
||||
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
|
||||
exercising permissions granted by this License.
|
||||
|
||||
|
||||
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
|
||||
including but not limited to software source code, documentation
|
||||
|
||||
source, and configuration files.
|
||||
|
||||
|
||||
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
|
||||
transformation or translation of a Source form, including but
|
||||
|
||||
not limited to compiled object code, generated documentation,
|
||||
|
||||
and conversions to other media types.
|
||||
|
||||
|
||||
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
|
||||
Object form, made available under the License, as indicated by a
|
||||
|
||||
copyright notice that is included in or attached to the work
|
||||
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
|
||||
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
|
||||
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
|
||||
the original version of the Work and any modifications or additions
|
||||
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
|
||||
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
|
||||
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
|
||||
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
|
||||
where such license applies only to those patent claims licensable
|
||||
|
||||
by such Contributor that are necessarily infringed by their
|
||||
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
|
||||
institute patent litigation against any entity (including a
|
||||
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
|
||||
or contributory patent infringement, then any patent licenses
|
||||
|
||||
granted to You under this License for that Work shall terminate
|
||||
|
||||
as of the date such litigation is filed.
|
||||
|
||||
|
||||
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
|
||||
modifications, and in Source or Object form, provided that You
|
||||
|
||||
meet the following conditions:
|
||||
|
||||
|
||||
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
|
||||
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
|
||||
stating that You changed the files; and
|
||||
|
||||
|
||||
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
|
||||
attribution notices from the Source form of the Work,
|
||||
|
||||
excluding those notices that do not pertain to any part of
|
||||
|
||||
the Derivative Works; and
|
||||
|
||||
|
||||
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
|
||||
include a readable copy of the attribution notices contained
|
||||
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
|
||||
of the following places: within a NOTICE text file distributed
|
||||
|
||||
as part of the Derivative Works; within the Source form or
|
||||
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
|
||||
within a display generated by the Derivative Works, if and
|
||||
|
||||
wherever such third-party notices normally appear. The contents
|
||||
|
||||
of the NOTICE file are for informational purposes only and
|
||||
|
||||
do not modify the License. You may add Your own attribution
|
||||
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
|
||||
that such additional attribution notices cannot be construed
|
||||
|
||||
as modifying the License.
|
||||
|
||||
|
||||
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
|
||||
may provide additional or different license terms and conditions
|
||||
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
|
||||
the conditions stated in this License.
|
||||
|
||||
|
||||
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
|
||||
this License, without any additional terms or conditions.
|
||||
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
|
||||
the terms of any separate license agreement you may have executed
|
||||
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
|
||||
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
|
||||
except as required for reasonable and customary use in describing the
|
||||
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
|
||||
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
|
||||
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
|
||||
incidental, or consequential damages of any character arising as a
|
||||
|
||||
result of this License or out of the use or inability to use the
|
||||
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
|
||||
other commercial damages or losses), even if such Contributor
|
||||
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
|
||||
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
|
||||
or other liability obligations and/or rights consistent with this
|
||||
|
||||
License. However, in accepting such obligations, You may act only
|
||||
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
|
||||
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
|
||||
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
|
||||
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
|
||||
replaced with your own identifying information. (Don't include
|
||||
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
|
||||
comment syntax for the file format. We also recommend that a
|
||||
|
||||
file or class name and description of purpose be included on the
|
||||
|
||||
same "printed page" as the copyright notice for easier
|
||||
|
||||
identification within third-party archives.
|
||||
|
||||
|
||||
|
||||
|
||||
Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd. All rights reserved.
|
||||
|
||||
|
||||
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
You may obtain a copy of the License at
|
||||
|
||||
|
||||
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
|
||||
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
||||
See the License for the specific language governing permissions and
|
||||
|
||||
limitations under the License.
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
------------------------------------------------------------------------------------
|
||||
This distribution has a binary dependency on jersey, which is available under the EPLv2
|
||||
License as described below.
|
||||
|
||||
Eclipse Public License - v 2.0
|
||||
|
||||
THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE
|
||||
PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION
|
||||
OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
|
||||
|
||||
1. DEFINITIONS
|
||||
|
||||
"Contribution" means:
|
||||
|
||||
a) in the case of the initial Contributor, the initial content
|
||||
Distributed under this Agreement, and
|
||||
|
||||
b) in the case of each subsequent Contributor:
|
||||
i) changes to the Program, and
|
||||
ii) additions to the Program;
|
||||
where such changes and/or additions to the Program originate from
|
||||
and are Distributed by that particular Contributor. A Contribution
|
||||
"originates" from a Contributor if it was added to the Program by
|
||||
such Contributor itself or anyone acting on such Contributor's behalf.
|
||||
Contributions do not include changes or additions to the Program that
|
||||
are not Modified Works.
|
||||
|
||||
"Contributor" means any person or entity that Distributes the Program.
|
||||
|
||||
"Licensed Patents" mean patent claims licensable by a Contributor which
|
||||
are necessarily infringed by the use or sale of its Contribution alone
|
||||
or when combined with the Program.
|
||||
|
||||
"Program" means the Contributions Distributed in accordance with this
|
||||
Agreement.
|
||||
|
||||
"Recipient" means anyone who receives the Program under this Agreement
|
||||
or any Secondary License (as applicable), including Contributors.
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source Code or other
|
||||
form, that is based on (or derived from) the Program and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship.
|
||||
|
||||
"Modified Works" shall mean any work in Source Code or other form that
|
||||
results from an addition to, deletion from, or modification of the
|
||||
contents of the Program, including, for purposes of clarity any new file
|
||||
in Source Code form that contains any contents of the Program. Modified
|
||||
Works shall not include works that contain only declarations,
|
||||
interfaces, types, classes, structures, or files of the Program solely
|
||||
in each case in order to link to, bind by name, or subclass the Program
|
||||
or Modified Works thereof.
|
||||
|
||||
"Distribute" means the acts of a) distributing or b) making available
|
||||
in any manner that enables the transfer of a copy.
|
||||
|
||||
"Source Code" means the form of a Program preferred for making
|
||||
modifications, including but not limited to software source code,
|
||||
documentation source, and configuration files.
|
||||
|
||||
"Secondary License" means either the GNU General Public License,
|
||||
Version 2.0, or any later versions of that license, including any
|
||||
exceptions or additional permissions as identified by the initial
|
||||
Contributor.
|
||||
|
||||
2. GRANT OF RIGHTS
|
||||
|
||||
a) Subject to the terms of this Agreement, each Contributor hereby
|
||||
grants Recipient a non-exclusive, worldwide, royalty-free copyright
|
||||
license to reproduce, prepare Derivative Works of, publicly display,
|
||||
publicly perform, Distribute and sublicense the Contribution of such
|
||||
Contributor, if any, and such Derivative Works.
|
||||
|
||||
b) Subject to the terms of this Agreement, each Contributor hereby
|
||||
grants Recipient a non-exclusive, worldwide, royalty-free patent
|
||||
license under Licensed Patents to make, use, sell, offer to sell,
|
||||
import and otherwise transfer the Contribution of such Contributor,
|
||||
if any, in Source Code or other form. This patent license shall
|
||||
apply to the combination of the Contribution and the Program if, at
|
||||
the time the Contribution is added by the Contributor, such addition
|
||||
of the Contribution causes such combination to be covered by the
|
||||
Licensed Patents. The patent license shall not apply to any other
|
||||
combinations which include the Contribution. No hardware per se is
|
||||
licensed hereunder.
|
||||
|
||||
c) Recipient understands that although each Contributor grants the
|
||||
licenses to its Contributions set forth herein, no assurances are
|
||||
provided by any Contributor that the Program does not infringe the
|
||||
patent or other intellectual property rights of any other entity.
|
||||
Each Contributor disclaims any liability to Recipient for claims
|
||||
brought by any other entity based on infringement of intellectual
|
||||
property rights or otherwise. As a condition to exercising the
|
||||
rights and licenses granted hereunder, each Recipient hereby
|
||||
assumes sole responsibility to secure any other intellectual
|
||||
property rights needed, if any. For example, if a third party
|
||||
patent license is required to allow Recipient to Distribute the
|
||||
Program, it is Recipient's responsibility to acquire that license
|
||||
before distributing the Program.
|
||||
|
||||
d) Each Contributor represents that to its knowledge it has
|
||||
sufficient copyright rights in its Contribution, if any, to grant
|
||||
the copyright license set forth in this Agreement.
|
||||
|
||||
e) Notwithstanding the terms of any Secondary License, no
|
||||
Contributor makes additional grants to any Recipient (other than
|
||||
those set forth in this Agreement) as a result of such Recipient's
|
||||
receipt of the Program under the terms of a Secondary License
|
||||
(if permitted under the terms of Section 3).
|
||||
|
||||
3. REQUIREMENTS
|
||||
|
||||
3.1 If a Contributor Distributes the Program in any form, then:
|
||||
|
||||
a) the Program must also be made available as Source Code, in
|
||||
accordance with section 3.2, and the Contributor must accompany
|
||||
the Program with a statement that the Source Code for the Program
|
||||
is available under this Agreement, and informs Recipients how to
|
||||
obtain it in a reasonable manner on or through a medium customarily
|
||||
used for software exchange; and
|
||||
|
||||
b) the Contributor may Distribute the Program under a license
|
||||
different than this Agreement, provided that such license:
|
||||
i) effectively disclaims on behalf of all other Contributors all
|
||||
warranties and conditions, express and implied, including
|
||||
warranties or conditions of title and non-infringement, and
|
||||
implied warranties or conditions of merchantability and fitness
|
||||
for a particular purpose;
|
||||
|
||||
ii) effectively excludes on behalf of all other Contributors all
|
||||
liability for damages, including direct, indirect, special,
|
||||
incidental and consequential damages, such as lost profits;
|
||||
|
||||
iii) does not attempt to limit or alter the recipients' rights
|
||||
in the Source Code under section 3.2; and
|
||||
|
||||
iv) requires any subsequent distribution of the Program by any
|
||||
party to be under a license that satisfies the requirements
|
||||
of this section 3.
|
||||
|
||||
3.2 When the Program is Distributed as Source Code:
|
||||
|
||||
a) it must be made available under this Agreement, or if the
|
||||
Program (i) is combined with other material in a separate file or
|
||||
files made available under a Secondary License, and (ii) the initial
|
||||
Contributor attached to the Source Code the notice described in
|
||||
Exhibit A of this Agreement, then the Program may be made available
|
||||
under the terms of such Secondary Licenses, and
|
||||
|
||||
b) a copy of this Agreement must be included with each copy of
|
||||
the Program.
|
||||
|
||||
3.3 Contributors may not remove or alter any copyright, patent,
|
||||
trademark, attribution notices, disclaimers of warranty, or limitations
|
||||
of liability ("notices") contained within the Program from any copy of
|
||||
the Program which they Distribute, provided that Contributors may add
|
||||
their own appropriate notices.
|
||||
|
||||
4. COMMERCIAL DISTRIBUTION
|
||||
|
||||
Commercial distributors of software may accept certain responsibilities
|
||||
with respect to end users, business partners and the like. While this
|
||||
license is intended to facilitate the commercial use of the Program,
|
||||
the Contributor who includes the Program in a commercial product
|
||||
offering should do so in a manner which does not create potential
|
||||
liability for other Contributors. Therefore, if a Contributor includes
|
||||
the Program in a commercial product offering, such Contributor
|
||||
("Commercial Contributor") hereby agrees to defend and indemnify every
|
||||
other Contributor ("Indemnified Contributor") against any losses,
|
||||
damages and costs (collectively "Losses") arising from claims, lawsuits
|
||||
and other legal actions brought by a third party against the Indemnified
|
||||
Contributor to the extent caused by the acts or omissions of such
|
||||
Commercial Contributor in connection with its distribution of the Program
|
||||
in a commercial product offering. The obligations in this section do not
|
||||
apply to any claims or Losses relating to any actual or alleged
|
||||
intellectual property infringement. In order to qualify, an Indemnified
|
||||
Contributor must: a) promptly notify the Commercial Contributor in
|
||||
writing of such claim, and b) allow the Commercial Contributor to control,
|
||||
and cooperate with the Commercial Contributor in, the defense and any
|
||||
related settlement negotiations. The Indemnified Contributor may
|
||||
participate in any such claim at its own expense.
|
||||
|
||||
For example, a Contributor might include the Program in a commercial
|
||||
product offering, Product X. That Contributor is then a Commercial
|
||||
Contributor. If that Commercial Contributor then makes performance
|
||||
claims, or offers warranties related to Product X, those performance
|
||||
claims and warranties are such Commercial Contributor's responsibility
|
||||
alone. Under this section, the Commercial Contributor would have to
|
||||
defend claims against the other Contributors related to those performance
|
||||
claims and warranties, and if a court requires any other Contributor to
|
||||
pay any damages as a result, the Commercial Contributor must pay
|
||||
those damages.
|
||||
|
||||
5. NO WARRANTY
|
||||
|
||||
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT
|
||||
PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS"
|
||||
BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR
|
||||
IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF
|
||||
TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR
|
||||
PURPOSE. Each Recipient is solely responsible for determining the
|
||||
appropriateness of using and distributing the Program and assumes all
|
||||
risks associated with its exercise of rights under this Agreement,
|
||||
including but not limited to the risks and costs of program errors,
|
||||
compliance with applicable laws, damage to or loss of data, programs
|
||||
or equipment, and unavailability or interruption of operations.
|
||||
|
||||
6. DISCLAIMER OF LIABILITY
|
||||
|
||||
EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT
|
||||
PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS
|
||||
SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST
|
||||
PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE
|
||||
EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGES.
|
||||
|
||||
7. GENERAL
|
||||
|
||||
If any provision of this Agreement is invalid or unenforceable under
|
||||
applicable law, it shall not affect the validity or enforceability of
|
||||
the remainder of the terms of this Agreement, and without further
|
||||
action by the parties hereto, such provision shall be reformed to the
|
||||
minimum extent necessary to make such provision valid and enforceable.
|
||||
|
||||
If Recipient institutes patent litigation against any entity
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that the
|
||||
Program itself (excluding combinations of the Program with other software
|
||||
or hardware) infringes such Recipient's patent(s), then such Recipient's
|
||||
rights granted under Section 2(b) shall terminate as of the date such
|
||||
litigation is filed.
|
||||
|
||||
All Recipient's rights under this Agreement shall terminate if it
|
||||
fails to comply with any of the material terms or conditions of this
|
||||
Agreement and does not cure such failure in a reasonable period of
|
||||
time after becoming aware of such noncompliance. If all Recipient's
|
||||
rights under this Agreement terminate, Recipient agrees to cease use
|
||||
and distribution of the Program as soon as reasonably practicable.
|
||||
However, Recipient's obligations under this Agreement and any licenses
|
||||
granted by Recipient relating to the Program shall continue and survive.
|
||||
|
||||
Everyone is permitted to copy and distribute copies of this Agreement,
|
||||
but in order to avoid inconsistency the Agreement is copyrighted and
|
||||
may only be modified in the following manner. The Agreement Steward
|
||||
reserves the right to publish new versions (including revisions) of
|
||||
this Agreement from time to time. No one other than the Agreement
|
||||
Steward has the right to modify this Agreement. The Eclipse Foundation
|
||||
is the initial Agreement Steward. The Eclipse Foundation may assign the
|
||||
responsibility to serve as the Agreement Steward to a suitable separate
|
||||
entity. Each new version of the Agreement will be given a distinguishing
|
||||
version number. The Program (including Contributions) may always be
|
||||
Distributed subject to the version of the Agreement under which it was
|
||||
received. In addition, after a new version of the Agreement is published,
|
||||
Contributor may elect to Distribute the Program (including its
|
||||
Contributions) under the new version.
|
||||
|
||||
Except as expressly stated in Sections 2(a) and 2(b) above, Recipient
|
||||
receives no rights or licenses to the intellectual property of any
|
||||
Contributor under this Agreement, whether expressly, by implication,
|
||||
estoppel or otherwise. All rights in the Program not expressly granted
|
||||
under this Agreement are reserved. Nothing in this Agreement is intended
|
||||
to be enforceable by any entity that is not a Contributor or Recipient.
|
||||
No third-party beneficiary rights are created under this Agreement.
|
||||
|
||||
Exhibit A - Form of Secondary Licenses Notice
|
||||
|
||||
"This Source Code may also be made available under the following
|
||||
Secondary Licenses when the conditions for such availability set forth
|
||||
in the Eclipse Public License, v. 2.0 are satisfied: {name license(s),
|
||||
version(s), and exceptions or additional permissions here}."
|
||||
|
||||
Simply including a copy of this Agreement, including this Exhibit A
|
||||
is not sufficient to license the Source Code under Secondary Licenses.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular
|
||||
file, then You may include the notice in a location (such as a LICENSE
|
||||
file in a relevant directory) where a recipient would be likely to
|
||||
look for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
------------------------------------------------------------------------------------
|
||||
This distribution has a binary dependency on zstd, which is available under the BSD 3-Clause License as described below.
|
||||
|
||||
BSD License
|
||||
|
||||
For Zstandard software
|
||||
|
||||
Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name Facebook nor the names of its contributors may be used to
|
||||
endorse or promote products derived from this software without specific
|
||||
prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
------------------------------------------------------------------------------------
|
||||
This distribution has a binary dependency on zstd-jni, which is available under the BSD 2-Clause License
|
||||
as described below.
|
||||
|
||||
Zstd-jni: JNI bindings to Zstd Library
|
||||
|
||||
Copyright (c) 2015-2016, Luben Karavelov/ All rights reserved.
|
||||
|
||||
BSD License
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or
|
||||
other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
8
NOTICE
Normal file
8
NOTICE
Normal file
@@ -0,0 +1,8 @@
|
||||
Apache Kafka
|
||||
Copyright 2020 The Apache Software Foundation.
|
||||
|
||||
This product includes software developed at
|
||||
The Apache Software Foundation (https://www.apache.org/).
|
||||
|
||||
This distribution has a binary dependency on jersey, which is available under the CDDL
|
||||
License. The source code of jersey can be found at https://github.com/jersey/jersey/.
|
||||
14
PULL_REQUEST_TEMPLATE.md
Normal file
14
PULL_REQUEST_TEMPLATE.md
Normal file
@@ -0,0 +1,14 @@
|
||||
*More detailed description of your change,
|
||||
if necessary. The PR title and PR message become
|
||||
the squashed commit message, so use a separate
|
||||
comment to ping reviewers.*
|
||||
|
||||
*Summary of testing strategy (including rationale)
|
||||
for the feature or bug fix. Unit and/or integration
|
||||
tests are expected for any behaviour change and
|
||||
system tests should be considered for larger changes.*
|
||||
|
||||
### Committer Checklist (excluded from commit message)
|
||||
- [ ] Verify design and implementation
|
||||
- [ ] Verify test coverage and CI build status
|
||||
- [ ] Verify documentation (including upgrade notes)
|
||||
283
README.md
283
README.md
@@ -1,63 +1,220 @@
|
||||
|
||||
---
|
||||
|
||||

|
||||
|
||||
**一站式`Apache Kafka`集群指标监控与运维管控平台**
|
||||
|
||||
---
|
||||
|
||||
## 主要功能特性
|
||||
|
||||
|
||||
### 集群监控维度
|
||||
|
||||
- 多版本集群管控,支持从`0.10.2`到`2.4`版本;
|
||||
- 集群Topic、Broker等多维度历史与实时关键指标查看;
|
||||
|
||||
|
||||
### 集群管控维度
|
||||
|
||||
- 集群运维,包括逻辑Region方式管理集群
|
||||
- Broker运维,包括优先副本选举
|
||||
- Topic运维,包括创建、查询、扩容、修改属性、数据采样及迁移等;
|
||||
- 消费组运维,包括指定时间或指定偏移两种方式进行重置消费偏移
|
||||
|
||||
|
||||
### 用户使用维度
|
||||
|
||||
- 管理员用户与普通用户视角区分
|
||||
- 管理员用户与普通用户权限区分
|
||||
|
||||
|
||||
## kafka-manager架构图
|
||||
|
||||

|
||||
|
||||
|
||||
## 相关文档
|
||||
|
||||
- [kafka-manager安装手册](./docs/install_cn_guide.md)
|
||||
- [kafka-manager使用手册](./docs/user_cn_guide.md)
|
||||
|
||||
|
||||
## 钉钉交流群
|
||||
|
||||

|
||||
|
||||
|
||||
## 项目成员
|
||||
|
||||
### 内部核心人员
|
||||
|
||||
`iceyuhui`、`liuyaguang`、`limengmonty`、`zhangliangmike`、`nullhuangyiming`、`zengqiao`、`eilenexuzhe`、`huangjiaweihjw`
|
||||
|
||||
|
||||
### 外部贡献者
|
||||
|
||||
`fangjunyu`、`zhoutaiyang`
|
||||
|
||||
|
||||
## 协议
|
||||
|
||||
`kafka-manager`基于`Apache-2.0`协议进行分发和使用,更多信息参见[协议文件](./LICENSE)
|
||||
Apache Kafka
|
||||
=================
|
||||
See our [web site](https://kafka.apache.org) for details on the project.
|
||||
|
||||
You need to have [Java](http://www.oracle.com/technetwork/java/javase/downloads/index.html) installed.
|
||||
|
||||
Java 8 should be used for building in order to support both Java 8 and Java 11 at runtime.
|
||||
|
||||
Scala 2.12 is used by default, see below for how to use a different Scala version or all of the supported Scala versions.
|
||||
|
||||
### Build a jar and run it ###
|
||||
./gradlew jar
|
||||
|
||||
Follow instructions in https://kafka.apache.org/documentation.html#quickstart
|
||||
|
||||
### Build source jar ###
|
||||
./gradlew srcJar
|
||||
|
||||
### Build aggregated javadoc ###
|
||||
./gradlew aggregatedJavadoc
|
||||
|
||||
### Build javadoc and scaladoc ###
|
||||
./gradlew javadoc
|
||||
./gradlew javadocJar # builds a javadoc jar for each module
|
||||
./gradlew scaladoc
|
||||
./gradlew scaladocJar # builds a scaladoc jar for each module
|
||||
./gradlew docsJar # builds both (if applicable) javadoc and scaladoc jars for each module
|
||||
|
||||
### Run unit/integration tests ###
|
||||
./gradlew test # runs both unit and integration tests
|
||||
./gradlew unitTest
|
||||
./gradlew integrationTest
|
||||
|
||||
### Force re-running tests without code change ###
|
||||
./gradlew cleanTest test
|
||||
./gradlew cleanTest unitTest
|
||||
./gradlew cleanTest integrationTest
|
||||
|
||||
### Running a particular unit/integration test ###
|
||||
./gradlew clients:test --tests RequestResponseTest
|
||||
|
||||
### Running a particular test method within a unit/integration test ###
|
||||
./gradlew core:test --tests kafka.api.ProducerFailureHandlingTest.testCannotSendToInternalTopic
|
||||
./gradlew clients:test --tests org.apache.kafka.clients.MetadataTest.testMetadataUpdateWaitTime
|
||||
|
||||
### Running a particular unit/integration test with log4j output ###
|
||||
Change the log4j setting in either `clients/src/test/resources/log4j.properties` or `core/src/test/resources/log4j.properties`
|
||||
|
||||
./gradlew clients:test --tests RequestResponseTest
|
||||
|
||||
### Generating test coverage reports ###
|
||||
Generate coverage reports for the whole project:
|
||||
|
||||
./gradlew reportCoverage
|
||||
|
||||
Generate coverage for a single module, i.e.:
|
||||
|
||||
./gradlew clients:reportCoverage
|
||||
|
||||
### Building a binary release gzipped tar ball ###
|
||||
./gradlew clean releaseTarGz
|
||||
|
||||
The above command will fail if you haven't set up the signing key. To bypass signing the artifact, you can run:
|
||||
|
||||
./gradlew clean releaseTarGz -x signArchives
|
||||
|
||||
The release file can be found inside `./core/build/distributions/`.
|
||||
|
||||
### Cleaning the build ###
|
||||
./gradlew clean
|
||||
|
||||
### Running a task with one of the Scala versions available (2.12.x or 2.13.x) ###
|
||||
*Note that if building the jars with a version other than 2.12.x, you need to set the `SCALA_VERSION` variable or change it in `bin/kafka-run-class.sh` to run the quick start.*
|
||||
|
||||
You can pass either the major version (eg 2.12) or the full version (eg 2.12.7):
|
||||
|
||||
./gradlew -PscalaVersion=2.12 jar
|
||||
./gradlew -PscalaVersion=2.12 test
|
||||
./gradlew -PscalaVersion=2.12 releaseTarGz
|
||||
|
||||
### Running a task with all the scala versions enabled by default ###
|
||||
|
||||
Append `All` to the task name:
|
||||
|
||||
./gradlew testAll
|
||||
./gradlew jarAll
|
||||
./gradlew releaseTarGzAll
|
||||
|
||||
### Running a task for a specific project ###
|
||||
This is for `core`, `examples` and `clients`
|
||||
|
||||
./gradlew core:jar
|
||||
./gradlew core:test
|
||||
|
||||
### Listing all gradle tasks ###
|
||||
./gradlew tasks
|
||||
|
||||
### Building IDE project ####
|
||||
*Note that this is not strictly necessary (IntelliJ IDEA has good built-in support for Gradle projects, for example).*
|
||||
|
||||
./gradlew eclipse
|
||||
./gradlew idea
|
||||
|
||||
The `eclipse` task has been configured to use `${project_dir}/build_eclipse` as Eclipse's build directory. Eclipse's default
|
||||
build directory (`${project_dir}/bin`) clashes with Kafka's scripts directory and we don't use Gradle's build directory
|
||||
to avoid known issues with this configuration.
|
||||
|
||||
### Publishing the jar for all version of Scala and for all projects to maven ###
|
||||
./gradlew uploadArchivesAll
|
||||
|
||||
Please note for this to work you should create/update `${GRADLE_USER_HOME}/gradle.properties` (typically, `~/.gradle/gradle.properties`) and assign the following variables
|
||||
|
||||
mavenUrl=
|
||||
mavenUsername=
|
||||
mavenPassword=
|
||||
signing.keyId=
|
||||
signing.password=
|
||||
signing.secretKeyRingFile=
|
||||
|
||||
### Publishing the streams quickstart archetype artifact to maven ###
|
||||
For the Streams archetype project, one cannot use gradle to upload to maven; instead the `mvn deploy` command needs to be called at the quickstart folder:
|
||||
|
||||
cd streams/quickstart
|
||||
mvn deploy
|
||||
|
||||
Please note for this to work you should create/update user maven settings (typically, `${USER_HOME}/.m2/settings.xml`) to assign the following variables
|
||||
|
||||
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0
|
||||
https://maven.apache.org/xsd/settings-1.0.0.xsd">
|
||||
...
|
||||
<servers>
|
||||
...
|
||||
<server>
|
||||
<id>apache.snapshots.https</id>
|
||||
<username>${maven_username}</username>
|
||||
<password>${maven_password}</password>
|
||||
</server>
|
||||
<server>
|
||||
<id>apache.releases.https</id>
|
||||
<username>${maven_username}</username>
|
||||
<password>${maven_password}</password>
|
||||
</server>
|
||||
...
|
||||
</servers>
|
||||
...
|
||||
|
||||
|
||||
### Installing the jars to the local Maven repository ###
|
||||
./gradlew installAll
|
||||
|
||||
### Building the test jar ###
|
||||
./gradlew testJar
|
||||
|
||||
### Determining how transitive dependencies are added ###
|
||||
./gradlew core:dependencies --configuration runtime
|
||||
|
||||
### Determining if any dependencies could be updated ###
|
||||
./gradlew dependencyUpdates
|
||||
|
||||
### Running code quality checks ###
|
||||
There are two code quality analysis tools that we regularly run, spotbugs and checkstyle.
|
||||
|
||||
#### Checkstyle ####
|
||||
Checkstyle enforces a consistent coding style in Kafka.
|
||||
You can run checkstyle using:
|
||||
|
||||
./gradlew checkstyleMain checkstyleTest
|
||||
|
||||
The checkstyle warnings will be found in `reports/checkstyle/reports/main.html` and `reports/checkstyle/reports/test.html` files in the
|
||||
subproject build directories. They are also printed to the console. The build will fail if Checkstyle fails.
|
||||
|
||||
#### Spotbugs ####
|
||||
Spotbugs uses static analysis to look for bugs in the code.
|
||||
You can run spotbugs using:
|
||||
|
||||
./gradlew spotbugsMain spotbugsTest -x test
|
||||
|
||||
The spotbugs warnings will be found in `reports/spotbugs/main.html` and `reports/spotbugs/test.html` files in the subproject build
|
||||
directories. Use -PxmlSpotBugsReport=true to generate an XML report instead of an HTML one.
|
||||
|
||||
### Common build options ###
|
||||
|
||||
The following options should be set with a `-P` switch, for example `./gradlew -PmaxParallelForks=1 test`.
|
||||
|
||||
* `commitId`: sets the build commit ID as .git/HEAD might not be correct if there are local commits added for build purposes.
|
||||
* `mavenUrl`: sets the URL of the maven deployment repository (`file://path/to/repo` can be used to point to a local repository).
|
||||
* `maxParallelForks`: limits the maximum number of processes for each task.
|
||||
* `showStandardStreams`: shows standard out and standard error of the test JVM(s) on the console.
|
||||
* `skipSigning`: skips signing of artifacts.
|
||||
* `testLoggingEvents`: unit test events to be logged, separated by comma. For example `./gradlew -PtestLoggingEvents=started,passed,skipped,failed test`.
|
||||
* `xmlSpotBugsReport`: enable XML reports for spotBugs. This also disables HTML reports as only one can be enabled at a time.
|
||||
|
||||
### Dependency Analysis ###
|
||||
|
||||
The gradle [dependency debugging documentation](https://docs.gradle.org/current/userguide/viewing_debugging_dependencies.html) mentions using the `dependencies` or `dependencyInsight` tasks to debug dependencies for the root project or individual subprojects.
|
||||
|
||||
Alternatively, use the `allDeps` or `allDepInsight` tasks for recursively iterating through all subprojects:
|
||||
|
||||
./gradlew allDeps
|
||||
|
||||
./gradlew allDepInsight --configuration runtime --dependency com.fasterxml.jackson.core:jackson-databind
|
||||
|
||||
These take the same arguments as the builtin variants.
|
||||
|
||||
### Running system tests ###
|
||||
|
||||
See [tests/README.md](tests/README.md).
|
||||
|
||||
### Running in Vagrant ###
|
||||
|
||||
See [vagrant/README.md](vagrant/README.md).
|
||||
|
||||
### Contribution ###
|
||||
|
||||
Apache Kafka is interested in building the community; we would welcome any thoughts or [patches](https://issues.apache.org/jira/browse/KAFKA). You can reach us [on the Apache mailing lists](http://kafka.apache.org/contact.html).
|
||||
|
||||
To contribute follow the instructions here:
|
||||
* https://kafka.apache.org/contributing.html
|
||||
|
||||
189
TROGDOR.md
Normal file
189
TROGDOR.md
Normal file
@@ -0,0 +1,189 @@
|
||||
Trogdor
|
||||
========================================
|
||||
Trogdor is a test framework for Apache Kafka.
|
||||
|
||||
Trogdor can run benchmarks and other workloads. Trogdor can also inject faults in order to stress test the system.
|
||||
|
||||
Quickstart
|
||||
=========================================================
|
||||
First, we want to start a single-node Kafka cluster with a ZooKeeper and a broker.
|
||||
|
||||
Running ZooKeeper:
|
||||
|
||||
> ./bin/zookeeper-server-start.sh ./config/zookeeper.properties &> /tmp/zookeeper.log &
|
||||
|
||||
Running Kafka:
|
||||
|
||||
> ./bin/kafka-server-start.sh ./config/server.properties &> /tmp/kafka.log &
|
||||
|
||||
Then, we want to run a Trogdor Agent, plus a Trogdor Coordinator.
|
||||
|
||||
To run the Trogdor Agent:
|
||||
|
||||
> ./bin/trogdor.sh agent -c ./config/trogdor.conf -n node0 &> /tmp/trogdor-agent.log &
|
||||
|
||||
To run the Trogdor Coordinator:
|
||||
|
||||
> ./bin/trogdor.sh coordinator -c ./config/trogdor.conf -n node0 &> /tmp/trogdor-coordinator.log &
|
||||
|
||||
Let's confirm that all of the daemons are running:
|
||||
|
||||
> jps
|
||||
116212 Coordinator
|
||||
115188 QuorumPeerMain
|
||||
116571 Jps
|
||||
115420 Kafka
|
||||
115694 Agent
|
||||
|
||||
Now, we can submit a test job to Trogdor.
|
||||
|
||||
> ./bin/trogdor.sh client createTask -t localhost:8889 -i produce0 --spec ./tests/spec/simple_produce_bench.json
|
||||
Sent CreateTaskRequest for task produce0.
|
||||
|
||||
We can run showTask to see what the task's status is:
|
||||
|
||||
> ./bin/trogdor.sh client showTask -t localhost:8889 -i produce0
|
||||
Task bar of type org.apache.kafka.trogdor.workload.ProduceBenchSpec is DONE. FINISHED at 2019-01-09T20:38:22.039-08:00 after 6s
|
||||
|
||||
To see the results, we use showTask with --show-status:
|
||||
|
||||
> ./bin/trogdor.sh client showTask -t localhost:8889 -i produce0 --show-status
|
||||
Task bar of type org.apache.kafka.trogdor.workload.ProduceBenchSpec is DONE. FINISHED at 2019-01-09T20:38:22.039-08:00 after 6s
|
||||
Status: {
|
||||
"totalSent" : 50000,
|
||||
"averageLatencyMs" : 17.83388,
|
||||
"p50LatencyMs" : 12,
|
||||
"p95LatencyMs" : 75,
|
||||
"p99LatencyMs" : 96,
|
||||
"transactionsCommitted" : 0
|
||||
}
|
||||
|
||||
Trogdor Architecture
|
||||
========================================
|
||||
Trogdor has a single coordinator process which manages multiple agent processes. Each agent process is responsible for a single cluster node.
|
||||
|
||||
The Trogdor coordinator manages tasks. A task is anything we might want to do on a cluster, such as running a benchmark, injecting a fault, or running a workload. In order to implement each task, the coordinator creates workers on one or more agent nodes.
|
||||
|
||||
The Trogdor agent process implements the tasks. For example, when running a workload, the agent process is the process which produces and consumes messages.
|
||||
|
||||
Both the coordinator and the agent expose a REST interface that accepts objects serialized via JSON. There is also a command-line program which makes it easy to send messages to either one without manually crafting the JSON message body.
|
||||
|
||||
All Trogdor RPCs are idempotent except the shutdown requests. Sending an idempotent RPC twice in a row has the same effect as sending the RPC once.
|
||||
|
||||
Tasks
|
||||
========================================
|
||||
Tasks are described by specifications containing:
|
||||
|
||||
* A "class" field describing the task type. This contains a full Java class name.
|
||||
* A "startMs" field describing when the task should start. This is given in terms of milliseconds since the UNIX epoch.
|
||||
* A "durationMs" field describing how long the task should last. This is given in terms of milliseconds.
|
||||
* Other fields which are task-specific.
|
||||
|
||||
The task specification is usually written as JSON. For example, this task specification describes a network partition between nodes 1 and 2, and 3:
|
||||
|
||||
{
|
||||
"class": "org.apache.kafka.trogdor.fault.NetworkPartitionFaultSpec",
|
||||
"startMs": 1000,
|
||||
"durationMs": 30000,
|
||||
"partitions": [["node1", "node2"], ["node3"]]
|
||||
}
|
||||
|
||||
This task runs a simple ProduceBench test on a cluster with one producer node, 5 topics, and 10,000 messages per second.
|
||||
The keys are generated sequentially and the configured partitioner (DefaultPartitioner) is used.
|
||||
|
||||
{
|
||||
"class": "org.apache.kafka.trogdor.workload.ProduceBenchSpec",
|
||||
"durationMs": 10000000,
|
||||
"producerNode": "node0",
|
||||
"bootstrapServers": "localhost:9092",
|
||||
"targetMessagesPerSec": 10000,
|
||||
"maxMessages": 50000,
|
||||
"activeTopics": {
|
||||
"foo[1-3]": {
|
||||
"numPartitions": 10,
|
||||
"replicationFactor": 1
|
||||
}
|
||||
},
|
||||
"inactiveTopics": {
|
||||
"foo[4-5]": {
|
||||
"numPartitions": 10,
|
||||
"replicationFactor": 1
|
||||
}
|
||||
},
|
||||
"keyGenerator": {
|
||||
"type": "sequential",
|
||||
"size": 8,
|
||||
"offset": 1
|
||||
},
|
||||
"useConfiguredPartitioner": true
|
||||
}
|
||||
|
||||
Tasks are submitted to the coordinator. Once the coordinator determines that it is time for the task to start, it creates workers on agent processes. The workers run until the task is done.
|
||||
|
||||
Task specifications are immutable; they do not change after the task has been created.
|
||||
|
||||
Tasks can be in several states:
|
||||
* PENDING, when task is waiting to execute,
|
||||
* RUNNING, when the task is running,
|
||||
* STOPPING, when the task is in the process of stopping,
|
||||
* DONE, when the task is done.
|
||||
|
||||
Tasks that are DONE also have an error field which will be set if the task failed.
|
||||
|
||||
Workloads
|
||||
========================================
|
||||
Trogdor can run several workloads. Workloads perform operations on the cluster and measure their performance. Workloads fail when the operations cannot be performed.
|
||||
|
||||
### ProduceBench
|
||||
ProduceBench starts a Kafka producer on a single agent node, producing to several partitions. The workload measures the average produce latency, as well as the median, 95th percentile, and 99th percentile latency.
|
||||
It can be configured to use a transactional producer which can commit transactions based on a set time interval or number of messages.
|
||||
|
||||
### RoundTripWorkload
|
||||
RoundTripWorkload tests both production and consumption. The workload starts a Kafka producer and consumer on a single node. The consumer will read back the messages that were produced by the producer.
|
||||
|
||||
### ConsumeBench
|
||||
ConsumeBench starts one or more Kafka consumers on a single agent node. Depending on the passed in configuration (see ConsumeBenchSpec), the consumers either subscribe to a set of topics (leveraging consumer group functionality and dynamic partition assignment) or manually assign partitions to themselves.
|
||||
The workload measures the average produce latency, as well as the median, 95th percentile, and 99th percentile latency.
|
||||
|
||||
Faults
|
||||
========================================
|
||||
Trogdor can run several faults which deliberately break something in the cluster.
|
||||
|
||||
### ProcessStopFault
|
||||
ProcessStopFault stops a process by sending it a SIGSTOP signal. When the fault ends, the process is resumed with SIGCONT.
|
||||
|
||||
### NetworkPartitionFault
|
||||
NetworkPartitionFault sets up an artificial network partition between one or more sets of nodes. Currently, this is implemented using iptables. The iptables rules are set up on the outbound traffic from the affected nodes. Therefore, the affected nodes should still be reachable from outside the cluster.
|
||||
|
||||
External Processes
|
||||
========================================
|
||||
Trogdor supports running arbitrary commands in external processes. This is a generic way to run any configurable command in the Trogdor framework - be it a Python program, bash script, docker image, etc.
|
||||
|
||||
### ExternalCommandWorker
|
||||
ExternalCommandWorker starts an external command defined by the ExternalCommandSpec. It essentially allows you to run any command on any Trogdor agent node.
|
||||
The worker communicates with the external process via its stdin, stdout and stderr in a JSON protocol. It uses stdout for any actionable communication and only logs what it sees in stderr.
|
||||
On startup the worker will first send a message describing the workload to the external process in this format:
|
||||
```
|
||||
{"id":<task ID string>, "workload":<configured workload JSON object>}
|
||||
```
|
||||
and will then listen for messages from the external process, again in a JSON format.
|
||||
Said JSON can contain the following fields:
|
||||
- status: If the object contains this field, the status of the worker will be set to the given value.
|
||||
- error: If the object contains this field, the error of the worker will be set to the given value. Once an error occurs, the external process will be terminated.
|
||||
- log: If the object contains this field, a log message will be issued with this text.
|
||||
An example:
|
||||
```json
|
||||
{"log": "Finished successfully.", "status": {"p99ProduceLatency": "100ms", "messagesSent": 10000}}
|
||||
```
|
||||
|
||||
Exec Mode
|
||||
========================================
|
||||
Sometimes, you just want to run a test quickly on a single node. In this case, you can use "exec mode." This mode allows you to run a single Trogdor Agent without a Coordinator.
|
||||
|
||||
When using exec mode, you must pass in a Task specification to use. The Agent will try to start this task.
|
||||
|
||||
For example:
|
||||
|
||||
> ./bin/trogdor.sh agent -n node0 -c ./config/trogdor.conf --exec ./tests/spec/simple_produce_bench.json
|
||||
|
||||
When using exec mode, the Agent will exit once the task is complete.
|
||||
199
Vagrantfile
vendored
Normal file
199
Vagrantfile
vendored
Normal file
@@ -0,0 +1,199 @@
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
|
||||
require 'socket'
|
||||
|
||||
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
|
||||
VAGRANTFILE_API_VERSION = "2"
|
||||
|
||||
# General config
|
||||
enable_dns = false
|
||||
# Override to false when bringing up a cluster on AWS
|
||||
enable_hostmanager = true
|
||||
enable_jmx = false
|
||||
num_zookeepers = 1
|
||||
num_brokers = 3
|
||||
num_workers = 0 # Generic workers that get the code, but don't start any services
|
||||
ram_megabytes = 1280
|
||||
base_box = "ubuntu/trusty64"
|
||||
|
||||
# EC2
|
||||
ec2_access_key = ENV['AWS_ACCESS_KEY']
|
||||
ec2_secret_key = ENV['AWS_SECRET_KEY']
|
||||
ec2_session_token = ENV['AWS_SESSION_TOKEN']
|
||||
ec2_keypair_name = nil
|
||||
ec2_keypair_file = nil
|
||||
|
||||
ec2_region = "us-east-1"
|
||||
ec2_az = nil # Uses set by AWS
|
||||
ec2_ami = "ami-29ebb519"
|
||||
ec2_instance_type = "m3.medium"
|
||||
ec2_spot_instance = ENV['SPOT_INSTANCE'] ? ENV['SPOT_INSTANCE'] == 'true' : true
|
||||
ec2_spot_max_price = "0.113" # On-demand price for instance type
|
||||
ec2_user = "ubuntu"
|
||||
ec2_instance_name_prefix = "kafka-vagrant"
|
||||
ec2_security_groups = nil
|
||||
ec2_subnet_id = nil
|
||||
# Only override this by setting it to false if you're running in a VPC and you
|
||||
# are running Vagrant from within that VPC as well.
|
||||
ec2_associate_public_ip = nil
|
||||
|
||||
jdk_major = '8'
|
||||
jdk_full = '8u202-linux-x64'
|
||||
|
||||
local_config_file = File.join(File.dirname(__FILE__), "Vagrantfile.local")
|
||||
if File.exists?(local_config_file) then
|
||||
eval(File.read(local_config_file), binding, "Vagrantfile.local")
|
||||
end
|
||||
|
||||
# TODO(ksweeney): RAM requirements are not empirical and can probably be significantly lowered.
|
||||
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
|
||||
config.hostmanager.enabled = enable_hostmanager
|
||||
config.hostmanager.manage_host = enable_dns
|
||||
config.hostmanager.include_offline = false
|
||||
|
||||
## Provider-specific global configs
|
||||
config.vm.provider :virtualbox do |vb,override|
|
||||
override.vm.box = base_box
|
||||
|
||||
override.hostmanager.ignore_private_ip = false
|
||||
|
||||
# Brokers started with the standard script currently set Xms and Xmx to 1G,
|
||||
# plus we need some extra head room.
|
||||
vb.customize ["modifyvm", :id, "--memory", ram_megabytes.to_s]
|
||||
|
||||
if Vagrant.has_plugin?("vagrant-cachier")
|
||||
override.cache.scope = :box
|
||||
end
|
||||
end
|
||||
|
||||
config.vm.provider :aws do |aws,override|
|
||||
# The "box" is specified as an AMI
|
||||
override.vm.box = "dummy"
|
||||
override.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
|
||||
|
||||
cached_addresses = {}
|
||||
# Use a custom resolver that SSH's into the machine and finds the IP address
|
||||
# directly. This lets us get at the private IP address directly, avoiding
|
||||
# some issues with using the default IP resolver, which uses the public IP
|
||||
# address.
|
||||
override.hostmanager.ip_resolver = proc do |vm, resolving_vm|
|
||||
if !cached_addresses.has_key?(vm.name)
|
||||
state_id = vm.state.id
|
||||
if state_id != :not_created && state_id != :stopped && vm.communicate.ready?
|
||||
contents = ''
|
||||
vm.communicate.execute("/sbin/ifconfig eth0 | grep 'inet addr' | tail -n 1 | egrep -o '[0-9\.]+' | head -n 1 2>&1") do |type, data|
|
||||
contents << data
|
||||
end
|
||||
cached_addresses[vm.name] = contents.split("\n").first[/(\d+\.\d+\.\d+\.\d+)/, 1]
|
||||
else
|
||||
cached_addresses[vm.name] = nil
|
||||
end
|
||||
end
|
||||
cached_addresses[vm.name]
|
||||
end
|
||||
|
||||
override.ssh.username = ec2_user
|
||||
override.ssh.private_key_path = ec2_keypair_file
|
||||
|
||||
aws.access_key_id = ec2_access_key
|
||||
aws.secret_access_key = ec2_secret_key
|
||||
aws.session_token = ec2_session_token
|
||||
aws.keypair_name = ec2_keypair_name
|
||||
|
||||
aws.region = ec2_region
|
||||
aws.availability_zone = ec2_az
|
||||
aws.instance_type = ec2_instance_type
|
||||
aws.ami = ec2_ami
|
||||
aws.security_groups = ec2_security_groups
|
||||
aws.subnet_id = ec2_subnet_id
|
||||
# If a subnet is specified, default to turning on a public IP unless the
|
||||
# user explicitly specifies the option. Without a public IP, Vagrant won't
|
||||
# be able to SSH into the hosts unless Vagrant is also running in the VPC.
|
||||
if ec2_associate_public_ip.nil?
|
||||
aws.associate_public_ip = true unless ec2_subnet_id.nil?
|
||||
else
|
||||
aws.associate_public_ip = ec2_associate_public_ip
|
||||
end
|
||||
aws.region_config ec2_region do |region|
|
||||
region.spot_instance = ec2_spot_instance
|
||||
region.spot_max_price = ec2_spot_max_price
|
||||
end
|
||||
|
||||
# Exclude some directories that can grow very large from syncing
|
||||
override.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__exclude: ['.git', 'core/data/', 'logs/', 'tests/results/', 'results/']
|
||||
end
|
||||
|
||||
def name_node(node, name, ec2_instance_name_prefix)
|
||||
node.vm.hostname = name
|
||||
node.vm.provider :aws do |aws|
|
||||
aws.tags = {
|
||||
'Name' => ec2_instance_name_prefix + "-" + Socket.gethostname + "-" + name,
|
||||
'JenkinsBuildUrl' => ENV['BUILD_URL']
|
||||
}
|
||||
end
|
||||
end
|
||||
|
||||
def assign_local_ip(node, ip_address)
|
||||
node.vm.provider :virtualbox do |vb,override|
|
||||
override.vm.network :private_network, ip: ip_address
|
||||
end
|
||||
end
|
||||
|
||||
## Cluster definition
|
||||
zookeepers = []
|
||||
(1..num_zookeepers).each { |i|
|
||||
name = "zk" + i.to_s
|
||||
zookeepers.push(name)
|
||||
config.vm.define name do |zookeeper|
|
||||
name_node(zookeeper, name, ec2_instance_name_prefix)
|
||||
ip_address = "192.168.50." + (10 + i).to_s
|
||||
assign_local_ip(zookeeper, ip_address)
|
||||
zookeeper.vm.provision "shell", path: "vagrant/base.sh", env: {"JDK_MAJOR" => jdk_major, "JDK_FULL" => jdk_full}
|
||||
zk_jmx_port = enable_jmx ? (8000 + i).to_s : ""
|
||||
zookeeper.vm.provision "shell", path: "vagrant/zk.sh", :args => [i.to_s, num_zookeepers, zk_jmx_port]
|
||||
end
|
||||
}
|
||||
|
||||
(1..num_brokers).each { |i|
|
||||
name = "broker" + i.to_s
|
||||
config.vm.define name do |broker|
|
||||
name_node(broker, name, ec2_instance_name_prefix)
|
||||
ip_address = "192.168.50." + (50 + i).to_s
|
||||
assign_local_ip(broker, ip_address)
|
||||
# We need to be careful about what we list as the publicly routable
|
||||
# address since this is registered in ZK and handed out to clients. If
|
||||
# host DNS isn't setup, we shouldn't use hostnames -- IP addresses must be
|
||||
# used to support clients running on the host.
|
||||
zookeeper_connect = zookeepers.map{ |zk_addr| zk_addr + ":2181"}.join(",")
|
||||
broker.vm.provision "shell", path: "vagrant/base.sh", env: {"JDK_MAJOR" => jdk_major, "JDK_FULL" => jdk_full}
|
||||
kafka_jmx_port = enable_jmx ? (9000 + i).to_s : ""
|
||||
broker.vm.provision "shell", path: "vagrant/broker.sh", :args => [i.to_s, enable_dns ? name : ip_address, zookeeper_connect, kafka_jmx_port]
|
||||
end
|
||||
}
|
||||
|
||||
(1..num_workers).each { |i|
|
||||
name = "worker" + i.to_s
|
||||
config.vm.define name do |worker|
|
||||
name_node(worker, name, ec2_instance_name_prefix)
|
||||
ip_address = "192.168.50." + (100 + i).to_s
|
||||
assign_local_ip(worker, ip_address)
|
||||
worker.vm.provision "shell", path: "vagrant/base.sh", env: {"JDK_MAJOR" => jdk_major, "JDK_FULL" => jdk_full}
|
||||
end
|
||||
}
|
||||
|
||||
end
|
||||
45
bin/connect-distributed.sh
Executable file
45
bin/connect-distributed.sh
Executable file
@@ -0,0 +1,45 @@
|
||||
#!/bin/sh
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [ $# -lt 1 ];
|
||||
then
|
||||
echo "USAGE: $0 [-daemon] connect-distributed.properties"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
base_dir=$(dirname $0)
|
||||
|
||||
if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
|
||||
export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties"
|
||||
fi
|
||||
|
||||
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
|
||||
export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G"
|
||||
fi
|
||||
|
||||
EXTRA_ARGS=${EXTRA_ARGS-'-name connectDistributed'}
|
||||
|
||||
COMMAND=$1
|
||||
case $COMMAND in
|
||||
-daemon)
|
||||
EXTRA_ARGS="-daemon "$EXTRA_ARGS
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectDistributed "$@"
|
||||
45
bin/connect-mirror-maker.sh
Executable file
45
bin/connect-mirror-maker.sh
Executable file
@@ -0,0 +1,45 @@
|
||||
#!/bin/sh
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [ $# -lt 1 ];
|
||||
then
|
||||
echo "USAGE: $0 [-daemon] mm2.properties"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
base_dir=$(dirname $0)
|
||||
|
||||
if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
|
||||
export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties"
|
||||
fi
|
||||
|
||||
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
|
||||
export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G"
|
||||
fi
|
||||
|
||||
EXTRA_ARGS=${EXTRA_ARGS-'-name mirrorMaker'}
|
||||
|
||||
COMMAND=$1
|
||||
case $COMMAND in
|
||||
-daemon)
|
||||
EXTRA_ARGS="-daemon "$EXTRA_ARGS
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.mirror.MirrorMaker "$@"
|
||||
45
bin/connect-standalone.sh
Executable file
45
bin/connect-standalone.sh
Executable file
@@ -0,0 +1,45 @@
|
||||
#!/bin/sh
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [ $# -lt 1 ];
|
||||
then
|
||||
echo "USAGE: $0 [-daemon] connect-standalone.properties"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
base_dir=$(dirname $0)
|
||||
|
||||
if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
|
||||
export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/connect-log4j.properties"
|
||||
fi
|
||||
|
||||
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
|
||||
export KAFKA_HEAP_OPTS="-Xms256M -Xmx2G"
|
||||
fi
|
||||
|
||||
EXTRA_ARGS=${EXTRA_ARGS-'-name connectStandalone'}
|
||||
|
||||
COMMAND=$1
|
||||
case $COMMAND in
|
||||
-daemon)
|
||||
EXTRA_ARGS="-daemon "$EXTRA_ARGS
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh $EXTRA_ARGS org.apache.kafka.connect.cli.ConnectStandalone "$@"
|
||||
17
bin/kafka-acls.sh
Executable file
17
bin/kafka-acls.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.admin.AclCommand "$@"
|
||||
17
bin/kafka-broker-api-versions.sh
Executable file
17
bin/kafka-broker-api-versions.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.admin.BrokerApiVersionsCommand "$@"
|
||||
17
bin/kafka-configs.sh
Executable file
17
bin/kafka-configs.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.admin.ConfigCommand "$@"
|
||||
21
bin/kafka-console-consumer.sh
Executable file
21
bin/kafka-console-consumer.sh
Executable file
@@ -0,0 +1,21 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
|
||||
export KAFKA_HEAP_OPTS="-Xmx512M"
|
||||
fi
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsoleConsumer "$@"
|
||||
20
bin/kafka-console-producer.sh
Executable file
20
bin/kafka-console-producer.sh
Executable file
@@ -0,0 +1,20 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
|
||||
export KAFKA_HEAP_OPTS="-Xmx512M"
|
||||
fi
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsoleProducer "$@"
|
||||
17
bin/kafka-consumer-groups.sh
Executable file
17
bin/kafka-consumer-groups.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.admin.ConsumerGroupCommand "$@"
|
||||
20
bin/kafka-consumer-perf-test.sh
Executable file
20
bin/kafka-consumer-perf-test.sh
Executable file
@@ -0,0 +1,20 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
|
||||
export KAFKA_HEAP_OPTS="-Xmx512M"
|
||||
fi
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.tools.ConsumerPerformance "$@"
|
||||
17
bin/kafka-delegation-tokens.sh
Executable file
17
bin/kafka-delegation-tokens.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.admin.DelegationTokenCommand "$@"
|
||||
17
bin/kafka-delete-records.sh
Executable file
17
bin/kafka-delete-records.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.admin.DeleteRecordsCommand "$@"
|
||||
18
bin/kafka-diskload-protector.sh
Executable file
18
bin/kafka-diskload-protector.sh
Executable file
@@ -0,0 +1,18 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.admin.DiskLoadProtectorCommand "$@"
|
||||
|
||||
17
bin/kafka-dump-log.sh
Executable file
17
bin/kafka-dump-log.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.tools.DumpLogSegments "$@"
|
||||
18
bin/kafka-exmetrics.sh
Executable file
18
bin/kafka-exmetrics.sh
Executable file
@@ -0,0 +1,18 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.admin.KafkaExMetricsCommand "$@"
|
||||
|
||||
17
bin/kafka-leader-election.sh
Executable file
17
bin/kafka-leader-election.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.admin.LeaderElectionCommand "$@"
|
||||
17
bin/kafka-log-dirs.sh
Executable file
17
bin/kafka-log-dirs.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.admin.LogDirsCommand "$@"
|
||||
17
bin/kafka-mirror-maker.sh
Executable file
17
bin/kafka-mirror-maker.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.tools.MirrorMaker "$@"
|
||||
17
bin/kafka-preferred-replica-election.sh
Executable file
17
bin/kafka-preferred-replica-election.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.admin.PreferredReplicaLeaderElectionCommand "$@"
|
||||
20
bin/kafka-producer-perf-test.sh
Executable file
20
bin/kafka-producer-perf-test.sh
Executable file
@@ -0,0 +1,20 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
|
||||
export KAFKA_HEAP_OPTS="-Xmx512M"
|
||||
fi
|
||||
exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.ProducerPerformance "$@"
|
||||
17
bin/kafka-reassign-partitions.sh
Executable file
17
bin/kafka-reassign-partitions.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.admin.ReassignPartitionsCommand "$@"
|
||||
17
bin/kafka-replica-verification.sh
Executable file
17
bin/kafka-replica-verification.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.tools.ReplicaVerificationTool "$@"
|
||||
316
bin/kafka-run-class.sh
Executable file
316
bin/kafka-run-class.sh
Executable file
@@ -0,0 +1,316 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [ $# -lt 1 ];
|
||||
then
|
||||
echo "USAGE: $0 [-daemon] [-name servicename] [-loggc] classname [opts]"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# CYGWIN == 1 if Cygwin is detected, else 0.
|
||||
if [[ $(uname -a) =~ "CYGWIN" ]]; then
|
||||
CYGWIN=1
|
||||
else
|
||||
CYGWIN=0
|
||||
fi
|
||||
|
||||
if [ -z "$INCLUDE_TEST_JARS" ]; then
|
||||
INCLUDE_TEST_JARS=false
|
||||
fi
|
||||
|
||||
# Exclude jars not necessary for running commands.
|
||||
regex="(-(test|test-sources|src|scaladoc|javadoc)\.jar|jar.asc)$"
|
||||
should_include_file() {
|
||||
if [ "$INCLUDE_TEST_JARS" = true ]; then
|
||||
return 0
|
||||
fi
|
||||
file=$1
|
||||
if [ -z "$(echo "$file" | egrep "$regex")" ] ; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
base_dir=$(dirname $0)/..
|
||||
|
||||
if [ -z "$SCALA_VERSION" ]; then
|
||||
SCALA_VERSION=2.12.10
|
||||
fi
|
||||
|
||||
if [ -z "$SCALA_BINARY_VERSION" ]; then
|
||||
SCALA_BINARY_VERSION=$(echo $SCALA_VERSION | cut -f 1-2 -d '.')
|
||||
fi
|
||||
|
||||
# run ./gradlew copyDependantLibs to get all dependant jars in a local dir
|
||||
shopt -s nullglob
|
||||
if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
|
||||
for dir in "$base_dir"/core/build/dependant-libs-${SCALA_VERSION}*;
|
||||
do
|
||||
CLASSPATH="$CLASSPATH:$dir/*"
|
||||
done
|
||||
fi
|
||||
|
||||
for file in "$base_dir"/examples/build/libs/kafka-examples*.jar;
|
||||
do
|
||||
if should_include_file "$file"; then
|
||||
CLASSPATH="$CLASSPATH":"$file"
|
||||
fi
|
||||
done
|
||||
|
||||
if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
|
||||
clients_lib_dir=$(dirname $0)/../clients/build/libs
|
||||
streams_lib_dir=$(dirname $0)/../streams/build/libs
|
||||
streams_dependant_clients_lib_dir=$(dirname $0)/../streams/build/dependant-libs-${SCALA_VERSION}
|
||||
else
|
||||
clients_lib_dir=/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs
|
||||
streams_lib_dir=$clients_lib_dir
|
||||
streams_dependant_clients_lib_dir=$streams_lib_dir
|
||||
fi
|
||||
|
||||
|
||||
for file in "$clients_lib_dir"/kafka-clients*.jar;
|
||||
do
|
||||
if should_include_file "$file"; then
|
||||
CLASSPATH="$CLASSPATH":"$file"
|
||||
fi
|
||||
done
|
||||
|
||||
for file in "$streams_lib_dir"/kafka-streams*.jar;
|
||||
do
|
||||
if should_include_file "$file"; then
|
||||
CLASSPATH="$CLASSPATH":"$file"
|
||||
fi
|
||||
done
|
||||
|
||||
if [ -z "$UPGRADE_KAFKA_STREAMS_TEST_VERSION" ]; then
|
||||
for file in "$base_dir"/streams/examples/build/libs/kafka-streams-examples*.jar;
|
||||
do
|
||||
if should_include_file "$file"; then
|
||||
CLASSPATH="$CLASSPATH":"$file"
|
||||
fi
|
||||
done
|
||||
else
|
||||
VERSION_NO_DOTS=`echo $UPGRADE_KAFKA_STREAMS_TEST_VERSION | sed 's/\.//g'`
|
||||
SHORT_VERSION_NO_DOTS=${VERSION_NO_DOTS:0:((${#VERSION_NO_DOTS} - 1))} # remove last char, ie, bug-fix number
|
||||
for file in "$base_dir"/streams/upgrade-system-tests-$SHORT_VERSION_NO_DOTS/build/libs/kafka-streams-upgrade-system-tests*.jar;
|
||||
do
|
||||
if should_include_file "$file"; then
|
||||
CLASSPATH="$file":"$CLASSPATH"
|
||||
fi
|
||||
done
|
||||
if [ "$SHORT_VERSION_NO_DOTS" = "0100" ]; then
|
||||
CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.8.jar":"$CLASSPATH"
|
||||
CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.6.jar":"$CLASSPATH"
|
||||
fi
|
||||
if [ "$SHORT_VERSION_NO_DOTS" = "0101" ]; then
|
||||
CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zkclient-0.9.jar":"$CLASSPATH"
|
||||
CLASSPATH="/opt/kafka-$UPGRADE_KAFKA_STREAMS_TEST_VERSION/libs/zookeeper-3.4.8.jar":"$CLASSPATH"
|
||||
fi
|
||||
fi
|
||||
|
||||
for file in "$streams_dependant_clients_lib_dir"/rocksdb*.jar;
|
||||
do
|
||||
CLASSPATH="$CLASSPATH":"$file"
|
||||
done
|
||||
|
||||
for file in "$streams_dependant_clients_lib_dir"/*hamcrest*.jar;
|
||||
do
|
||||
CLASSPATH="$CLASSPATH":"$file"
|
||||
done
|
||||
|
||||
for file in "$base_dir"/tools/build/libs/kafka-tools*.jar;
|
||||
do
|
||||
if should_include_file "$file"; then
|
||||
CLASSPATH="$CLASSPATH":"$file"
|
||||
fi
|
||||
done
|
||||
|
||||
for dir in "$base_dir"/tools/build/dependant-libs-${SCALA_VERSION}*;
|
||||
do
|
||||
CLASSPATH="$CLASSPATH:$dir/*"
|
||||
done
|
||||
|
||||
for cc_pkg in "api" "transforms" "runtime" "file" "mirror" "mirror-client" "json" "tools" "basic-auth-extension"
|
||||
do
|
||||
for file in "$base_dir"/connect/${cc_pkg}/build/libs/connect-${cc_pkg}*.jar;
|
||||
do
|
||||
if should_include_file "$file"; then
|
||||
CLASSPATH="$CLASSPATH":"$file"
|
||||
fi
|
||||
done
|
||||
if [ -d "$base_dir/connect/${cc_pkg}/build/dependant-libs" ] ; then
|
||||
CLASSPATH="$CLASSPATH:$base_dir/connect/${cc_pkg}/build/dependant-libs/*"
|
||||
fi
|
||||
done
|
||||
|
||||
# classpath addition for release
|
||||
for file in "$base_dir"/libs/*;
|
||||
do
|
||||
if should_include_file "$file"; then
|
||||
CLASSPATH="$CLASSPATH":"$file"
|
||||
fi
|
||||
done
|
||||
|
||||
for file in "$base_dir"/core/build/libs/kafka_${SCALA_BINARY_VERSION}*.jar;
|
||||
do
|
||||
if should_include_file "$file"; then
|
||||
CLASSPATH="$CLASSPATH":"$file"
|
||||
fi
|
||||
done
|
||||
shopt -u nullglob
|
||||
|
||||
if [ -z "$CLASSPATH" ] ; then
|
||||
echo "Classpath is empty. Please build the project first e.g. by running './gradlew jar -PscalaVersion=$SCALA_VERSION'"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# JMX settings
|
||||
if [ -z "$KAFKA_JMX_OPTS" ]; then
|
||||
KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false "
|
||||
fi
|
||||
|
||||
# JMX port to use
|
||||
if [ $JMX_PORT ]; then
|
||||
KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
|
||||
fi
|
||||
|
||||
# Log directory to use
|
||||
if [ "x$LOG_DIR" = "x" ]; then
|
||||
LOG_DIR="$base_dir/logs"
|
||||
fi
|
||||
|
||||
# Log4j settings
|
||||
if [ -z "$KAFKA_LOG4J_OPTS" ]; then
|
||||
# Log to console. This is a tool.
|
||||
LOG4J_DIR="$base_dir/config/tools-log4j.properties"
|
||||
# If Cygwin is detected, LOG4J_DIR is converted to Windows format.
|
||||
(( CYGWIN )) && LOG4J_DIR=$(cygpath --path --mixed "${LOG4J_DIR}")
|
||||
KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_DIR}"
|
||||
else
|
||||
# create logs directory
|
||||
if [ ! -d "$LOG_DIR" ]; then
|
||||
mkdir -p "$LOG_DIR"
|
||||
fi
|
||||
fi
|
||||
|
||||
# If Cygwin is detected, LOG_DIR is converted to Windows format.
|
||||
(( CYGWIN )) && LOG_DIR=$(cygpath --path --mixed "${LOG_DIR}")
|
||||
KAFKA_LOG4J_OPTS="-Dkafka.logs.dir=$LOG_DIR $KAFKA_LOG4J_OPTS"
|
||||
|
||||
# Generic jvm settings you want to add
|
||||
if [ -z "$KAFKA_OPTS" ]; then
|
||||
KAFKA_OPTS=""
|
||||
fi
|
||||
|
||||
# Set Debug options if enabled
|
||||
if [ "x$KAFKA_DEBUG" != "x" ]; then
|
||||
|
||||
# Use default ports
|
||||
DEFAULT_JAVA_DEBUG_PORT="5005"
|
||||
|
||||
if [ -z "$JAVA_DEBUG_PORT" ]; then
|
||||
JAVA_DEBUG_PORT="$DEFAULT_JAVA_DEBUG_PORT"
|
||||
fi
|
||||
|
||||
# Use the defaults if JAVA_DEBUG_OPTS was not set
|
||||
DEFAULT_JAVA_DEBUG_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=${DEBUG_SUSPEND_FLAG:-n},address=0.0.0.0:$JAVA_DEBUG_PORT"
|
||||
if [ -z "$JAVA_DEBUG_OPTS" ]; then
|
||||
JAVA_DEBUG_OPTS="$DEFAULT_JAVA_DEBUG_OPTS"
|
||||
fi
|
||||
|
||||
echo "Enabling Java debug options: $JAVA_DEBUG_OPTS"
|
||||
KAFKA_OPTS="$JAVA_DEBUG_OPTS $KAFKA_OPTS"
|
||||
fi
|
||||
|
||||
# Which java to use
|
||||
if [ -z "$JAVA_HOME" ]; then
|
||||
JAVA="java"
|
||||
else
|
||||
JAVA="$JAVA_HOME/bin/java"
|
||||
fi
|
||||
|
||||
# Memory options
|
||||
if [ -z "$KAFKA_HEAP_OPTS" ]; then
|
||||
KAFKA_HEAP_OPTS="-Xmx256M"
|
||||
fi
|
||||
|
||||
# JVM performance options
|
||||
# MaxInlineLevel=15 is the default since JDK 14 and can be removed once older JDKs are no longer supported
|
||||
if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then
|
||||
KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:InitiatingHeapOccupancyPercent=35 -XX:G1HeapRegionSize=16m -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 -Djava.awt.headless=true"
|
||||
fi
|
||||
|
||||
while [ $# -gt 0 ]; do
|
||||
COMMAND=$1
|
||||
case $COMMAND in
|
||||
-name)
|
||||
DAEMON_NAME=$2
|
||||
CONSOLE_OUTPUT_FILE=$LOG_DIR/$DAEMON_NAME.out
|
||||
shift 2
|
||||
;;
|
||||
-loggc)
|
||||
if [ -z "$KAFKA_GC_LOG_OPTS" ]; then
|
||||
GC_LOG_ENABLED="true"
|
||||
fi
|
||||
shift
|
||||
;;
|
||||
-daemon)
|
||||
DAEMON_MODE="true"
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# GC options
|
||||
GC_FILE_SUFFIX='-gc.log'
|
||||
GC_LOG_FILE_NAME=''
|
||||
if [ "x$GC_LOG_ENABLED" = "xtrue" ]; then
|
||||
GC_LOG_FILE_NAME=$DAEMON_NAME$GC_FILE_SUFFIX
|
||||
|
||||
# The first segment of the version number, which is '1' for releases before Java 9
|
||||
# it then becomes '9', '10', ...
|
||||
# Some examples of the first line of `java --version`:
|
||||
# 8 -> java version "1.8.0_152"
|
||||
# 9.0.4 -> java version "9.0.4"
|
||||
# 10 -> java version "10" 2018-03-20
|
||||
# 10.0.1 -> java version "10.0.1" 2018-04-17
|
||||
# We need to match to the end of the line to prevent sed from printing the characters that do not match
|
||||
JAVA_MAJOR_VERSION=$($JAVA -version 2>&1 | sed -E -n 's/.* version "([0-9]*).*$/\1/p')
|
||||
if [[ "$JAVA_MAJOR_VERSION" -ge "9" ]] ; then
|
||||
KAFKA_GC_LOG_OPTS="-Xlog:gc*:file=$LOG_DIR/$GC_LOG_FILE_NAME:time"
|
||||
else
|
||||
KAFKA_GC_LOG_OPTS="-Xloggc:$LOG_DIR/$GC_LOG_FILE_NAME -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=100M"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Remove a possible colon prefix from the classpath (happens at lines like `CLASSPATH="$CLASSPATH:$file"` when CLASSPATH is blank)
|
||||
# Syntax used on the right side is native Bash string manipulation; for more details see
|
||||
# http://tldp.org/LDP/abs/html/string-manipulation.html, specifically the section titled "Substring Removal"
|
||||
CLASSPATH=${CLASSPATH#:}
|
||||
|
||||
# If Cygwin is detected, classpath is converted to Windows format.
|
||||
(( CYGWIN )) && CLASSPATH=$(cygpath --path --mixed "${CLASSPATH}")
|
||||
|
||||
# Launch mode
|
||||
if [ "x$DAEMON_MODE" = "xtrue" ]; then
|
||||
nohup $JAVA $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp $CLASSPATH $KAFKA_OPTS "$@" > "$CONSOLE_OUTPUT_FILE" 2>&1 < /dev/null &
|
||||
else
|
||||
exec $JAVA $KAFKA_HEAP_OPTS $KAFKA_JVM_PERFORMANCE_OPTS $KAFKA_GC_LOG_OPTS $KAFKA_JMX_OPTS $KAFKA_LOG4J_OPTS -cp $CLASSPATH $KAFKA_OPTS "$@"
|
||||
fi
|
||||
51
bin/kafka-server-start.sh
Executable file
51
bin/kafka-server-start.sh
Executable file
@@ -0,0 +1,51 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [ $# -lt 1 ];
|
||||
then
|
||||
echo "USAGE: $0 [-daemon] server.properties [--override property=value]*"
|
||||
exit 1
|
||||
fi
|
||||
base_dir=$(dirname $0)
|
||||
|
||||
if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
|
||||
export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
|
||||
fi
|
||||
|
||||
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
|
||||
export KAFKA_HEAP_OPTS="-Xmx8G -Xms8G"
|
||||
export JMX_PORT=8099
|
||||
#export KAFKA_DEBUG=debug
|
||||
#export DAEMON_MODE=true
|
||||
export KAFKA_OPTS="-Djava.security.auth.login.config=$base_dir/../config/kafka_server_jaas.conf"
|
||||
export DEBUG_SUSPEND_FLAG="n"
|
||||
export JAVA_DEBUG_PORT="8096"
|
||||
export GC_LOG_ENABLED=true
|
||||
fi
|
||||
|
||||
EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'}
|
||||
|
||||
COMMAND=$1
|
||||
case $COMMAND in
|
||||
-daemon)
|
||||
EXTRA_ARGS="-daemon "$EXTRA_ARGS
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
exec $base_dir/kafka-run-class.sh $EXTRA_ARGS kafka.Kafka "$@"
|
||||
24
bin/kafka-server-stop.sh
Executable file
24
bin/kafka-server-stop.sh
Executable file
@@ -0,0 +1,24 @@
|
||||
#!/bin/sh
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
SIGNAL=${SIGNAL:-TERM}
|
||||
PIDS=$(ps ax | grep -i 'kafka\.Kafka' | grep java | grep -v grep | awk '{print $1}')
|
||||
|
||||
if [ -z "$PIDS" ]; then
|
||||
echo "No kafka server to stop"
|
||||
exit 1
|
||||
else
|
||||
kill -s $SIGNAL $PIDS
|
||||
fi
|
||||
21
bin/kafka-streams-application-reset.sh
Executable file
21
bin/kafka-streams-application-reset.sh
Executable file
@@ -0,0 +1,21 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
|
||||
export KAFKA_HEAP_OPTS="-Xmx512M"
|
||||
fi
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.tools.StreamsResetter "$@"
|
||||
17
bin/kafka-topics.sh
Executable file
17
bin/kafka-topics.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.admin.TopicCommand "$@"
|
||||
20
bin/kafka-verifiable-consumer.sh
Executable file
20
bin/kafka-verifiable-consumer.sh
Executable file
@@ -0,0 +1,20 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
|
||||
export KAFKA_HEAP_OPTS="-Xmx512M"
|
||||
fi
|
||||
exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.VerifiableConsumer "$@"
|
||||
20
bin/kafka-verifiable-producer.sh
Executable file
20
bin/kafka-verifiable-producer.sh
Executable file
@@ -0,0 +1,20 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
|
||||
export KAFKA_HEAP_OPTS="-Xmx512M"
|
||||
fi
|
||||
exec $(dirname $0)/kafka-run-class.sh org.apache.kafka.tools.VerifiableProducer "$@"
|
||||
50
bin/trogdor.sh
Executable file
50
bin/trogdor.sh
Executable file
@@ -0,0 +1,50 @@
|
||||
#!/usr/bin/env bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
usage() {
|
||||
cat <<EOF
|
||||
The Trogdor fault injector.
|
||||
|
||||
Usage:
|
||||
$0 [action] [options]
|
||||
|
||||
Actions:
|
||||
agent: Run the trogdor agent.
|
||||
coordinator: Run the trogdor coordinator.
|
||||
client: Run the client which communicates with the trogdor coordinator.
|
||||
agent-client: Run the client which communicates with the trogdor agent.
|
||||
help: This help message.
|
||||
EOF
|
||||
}
|
||||
|
||||
if [[ $# -lt 1 ]]; then
|
||||
usage
|
||||
exit 0
|
||||
fi
|
||||
action="${1}"
|
||||
shift
|
||||
CLASS=""
|
||||
case ${action} in
|
||||
agent) CLASS="org.apache.kafka.trogdor.agent.Agent";;
|
||||
coordinator) CLASS="org.apache.kafka.trogdor.coordinator.Coordinator";;
|
||||
client) CLASS="org.apache.kafka.trogdor.coordinator.CoordinatorClient";;
|
||||
agent-client) CLASS="org.apache.kafka.trogdor.agent.AgentClient";;
|
||||
help) usage; exit 0;;
|
||||
*) echo "Unknown action '${action}'. Type '$0 help' for help."; exit 1;;
|
||||
esac
|
||||
|
||||
export INCLUDE_TEST_JARS=1
|
||||
exec $(dirname $0)/kafka-run-class.sh "${CLASS}" "$@"
|
||||
34
bin/windows/connect-distributed.bat
Normal file
34
bin/windows/connect-distributed.bat
Normal file
@@ -0,0 +1,34 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
IF [%1] EQU [] (
|
||||
echo USAGE: %0 connect-distributed.properties
|
||||
EXIT /B 1
|
||||
)
|
||||
|
||||
SetLocal
|
||||
rem Using pushd popd to set BASE_DIR to the absolute path
|
||||
pushd %~dp0..\..
|
||||
set BASE_DIR=%CD%
|
||||
popd
|
||||
|
||||
rem Log4j settings
|
||||
IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
|
||||
set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/tools-log4j.properties
|
||||
)
|
||||
|
||||
"%~dp0kafka-run-class.bat" org.apache.kafka.connect.cli.ConnectDistributed %*
|
||||
EndLocal
|
||||
34
bin/windows/connect-standalone.bat
Normal file
34
bin/windows/connect-standalone.bat
Normal file
@@ -0,0 +1,34 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
IF [%1] EQU [] (
|
||||
echo USAGE: %0 connect-standalone.properties
|
||||
EXIT /B 1
|
||||
)
|
||||
|
||||
SetLocal
|
||||
rem Using pushd popd to set BASE_DIR to the absolute path
|
||||
pushd %~dp0..\..
|
||||
set BASE_DIR=%CD%
|
||||
popd
|
||||
|
||||
rem Log4j settings
|
||||
IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
|
||||
set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/tools-log4j.properties
|
||||
)
|
||||
|
||||
"%~dp0kafka-run-class.bat" org.apache.kafka.connect.cli.ConnectStandalone %*
|
||||
EndLocal
|
||||
17
bin/windows/kafka-acls.bat
Normal file
17
bin/windows/kafka-acls.bat
Normal file
@@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
"%~dp0kafka-run-class.bat" kafka.admin.AclCommand %*
|
||||
17
bin/windows/kafka-broker-api-versions.bat
Normal file
17
bin/windows/kafka-broker-api-versions.bat
Normal file
@@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
%~dp0kafka-run-class.bat kafka.admin.BrokerApiVersionsCommand %*
|
||||
17
bin/windows/kafka-configs.bat
Normal file
17
bin/windows/kafka-configs.bat
Normal file
@@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
"%~dp0kafka-run-class.bat" kafka.admin.ConfigCommand %*
|
||||
20
bin/windows/kafka-console-consumer.bat
Normal file
20
bin/windows/kafka-console-consumer.bat
Normal file
@@ -0,0 +1,20 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
SetLocal
|
||||
set KAFKA_HEAP_OPTS=-Xmx512M
|
||||
"%~dp0kafka-run-class.bat" kafka.tools.ConsoleConsumer %*
|
||||
EndLocal
|
||||
20
bin/windows/kafka-console-producer.bat
Normal file
20
bin/windows/kafka-console-producer.bat
Normal file
@@ -0,0 +1,20 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
SetLocal
|
||||
set KAFKA_HEAP_OPTS=-Xmx512M
|
||||
"%~dp0kafka-run-class.bat" kafka.tools.ConsoleProducer %*
|
||||
EndLocal
|
||||
17
bin/windows/kafka-consumer-groups.bat
Normal file
17
bin/windows/kafka-consumer-groups.bat
Normal file
@@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
"%~dp0kafka-run-class.bat" kafka.admin.ConsumerGroupCommand %*
|
||||
20
bin/windows/kafka-consumer-perf-test.bat
Normal file
20
bin/windows/kafka-consumer-perf-test.bat
Normal file
@@ -0,0 +1,20 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
SetLocal
|
||||
set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M
|
||||
"%~dp0kafka-run-class.bat" kafka.tools.ConsumerPerformance %*
|
||||
EndLocal
|
||||
17
bin/windows/kafka-delegation-tokens.bat
Normal file
17
bin/windows/kafka-delegation-tokens.bat
Normal file
@@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
"%~dp0kafka-run-class.bat" kafka.admin.DelegationTokenCommand %*
|
||||
17
bin/windows/kafka-delete-records.bat
Normal file
17
bin/windows/kafka-delete-records.bat
Normal file
@@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
"%~dp0kafka-run-class.bat" kafka.admin.DeleteRecordsCommand %*
|
||||
17
bin/windows/kafka-dump-log.bat
Normal file
17
bin/windows/kafka-dump-log.bat
Normal file
@@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
"%~dp0kafka-run-class.bat" kafka.tools.DumpLogSegments %*
|
||||
17
bin/windows/kafka-leader-election.bat
Normal file
17
bin/windows/kafka-leader-election.bat
Normal file
@@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
"%~dp0kafka-run-class.bat" kafka.admin.LeaderElectionCommand %*
|
||||
17
bin/windows/kafka-log-dirs.bat
Normal file
17
bin/windows/kafka-log-dirs.bat
Normal file
@@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
"%~dp0kafka-run-class.bat" kafka.admin.LogDirsCommand %*
|
||||
17
bin/windows/kafka-mirror-maker.bat
Normal file
17
bin/windows/kafka-mirror-maker.bat
Normal file
@@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
"%~dp0kafka-run-class.bat" kafka.tools.MirrorMaker %*
|
||||
17
bin/windows/kafka-preferred-replica-election.bat
Normal file
17
bin/windows/kafka-preferred-replica-election.bat
Normal file
@@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
"%~dp0kafka-run-class.bat" kafka.admin.PreferredReplicaLeaderElectionCommand %*
|
||||
20
bin/windows/kafka-producer-perf-test.bat
Normal file
20
bin/windows/kafka-producer-perf-test.bat
Normal file
@@ -0,0 +1,20 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
SetLocal
|
||||
set KAFKA_HEAP_OPTS=-Xmx512M
|
||||
"%~dp0kafka-run-class.bat" org.apache.kafka.tools.ProducerPerformance %*
|
||||
EndLocal
|
||||
17
bin/windows/kafka-reassign-partitions.bat
Normal file
17
bin/windows/kafka-reassign-partitions.bat
Normal file
@@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
"%~dp0kafka-run-class.bat" kafka.admin.ReassignPartitionsCommand %*
|
||||
17
bin/windows/kafka-replica-verification.bat
Normal file
17
bin/windows/kafka-replica-verification.bat
Normal file
@@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
"%~dp0kafka-run-class.bat" kafka.tools.ReplicaVerificationTool %*
|
||||
191
bin/windows/kafka-run-class.bat
Executable file
191
bin/windows/kafka-run-class.bat
Executable file
@@ -0,0 +1,191 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
setlocal enabledelayedexpansion
|
||||
|
||||
IF [%1] EQU [] (
|
||||
echo USAGE: %0 classname [opts]
|
||||
EXIT /B 1
|
||||
)
|
||||
|
||||
rem Using pushd popd to set BASE_DIR to the absolute path
|
||||
pushd %~dp0..\..
|
||||
set BASE_DIR=%CD%
|
||||
popd
|
||||
|
||||
IF ["%SCALA_VERSION%"] EQU [""] (
|
||||
set SCALA_VERSION=2.12.10
|
||||
)
|
||||
|
||||
IF ["%SCALA_BINARY_VERSION%"] EQU [""] (
|
||||
for /f "tokens=1,2 delims=." %%a in ("%SCALA_VERSION%") do (
|
||||
set FIRST=%%a
|
||||
set SECOND=%%b
|
||||
if ["!SECOND!"] EQU [""] (
|
||||
set SCALA_BINARY_VERSION=!FIRST!
|
||||
) else (
|
||||
set SCALA_BINARY_VERSION=!FIRST!.!SECOND!
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
rem Classpath addition for kafka-core dependencies
|
||||
for %%i in ("%BASE_DIR%\core\build\dependant-libs-%SCALA_VERSION%\*.jar") do (
|
||||
call :concat "%%i"
|
||||
)
|
||||
|
||||
rem Classpath addition for kafka-examples
|
||||
for %%i in ("%BASE_DIR%\examples\build\libs\kafka-examples*.jar") do (
|
||||
call :concat "%%i"
|
||||
)
|
||||
|
||||
rem Classpath addition for kafka-clients
|
||||
for %%i in ("%BASE_DIR%\clients\build\libs\kafka-clients*.jar") do (
|
||||
call :concat "%%i"
|
||||
)
|
||||
|
||||
rem Classpath addition for kafka-streams
|
||||
for %%i in ("%BASE_DIR%\streams\build\libs\kafka-streams*.jar") do (
|
||||
call :concat "%%i"
|
||||
)
|
||||
|
||||
rem Classpath addition for kafka-streams-examples
|
||||
for %%i in ("%BASE_DIR%\streams\examples\build\libs\kafka-streams-examples*.jar") do (
|
||||
call :concat "%%i"
|
||||
)
|
||||
|
||||
for %%i in ("%BASE_DIR%\streams\build\dependant-libs-%SCALA_VERSION%\rocksdb*.jar") do (
|
||||
call :concat "%%i"
|
||||
)
|
||||
|
||||
rem Classpath addition for kafka tools
|
||||
for %%i in ("%BASE_DIR%\tools\build\libs\kafka-tools*.jar") do (
|
||||
call :concat "%%i"
|
||||
)
|
||||
|
||||
for %%i in ("%BASE_DIR%\tools\build\dependant-libs-%SCALA_VERSION%\*.jar") do (
|
||||
call :concat "%%i"
|
||||
)
|
||||
|
||||
for %%p in (api runtime file json tools) do (
|
||||
for %%i in ("%BASE_DIR%\connect\%%p\build\libs\connect-%%p*.jar") do (
|
||||
call :concat "%%i"
|
||||
)
|
||||
if exist "%BASE_DIR%\connect\%%p\build\dependant-libs\*" (
|
||||
call :concat "%BASE_DIR%\connect\%%p\build\dependant-libs\*"
|
||||
)
|
||||
)
|
||||
|
||||
rem Classpath addition for release
|
||||
for %%i in ("%BASE_DIR%\libs\*") do (
|
||||
call :concat "%%i"
|
||||
)
|
||||
|
||||
rem Classpath addition for core
|
||||
for %%i in ("%BASE_DIR%\core\build\libs\kafka_%SCALA_BINARY_VERSION%*.jar") do (
|
||||
call :concat "%%i"
|
||||
)
|
||||
|
||||
rem JMX settings
|
||||
IF ["%KAFKA_JMX_OPTS%"] EQU [""] (
|
||||
set KAFKA_JMX_OPTS=-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false
|
||||
)
|
||||
|
||||
rem JMX port to use
|
||||
IF ["%JMX_PORT%"] NEQ [""] (
|
||||
set KAFKA_JMX_OPTS=%KAFKA_JMX_OPTS% -Dcom.sun.management.jmxremote.port=%JMX_PORT%
|
||||
)
|
||||
|
||||
rem Log directory to use
|
||||
IF ["%LOG_DIR%"] EQU [""] (
|
||||
set LOG_DIR=%BASE_DIR%/logs
|
||||
)
|
||||
|
||||
rem Log4j settings
|
||||
IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
|
||||
set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%BASE_DIR%/config/tools-log4j.properties
|
||||
) ELSE (
|
||||
rem create logs directory
|
||||
IF not exist "%LOG_DIR%" (
|
||||
mkdir "%LOG_DIR%"
|
||||
)
|
||||
)
|
||||
|
||||
set KAFKA_LOG4J_OPTS=-Dkafka.logs.dir="%LOG_DIR%" "%KAFKA_LOG4J_OPTS%"
|
||||
|
||||
rem Generic jvm settings you want to add
|
||||
IF ["%KAFKA_OPTS%"] EQU [""] (
|
||||
set KAFKA_OPTS=
|
||||
)
|
||||
|
||||
set DEFAULT_JAVA_DEBUG_PORT=5005
|
||||
set DEFAULT_DEBUG_SUSPEND_FLAG=n
|
||||
rem Set Debug options if enabled
|
||||
IF ["%KAFKA_DEBUG%"] NEQ [""] (
|
||||
|
||||
|
||||
IF ["%JAVA_DEBUG_PORT%"] EQU [""] (
|
||||
set JAVA_DEBUG_PORT=%DEFAULT_JAVA_DEBUG_PORT%
|
||||
)
|
||||
|
||||
IF ["%DEBUG_SUSPEND_FLAG%"] EQU [""] (
|
||||
set DEBUG_SUSPEND_FLAG=%DEFAULT_DEBUG_SUSPEND_FLAG%
|
||||
)
|
||||
set DEFAULT_JAVA_DEBUG_OPTS=-agentlib:jdwp=transport=dt_socket,server=y,suspend=!DEBUG_SUSPEND_FLAG!,address=!JAVA_DEBUG_PORT!
|
||||
|
||||
IF ["%JAVA_DEBUG_OPTS%"] EQU [""] (
|
||||
set JAVA_DEBUG_OPTS=!DEFAULT_JAVA_DEBUG_OPTS!
|
||||
)
|
||||
|
||||
echo Enabling Java debug options: !JAVA_DEBUG_OPTS!
|
||||
set KAFKA_OPTS=!JAVA_DEBUG_OPTS! !KAFKA_OPTS!
|
||||
)
|
||||
|
||||
rem Which java to use
|
||||
IF ["%JAVA_HOME%"] EQU [""] (
|
||||
set JAVA=java
|
||||
) ELSE (
|
||||
set JAVA="%JAVA_HOME%/bin/java"
|
||||
)
|
||||
|
||||
rem Memory options
|
||||
IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
|
||||
set KAFKA_HEAP_OPTS=-Xmx256M
|
||||
)
|
||||
|
||||
rem JVM performance options
|
||||
IF ["%KAFKA_JVM_PERFORMANCE_OPTS%"] EQU [""] (
|
||||
set KAFKA_JVM_PERFORMANCE_OPTS=-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true
|
||||
)
|
||||
|
||||
IF not defined CLASSPATH (
|
||||
echo Classpath is empty. Please build the project first e.g. by running 'gradlew jarAll'
|
||||
EXIT /B 2
|
||||
)
|
||||
|
||||
set COMMAND=%JAVA% %KAFKA_HEAP_OPTS% %KAFKA_JVM_PERFORMANCE_OPTS% %KAFKA_JMX_OPTS% %KAFKA_LOG4J_OPTS% -cp "%CLASSPATH%" %KAFKA_OPTS% %*
|
||||
rem echo.
|
||||
rem echo %COMMAND%
|
||||
rem echo.
|
||||
%COMMAND%
|
||||
|
||||
goto :eof
|
||||
:concat
|
||||
IF not defined CLASSPATH (
|
||||
set CLASSPATH="%~1"
|
||||
) ELSE (
|
||||
set CLASSPATH=%CLASSPATH%;"%~1"
|
||||
)
|
||||
38
bin/windows/kafka-server-start.bat
Normal file
38
bin/windows/kafka-server-start.bat
Normal file
@@ -0,0 +1,38 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
IF [%1] EQU [] (
|
||||
echo USAGE: %0 server.properties
|
||||
EXIT /B 1
|
||||
)
|
||||
|
||||
SetLocal
|
||||
IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
|
||||
set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties
|
||||
)
|
||||
IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
|
||||
rem detect OS architecture
|
||||
wmic os get osarchitecture | find /i "32-bit" >nul 2>&1
|
||||
IF NOT ERRORLEVEL 1 (
|
||||
rem 32-bit OS
|
||||
set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M
|
||||
) ELSE (
|
||||
rem 64-bit OS
|
||||
set KAFKA_HEAP_OPTS=-Xmx1G -Xms1G
|
||||
)
|
||||
)
|
||||
"%~dp0kafka-run-class.bat" kafka.Kafka %*
|
||||
EndLocal
|
||||
18
bin/windows/kafka-server-stop.bat
Normal file
18
bin/windows/kafka-server-stop.bat
Normal file
@@ -0,0 +1,18 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
wmic process where (commandline like "%%kafka.Kafka%%" and not name="wmic.exe") delete
|
||||
rem ps ax | grep -i 'kafka.Kafka' | grep -v grep | awk '{print $1}' | xargs kill -SIGTERM
|
||||
23
bin/windows/kafka-streams-application-reset.bat
Normal file
23
bin/windows/kafka-streams-application-reset.bat
Normal file
@@ -0,0 +1,23 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
SetLocal
|
||||
IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
|
||||
set KAFKA_HEAP_OPTS=-Xmx512M
|
||||
)
|
||||
|
||||
"%~dp0kafka-run-class.bat" kafka.tools.StreamsResetter %*
|
||||
EndLocal
|
||||
17
bin/windows/kafka-topics.bat
Normal file
17
bin/windows/kafka-topics.bat
Normal file
@@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
"%~dp0kafka-run-class.bat" kafka.admin.TopicCommand %*
|
||||
30
bin/windows/zookeeper-server-start.bat
Normal file
30
bin/windows/zookeeper-server-start.bat
Normal file
@@ -0,0 +1,30 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
IF [%1] EQU [] (
|
||||
echo USAGE: %0 zookeeper.properties
|
||||
EXIT /B 1
|
||||
)
|
||||
|
||||
SetLocal
|
||||
IF ["%KAFKA_LOG4J_OPTS%"] EQU [""] (
|
||||
set KAFKA_LOG4J_OPTS=-Dlog4j.configuration=file:%~dp0../../config/log4j.properties
|
||||
)
|
||||
IF ["%KAFKA_HEAP_OPTS%"] EQU [""] (
|
||||
set KAFKA_HEAP_OPTS=-Xmx512M -Xms512M
|
||||
)
|
||||
"%~dp0kafka-run-class.bat" org.apache.zookeeper.server.quorum.QuorumPeerMain %*
|
||||
EndLocal
|
||||
17
bin/windows/zookeeper-server-stop.bat
Normal file
17
bin/windows/zookeeper-server-stop.bat
Normal file
@@ -0,0 +1,17 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
wmic process where (commandline like "%%zookeeper%%" and not name="wmic.exe") delete
|
||||
22
bin/windows/zookeeper-shell.bat
Normal file
22
bin/windows/zookeeper-shell.bat
Normal file
@@ -0,0 +1,22 @@
|
||||
@echo off
|
||||
rem Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
rem contributor license agreements. See the NOTICE file distributed with
|
||||
rem this work for additional information regarding copyright ownership.
|
||||
rem The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
rem (the "License"); you may not use this file except in compliance with
|
||||
rem the License. You may obtain a copy of the License at
|
||||
rem
|
||||
rem http://www.apache.org/licenses/LICENSE-2.0
|
||||
rem
|
||||
rem Unless required by applicable law or agreed to in writing, software
|
||||
rem distributed under the License is distributed on an "AS IS" BASIS,
|
||||
rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
rem See the License for the specific language governing permissions and
|
||||
rem limitations under the License.
|
||||
|
||||
IF [%1] EQU [] (
|
||||
echo USAGE: %0 zookeeper_host:port[/path] [-zk-tls-config-file file] [args...]
|
||||
EXIT /B 1
|
||||
)
|
||||
|
||||
"%~dp0kafka-run-class.bat" org.apache.zookeeper.ZooKeeperMainWithTlsSupportForKafka -server %*
|
||||
17
bin/zookeeper-security-migration.sh
Executable file
17
bin/zookeeper-security-migration.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh kafka.admin.ZkSecurityMigrator "$@"
|
||||
44
bin/zookeeper-server-start.sh
Executable file
44
bin/zookeeper-server-start.sh
Executable file
@@ -0,0 +1,44 @@
|
||||
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [ $# -lt 1 ];
|
||||
then
|
||||
echo "USAGE: $0 [-daemon] zookeeper.properties"
|
||||
exit 1
|
||||
fi
|
||||
base_dir=$(dirname $0)
|
||||
|
||||
if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then
|
||||
export KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties"
|
||||
fi
|
||||
|
||||
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
|
||||
export KAFKA_HEAP_OPTS="-Xmx512M -Xms512M"
|
||||
fi
|
||||
|
||||
EXTRA_ARGS=${EXTRA_ARGS-'-name zookeeper -loggc'}
|
||||
|
||||
COMMAND=$1
|
||||
case $COMMAND in
|
||||
-daemon)
|
||||
EXTRA_ARGS="-daemon "$EXTRA_ARGS
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
|
||||
exec $base_dir/kafka-run-class.sh $EXTRA_ARGS org.apache.zookeeper.server.quorum.QuorumPeerMain "$@"
|
||||
24
bin/zookeeper-server-stop.sh
Executable file
24
bin/zookeeper-server-stop.sh
Executable file
@@ -0,0 +1,24 @@
|
||||
#!/bin/sh
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
SIGNAL=${SIGNAL:-TERM}
|
||||
PIDS=$(ps ax | grep java | grep -i QuorumPeerMain | grep -v grep | awk '{print $1}')
|
||||
|
||||
if [ -z "$PIDS" ]; then
|
||||
echo "No zookeeper server to stop"
|
||||
exit 1
|
||||
else
|
||||
kill -s $SIGNAL $PIDS
|
||||
fi
|
||||
23
bin/zookeeper-shell.sh
Executable file
23
bin/zookeeper-shell.sh
Executable file
@@ -0,0 +1,23 @@
|
||||
#!/bin/sh
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [ $# -lt 1 ];
|
||||
then
|
||||
echo "USAGE: $0 zookeeper_host:port[/path] [-zk-tls-config-file file] [args...]"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
exec $(dirname $0)/kafka-run-class.sh org.apache.zookeeper.ZooKeeperMainWithTlsSupportForKafka -server "$@"
|
||||
1201
build.gradle
Normal file
1201
build.gradle
Normal file
File diff suppressed because it is too large
Load Diff
20
checkstyle/.scalafmt.conf
Normal file
20
checkstyle/.scalafmt.conf
Normal file
@@ -0,0 +1,20 @@
|
||||
// Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
// contributor license agreements. See the NOTICE file distributed with
|
||||
// this work for additional information regarding copyright ownership.
|
||||
// The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance with
|
||||
// the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
docstrings = JavaDoc
|
||||
maxColumn = 120
|
||||
continuationIndent.defnSite = 2
|
||||
assumeStandardLibraryStripMargin = true
|
||||
danglingParentheses = true
|
||||
rewrite.rules = [SortImports, RedundantBraces, RedundantParens, SortModifiers]
|
||||
142
checkstyle/checkstyle.xml
Normal file
142
checkstyle/checkstyle.xml
Normal file
@@ -0,0 +1,142 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE module PUBLIC
|
||||
"-//Puppy Crawl//DTD Check Configuration 1.3//EN"
|
||||
"http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
|
||||
<!--
|
||||
// Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
// contributor license agreements. See the NOTICE file distributed with
|
||||
// this work for additional information regarding copyright ownership.
|
||||
// The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance with
|
||||
// the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
-->
|
||||
<module name="Checker">
|
||||
<property name="localeLanguage" value="en"/>
|
||||
|
||||
<module name="FileTabCharacter"/>
|
||||
|
||||
<!-- header -->
|
||||
<module name="Header">
|
||||
<property name="headerFile" value="${headerFile}" />
|
||||
</module>
|
||||
|
||||
<module name="TreeWalker">
|
||||
|
||||
<!-- code cleanup -->
|
||||
<module name="UnusedImports">
|
||||
<property name="processJavadoc" value="true" />
|
||||
</module>
|
||||
<module name="RedundantImport"/>
|
||||
<module name="IllegalImport" />
|
||||
<module name="EqualsHashCode"/>
|
||||
<module name="SimplifyBooleanExpression"/>
|
||||
<module name="OneStatementPerLine"/>
|
||||
<module name="UnnecessaryParentheses" />
|
||||
<module name="SimplifyBooleanReturn"/>
|
||||
|
||||
<!-- style -->
|
||||
<module name="DefaultComesLast"/>
|
||||
<module name="EmptyStatement"/>
|
||||
<module name="ArrayTypeStyle"/>
|
||||
<module name="UpperEll"/>
|
||||
<module name="LeftCurly"/>
|
||||
<module name="RightCurly"/>
|
||||
<module name="EmptyStatement"/>
|
||||
<module name="ConstantName">
|
||||
<property name="format" value="(^[A-Z][A-Z0-9]*(_[A-Z0-9]+)*$)|(^log$)"/>
|
||||
</module>
|
||||
<module name="LocalVariableName"/>
|
||||
<module name="LocalFinalVariableName"/>
|
||||
<module name="MemberName"/>
|
||||
<module name="ClassTypeParameterName">
|
||||
<property name="format" value="^[A-Z][a-zA-Z0-9]*$$"/>
|
||||
</module>
|
||||
<module name="MethodTypeParameterName">
|
||||
<property name="format" value="^[A-Z][a-zA-Z0-9]*$$"/>
|
||||
</module>
|
||||
<module name="InterfaceTypeParameterName">
|
||||
<property name="format" value="^[A-Z][a-zA-Z0-9]*$$"/>
|
||||
</module>
|
||||
<module name="PackageName"/>
|
||||
<module name="ParameterName"/>
|
||||
<module name="StaticVariableName"/>
|
||||
<module name="TypeName"/>
|
||||
<module name="AvoidStarImport"/>
|
||||
|
||||
<!-- variables that can be final should be final (suppressed except for Streams) -->
|
||||
<module name="FinalLocalVariable">
|
||||
<property name="tokens" value="VARIABLE_DEF,PARAMETER_DEF"/>
|
||||
<property name="validateEnhancedForLoopVariable" value="true"/>
|
||||
</module>
|
||||
|
||||
<!-- dependencies -->
|
||||
<module name="ImportControl">
|
||||
<property name="file" value="${importControlFile}"/>
|
||||
</module>
|
||||
|
||||
<!-- whitespace -->
|
||||
<module name="GenericWhitespace"/>
|
||||
<module name="NoWhitespaceBefore"/>
|
||||
<module name="WhitespaceAfter" />
|
||||
<module name="NoWhitespaceAfter"/>
|
||||
<module name="WhitespaceAround">
|
||||
<property name="allowEmptyConstructors" value="true"/>
|
||||
<property name="allowEmptyMethods" value="true"/>
|
||||
</module>
|
||||
<module name="Indentation"/>
|
||||
<module name="MethodParamPad"/>
|
||||
<module name="ParenPad"/>
|
||||
<module name="TypecastParenPad"/>
|
||||
|
||||
<!-- locale-sensitive methods should specify locale -->
|
||||
<module name="Regexp">
|
||||
<property name="format" value="\.to(Lower|Upper)Case\(\)"/>
|
||||
<property name="illegalPattern" value="true"/>
|
||||
<property name="ignoreComments" value="true"/>
|
||||
</module>
|
||||
|
||||
<!-- code quality -->
|
||||
<module name="MethodLength"/>
|
||||
<module name="ParameterNumber">
|
||||
<!-- default is 8 -->
|
||||
<property name="max" value="13"/>
|
||||
</module>
|
||||
<module name="ClassDataAbstractionCoupling">
|
||||
<!-- default is 7 -->
|
||||
<property name="max" value="25"/>
|
||||
</module>
|
||||
<module name="BooleanExpressionComplexity">
|
||||
<!-- default is 3 -->
|
||||
<property name="max" value="5"/>
|
||||
</module>
|
||||
|
||||
<module name="ClassFanOutComplexity">
|
||||
<!-- default is 20 -->
|
||||
<property name="max" value="50"/>
|
||||
</module>
|
||||
<module name="CyclomaticComplexity">
|
||||
<!-- default is 10-->
|
||||
<property name="max" value="16"/>
|
||||
</module>
|
||||
<module name="JavaNCSS">
|
||||
<!-- default is 50 -->
|
||||
<property name="methodMaximum" value="100"/>
|
||||
</module>
|
||||
<module name="NPathComplexity">
|
||||
<!-- default is 200 -->
|
||||
<property name="max" value="500"/>
|
||||
</module>
|
||||
</module>
|
||||
|
||||
<module name="SuppressionFilter">
|
||||
<property name="file" value="${suppressionsFile}"/>
|
||||
</module>
|
||||
</module>
|
||||
57
checkstyle/import-control-core.xml
Normal file
57
checkstyle/import-control-core.xml
Normal file
@@ -0,0 +1,57 @@
|
||||
<!DOCTYPE import-control PUBLIC
|
||||
"-//Puppy Crawl//DTD Import Control 1.1//EN"
|
||||
"http://www.puppycrawl.com/dtds/import_control_1_1.dtd">
|
||||
<!--
|
||||
// Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
// contributor license agreements. See the NOTICE file distributed with
|
||||
// this work for additional information regarding copyright ownership.
|
||||
// The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance with
|
||||
// the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
-->
|
||||
|
||||
<import-control pkg="kafka">
|
||||
|
||||
<!-- THINK HARD ABOUT THE LAYERING OF THE PROJECT BEFORE CHANGING THIS FILE -->
|
||||
|
||||
<!-- common library dependencies -->
|
||||
<allow pkg="java" />
|
||||
<allow pkg="scala" />
|
||||
<allow pkg="javax.management" />
|
||||
<allow pkg="org.slf4j" />
|
||||
<allow pkg="org.junit" />
|
||||
<allow pkg="org.easymock" />
|
||||
<allow pkg="java.security" />
|
||||
<allow pkg="javax.net.ssl" />
|
||||
<allow pkg="javax.security" />
|
||||
<allow pkg="com.didichuxing.datachannel.kafka" />
|
||||
|
||||
<allow pkg="kafka.common" />
|
||||
<allow pkg="kafka.utils" />
|
||||
<allow pkg="kafka.serializer" />
|
||||
<allow pkg="org.apache.kafka.common" />
|
||||
|
||||
<subpackage name="tools">
|
||||
<allow pkg="org.apache.kafka.clients.admin" />
|
||||
<allow pkg="kafka.admin" />
|
||||
<allow pkg="joptsimple" />
|
||||
<allow pkg="org.apache.kafka.clients.consumer" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="coordinator">
|
||||
<allow class="kafka.server.MetadataCache" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="examples">
|
||||
<allow pkg="org.apache.kafka.clients" />
|
||||
</subpackage>
|
||||
|
||||
</import-control>
|
||||
47
checkstyle/import-control-jmh-benchmarks.xml
Normal file
47
checkstyle/import-control-jmh-benchmarks.xml
Normal file
@@ -0,0 +1,47 @@
|
||||
<!DOCTYPE import-control PUBLIC
|
||||
"-//Puppy Crawl//DTD Import Control 1.1//EN"
|
||||
"http://www.puppycrawl.com/dtds/import_control_1_1.dtd">
|
||||
<!--
|
||||
// Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
// contributor license agreements. See the NOTICE file distributed with
|
||||
// this work for additional information regarding copyright ownership.
|
||||
// The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance with
|
||||
// the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
-->
|
||||
|
||||
<import-control pkg="org.apache.kafka.jmh">
|
||||
|
||||
<allow pkg="java"/>
|
||||
<allow pkg="scala"/>
|
||||
<allow pkg="javax.management"/>
|
||||
<allow pkg="org.slf4j"/>
|
||||
<allow pkg="org.openjdk.jmh.annotations"/>
|
||||
<allow pkg="org.openjdk.jmh.runner"/>
|
||||
<allow pkg="org.openjdk.jmh.infra"/>
|
||||
<allow pkg="java.security"/>
|
||||
<allow pkg="javax.net.ssl"/>
|
||||
<allow pkg="javax.security"/>
|
||||
<allow pkg="org.apache.kafka.common"/>
|
||||
<allow pkg="org.apache.kafka.clients.producer"/>
|
||||
<allow pkg="kafka.cluster"/>
|
||||
<allow pkg="kafka.log"/>
|
||||
<allow pkg="kafka.server"/>
|
||||
<allow pkg="kafka.api"/>
|
||||
<allow class="kafka.utils.Pool"/>
|
||||
<allow class="kafka.utils.KafkaScheduler"/>
|
||||
<allow class="org.apache.kafka.clients.FetchSessionHandler"/>
|
||||
<allow pkg="org.mockito"/>
|
||||
|
||||
|
||||
<subpackage name="cache">
|
||||
</subpackage>
|
||||
</import-control>
|
||||
457
checkstyle/import-control.xml
Normal file
457
checkstyle/import-control.xml
Normal file
@@ -0,0 +1,457 @@
|
||||
<!DOCTYPE import-control PUBLIC
|
||||
"-//Puppy Crawl//DTD Import Control 1.1//EN"
|
||||
"http://www.puppycrawl.com/dtds/import_control_1_1.dtd">
|
||||
<!--
|
||||
// Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
// contributor license agreements. See the NOTICE file distributed with
|
||||
// this work for additional information regarding copyright ownership.
|
||||
// The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
// (the "License"); you may not use this file except in compliance with
|
||||
// the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
-->
|
||||
|
||||
<import-control pkg="org.apache.kafka">
|
||||
|
||||
<!-- THINK HARD ABOUT THE LAYERING OF THE PROJECT BEFORE CHANGING THIS FILE -->
|
||||
|
||||
<!-- common library dependencies -->
|
||||
<allow pkg="java" />
|
||||
<allow pkg="javax.management" />
|
||||
<allow pkg="org.slf4j" />
|
||||
<allow pkg="org.junit" />
|
||||
<allow pkg="org.hamcrest" />
|
||||
<allow pkg="org.mockito" />
|
||||
<allow pkg="org.easymock" />
|
||||
<allow pkg="org.powermock" />
|
||||
<allow pkg="java.security" />
|
||||
<allow pkg="javax.net.ssl" />
|
||||
<allow pkg="javax.security" />
|
||||
<allow pkg="org.ietf.jgss" />
|
||||
|
||||
<!-- no one depends on the server -->
|
||||
<disallow pkg="kafka" />
|
||||
|
||||
<!-- anyone can use public classes -->
|
||||
<allow pkg="org.apache.kafka.common" exact-match="true" />
|
||||
<allow pkg="org.apache.kafka.common.security" />
|
||||
<allow pkg="org.apache.kafka.common.serialization" />
|
||||
<allow pkg="org.apache.kafka.common.utils" />
|
||||
<allow pkg="org.apache.kafka.common.errors" exact-match="true" />
|
||||
<allow pkg="org.apache.kafka.common.memory" />
|
||||
|
||||
<subpackage name="common">
|
||||
<disallow pkg="org.apache.kafka.clients" />
|
||||
<allow pkg="org.apache.kafka.common" exact-match="true" />
|
||||
<allow pkg="org.apache.kafka.common.annotation" />
|
||||
<allow pkg="org.apache.kafka.common.config" exact-match="true" />
|
||||
<allow pkg="org.apache.kafka.common.internals" exact-match="true" />
|
||||
<allow pkg="org.apache.kafka.test" />
|
||||
|
||||
<subpackage name="acl">
|
||||
<allow pkg="org.apache.kafka.common.annotation" />
|
||||
<allow pkg="org.apache.kafka.common.acl" />
|
||||
<allow pkg="org.apache.kafka.common.resource" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="config">
|
||||
<allow pkg="org.apache.kafka.common.config" />
|
||||
<!-- for testing -->
|
||||
<allow pkg="org.apache.kafka.common.metrics" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="message">
|
||||
<allow pkg="com.fasterxml.jackson" />
|
||||
<allow pkg="org.apache.kafka.common.protocol" />
|
||||
<allow pkg="org.apache.kafka.common.protocol.types" />
|
||||
<allow pkg="org.apache.kafka.common.message" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="metrics">
|
||||
<allow pkg="org.apache.kafka.common.metrics" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="memory">
|
||||
<allow pkg="org.apache.kafka.common.metrics" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="network">
|
||||
<allow pkg="org.apache.kafka.common.security.auth" />
|
||||
<allow pkg="org.apache.kafka.common.protocol" />
|
||||
<allow pkg="org.apache.kafka.common.config" />
|
||||
<allow pkg="org.apache.kafka.common.metrics" />
|
||||
<allow pkg="org.apache.kafka.common.security" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="resource">
|
||||
<allow pkg="org.apache.kafka.common.annotation" />
|
||||
<allow pkg="org.apache.kafka.common.resource" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="security">
|
||||
<allow pkg="org.apache.kafka.common.annotation" />
|
||||
<allow pkg="org.apache.kafka.common.network" />
|
||||
<allow pkg="org.apache.kafka.common.config" />
|
||||
<allow pkg="org.apache.kafka.common.protocol" />
|
||||
<allow pkg="org.apache.kafka.common.errors" />
|
||||
<subpackage name="authenticator">
|
||||
<allow pkg="org.apache.kafka.common.message" />
|
||||
<allow pkg="org.apache.kafka.common.protocol.types" />
|
||||
<allow pkg="org.apache.kafka.common.requests" />
|
||||
<allow pkg="org.apache.kafka.clients" />
|
||||
</subpackage>
|
||||
<subpackage name="scram">
|
||||
<allow pkg="javax.crypto" />
|
||||
</subpackage>
|
||||
<subpackage name="oauthbearer">
|
||||
<allow pkg="com.fasterxml.jackson.databind" />
|
||||
</subpackage>
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="protocol">
|
||||
<allow pkg="org.apache.kafka.common.errors" />
|
||||
<allow pkg="org.apache.kafka.common.message" />
|
||||
<allow pkg="org.apache.kafka.common.protocol" />
|
||||
<allow pkg="org.apache.kafka.common.protocol.types" />
|
||||
<allow pkg="org.apache.kafka.common.record" />
|
||||
<allow pkg="org.apache.kafka.common.requests" />
|
||||
<allow pkg="org.apache.kafka.common.resource" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="record">
|
||||
<allow pkg="net.jpountz" />
|
||||
<allow pkg="org.apache.kafka.common.header" />
|
||||
<allow pkg="org.apache.kafka.common.record" />
|
||||
<allow pkg="org.apache.kafka.common.network" />
|
||||
<allow pkg="org.apache.kafka.common.protocol" />
|
||||
<allow pkg="org.apache.kafka.common.protocol.types" />
|
||||
<allow pkg="org.apache.kafka.common.errors" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="header">
|
||||
<allow pkg="org.apache.kafka.common.header" />
|
||||
<allow pkg="org.apache.kafka.common.record" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="requests">
|
||||
<allow pkg="org.apache.kafka.common.acl" />
|
||||
<allow pkg="org.apache.kafka.common.protocol" />
|
||||
<allow pkg="org.apache.kafka.common.message" />
|
||||
<allow pkg="org.apache.kafka.common.network" />
|
||||
<allow pkg="org.apache.kafka.common.requests" />
|
||||
<allow pkg="org.apache.kafka.common.resource" />
|
||||
<allow pkg="org.apache.kafka.common.record" />
|
||||
<!-- for AuthorizableRequestContext interface -->
|
||||
<allow pkg="org.apache.kafka.server.authorizer" />
|
||||
<!-- for testing -->
|
||||
<allow pkg="org.apache.kafka.common.errors" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="serialization">
|
||||
<allow class="org.apache.kafka.common.errors.SerializationException" />
|
||||
<allow class="org.apache.kafka.common.header.Headers" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="utils">
|
||||
<allow pkg="org.apache.kafka.common" />
|
||||
</subpackage>
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="clients">
|
||||
<allow pkg="org.slf4j" />
|
||||
<allow pkg="org.apache.kafka.common" />
|
||||
<allow pkg="org.apache.kafka.clients" exact-match="true"/>
|
||||
<allow pkg="org.apache.kafka.test" />
|
||||
|
||||
<subpackage name="consumer">
|
||||
<allow pkg="org.apache.kafka.clients.consumer" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="producer">
|
||||
<allow pkg="org.apache.kafka.clients.consumer" />
|
||||
<allow pkg="org.apache.kafka.clients.producer" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="admin">
|
||||
<allow pkg="org.apache.kafka.clients.admin" />
|
||||
<allow pkg="org.apache.kafka.clients.consumer.internals" />
|
||||
<allow pkg="org.apache.kafka.clients.consumer" />
|
||||
</subpackage>
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="server">
|
||||
<allow pkg="org.slf4j" />
|
||||
<allow pkg="org.apache.kafka.common" />
|
||||
<allow pkg="org.apache.kafka.test" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="tools">
|
||||
<allow pkg="org.apache.kafka.common"/>
|
||||
<allow pkg="org.apache.kafka.clients.admin" />
|
||||
<allow pkg="org.apache.kafka.clients.producer" />
|
||||
<allow pkg="org.apache.kafka.clients.consumer" />
|
||||
<allow pkg="com.fasterxml.jackson" />
|
||||
<allow pkg="net.sourceforge.argparse4j" />
|
||||
<allow pkg="org.apache.log4j" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="trogdor">
|
||||
<allow pkg="com.fasterxml.jackson" />
|
||||
<allow pkg="javax.servlet" />
|
||||
<allow pkg="javax.ws.rs" />
|
||||
<allow pkg="net.sourceforge.argparse4j" />
|
||||
<allow pkg="org.apache.kafka.clients" />
|
||||
<allow pkg="org.apache.kafka.clients.admin" />
|
||||
<allow pkg="org.apache.kafka.clients.consumer" exact-match="true"/>
|
||||
<allow pkg="org.apache.kafka.clients.producer" exact-match="true"/>
|
||||
<allow pkg="org.apache.kafka.common" />
|
||||
<allow pkg="org.apache.kafka.test"/>
|
||||
<allow pkg="org.apache.kafka.trogdor" />
|
||||
<allow pkg="org.apache.log4j" />
|
||||
<allow pkg="org.eclipse.jetty" />
|
||||
<allow pkg="org.glassfish.jersey" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="message">
|
||||
<allow pkg="com.fasterxml.jackson" />
|
||||
<allow pkg="com.fasterxml.jackson.annotation" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="streams">
|
||||
<allow pkg="org.apache.kafka.common"/>
|
||||
<allow pkg="org.apache.kafka.test"/>
|
||||
<allow pkg="org.apache.kafka.clients"/>
|
||||
<allow pkg="org.apache.kafka.clients.producer" exact-match="true"/>
|
||||
<allow pkg="org.apache.kafka.clients.consumer" exact-match="true"/>
|
||||
|
||||
<allow pkg="org.apache.kafka.streams"/>
|
||||
|
||||
<subpackage name="examples">
|
||||
<allow pkg="com.fasterxml.jackson" />
|
||||
<allow pkg="org.apache.kafka.connect.json" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="perf">
|
||||
<allow pkg="com.fasterxml.jackson.databind" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="integration">
|
||||
<allow pkg="kafka.admin" />
|
||||
<allow pkg="kafka.api" />
|
||||
<allow pkg="kafka.server" />
|
||||
<allow pkg="kafka.tools" />
|
||||
<allow pkg="kafka.utils" />
|
||||
<allow pkg="kafka.log" />
|
||||
<allow pkg="scala" />
|
||||
<allow class="kafka.zk.EmbeddedZookeeper"/>
|
||||
<allow pkg="com.fasterxml.jackson" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="test">
|
||||
<allow pkg="kafka.admin" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="tools">
|
||||
<allow pkg="kafka.tools" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="state">
|
||||
<allow pkg="org.rocksdb" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="processor">
|
||||
<subpackage name="internals">
|
||||
<allow pkg="com.fasterxml.jackson" />
|
||||
<allow pkg="kafka.utils" />
|
||||
<allow pkg="org.apache.zookeeper" />
|
||||
<allow pkg="org.apache.zookeeper" />
|
||||
<allow pkg="org.apache.log4j" />
|
||||
<subpackage name="testutil">
|
||||
<allow pkg="org.apache.log4j" />
|
||||
</subpackage>
|
||||
</subpackage>
|
||||
</subpackage>
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="jmh">
|
||||
<allow pkg="org.openjdk.jmh.annotations" />
|
||||
<allow pkg="org.openjdk.jmh.runner" />
|
||||
<allow pkg="org.openjdk.jmh.runner.options" />
|
||||
<allow pkg="org.openjdk.jmh.infra" />
|
||||
<allow pkg="org.apache.kafka.common" />
|
||||
<allow pkg="org.apache.kafka.clients" />
|
||||
<allow pkg="org.apache.kafka.streams" />
|
||||
<allow pkg="org.github.jamm" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="log4jappender">
|
||||
<allow pkg="org.apache.log4j" />
|
||||
<allow pkg="org.apache.kafka.clients" />
|
||||
<allow pkg="org.apache.kafka.common" />
|
||||
<allow pkg="org.apache.kafka.test" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="test">
|
||||
<allow pkg="org.apache.kafka" />
|
||||
<allow pkg="org.bouncycastle" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="connect">
|
||||
<allow pkg="org.apache.kafka.common" />
|
||||
<allow pkg="org.apache.kafka.connect.data" />
|
||||
<allow pkg="org.apache.kafka.connect.errors" />
|
||||
<allow pkg="org.apache.kafka.connect.header" />
|
||||
<allow pkg="org.apache.kafka.connect.components"/>
|
||||
<allow pkg="org.apache.kafka.clients" />
|
||||
<allow pkg="org.apache.kafka.test"/>
|
||||
|
||||
<subpackage name="source">
|
||||
<allow pkg="org.apache.kafka.connect.connector" />
|
||||
<allow pkg="org.apache.kafka.connect.storage" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="sink">
|
||||
<allow pkg="org.apache.kafka.clients.consumer" />
|
||||
<allow pkg="org.apache.kafka.connect.connector" />
|
||||
<allow pkg="org.apache.kafka.connect.storage" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="converters">
|
||||
<allow pkg="org.apache.kafka.connect.storage" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="connector.policy">
|
||||
<allow pkg="org.apache.kafka.connect.health" />
|
||||
<allow pkg="org.apache.kafka.connect.connector" />
|
||||
<!-- for testing -->
|
||||
<allow pkg="org.apache.kafka.connect.runtime" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="rest">
|
||||
<allow pkg="org.apache.kafka.connect.health" />
|
||||
<allow pkg="javax.ws.rs" />
|
||||
<allow pkg= "javax.security.auth"/>
|
||||
<subpackage name="basic">
|
||||
<allow pkg="org.apache.kafka.connect.rest"/>
|
||||
</subpackage>
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="mirror">
|
||||
<allow pkg="org.apache.kafka.clients.consumer" />
|
||||
<allow pkg="org.apache.kafka.connect.source" />
|
||||
<allow pkg="org.apache.kafka.connect.sink" />
|
||||
<allow pkg="org.apache.kafka.connect.storage" />
|
||||
<allow pkg="org.apache.kafka.connect.connector" />
|
||||
<allow pkg="org.apache.kafka.connect.runtime" />
|
||||
<allow pkg="org.apache.kafka.connect.runtime.distributed" />
|
||||
<allow pkg="org.apache.kafka.connect.util" />
|
||||
<allow pkg="org.apache.kafka.connect.converters" />
|
||||
<allow pkg="net.sourceforge.argparse4j" />
|
||||
<!-- for tests -->
|
||||
<allow pkg="org.apache.kafka.connect.integration" />
|
||||
<allow pkg="org.apache.kafka.connect.mirror" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="runtime">
|
||||
<allow pkg="org.apache.kafka.connect" />
|
||||
<allow pkg="org.reflections"/>
|
||||
<allow pkg="org.reflections.util"/>
|
||||
<allow pkg="javax.crypto"/>
|
||||
|
||||
<subpackage name="rest">
|
||||
<allow pkg="org.eclipse.jetty" />
|
||||
<allow pkg="javax.ws.rs" />
|
||||
<allow pkg="javax.servlet" />
|
||||
<allow pkg="org.glassfish.jersey" />
|
||||
<allow pkg="com.fasterxml.jackson" />
|
||||
<allow pkg="org.apache.http"/>
|
||||
<subpackage name="resources">
|
||||
<allow pkg="org.apache.log4j" />
|
||||
</subpackage>
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="isolation">
|
||||
<allow pkg="com.fasterxml.jackson" />
|
||||
<allow pkg="org.apache.maven.artifact.versioning" />
|
||||
<allow pkg="javax.tools" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="distributed">
|
||||
<allow pkg="javax.ws.rs.core" />
|
||||
</subpackage>
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="cli">
|
||||
<allow pkg="org.apache.kafka.connect.runtime" />
|
||||
<allow pkg="org.apache.kafka.connect.storage" />
|
||||
<allow pkg="org.apache.kafka.connect.util" />
|
||||
<allow pkg="org.apache.kafka.common" />
|
||||
<allow pkg="org.apache.kafka.connect.connector.policy" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="storage">
|
||||
<allow pkg="org.apache.kafka.connect" />
|
||||
<allow pkg="org.apache.kafka.common.serialization" />
|
||||
<allow pkg="javax.crypto.spec"/>
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="util">
|
||||
<allow pkg="org.apache.kafka.connect" />
|
||||
<allow pkg="org.reflections.vfs" />
|
||||
<!-- for annotations to avoid code duplication -->
|
||||
<allow pkg="com.fasterxml.jackson.annotation" />
|
||||
<allow pkg="com.fasterxml.jackson.databind" />
|
||||
<subpackage name="clusters">
|
||||
<allow pkg="kafka.server" />
|
||||
<allow pkg="kafka.zk" />
|
||||
<allow pkg="kafka.utils" />
|
||||
<allow class="javax.servlet.http.HttpServletResponse" />
|
||||
<allow class="javax.ws.rs.core.Response" />
|
||||
<allow pkg="com.fasterxml.jackson.core.type" />
|
||||
</subpackage>
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="integration">
|
||||
<allow pkg="org.apache.kafka.connect.util.clusters" />
|
||||
<allow pkg="org.apache.kafka.connect" />
|
||||
<allow pkg="org.apache.kafka.tools" />
|
||||
<allow pkg="javax.ws.rs" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="json">
|
||||
<allow pkg="com.fasterxml.jackson" />
|
||||
<allow pkg="org.apache.kafka.common.serialization" />
|
||||
<allow pkg="org.apache.kafka.common.errors" />
|
||||
<allow pkg="org.apache.kafka.connect.storage" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="file">
|
||||
<allow pkg="org.apache.kafka.connect" />
|
||||
<allow pkg="org.apache.kafka.clients.consumer" />
|
||||
<!-- for tests -->
|
||||
<allow pkg="org.easymock" />
|
||||
<allow pkg="org.powermock" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="tools">
|
||||
<allow pkg="org.apache.kafka.connect" />
|
||||
<allow pkg="org.apache.kafka.tools" />
|
||||
<allow pkg="com.fasterxml.jackson" />
|
||||
</subpackage>
|
||||
|
||||
<subpackage name="transforms">
|
||||
<allow class="org.apache.kafka.connect.connector.ConnectRecord" />
|
||||
<allow class="org.apache.kafka.connect.source.SourceRecord" />
|
||||
<allow class="org.apache.kafka.connect.sink.SinkRecord" />
|
||||
<allow pkg="org.apache.kafka.connect.transforms.util" />
|
||||
</subpackage>
|
||||
</subpackage>
|
||||
|
||||
</import-control>
|
||||
16
checkstyle/java.header
Normal file
16
checkstyle/java.header
Normal file
@@ -0,0 +1,16 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
265
checkstyle/suppressions.xml
Normal file
265
checkstyle/suppressions.xml
Normal file
@@ -0,0 +1,265 @@
|
||||
|
||||
|
||||
<!DOCTYPE suppressions PUBLIC
|
||||
"-//Puppy Crawl//DTD Suppressions 1.1//EN"
|
||||
"http://www.puppycrawl.com/dtds/suppressions_1_1.dtd">
|
||||
|
||||
<suppressions>
|
||||
|
||||
<!-- Note that [/\\] must be used as the path separator for cross-platform support -->
|
||||
|
||||
<!-- Generator -->
|
||||
<suppress checks="CyclomaticComplexity|BooleanExpressionComplexity"
|
||||
files="(SchemaGenerator|MessageDataGenerator|FieldSpec).java"/>
|
||||
<suppress checks="NPathComplexity"
|
||||
files="(MessageDataGenerator|FieldSpec).java"/>
|
||||
<suppress checks="JavaNCSS"
|
||||
files="(ApiMessageType).java|MessageDataGenerator.java"/>
|
||||
<suppress checks="MethodLength"
|
||||
files="MessageDataGenerator.java"/>
|
||||
|
||||
<!-- Clients -->
|
||||
<suppress checks="ClassFanOutComplexity"
|
||||
files="(Fetcher|Sender|SenderTest|ConsumerCoordinator|KafkaConsumer|KafkaProducer|Utils|TransactionManager|TransactionManagerTest|KafkaAdminClient|NetworkClient|Admin).java"/>
|
||||
<suppress checks="ClassFanOutComplexity"
|
||||
files="(SaslServerAuthenticator|SaslAuthenticatorTest).java"/>
|
||||
<suppress checks="ClassFanOutComplexity"
|
||||
files="Errors.java"/>
|
||||
<suppress checks="ClassFanOutComplexity"
|
||||
files="Utils.java"/>
|
||||
<suppress checks="ClassFanOutComplexity"
|
||||
files="AbstractRequest.java"/>
|
||||
<suppress checks="ClassFanOutComplexity"
|
||||
files="AbstractResponse.java"/>
|
||||
|
||||
<suppress checks="MethodLength"
|
||||
files="KerberosLogin.java|RequestResponseTest.java|ConnectMetricsRegistry.java|KafkaConsumer.java"/>
|
||||
|
||||
<suppress checks="ParameterNumber"
|
||||
files="NetworkClient.java|FieldSpec.java"/>
|
||||
<suppress checks="ParameterNumber"
|
||||
files="KafkaConsumer.java"/>
|
||||
<suppress checks="ParameterNumber"
|
||||
files="Fetcher.java"/>
|
||||
<suppress checks="ParameterNumber"
|
||||
files="Sender.java"/>
|
||||
<suppress checks="ParameterNumber"
|
||||
files="ConfigDef.java"/>
|
||||
<suppress checks="ParameterNumber"
|
||||
files="DefaultRecordBatch.java"/>
|
||||
<suppress checks="ParameterNumber"
|
||||
files="Sender.java"/>
|
||||
|
||||
<suppress checks="ClassDataAbstractionCoupling"
|
||||
files="(KafkaConsumer|ConsumerCoordinator|Fetcher|KafkaProducer|AbstractRequest|AbstractResponse|TransactionManager|Admin|KafkaAdminClient).java"/>
|
||||
<suppress checks="ClassDataAbstractionCoupling"
|
||||
files="(Errors|SaslAuthenticatorTest|AgentTest|CoordinatorTest).java"/>
|
||||
|
||||
<suppress checks="BooleanExpressionComplexity"
|
||||
files="(Utils|Topic|KafkaLZ4BlockOutputStream|AclData|JoinGroupRequest).java"/>
|
||||
|
||||
<suppress checks="CyclomaticComplexity"
|
||||
files="(ConsumerCoordinator|Fetcher|Sender|KafkaProducer|BufferPool|ConfigDef|RecordAccumulator|KerberosLogin|AbstractRequest|AbstractResponse|Selector|SslFactory|SslTransportLayer|SaslClientAuthenticator|SaslClientCallbackHandler|SaslServerAuthenticator|AbstractCoordinator|TransactionManager|AbstractStickyAssignor).java"/>
|
||||
|
||||
<suppress checks="JavaNCSS"
|
||||
files="(AbstractRequest|KerberosLogin|WorkerSinkTaskTest|TransactionManagerTest|SenderTest|KafkaAdminClient|ConsumerCoordinatorTest).java"/>
|
||||
|
||||
<suppress checks="NPathComplexity"
|
||||
files="(ConsumerCoordinator|BufferPool|Fetcher|MetricName|Node|ConfigDef|RecordBatch|SslFactory|SslTransportLayer|MetadataResponse|KerberosLogin|Selector|Sender|Serdes|TokenInformation|Agent|Values|PluginUtils|MiniTrogdorCluster|TasksRequest|KafkaProducer|AbstractStickyAssignor).java"/>
|
||||
|
||||
<suppress checks="(JavaNCSS|CyclomaticComplexity|MethodLength)"
|
||||
files="CoordinatorClient.java"/>
|
||||
<suppress checks="(UnnecessaryParentheses|BooleanExpressionComplexity|CyclomaticComplexity|WhitespaceAfter|LocalVariableName)"
|
||||
files="Murmur3.java"/>
|
||||
|
||||
<suppress checks="(NPathComplexity|ClassFanOutComplexity|CyclomaticComplexity|ClassDataAbstractionCoupling|LocalVariableName|MemberName|ParameterName|MethodLength|JavaNCSS)"
|
||||
files="clients[\\/]src[\\/](generated|generated-test)[\\/].+.java$"/>
|
||||
|
||||
<suppress checks="NPathComplexity"
|
||||
files="MessageTest.java"/>
|
||||
|
||||
<!-- clients tests -->
|
||||
<suppress checks="ClassDataAbstractionCoupling"
|
||||
files="(Sender|Fetcher|KafkaConsumer|Metrics|RequestResponse|TransactionManager|KafkaAdminClient|Message|KafkaProducer)Test.java"/>
|
||||
|
||||
<suppress checks="ClassFanOutComplexity"
|
||||
files="(ConsumerCoordinator|KafkaConsumer|RequestResponse|Fetcher|KafkaAdminClient|Message|KafkaProducer)Test.java"/>
|
||||
|
||||
<suppress checks="ClassFanOutComplexity"
|
||||
files="MockAdminClient.java"/>
|
||||
|
||||
<suppress checks="JavaNCSS"
|
||||
files="RequestResponseTest.java|FetcherTest.java"/>
|
||||
|
||||
<suppress checks="NPathComplexity"
|
||||
files="MemoryRecordsTest|MetricsTest"/>
|
||||
|
||||
<suppress checks="(WhitespaceAround|LocalVariableName|ImportControl|AvoidStarImport)"
|
||||
files="Murmur3Test.java"/>
|
||||
|
||||
<!-- Connect -->
|
||||
<suppress checks="ClassFanOutComplexity"
|
||||
files="DistributedHerder(|Test).java"/>
|
||||
<suppress checks="ClassFanOutComplexity"
|
||||
files="Worker.java"/>
|
||||
<suppress checks="MethodLength"
|
||||
files="(KafkaConfigBackingStore|IncrementalCooperativeAssignor|RequestResponseTest|WorkerSinkTaskTest).java"/>
|
||||
|
||||
<suppress checks="ParameterNumber"
|
||||
files="Worker(SinkTask|SourceTask|Coordinator).java"/>
|
||||
<suppress checks="ParameterNumber"
|
||||
files="ConfigKeyInfo.java"/>
|
||||
|
||||
<suppress checks="ClassDataAbstractionCoupling"
|
||||
files="(RestServer|AbstractHerder|DistributedHerder).java"/>
|
||||
|
||||
<suppress checks="BooleanExpressionComplexity"
|
||||
files="JsonConverter.java"/>
|
||||
|
||||
<suppress checks="CyclomaticComplexity"
|
||||
files="ConnectRecord.java"/>
|
||||
<suppress checks="CyclomaticComplexity"
|
||||
files="JsonConverter.java"/>
|
||||
<suppress checks="CyclomaticComplexity"
|
||||
files="FileStreamSourceTask.java"/>
|
||||
<suppress checks="CyclomaticComplexity"
|
||||
files="DistributedHerder.java"/>
|
||||
<suppress checks="CyclomaticComplexity"
|
||||
files="KafkaConfigBackingStore.java"/>
|
||||
<suppress checks="CyclomaticComplexity"
|
||||
files="(Values|ConnectHeader|ConnectHeaders).java"/>
|
||||
<suppress checks="CyclomaticComplexity"
|
||||
files="RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest.java"/>
|
||||
|
||||
<suppress checks="JavaNCSS"
|
||||
files="KafkaConfigBackingStore.java"/>
|
||||
<suppress checks="JavaNCSS"
|
||||
files="Values.java"/>
|
||||
|
||||
<suppress checks="NPathComplexity"
|
||||
files="(DistributedHerder|RestClient|JsonConverter|KafkaConfigBackingStore|FileStreamSourceTask).java"/>
|
||||
|
||||
<suppress checks="MethodLength"
|
||||
files="Values.java"/>
|
||||
|
||||
<!-- connect tests-->
|
||||
<suppress checks="ClassDataAbstractionCoupling"
|
||||
files="(DistributedHerder|KafkaBasedLog)Test.java"/>
|
||||
|
||||
<suppress checks="ClassFanOutComplexity"
|
||||
files="(WorkerSinkTask|WorkerSourceTask)Test.java"/>
|
||||
|
||||
<!-- Streams -->
|
||||
<suppress checks="ClassFanOutComplexity"
|
||||
files="(TopologyBuilder|KafkaStreams|KStreamImpl|KTableImpl|StreamThread|StreamTask).java"/>
|
||||
|
||||
<suppress checks="MethodLength"
|
||||
files="(KTableImpl|StreamsPartitionAssignor.java)"/>
|
||||
|
||||
<suppress checks="ParameterNumber"
|
||||
files="StreamTask.java"/>
|
||||
<suppress checks="ParameterNumber"
|
||||
files="RocksDBWindowStoreSupplier.java"/>
|
||||
|
||||
<suppress checks="ClassDataAbstractionCoupling"
|
||||
files="(TopologyBuilder|KStreamImpl|StreamsPartitionAssignor|KafkaStreams|KTableImpl).java"/>
|
||||
|
||||
<suppress checks="CyclomaticComplexity"
|
||||
files="TopologyBuilder.java"/>
|
||||
<suppress checks="CyclomaticComplexity"
|
||||
files="StreamsPartitionAssignor.java"/>
|
||||
<suppress checks="CyclomaticComplexity"
|
||||
files="StreamThread.java"/>
|
||||
|
||||
<suppress checks="JavaNCSS"
|
||||
files="StreamsPartitionAssignor.java"/>
|
||||
|
||||
<suppress checks="NPathComplexity"
|
||||
files="(AssignorConfiguration|InternalTopologyBuilder|KafkaStreams|ProcessorStateManager|StreamsPartitionAssignor|StreamThread|TaskManager).java"/>
|
||||
|
||||
<suppress checks="(FinalLocalVariable|UnnecessaryParentheses|BooleanExpressionComplexity|CyclomaticComplexity|WhitespaceAfter|LocalVariableName)"
|
||||
files="Murmur3.java"/>
|
||||
|
||||
<!-- suppress FinalLocalVariable outside of the streams package. -->
|
||||
<suppress checks="FinalLocalVariable"
|
||||
files="^(?!.*[\\/]org[\\/]apache[\\/]kafka[\\/]streams[\\/].*$)"/>
|
||||
|
||||
<!-- generated code -->
|
||||
<suppress checks="(NPathComplexity|ClassFanOutComplexity|CyclomaticComplexity|ClassDataAbstractionCoupling|FinalLocalVariable|LocalVariableName|MemberName|ParameterName|MethodLength|JavaNCSS)"
|
||||
files="streams[\\/]src[\\/](generated|generated-test)[\\/].+.java$"/>
|
||||
|
||||
|
||||
<!-- Streams tests -->
|
||||
<suppress checks="ClassFanOutComplexity"
|
||||
files="(StreamThreadTest|StreamTaskTest|ProcessorTopologyTestDriver).java"/>
|
||||
|
||||
<suppress checks="MethodLength"
|
||||
files="KStreamKTableJoinIntegrationTest.java"/>
|
||||
<suppress checks="MethodLength"
|
||||
files="KStreamKStreamJoinTest.java"/>
|
||||
<suppress checks="MethodLength"
|
||||
files="KStreamWindowAggregateTest.java"/>
|
||||
<suppress checks="MethodLength"
|
||||
files="RocksDBWindowStoreTest.java"/>
|
||||
|
||||
<suppress checks="MemberName"
|
||||
files="StreamsPartitionAssignorTest.java"/>
|
||||
|
||||
<suppress checks="ClassDataAbstractionCoupling"
|
||||
files=".*[/\\]streams[/\\].*test[/\\].*.java"/>
|
||||
|
||||
<suppress checks="BooleanExpressionComplexity"
|
||||
files="SmokeTestDriver.java"/>
|
||||
|
||||
<suppress checks="CyclomaticComplexity"
|
||||
files="KStreamKStreamJoinTest.java|KTableKTableForeignKeyJoinIntegrationTest.java"/>
|
||||
<suppress checks="CyclomaticComplexity"
|
||||
files="RelationalSmokeTest.java|SmokeTestDriver.java"/>
|
||||
|
||||
<suppress checks="JavaNCSS"
|
||||
files="KStreamKStreamJoinTest.java"/>
|
||||
<suppress checks="JavaNCSS"
|
||||
files="SmokeTestDriver.java"/>
|
||||
|
||||
<suppress checks="NPathComplexity"
|
||||
files="EosTestDriver|KStreamKStreamJoinTest.java|RelationalSmokeTest.java|SmokeTestDriver.java|KStreamKStreamLeftJoinTest.java|KTableKTableForeignKeyJoinIntegrationTest.java"/>
|
||||
|
||||
<suppress checks="(FinalLocalVariable|WhitespaceAround|LocalVariableName|ImportControl|AvoidStarImport)"
|
||||
files="Murmur3Test.java"/>
|
||||
|
||||
|
||||
<!-- Streams Test-Utils -->
|
||||
<suppress checks="ClassFanOutComplexity"
|
||||
files="TopologyTestDriver.java"/>
|
||||
<suppress checks="ClassDataAbstractionCoupling"
|
||||
files="TopologyTestDriver.java"/>
|
||||
|
||||
<!-- Tools -->
|
||||
<suppress checks="ClassDataAbstractionCoupling"
|
||||
files="VerifiableConsumer.java"/>
|
||||
|
||||
<suppress checks="CyclomaticComplexity"
|
||||
files="(StreamsResetter|ProducerPerformance|Agent).java"/>
|
||||
<suppress checks="BooleanExpressionComplexity"
|
||||
files="StreamsResetter.java"/>
|
||||
<suppress checks="NPathComplexity"
|
||||
files="(ProducerPerformance|StreamsResetter|Agent|TransactionalMessageCopier).java"/>
|
||||
<suppress checks="ImportControl"
|
||||
files="SignalLogger.java"/>
|
||||
<suppress checks="IllegalImport"
|
||||
files="SignalLogger.java"/>
|
||||
<suppress checks="ParameterNumber"
|
||||
files="ProduceBenchSpec.java"/>
|
||||
<suppress checks="ParameterNumber"
|
||||
files="SustainedConnectionSpec.java"/>
|
||||
|
||||
<!-- Log4J-Appender -->
|
||||
<suppress checks="CyclomaticComplexity"
|
||||
files="KafkaLog4jAppender.java"/>
|
||||
|
||||
<suppress checks="NPathComplexity"
|
||||
files="KafkaLog4jAppender.java"/>
|
||||
<suppress checks="JavaNCSS"
|
||||
files="RequestResponseTest.java"/>
|
||||
|
||||
</suppressions>
|
||||
1
clients/.gitignore
vendored
Normal file
1
clients/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
/bin/
|
||||
@@ -0,0 +1,56 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersionsResponseKey;
|
||||
import org.apache.kafka.common.protocol.ApiKeys;
|
||||
|
||||
/**
|
||||
* Represents the min version and max version of an api key.
|
||||
*
|
||||
* NOTE: This class is intended for INTERNAL usage only within Kafka.
|
||||
*/
|
||||
public class ApiVersion {
|
||||
public final short apiKey;
|
||||
public final short minVersion;
|
||||
public final short maxVersion;
|
||||
|
||||
public ApiVersion(ApiKeys apiKey) {
|
||||
this(apiKey.id, apiKey.oldestVersion(), apiKey.latestVersion());
|
||||
}
|
||||
|
||||
public ApiVersion(short apiKey, short minVersion, short maxVersion) {
|
||||
this.apiKey = apiKey;
|
||||
this.minVersion = minVersion;
|
||||
this.maxVersion = maxVersion;
|
||||
}
|
||||
|
||||
public ApiVersion(ApiVersionsResponseKey apiVersionsResponseKey) {
|
||||
this.apiKey = apiVersionsResponseKey.apiKey();
|
||||
this.minVersion = apiVersionsResponseKey.minVersion();
|
||||
this.maxVersion = apiVersionsResponseKey.maxVersion();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ApiVersion(" +
|
||||
"apiKey=" + apiKey +
|
||||
", minVersion=" + minVersion +
|
||||
", maxVersion= " + maxVersion +
|
||||
")";
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,66 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import org.apache.kafka.common.protocol.ApiKeys;
|
||||
import org.apache.kafka.common.record.RecordBatch;
|
||||
import org.apache.kafka.common.requests.ProduceRequest;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Maintains node api versions for access outside of NetworkClient (which is where the information is derived).
|
||||
* The pattern is akin to the use of {@link Metadata} for topic metadata.
|
||||
*
|
||||
* NOTE: This class is intended for INTERNAL usage only within Kafka.
|
||||
*/
|
||||
public class ApiVersions {
|
||||
|
||||
private final Map<String, NodeApiVersions> nodeApiVersions = new HashMap<>();
|
||||
private byte maxUsableProduceMagic = RecordBatch.CURRENT_MAGIC_VALUE;
|
||||
|
||||
public synchronized void update(String nodeId, NodeApiVersions nodeApiVersions) {
|
||||
this.nodeApiVersions.put(nodeId, nodeApiVersions);
|
||||
this.maxUsableProduceMagic = computeMaxUsableProduceMagic();
|
||||
}
|
||||
|
||||
public synchronized void remove(String nodeId) {
|
||||
this.nodeApiVersions.remove(nodeId);
|
||||
this.maxUsableProduceMagic = computeMaxUsableProduceMagic();
|
||||
}
|
||||
|
||||
public synchronized NodeApiVersions get(String nodeId) {
|
||||
return this.nodeApiVersions.get(nodeId);
|
||||
}
|
||||
|
||||
private byte computeMaxUsableProduceMagic() {
|
||||
// use a magic version which is supported by all brokers to reduce the chance that
|
||||
// we will need to convert the messages when they are ready to be sent.
|
||||
byte maxUsableMagic = RecordBatch.CURRENT_MAGIC_VALUE;
|
||||
for (NodeApiVersions versions : this.nodeApiVersions.values()) {
|
||||
byte nodeMaxUsableMagic = ProduceRequest.requiredMagicForVersion(versions.latestUsableVersion(ApiKeys.PRODUCE));
|
||||
maxUsableMagic = (byte) Math.min(nodeMaxUsableMagic, maxUsableMagic);
|
||||
}
|
||||
return maxUsableMagic;
|
||||
}
|
||||
|
||||
public synchronized byte maxUsableProduceMagic() {
|
||||
return maxUsableProduceMagic;
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,41 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import java.util.Locale;
|
||||
|
||||
/**
 * Modes controlling how the client resolves broker hostnames via DNS
 * (presumably the values of the {@code client.dns.lookup} config — the
 * lowercase names here are the accepted config strings; confirm against
 * the config definition).
 */
public enum ClientDnsLookup {

    DEFAULT("default"),
    USE_ALL_DNS_IPS("use_all_dns_ips"),
    RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY("resolve_canonical_bootstrap_servers_only");

    // Fix: made final — each constant's config-string name is fixed at
    // construction and must never change.
    private final String clientDnsLookup;

    ClientDnsLookup(String clientDnsLookup) {
        this.clientDnsLookup = clientDnsLookup;
    }

    /** Returns the lowercase config-string form of this mode. */
    @Override
    public String toString() {
        return clientDnsLookup;
    }

    /**
     * Maps a (lowercase) config string to its enum constant.
     *
     * @throws IllegalArgumentException if {@code config} does not match any mode
     */
    public static ClientDnsLookup forConfig(String config) {
        return ClientDnsLookup.valueOf(config.toUpperCase(Locale.ROOT));
    }
}
|
||||
@@ -0,0 +1,119 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import org.apache.kafka.common.message.RequestHeaderData;
|
||||
import org.apache.kafka.common.protocol.ApiKeys;
|
||||
import org.apache.kafka.common.requests.AbstractRequest;
|
||||
import org.apache.kafka.common.requests.RequestHeader;
|
||||
|
||||
/**
|
||||
* A request being sent to the server. This holds both the network send as well as the client-level metadata.
|
||||
*/
|
||||
public final class ClientRequest {
|
||||
|
||||
private final String destination;
|
||||
private final AbstractRequest.Builder<?> requestBuilder;
|
||||
private final int correlationId;
|
||||
private final String clientId;
|
||||
private final long createdTimeMs;
|
||||
private final boolean expectResponse;
|
||||
private final int requestTimeoutMs;
|
||||
private final RequestCompletionHandler callback;
|
||||
|
||||
/**
|
||||
* @param destination The brokerId to send the request to
|
||||
* @param requestBuilder The builder for the request to make
|
||||
* @param correlationId The correlation id for this client request
|
||||
* @param clientId The client ID to use for the header
|
||||
* @param createdTimeMs The unix timestamp in milliseconds for the time at which this request was created.
|
||||
* @param expectResponse Should we expect a response message or is this request complete once it is sent?
|
||||
* @param callback A callback to execute when the response has been received (or null if no callback is necessary)
|
||||
*/
|
||||
public ClientRequest(String destination,
|
||||
AbstractRequest.Builder<?> requestBuilder,
|
||||
int correlationId,
|
||||
String clientId,
|
||||
long createdTimeMs,
|
||||
boolean expectResponse,
|
||||
int requestTimeoutMs,
|
||||
RequestCompletionHandler callback) {
|
||||
this.destination = destination;
|
||||
this.requestBuilder = requestBuilder;
|
||||
this.correlationId = correlationId;
|
||||
this.clientId = clientId;
|
||||
this.createdTimeMs = createdTimeMs;
|
||||
this.expectResponse = expectResponse;
|
||||
this.requestTimeoutMs = requestTimeoutMs;
|
||||
this.callback = callback;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ClientRequest(expectResponse=" + expectResponse +
|
||||
", callback=" + callback +
|
||||
", destination=" + destination +
|
||||
", correlationId=" + correlationId +
|
||||
", clientId=" + clientId +
|
||||
", createdTimeMs=" + createdTimeMs +
|
||||
", requestBuilder=" + requestBuilder +
|
||||
")";
|
||||
}
|
||||
|
||||
public boolean expectResponse() {
|
||||
return expectResponse;
|
||||
}
|
||||
|
||||
public ApiKeys apiKey() {
|
||||
return requestBuilder.apiKey();
|
||||
}
|
||||
|
||||
public RequestHeader makeHeader(short version) {
|
||||
short requestApiKey = requestBuilder.apiKey().id;
|
||||
return new RequestHeader(
|
||||
new RequestHeaderData().
|
||||
setRequestApiKey(requestApiKey).
|
||||
setRequestApiVersion(version).
|
||||
setClientId(clientId).
|
||||
setCorrelationId(correlationId),
|
||||
ApiKeys.forId(requestApiKey).requestHeaderVersion(version));
|
||||
}
|
||||
|
||||
public AbstractRequest.Builder<?> requestBuilder() {
|
||||
return requestBuilder;
|
||||
}
|
||||
|
||||
public String destination() {
|
||||
return destination;
|
||||
}
|
||||
|
||||
public RequestCompletionHandler callback() {
|
||||
return callback;
|
||||
}
|
||||
|
||||
public long createdTimeMs() {
|
||||
return createdTimeMs;
|
||||
}
|
||||
|
||||
public int correlationId() {
|
||||
return correlationId;
|
||||
}
|
||||
|
||||
public int requestTimeoutMs() {
|
||||
return requestTimeoutMs;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,126 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import org.apache.kafka.common.errors.AuthenticationException;
|
||||
import org.apache.kafka.common.errors.UnsupportedVersionException;
|
||||
import org.apache.kafka.common.requests.AbstractResponse;
|
||||
import org.apache.kafka.common.requests.RequestHeader;
|
||||
|
||||
/**
|
||||
* A response from the server. Contains both the body of the response as well as the correlated request
|
||||
* metadata that was originally sent.
|
||||
*/
|
||||
public class ClientResponse {
|
||||
|
||||
private final RequestHeader requestHeader;
|
||||
private final RequestCompletionHandler callback;
|
||||
private final String destination;
|
||||
private final long receivedTimeMs;
|
||||
private final long latencyMs;
|
||||
private final boolean disconnected;
|
||||
private final UnsupportedVersionException versionMismatch;
|
||||
private final AuthenticationException authenticationException;
|
||||
private final AbstractResponse responseBody;
|
||||
|
||||
/**
|
||||
* @param requestHeader The header of the corresponding request
|
||||
* @param callback The callback to be invoked
|
||||
* @param createdTimeMs The unix timestamp when the corresponding request was created
|
||||
* @param destination The node the corresponding request was sent to
|
||||
* @param receivedTimeMs The unix timestamp when this response was received
|
||||
* @param disconnected Whether the client disconnected before fully reading a response
|
||||
* @param versionMismatch Whether there was a version mismatch that prevented sending the request.
|
||||
* @param responseBody The response contents (or null) if we disconnected, no response was expected,
|
||||
* or if there was a version mismatch.
|
||||
*/
|
||||
public ClientResponse(RequestHeader requestHeader,
|
||||
RequestCompletionHandler callback,
|
||||
String destination,
|
||||
long createdTimeMs,
|
||||
long receivedTimeMs,
|
||||
boolean disconnected,
|
||||
UnsupportedVersionException versionMismatch,
|
||||
AuthenticationException authenticationException,
|
||||
AbstractResponse responseBody) {
|
||||
this.requestHeader = requestHeader;
|
||||
this.callback = callback;
|
||||
this.destination = destination;
|
||||
this.receivedTimeMs = receivedTimeMs;
|
||||
this.latencyMs = receivedTimeMs - createdTimeMs;
|
||||
this.disconnected = disconnected;
|
||||
this.versionMismatch = versionMismatch;
|
||||
this.authenticationException = authenticationException;
|
||||
this.responseBody = responseBody;
|
||||
}
|
||||
|
||||
public long receivedTimeMs() {
|
||||
return receivedTimeMs;
|
||||
}
|
||||
|
||||
public boolean wasDisconnected() {
|
||||
return disconnected;
|
||||
}
|
||||
|
||||
public UnsupportedVersionException versionMismatch() {
|
||||
return versionMismatch;
|
||||
}
|
||||
|
||||
public AuthenticationException authenticationException() {
|
||||
return authenticationException;
|
||||
}
|
||||
|
||||
public RequestHeader requestHeader() {
|
||||
return requestHeader;
|
||||
}
|
||||
|
||||
public String destination() {
|
||||
return destination;
|
||||
}
|
||||
|
||||
public AbstractResponse responseBody() {
|
||||
return responseBody;
|
||||
}
|
||||
|
||||
public boolean hasResponse() {
|
||||
return responseBody != null;
|
||||
}
|
||||
|
||||
public long requestLatencyMs() {
|
||||
return latencyMs;
|
||||
}
|
||||
|
||||
public void onComplete() {
|
||||
if (callback != null)
|
||||
callback.onComplete(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ClientResponse(receivedTimeMs=" + receivedTimeMs +
|
||||
", latencyMs=" +
|
||||
latencyMs +
|
||||
", disconnected=" +
|
||||
disconnected +
|
||||
", requestHeader=" +
|
||||
requestHeader +
|
||||
", responseBody=" +
|
||||
responseBody +
|
||||
")";
|
||||
}
|
||||
|
||||
}
|
||||
131
clients/src/main/java/org/apache/kafka/clients/ClientUtils.java
Normal file
131
clients/src/main/java/org/apache/kafka/clients/ClientUtils.java
Normal file
@@ -0,0 +1,131 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import org.apache.kafka.common.config.AbstractConfig;
|
||||
import org.apache.kafka.common.config.ConfigException;
|
||||
import org.apache.kafka.common.config.SaslConfigs;
|
||||
import org.apache.kafka.common.network.ChannelBuilder;
|
||||
import org.apache.kafka.common.network.ChannelBuilders;
|
||||
import org.apache.kafka.common.security.JaasContext;
|
||||
import org.apache.kafka.common.security.auth.SecurityProtocol;
|
||||
import org.apache.kafka.common.utils.LogContext;
|
||||
import org.apache.kafka.common.utils.Time;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
import static org.apache.kafka.common.utils.Utils.getHost;
|
||||
import static org.apache.kafka.common.utils.Utils.getPort;
|
||||
|
||||
public final class ClientUtils {
|
||||
private static final Logger log = LoggerFactory.getLogger(ClientUtils.class);
|
||||
|
||||
private ClientUtils() {
|
||||
}
|
||||
|
||||
public static List<InetSocketAddress> parseAndValidateAddresses(List<String> urls, String clientDnsLookupConfig) {
|
||||
return parseAndValidateAddresses(urls, ClientDnsLookup.forConfig(clientDnsLookupConfig));
|
||||
}
|
||||
|
||||
public static List<InetSocketAddress> parseAndValidateAddresses(List<String> urls, ClientDnsLookup clientDnsLookup) {
|
||||
List<InetSocketAddress> addresses = new ArrayList<>();
|
||||
for (String url : urls) {
|
||||
if (url != null && !url.isEmpty()) {
|
||||
try {
|
||||
String host = getHost(url);
|
||||
Integer port = getPort(url);
|
||||
if (host == null || port == null)
|
||||
throw new ConfigException("Invalid url in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url);
|
||||
|
||||
if (clientDnsLookup == ClientDnsLookup.RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY) {
|
||||
InetAddress[] inetAddresses = InetAddress.getAllByName(host);
|
||||
for (InetAddress inetAddress : inetAddresses) {
|
||||
String resolvedCanonicalName = inetAddress.getCanonicalHostName();
|
||||
InetSocketAddress address = new InetSocketAddress(resolvedCanonicalName, port);
|
||||
if (address.isUnresolved()) {
|
||||
log.warn("Couldn't resolve server {} from {} as DNS resolution of the canonical hostname {} failed for {}", url, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, resolvedCanonicalName, host);
|
||||
} else {
|
||||
addresses.add(address);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
InetSocketAddress address = new InetSocketAddress(host, port);
|
||||
if (address.isUnresolved()) {
|
||||
log.warn("Couldn't resolve server {} from {} as DNS resolution failed for {}", url, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, host);
|
||||
} else {
|
||||
addresses.add(address);
|
||||
}
|
||||
}
|
||||
|
||||
} catch (IllegalArgumentException e) {
|
||||
throw new ConfigException("Invalid port in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url);
|
||||
} catch (UnknownHostException e) {
|
||||
throw new ConfigException("Unknown host in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (addresses.isEmpty())
|
||||
throw new ConfigException("No resolvable bootstrap urls given in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG);
|
||||
return addresses;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new channel builder from the provided configuration.
|
||||
*
|
||||
* @param config client configs
|
||||
* @param time the time implementation
|
||||
* @param logContext the logging context
|
||||
*
|
||||
* @return configured ChannelBuilder based on the configs.
|
||||
*/
|
||||
public static ChannelBuilder createChannelBuilder(AbstractConfig config, Time time, LogContext logContext) {
|
||||
SecurityProtocol securityProtocol = SecurityProtocol.forName(config.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG));
|
||||
String clientSaslMechanism = config.getString(SaslConfigs.SASL_MECHANISM);
|
||||
return ChannelBuilders.clientChannelBuilder(securityProtocol, JaasContext.Type.CLIENT, config, null,
|
||||
clientSaslMechanism, time, true, logContext);
|
||||
}
|
||||
|
||||
static List<InetAddress> resolve(String host, ClientDnsLookup clientDnsLookup) throws UnknownHostException {
|
||||
InetAddress[] addresses = InetAddress.getAllByName(host);
|
||||
if (ClientDnsLookup.USE_ALL_DNS_IPS == clientDnsLookup) {
|
||||
return filterPreferredAddresses(addresses);
|
||||
} else {
|
||||
return Collections.singletonList(addresses[0]);
|
||||
}
|
||||
}
|
||||
|
||||
static List<InetAddress> filterPreferredAddresses(InetAddress[] allAddresses) {
|
||||
List<InetAddress> preferredAddresses = new ArrayList<>();
|
||||
Class<? extends InetAddress> clazz = null;
|
||||
for (InetAddress address : allAddresses) {
|
||||
if (clazz == null) {
|
||||
clazz = address.getClass();
|
||||
}
|
||||
if (clazz.isInstance(address)) {
|
||||
preferredAddresses.add(address);
|
||||
}
|
||||
}
|
||||
return preferredAddresses;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,427 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
|
||||
import org.apache.kafka.common.errors.AuthenticationException;
|
||||
import org.apache.kafka.common.utils.LogContext;
|
||||
import org.slf4j.Logger;
|
||||
|
||||
import java.net.InetAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* The state of our connection to each node in the cluster.
|
||||
*
|
||||
*/
|
||||
final class ClusterConnectionStates {
|
||||
private final long reconnectBackoffInitMs;
|
||||
private final long reconnectBackoffMaxMs;
|
||||
private final static int RECONNECT_BACKOFF_EXP_BASE = 2;
|
||||
private final double reconnectBackoffMaxExp;
|
||||
private final Map<String, NodeConnectionState> nodeState;
|
||||
private final Logger log;
|
||||
|
||||
public ClusterConnectionStates(long reconnectBackoffMs, long reconnectBackoffMaxMs, LogContext logContext) {
|
||||
this.log = logContext.logger(ClusterConnectionStates.class);
|
||||
this.reconnectBackoffInitMs = reconnectBackoffMs;
|
||||
this.reconnectBackoffMaxMs = reconnectBackoffMaxMs;
|
||||
this.reconnectBackoffMaxExp = Math.log(this.reconnectBackoffMaxMs / (double) Math.max(reconnectBackoffMs, 1)) / Math.log(RECONNECT_BACKOFF_EXP_BASE);
|
||||
this.nodeState = new HashMap<>();
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true iff we can currently initiate a new connection. This will be the case if we are not
|
||||
* connected and haven't been connected for at least the minimum reconnection backoff period.
|
||||
* @param id the connection id to check
|
||||
* @param now the current time in ms
|
||||
* @return true if we can initiate a new connection
|
||||
*/
|
||||
public boolean canConnect(String id, long now) {
|
||||
NodeConnectionState state = nodeState.get(id);
|
||||
if (state == null)
|
||||
return true;
|
||||
else
|
||||
return state.state.isDisconnected() &&
|
||||
now - state.lastConnectAttemptMs >= state.reconnectBackoffMs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true if we are disconnected from the given node and can't re-establish a connection yet.
|
||||
* @param id the connection to check
|
||||
* @param now the current time in ms
|
||||
*/
|
||||
public boolean isBlackedOut(String id, long now) {
|
||||
NodeConnectionState state = nodeState.get(id);
|
||||
return state != null
|
||||
&& state.state.isDisconnected()
|
||||
&& now - state.lastConnectAttemptMs < state.reconnectBackoffMs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the number of milliseconds to wait, based on the connection state, before attempting to send data. When
|
||||
* disconnected, this respects the reconnect backoff time. When connecting or connected, this handles slow/stalled
|
||||
* connections.
|
||||
* @param id the connection to check
|
||||
* @param now the current time in ms
|
||||
*/
|
||||
public long connectionDelay(String id, long now) {
|
||||
NodeConnectionState state = nodeState.get(id);
|
||||
if (state == null) return 0;
|
||||
if (state.state.isDisconnected()) {
|
||||
long timeWaited = now - state.lastConnectAttemptMs;
|
||||
return Math.max(state.reconnectBackoffMs - timeWaited, 0);
|
||||
} else {
|
||||
// When connecting or connected, we should be able to delay indefinitely since other events (connection or
|
||||
// data acked) will cause a wakeup once data can be sent.
|
||||
return Long.MAX_VALUE;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true if a specific connection establishment is currently underway
|
||||
* @param id The id of the node to check
|
||||
*/
|
||||
public boolean isConnecting(String id) {
|
||||
NodeConnectionState state = nodeState.get(id);
|
||||
return state != null && state.state == ConnectionState.CONNECTING;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check whether a connection is either being established or awaiting API version information.
|
||||
* @param id The id of the node to check
|
||||
* @return true if the node is either connecting or has connected and is awaiting API versions, false otherwise
|
||||
*/
|
||||
public boolean isPreparingConnection(String id) {
|
||||
NodeConnectionState state = nodeState.get(id);
|
||||
return state != null &&
|
||||
(state.state == ConnectionState.CONNECTING || state.state == ConnectionState.CHECKING_API_VERSIONS);
|
||||
}
|
||||
|
||||
/**
|
||||
* Enter the connecting state for the given connection, moving to a new resolved address if necessary.
|
||||
* @param id the id of the connection
|
||||
* @param now the current time in ms
|
||||
* @param host the host of the connection, to be resolved internally if needed
|
||||
* @param clientDnsLookup the mode of DNS lookup to use when resolving the {@code host}
|
||||
*/
|
||||
public void connecting(String id, long now, String host, ClientDnsLookup clientDnsLookup) {
|
||||
NodeConnectionState connectionState = nodeState.get(id);
|
||||
if (connectionState != null && connectionState.host().equals(host)) {
|
||||
connectionState.lastConnectAttemptMs = now;
|
||||
connectionState.state = ConnectionState.CONNECTING;
|
||||
// Move to next resolved address, or if addresses are exhausted, mark node to be re-resolved
|
||||
connectionState.moveToNextAddress();
|
||||
return;
|
||||
} else if (connectionState != null) {
|
||||
log.info("Hostname for node {} changed from {} to {}.", id, connectionState.host(), host);
|
||||
}
|
||||
|
||||
// Create a new NodeConnectionState if nodeState does not already contain one
|
||||
// for the specified id or if the hostname associated with the node id changed.
|
||||
nodeState.put(id, new NodeConnectionState(ConnectionState.CONNECTING, now,
|
||||
this.reconnectBackoffInitMs, host, clientDnsLookup));
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a resolved address for the given connection, resolving it if necessary.
|
||||
* @param id the id of the connection
|
||||
* @throws UnknownHostException if the address was not resolvable
|
||||
*/
|
||||
public InetAddress currentAddress(String id) throws UnknownHostException {
|
||||
return nodeState(id).currentAddress();
|
||||
}
|
||||
|
||||
/**
|
||||
* Enter the disconnected state for the given node.
|
||||
* @param id the connection we have disconnected
|
||||
* @param now the current time in ms
|
||||
*/
|
||||
public void disconnected(String id, long now) {
|
||||
NodeConnectionState nodeState = nodeState(id);
|
||||
nodeState.state = ConnectionState.DISCONNECTED;
|
||||
nodeState.lastConnectAttemptMs = now;
|
||||
updateReconnectBackoff(nodeState);
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicate that the connection is throttled until the specified deadline.
|
||||
* @param id the connection to be throttled
|
||||
* @param throttleUntilTimeMs the throttle deadline in milliseconds
|
||||
*/
|
||||
public void throttle(String id, long throttleUntilTimeMs) {
|
||||
NodeConnectionState state = nodeState.get(id);
|
||||
// The throttle deadline should never regress.
|
||||
if (state != null && state.throttleUntilTimeMs < throttleUntilTimeMs) {
|
||||
state.throttleUntilTimeMs = throttleUntilTimeMs;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the remaining throttling delay in milliseconds if throttling is in progress. Return 0, otherwise.
|
||||
* @param id the connection to check
|
||||
* @param now the current time in ms
|
||||
*/
|
||||
public long throttleDelayMs(String id, long now) {
|
||||
NodeConnectionState state = nodeState.get(id);
|
||||
if (state != null && state.throttleUntilTimeMs > now) {
|
||||
return state.throttleUntilTimeMs - now;
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the number of milliseconds to wait, based on the connection state and the throttle time, before
|
||||
* attempting to send data. If the connection has been established but being throttled, return throttle delay.
|
||||
* Otherwise, return connection delay.
|
||||
* @param id the connection to check
|
||||
* @param now the current time in ms
|
||||
*/
|
||||
public long pollDelayMs(String id, long now) {
|
||||
long throttleDelayMs = throttleDelayMs(id, now);
|
||||
if (isConnected(id) && throttleDelayMs > 0) {
|
||||
return throttleDelayMs;
|
||||
} else {
|
||||
return connectionDelay(id, now);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Enter the checking_api_versions state for the given node.
|
||||
* @param id the connection identifier
|
||||
*/
|
||||
public void checkingApiVersions(String id) {
|
||||
NodeConnectionState nodeState = nodeState(id);
|
||||
nodeState.state = ConnectionState.CHECKING_API_VERSIONS;
|
||||
}
|
||||
|
||||
/**
|
||||
* Enter the ready state for the given node.
|
||||
* @param id the connection identifier
|
||||
*/
|
||||
public void ready(String id) {
|
||||
NodeConnectionState nodeState = nodeState(id);
|
||||
nodeState.state = ConnectionState.READY;
|
||||
nodeState.authenticationException = null;
|
||||
resetReconnectBackoff(nodeState);
|
||||
}
|
||||
|
||||
/**
|
||||
* Enter the authentication failed state for the given node.
|
||||
* @param id the connection identifier
|
||||
* @param now the current time in ms
|
||||
* @param exception the authentication exception
|
||||
*/
|
||||
public void authenticationFailed(String id, long now, AuthenticationException exception) {
|
||||
NodeConnectionState nodeState = nodeState(id);
|
||||
nodeState.authenticationException = exception;
|
||||
nodeState.state = ConnectionState.AUTHENTICATION_FAILED;
|
||||
nodeState.lastConnectAttemptMs = now;
|
||||
updateReconnectBackoff(nodeState);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true if the connection is in the READY state and currently not throttled.
|
||||
*
|
||||
* @param id the connection identifier
|
||||
* @param now the current time in ms
|
||||
*/
|
||||
public boolean isReady(String id, long now) {
|
||||
return isReady(nodeState.get(id), now);
|
||||
}
|
||||
|
||||
private boolean isReady(NodeConnectionState state, long now) {
|
||||
return state != null && state.state == ConnectionState.READY && state.throttleUntilTimeMs <= now;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true if there is at least one node with connection in the READY state and not throttled. Returns false
|
||||
* otherwise.
|
||||
*
|
||||
* @param now the current time in ms
|
||||
*/
|
||||
public boolean hasReadyNodes(long now) {
|
||||
for (Map.Entry<String, NodeConnectionState> entry : nodeState.entrySet()) {
|
||||
if (isReady(entry.getValue(), now)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true if the connection has been established
|
||||
* @param id The id of the node to check
|
||||
*/
|
||||
public boolean isConnected(String id) {
|
||||
NodeConnectionState state = nodeState.get(id);
|
||||
return state != null && state.state.isConnected();
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true if the connection has been disconnected
|
||||
* @param id The id of the node to check
|
||||
*/
|
||||
public boolean isDisconnected(String id) {
|
||||
NodeConnectionState state = nodeState.get(id);
|
||||
return state != null && state.state.isDisconnected();
|
||||
}
|
||||
|
||||
/**
|
||||
* Return authentication exception if an authentication error occurred
|
||||
* @param id The id of the node to check
|
||||
*/
|
||||
public AuthenticationException authenticationException(String id) {
|
||||
NodeConnectionState state = nodeState.get(id);
|
||||
return state != null ? state.authenticationException : null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Resets the failure count for a node and sets the reconnect backoff to the base
|
||||
* value configured via reconnect.backoff.ms
|
||||
*
|
||||
* @param nodeState The node state object to update
|
||||
*/
|
||||
private void resetReconnectBackoff(NodeConnectionState nodeState) {
|
||||
nodeState.failedAttempts = 0;
|
||||
nodeState.reconnectBackoffMs = this.reconnectBackoffInitMs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the node reconnect backoff exponentially.
|
||||
* The delay is reconnect.backoff.ms * 2**(failures - 1) * (+/- 20% random jitter)
|
||||
* Up to a (pre-jitter) maximum of reconnect.backoff.max.ms
|
||||
*
|
||||
* @param nodeState The node state object to update
|
||||
*/
|
||||
private void updateReconnectBackoff(NodeConnectionState nodeState) {
|
||||
if (this.reconnectBackoffMaxMs > this.reconnectBackoffInitMs) {
|
||||
nodeState.failedAttempts += 1;
|
||||
double backoffExp = Math.min(nodeState.failedAttempts - 1, this.reconnectBackoffMaxExp);
|
||||
double backoffFactor = Math.pow(RECONNECT_BACKOFF_EXP_BASE, backoffExp);
|
||||
long reconnectBackoffMs = (long) (this.reconnectBackoffInitMs * backoffFactor);
|
||||
// Actual backoff is randomized to avoid connection storms.
|
||||
double randomFactor = ThreadLocalRandom.current().nextDouble(0.8, 1.2);
|
||||
nodeState.reconnectBackoffMs = (long) (randomFactor * reconnectBackoffMs);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove the given node from the tracked connection states. The main difference between this and `disconnected`
|
||||
* is the impact on `connectionDelay`: it will be 0 after this call whereas `reconnectBackoffMs` will be taken
|
||||
* into account after `disconnected` is called.
|
||||
*
|
||||
* @param id the connection to remove
|
||||
*/
|
||||
public void remove(String id) {
|
||||
nodeState.remove(id);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the state of a given connection.
|
||||
* @param id the id of the connection
|
||||
* @return the state of our connection
|
||||
*/
|
||||
public ConnectionState connectionState(String id) {
|
||||
return nodeState(id).state;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the state of a given node.
|
||||
* @param id the connection to fetch the state for
|
||||
*/
|
||||
private NodeConnectionState nodeState(String id) {
|
||||
NodeConnectionState state = this.nodeState.get(id);
|
||||
if (state == null)
|
||||
throw new IllegalStateException("No entry found for connection " + id);
|
||||
return state;
|
||||
}
|
||||
|
||||
/**
 * The state of our connection to a node.
 */
private static class NodeConnectionState {

    // Current lifecycle state of the connection (DISCONNECTED, CONNECTING, ...).
    ConnectionState state;
    // Last authentication error seen for this connection, or null if none.
    AuthenticationException authenticationException;
    // Timestamp (ms) of the most recent connection attempt.
    long lastConnectAttemptMs;
    // Number of consecutive failed connection attempts; drives exponential backoff.
    long failedAttempts;
    // Current reconnect backoff (ms) to wait before the next attempt.
    long reconnectBackoffMs;
    // Connection is being throttled if current time < throttleUntilTimeMs.
    long throttleUntilTimeMs;
    // Resolved IP addresses for `host`; empty until (re-)resolved in currentAddress().
    private List<InetAddress> addresses;
    // Index into `addresses` of the address currently in use; -1 before resolution.
    private int addressIndex;
    private final String host;
    private final ClientDnsLookup clientDnsLookup;

    private NodeConnectionState(ConnectionState state, long lastConnectAttempt, long reconnectBackoffMs,
            String host, ClientDnsLookup clientDnsLookup) {
        this.state = state;
        // Addresses are resolved lazily on the first currentAddress() call.
        this.addresses = Collections.emptyList();
        this.addressIndex = -1;
        this.authenticationException = null;
        this.lastConnectAttemptMs = lastConnectAttempt;
        this.failedAttempts = 0;
        this.reconnectBackoffMs = reconnectBackoffMs;
        this.throttleUntilTimeMs = 0;
        this.host = host;
        this.clientDnsLookup = clientDnsLookup;
    }

    public String host() {
        return host;
    }

    /**
     * Fetches the current selected IP address for this node, resolving {@link #host()} if necessary.
     * @return the selected address
     * @throws UnknownHostException if resolving {@link #host()} fails
     */
    private InetAddress currentAddress() throws UnknownHostException {
        if (addresses.isEmpty()) {
            // (Re-)initialize list
            addresses = ClientUtils.resolve(host, clientDnsLookup);
            addressIndex = 0;
        }

        return addresses.get(addressIndex);
    }

    /**
     * Jumps to the next available resolved address for this node. If no other addresses are available, marks the
     * list to be refreshed on the next {@link #currentAddress()} call.
     */
    private void moveToNextAddress() {
        if (addresses.isEmpty())
            return; // Avoid div0. List will initialize on next currentAddress() call

        addressIndex = (addressIndex + 1) % addresses.size();
        if (addressIndex == 0)
            addresses = Collections.emptyList(); // Exhausted list. Re-resolve on next currentAddress() call
    }

    public String toString() {
        return "NodeState(" + state + ", " + lastConnectAttemptMs + ", " + failedAttempts + ", " + throttleUntilTimeMs + ")";
    }
}
|
||||
}
|
||||
@@ -0,0 +1,168 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import org.apache.kafka.common.config.AbstractConfig;
|
||||
import org.apache.kafka.common.security.auth.SecurityProtocol;
|
||||
import org.apache.kafka.common.utils.Utils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Configurations shared by Kafka client applications: producer, consumer, connect, etc.
|
||||
*/
|
||||
public class CommonClientConfigs {
    private static final Logger log = LoggerFactory.getLogger(CommonClientConfigs.class);

    /*
     * NOTE: DO NOT CHANGE EITHER CONFIG NAMES AS THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE.
     */

    // ---- Bootstrap and DNS resolution ----

    public static final String BOOTSTRAP_SERVERS_CONFIG = "bootstrap.servers";
    public static final String BOOTSTRAP_SERVERS_DOC = "A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form "
        + "<code>host1:port1,host2:port2,...</code>. Since these servers are just used for the initial connection to "
        + "discover the full cluster membership (which may change dynamically), this list need not contain the full set of "
        + "servers (you may want more than one, though, in case a server is down).";

    public static final String CLIENT_DNS_LOOKUP_CONFIG = "client.dns.lookup";
    public static final String CLIENT_DNS_LOOKUP_DOC = "Controls how the client uses DNS lookups. If set to <code>use_all_dns_ips</code> then, when the lookup returns multiple IP addresses for a hostname,"
        + " they will all be attempted to connect to before failing the connection. Applies to both bootstrap and advertised servers."
        + " If the value is <code>resolve_canonical_bootstrap_servers_only</code> each entry will be resolved and expanded into a list of canonical names.";

    public static final String METADATA_MAX_AGE_CONFIG = "metadata.max.age.ms";
    public static final String METADATA_MAX_AGE_DOC = "The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.";

    // ---- TCP socket tuning ----

    public static final String SEND_BUFFER_CONFIG = "send.buffer.bytes";
    public static final String SEND_BUFFER_DOC = "The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.";
    public static final int SEND_BUFFER_LOWER_BOUND = -1;

    public static final String RECEIVE_BUFFER_CONFIG = "receive.buffer.bytes";
    public static final String RECEIVE_BUFFER_DOC = "The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.";
    public static final int RECEIVE_BUFFER_LOWER_BOUND = -1;

    // ---- Client identity ----

    public static final String CLIENT_ID_CONFIG = "client.id";
    public static final String CLIENT_ID_DOC = "An id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.";

    public static final String CLIENT_RACK_CONFIG = "client.rack";
    public static final String CLIENT_RACK_DOC = "A rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds with the broker config 'broker.rack'";

    // ---- Reconnect and retry backoff ----

    public static final String RECONNECT_BACKOFF_MS_CONFIG = "reconnect.backoff.ms";
    public static final String RECONNECT_BACKOFF_MS_DOC = "The base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker.";

    public static final String RECONNECT_BACKOFF_MAX_MS_CONFIG = "reconnect.backoff.max.ms";
    public static final String RECONNECT_BACKOFF_MAX_MS_DOC = "The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.";

    public static final String RETRIES_CONFIG = "retries";
    public static final String RETRIES_DOC = "Setting a value greater than zero will cause the client to resend any request that fails with a potentially transient error.";

    public static final String RETRY_BACKOFF_MS_CONFIG = "retry.backoff.ms";
    public static final String RETRY_BACKOFF_MS_DOC = "The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.";

    // ---- Metrics ----

    public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = "metrics.sample.window.ms";
    public static final String METRICS_SAMPLE_WINDOW_MS_DOC = "The window of time a metrics sample is computed over.";

    public static final String METRICS_NUM_SAMPLES_CONFIG = "metrics.num.samples";
    public static final String METRICS_NUM_SAMPLES_DOC = "The number of samples maintained to compute metrics.";

    public static final String METRICS_RECORDING_LEVEL_CONFIG = "metrics.recording.level";
    public static final String METRICS_RECORDING_LEVEL_DOC = "The highest recording level for metrics.";

    public static final String METRIC_REPORTER_CLASSES_CONFIG = "metric.reporters";
    public static final String METRIC_REPORTER_CLASSES_DOC = "A list of classes to use as metrics reporters. Implementing the <code>org.apache.kafka.common.metrics.MetricsReporter</code> interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.";

    // ---- Security ----

    public static final String SECURITY_PROTOCOL_CONFIG = "security.protocol";
    public static final String SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. Valid values are: " +
        Utils.join(SecurityProtocol.names(), ", ") + ".";
    public static final String DEFAULT_SECURITY_PROTOCOL = "PLAINTEXT";

    // ---- Connection lifecycle and request timeouts ----

    public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG = "connections.max.idle.ms";
    public static final String CONNECTIONS_MAX_IDLE_MS_DOC = "Close idle connections after the number of milliseconds specified by this config.";

    public static final String REQUEST_TIMEOUT_MS_CONFIG = "request.timeout.ms";
    public static final String REQUEST_TIMEOUT_MS_DOC = "The configuration controls the maximum amount of time the client will wait "
        + "for the response of a request. If the response is not received before the timeout "
        + "elapses the client will resend the request if necessary or fail the request if "
        + "retries are exhausted.";

    // ---- Consumer group membership ----

    public static final String GROUP_ID_CONFIG = "group.id";
    public static final String GROUP_ID_DOC = "A unique string that identifies the consumer group this consumer belongs to. This property is required if the consumer uses either the group management functionality by using <code>subscribe(topic)</code> or the Kafka-based offset management strategy.";

    public static final String GROUP_INSTANCE_ID_CONFIG = "group.instance.id";
    public static final String GROUP_INSTANCE_ID_DOC = "A unique identifier of the consumer instance provided by the end user. "
        + "Only non-empty strings are permitted. If set, the consumer is treated as a static member, "
        + "which means that only one instance with this ID is allowed in the consumer group at any time. "
        + "This can be used in combination with a larger session timeout to avoid group rebalances caused by transient unavailability "
        + "(e.g. process restarts). If not set, the consumer will join the group as a dynamic member, which is the traditional behavior.";

    public static final String MAX_POLL_INTERVAL_MS_CONFIG = "max.poll.interval.ms";
    public static final String MAX_POLL_INTERVAL_MS_DOC = "The maximum delay between invocations of poll() when using "
        + "consumer group management. This places an upper bound on the amount of time that the consumer can be idle "
        + "before fetching more records. If poll() is not called before expiration of this timeout, then the consumer "
        + "is considered failed and the group will rebalance in order to reassign the partitions to another member. "
        + "For consumers using a non-null <code>group.instance.id</code> which reach this timeout, partitions will not be immediately reassigned. "
        + "Instead, the consumer will stop sending heartbeats and partitions will be reassigned "
        + "after expiration of <code>session.timeout.ms</code>. This mirrors the behavior of a static consumer which has shutdown.";

    public static final String REBALANCE_TIMEOUT_MS_CONFIG = "rebalance.timeout.ms";
    public static final String REBALANCE_TIMEOUT_MS_DOC = "The maximum allowed time for each worker to join the group "
        + "once a rebalance has begun. This is basically a limit on the amount of time needed for all tasks to "
        + "flush any pending data and commit offsets. If the timeout is exceeded, then the worker will be removed "
        + "from the group, which will cause offset commit failures.";

    public static final String SESSION_TIMEOUT_MS_CONFIG = "session.timeout.ms";
    public static final String SESSION_TIMEOUT_MS_DOC = "The timeout used to detect client failures when using "
        + "Kafka's group management facility. The client sends periodic heartbeats to indicate its liveness "
        + "to the broker. If no heartbeats are received by the broker before the expiration of this session timeout, "
        + "then the broker will remove this client from the group and initiate a rebalance. Note that the value "
        + "must be in the allowable range as configured in the broker configuration by <code>group.min.session.timeout.ms</code> "
        + "and <code>group.max.session.timeout.ms</code>.";

    public static final String HEARTBEAT_INTERVAL_MS_CONFIG = "heartbeat.interval.ms";
    public static final String HEARTBEAT_INTERVAL_MS_DOC = "The expected time between heartbeats to the consumer "
        + "coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the "
        + "consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. "
        + "The value must be set lower than <code>session.timeout.ms</code>, but typically should be set no higher "
        + "than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances.";

    public static final String DEFAULT_API_TIMEOUT_MS_CONFIG = "default.api.timeout.ms";
    public static final String DEFAULT_API_TIMEOUT_MS_DOC = "Specifies the timeout (in milliseconds) for client APIs. " +
        "This configuration is used as the default timeout for all client operations that do not specify a <code>timeout</code> parameter.";

    /**
     * Postprocess the configuration so that exponential backoff is disabled when reconnect backoff
     * is explicitly configured but the maximum reconnect backoff is not explicitly configured.
     *
     * @param config The config object.
     * @param parsedValues The parsedValues as provided to postProcessParsedConfig.
     *
     * @return The new values which have been set as described in postProcessParsedConfig.
     */
    public static Map<String, Object> postProcessReconnectBackoffConfigs(AbstractConfig config,
            Map<String, Object> parsedValues) {
        HashMap<String, Object> rval = new HashMap<>();
        // Only the user-supplied originals tell us whether max backoff was explicitly set.
        if ((!config.originals().containsKey(RECONNECT_BACKOFF_MAX_MS_CONFIG)) &&
            config.originals().containsKey(RECONNECT_BACKOFF_MS_CONFIG)) {
            log.debug("Disabling exponential reconnect backoff because {} is set, but {} is not.",
                RECONNECT_BACKOFF_MS_CONFIG, RECONNECT_BACKOFF_MAX_MS_CONFIG);
            rval.put(RECONNECT_BACKOFF_MAX_MS_CONFIG, parsedValues.get(RECONNECT_BACKOFF_MS_CONFIG));
        }
        return rval;
    }
}
|
||||
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
/**
|
||||
* The states of a node connection
|
||||
*
|
||||
* DISCONNECTED: connection has not been successfully established yet
|
||||
* CONNECTING: connection is under progress
|
||||
* CHECKING_API_VERSIONS: connection has been established and api versions check is in progress. Failure of this check will cause connection to close
|
||||
* READY: connection is ready to send requests
|
||||
* AUTHENTICATION_FAILED: connection failed due to an authentication error
|
||||
*/
|
||||
public enum ConnectionState {
    DISCONNECTED, CONNECTING, CHECKING_API_VERSIONS, READY, AUTHENTICATION_FAILED;

    /**
     * True for states in which no usable connection exists, including
     * authentication failures (which close the connection).
     */
    public boolean isDisconnected() {
        return this == DISCONNECTED || this == AUTHENTICATION_FAILED;
    }

    /**
     * True for states in which a transport connection has been established,
     * even if the api versions check is still in flight.
     */
    public boolean isConnected() {
        return this == READY || this == CHECKING_API_VERSIONS;
    }
}
|
||||
@@ -0,0 +1,484 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.protocol.Errors;
|
||||
import org.apache.kafka.common.requests.FetchMetadata;
|
||||
import org.apache.kafka.common.requests.FetchRequest.PartitionData;
|
||||
import org.apache.kafka.common.requests.FetchResponse;
|
||||
import org.apache.kafka.common.utils.LogContext;
|
||||
import org.apache.kafka.common.utils.Utils;
|
||||
import org.slf4j.Logger;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID;
|
||||
|
||||
/**
|
||||
* FetchSessionHandler maintains the fetch session state for connecting to a broker.
|
||||
*
|
||||
* Using the protocol outlined by KIP-227, clients can create incremental fetch sessions.
|
||||
* These sessions allow the client to fetch information about a set of partition over
|
||||
* and over, without explicitly enumerating all the partitions in the request and the
|
||||
* response.
|
||||
*
|
||||
* FetchSessionHandler tracks the partitions which are in the session. It also
|
||||
* determines which partitions need to be included in each fetch request, and what
|
||||
* the attached fetch session metadata should be for each request. The corresponding
|
||||
* class on the receiving broker side is FetchManager.
|
||||
*/
|
||||
public class FetchSessionHandler {
|
||||
private final Logger log;
|
||||
|
||||
private final int node;
|
||||
|
||||
/**
|
||||
* The metadata for the next fetch request.
|
||||
*/
|
||||
private FetchMetadata nextMetadata = FetchMetadata.INITIAL;
|
||||
|
||||
/**
 * Create a handler that tracks the fetch session state for a single broker.
 *
 * @param logContext the context used to create this handler's logger
 * @param node       the id of the broker this session communicates with
 */
public FetchSessionHandler(LogContext logContext, int node) {
    this.node = node;
    this.log = logContext.logger(FetchSessionHandler.class);
}
|
||||
|
||||
/**
|
||||
* All of the partitions which exist in the fetch request session.
|
||||
*/
|
||||
private LinkedHashMap<TopicPartition, PartitionData> sessionPartitions =
|
||||
new LinkedHashMap<>(0);
|
||||
|
||||
public static class FetchRequestData {
|
||||
/**
|
||||
* The partitions to send in the fetch request.
|
||||
*/
|
||||
private final Map<TopicPartition, PartitionData> toSend;
|
||||
|
||||
/**
|
||||
* The partitions to send in the request's "forget" list.
|
||||
*/
|
||||
private final List<TopicPartition> toForget;
|
||||
|
||||
/**
|
||||
* All of the partitions which exist in the fetch request session.
|
||||
*/
|
||||
private final Map<TopicPartition, PartitionData> sessionPartitions;
|
||||
|
||||
/**
|
||||
* The metadata to use in this fetch request.
|
||||
*/
|
||||
private final FetchMetadata metadata;
|
||||
|
||||
FetchRequestData(Map<TopicPartition, PartitionData> toSend,
|
||||
List<TopicPartition> toForget,
|
||||
Map<TopicPartition, PartitionData> sessionPartitions,
|
||||
FetchMetadata metadata) {
|
||||
this.toSend = toSend;
|
||||
this.toForget = toForget;
|
||||
this.sessionPartitions = sessionPartitions;
|
||||
this.metadata = metadata;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the set of partitions to send in this fetch request.
|
||||
*/
|
||||
public Map<TopicPartition, PartitionData> toSend() {
|
||||
return toSend;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a list of partitions to forget in this fetch request.
|
||||
*/
|
||||
public List<TopicPartition> toForget() {
|
||||
return toForget;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the full set of partitions involved in this fetch request.
|
||||
*/
|
||||
public Map<TopicPartition, PartitionData> sessionPartitions() {
|
||||
return sessionPartitions;
|
||||
}
|
||||
|
||||
public FetchMetadata metadata() {
|
||||
return metadata;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
if (metadata.isFull()) {
|
||||
StringBuilder bld = new StringBuilder("FullFetchRequest(");
|
||||
String prefix = "";
|
||||
for (TopicPartition partition : toSend.keySet()) {
|
||||
bld.append(prefix);
|
||||
bld.append(partition);
|
||||
prefix = ", ";
|
||||
}
|
||||
bld.append(")");
|
||||
return bld.toString();
|
||||
} else {
|
||||
StringBuilder bld = new StringBuilder("IncrementalFetchRequest(toSend=(");
|
||||
String prefix = "";
|
||||
for (TopicPartition partition : toSend.keySet()) {
|
||||
bld.append(prefix);
|
||||
bld.append(partition);
|
||||
prefix = ", ";
|
||||
}
|
||||
bld.append("), toForget=(");
|
||||
prefix = "";
|
||||
for (TopicPartition partition : toForget) {
|
||||
bld.append(prefix);
|
||||
bld.append(partition);
|
||||
prefix = ", ";
|
||||
}
|
||||
bld.append("), implied=(");
|
||||
prefix = "";
|
||||
for (TopicPartition partition : sessionPartitions.keySet()) {
|
||||
if (!toSend.containsKey(partition)) {
|
||||
bld.append(prefix);
|
||||
bld.append(partition);
|
||||
prefix = ", ";
|
||||
}
|
||||
}
|
||||
bld.append("))");
|
||||
return bld.toString();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public class Builder {
    /**
     * The next partitions which we want to fetch.
     *
     * It is important to maintain the insertion order of this list by using a LinkedHashMap rather
     * than a regular Map.
     *
     * One reason is that when dealing with FULL fetch requests, if there is not enough response
     * space to return data from all partitions, the server will only return data from partitions
     * early in this list.
     *
     * Another reason is because we make use of the list ordering to optimize the preparation of
     * incremental fetch requests (see below).
     */
    private LinkedHashMap<TopicPartition, PartitionData> next;
    // When true, build() hands callers a defensive copy of sessionPartitions rather
    // than an unmodifiable view of the live map.
    private final boolean copySessionPartitions;

    Builder() {
        this.next = new LinkedHashMap<>();
        this.copySessionPartitions = true;
    }

    Builder(int initialSize, boolean copySessionPartitions) {
        this.next = new LinkedHashMap<>(initialSize);
        this.copySessionPartitions = copySessionPartitions;
    }

    /**
     * Mark that we want data from this partition in the upcoming fetch.
     */
    public void add(TopicPartition topicPartition, PartitionData data) {
        next.put(topicPartition, data);
    }

    /**
     * Build the data for the next fetch request. For a full request, the session is
     * replaced by `next` wholesale. For an incremental request, `next` is diffed
     * against the current session to compute the added/altered partitions to send
     * and the removed partitions to forget. The builder is single-use: `next` is
     * set to null afterwards.
     */
    public FetchRequestData build() {
        if (nextMetadata.isFull()) {
            if (log.isDebugEnabled()) {
                log.debug("Built full fetch {} for node {} with {}.",
                    nextMetadata, node, partitionsToLogString(next.keySet()));
            }
            sessionPartitions = next;
            next = null;
            Map<TopicPartition, PartitionData> toSend =
                Collections.unmodifiableMap(new LinkedHashMap<>(sessionPartitions));
            return new FetchRequestData(toSend, Collections.emptyList(), toSend, nextMetadata);
        }

        List<TopicPartition> added = new ArrayList<>();
        List<TopicPartition> removed = new ArrayList<>();
        List<TopicPartition> altered = new ArrayList<>();
        // Walk the existing session; anything also present in `next` is either
        // unchanged (implied) or altered, anything absent from `next` is removed.
        for (Iterator<Entry<TopicPartition, PartitionData>> iter =
                sessionPartitions.entrySet().iterator(); iter.hasNext(); ) {
            Entry<TopicPartition, PartitionData> entry = iter.next();
            TopicPartition topicPartition = entry.getKey();
            PartitionData prevData = entry.getValue();
            PartitionData nextData = next.remove(topicPartition);
            if (nextData != null) {
                if (!prevData.equals(nextData)) {
                    // Re-add the altered partition to the end of 'next'
                    next.put(topicPartition, nextData);
                    entry.setValue(nextData);
                    altered.add(topicPartition);
                }
            } else {
                // Remove this partition from the session.
                iter.remove();
                // Indicate that we no longer want to listen to this partition.
                removed.add(topicPartition);
            }
        }
        // Add any new partitions to the session.
        for (Entry<TopicPartition, PartitionData> entry : next.entrySet()) {
            TopicPartition topicPartition = entry.getKey();
            PartitionData nextData = entry.getValue();
            if (sessionPartitions.containsKey(topicPartition)) {
                // In the previous loop, all the partitions which existed in both sessionPartitions
                // and next were moved to the end of next, or removed from next. Therefore,
                // once we hit one of them, we know there are no more unseen entries to look
                // at in next.
                break;
            }
            sessionPartitions.put(topicPartition, nextData);
            added.add(topicPartition);
        }
        if (log.isDebugEnabled()) {
            log.debug("Built incremental fetch {} for node {}. Added {}, altered {}, removed {} " +
                "out of {}", nextMetadata, node, partitionsToLogString(added),
                partitionsToLogString(altered), partitionsToLogString(removed),
                partitionsToLogString(sessionPartitions.keySet()));
        }
        Map<TopicPartition, PartitionData> toSend = Collections.unmodifiableMap(next);
        Map<TopicPartition, PartitionData> curSessionPartitions = copySessionPartitions
            ? Collections.unmodifiableMap(new LinkedHashMap<>(sessionPartitions))
            : Collections.unmodifiableMap(sessionPartitions);
        next = null;
        return new FetchRequestData(toSend, Collections.unmodifiableList(removed),
            curSessionPartitions, nextMetadata);
    }
}
|
||||
|
||||
/**
 * Create an empty {@link Builder} for assembling the next fetch request for this session.
 */
public Builder newBuilder() {
    return new Builder();
}
|
||||
|
||||
|
||||
/** A builder that allows for presizing the PartitionData hashmap, and avoiding making a
 * secondary copy of the sessionPartitions, in cases where this is not necessary.
 * This builder is primarily for use by the Replica Fetcher.
 * @param size the initial size of the PartitionData hashmap
 * @param copySessionPartitions boolean denoting whether the builder should make a deep copy of
 *                              session partitions
 */
public Builder newBuilder(int size, boolean copySessionPartitions) {
    return new Builder(size, copySessionPartitions);
}
|
||||
|
||||
private String partitionsToLogString(Collection<TopicPartition> partitions) {
|
||||
if (!log.isTraceEnabled()) {
|
||||
return String.format("%d partition(s)", partitions.size());
|
||||
}
|
||||
return "(" + Utils.join(partitions, ", ") + ")";
|
||||
}
|
||||
|
||||
/**
 * Return the partitions which are expected to be in a particular set, but which are not.
 *
 * @param toFind The partitions to look for.
 * @param toSearch The set of partitions to search.
 * @return the partitions of {@code toFind} absent from {@code toSearch}; an empty set
 *         (never null) if all partitions were found. Iteration order follows
 *         {@code toFind} since the result is a LinkedHashSet.
 */
static Set<TopicPartition> findMissing(Set<TopicPartition> toFind, Set<TopicPartition> toSearch) {
    Set<TopicPartition> ret = new LinkedHashSet<>();
    for (TopicPartition partition : toFind) {
        if (!toSearch.contains(partition)) {
            ret.add(partition);
        }
    }
    return ret;
}
|
||||
|
||||
/**
|
||||
* Verify that a full fetch response contains all the partitions in the fetch session.
|
||||
*
|
||||
* @param response The response.
|
||||
* @return True if the full fetch response partitions are valid.
|
||||
*/
|
||||
String verifyFullFetchResponsePartitions(FetchResponse<?> response) {
|
||||
StringBuilder bld = new StringBuilder();
|
||||
Set<TopicPartition> extra =
|
||||
findMissing(response.responseData().keySet(), sessionPartitions.keySet());
|
||||
Set<TopicPartition> omitted =
|
||||
findMissing(sessionPartitions.keySet(), response.responseData().keySet());
|
||||
if (!omitted.isEmpty()) {
|
||||
bld.append("omitted=(").append(Utils.join(omitted, ", ")).append(", ");
|
||||
}
|
||||
if (!extra.isEmpty()) {
|
||||
bld.append("extra=(").append(Utils.join(extra, ", ")).append(", ");
|
||||
}
|
||||
if ((!omitted.isEmpty()) || (!extra.isEmpty())) {
|
||||
bld.append("response=(").append(Utils.join(response.responseData().keySet(), ", ")).append(")");
|
||||
return bld.toString();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
 * Verify that the partitions in an incremental fetch response are contained in the session.
 *
 * @param response The response.
 * @return null if the incremental fetch response partitions are valid; otherwise a
 *         human-readable description of the partitions that are not part of the session.
 */
String verifyIncrementalFetchResponsePartitions(FetchResponse<?> response) {
    Set<TopicPartition> extra =
        findMissing(response.responseData().keySet(), sessionPartitions.keySet());
    if (!extra.isEmpty()) {
        StringBuilder bld = new StringBuilder();
        bld.append("extra=(").append(Utils.join(extra, ", ")).append("), ");
        bld.append("response=(").append(
            Utils.join(response.responseData().keySet(), ", ")).append("), ");
        return bld.toString();
    }
    return null;
}
|
||||
|
||||
/**
|
||||
* Create a string describing the partitions in a FetchResponse.
|
||||
*
|
||||
* @param response The FetchResponse.
|
||||
* @return The string to log.
|
||||
*/
|
||||
private String responseDataToLogString(FetchResponse<?> response) {
|
||||
if (!log.isTraceEnabled()) {
|
||||
int implied = sessionPartitions.size() - response.responseData().size();
|
||||
if (implied > 0) {
|
||||
return String.format(" with %d response partition(s), %d implied partition(s)",
|
||||
response.responseData().size(), implied);
|
||||
} else {
|
||||
return String.format(" with %d response partition(s)",
|
||||
response.responseData().size());
|
||||
}
|
||||
}
|
||||
StringBuilder bld = new StringBuilder();
|
||||
bld.append(" with response=(").
|
||||
append(Utils.join(response.responseData().keySet(), ", ")).
|
||||
append(")");
|
||||
String prefix = ", implied=(";
|
||||
String suffix = "";
|
||||
for (TopicPartition partition : sessionPartitions.keySet()) {
|
||||
if (!response.responseData().containsKey(partition)) {
|
||||
bld.append(prefix);
|
||||
bld.append(partition);
|
||||
prefix = ", ";
|
||||
suffix = ")";
|
||||
}
|
||||
}
|
||||
bld.append(suffix);
|
||||
return bld.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Handle the fetch response.
|
||||
*
|
||||
* @param response The response.
|
||||
* @return True if the response is well-formed; false if it can't be processed
|
||||
* because of missing or unexpected partitions.
|
||||
*/
|
||||
public boolean handleResponse(FetchResponse<?> response) {
|
||||
if (response.error() != Errors.NONE) {
|
||||
log.info("Node {} was unable to process the fetch request with {}: {}.",
|
||||
node, nextMetadata, response.error());
|
||||
if (response.error() == Errors.FETCH_SESSION_ID_NOT_FOUND) {
|
||||
nextMetadata = FetchMetadata.INITIAL;
|
||||
} else {
|
||||
nextMetadata = nextMetadata.nextCloseExisting();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
if (nextMetadata.isFull()) {
|
||||
if (response.responseData().isEmpty() && response.throttleTimeMs() > 0) {
|
||||
// Normally, an empty full fetch response would be invalid. However, KIP-219
|
||||
// specifies that if the broker wants to throttle the client, it will respond
|
||||
// to a full fetch request with an empty response and a throttleTimeMs
|
||||
// value set. We don't want to log this with a warning, since it's not an error.
|
||||
// However, the empty full fetch response can't be processed, so it's still appropriate
|
||||
// to return false here.
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug("Node {} sent a empty full fetch response to indicate that this " +
|
||||
"client should be throttled for {} ms.", node, response.throttleTimeMs());
|
||||
}
|
||||
nextMetadata = FetchMetadata.INITIAL;
|
||||
return false;
|
||||
}
|
||||
String problem = verifyFullFetchResponsePartitions(response);
|
||||
if (problem != null) {
|
||||
log.info("Node {} sent an invalid full fetch response with {}", node, problem);
|
||||
nextMetadata = FetchMetadata.INITIAL;
|
||||
return false;
|
||||
} else if (response.sessionId() == INVALID_SESSION_ID) {
|
||||
if (log.isDebugEnabled())
|
||||
log.debug("Node {} sent a full fetch response{}", node, responseDataToLogString(response));
|
||||
nextMetadata = FetchMetadata.INITIAL;
|
||||
return true;
|
||||
} else {
|
||||
// The server created a new incremental fetch session.
|
||||
if (log.isDebugEnabled())
|
||||
log.debug("Node {} sent a full fetch response that created a new incremental " +
|
||||
"fetch session {}{}", node, response.sessionId(), responseDataToLogString(response));
|
||||
nextMetadata = FetchMetadata.newIncremental(response.sessionId());
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
String problem = verifyIncrementalFetchResponsePartitions(response);
|
||||
if (problem != null) {
|
||||
log.info("Node {} sent an invalid incremental fetch response with {}", node, problem);
|
||||
nextMetadata = nextMetadata.nextCloseExisting();
|
||||
return false;
|
||||
} else if (response.sessionId() == INVALID_SESSION_ID) {
|
||||
// The incremental fetch session was closed by the server.
|
||||
if (log.isDebugEnabled())
|
||||
log.debug("Node {} sent an incremental fetch response closing session {}{}",
|
||||
node, nextMetadata.sessionId(), responseDataToLogString(response));
|
||||
nextMetadata = FetchMetadata.INITIAL;
|
||||
return true;
|
||||
} else {
|
||||
// The incremental fetch session was continued by the server.
|
||||
// We don't have to do anything special here to support KIP-219, since an empty incremental
|
||||
// fetch request is perfectly valid.
|
||||
if (log.isDebugEnabled())
|
||||
log.debug("Node {} sent an incremental fetch response with throttleTimeMs = {} " +
|
||||
"for session {}{}", response.throttleTimeMs(), node, response.sessionId(),
|
||||
responseDataToLogString(response));
|
||||
nextMetadata = nextMetadata.nextIncremental();
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Handle an error sending the prepared request.
 *
 * When a network error occurs, we close any existing fetch session on our next request,
 * and try to create a new session.
 *
 * @param t The exception.
 */
public void handleError(Throwable t) {
    log.info("Error sending fetch request {} to node {}: {}.", nextMetadata, node, t);
    // Ask the broker to drop the (possibly inconsistent) session and start a fresh one.
    nextMetadata = nextMetadata.nextCloseExisting();
}
|
||||
}
|
||||
@@ -0,0 +1,100 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import org.apache.kafka.common.config.AbstractConfig;
|
||||
import org.apache.kafka.common.requests.JoinGroupRequest;
|
||||
|
||||
import java.util.Locale;
|
||||
import java.util.Optional;
|
||||
|
||||
/**
|
||||
* Class to extract group rebalance related configs.
|
||||
*/
|
||||
public class GroupRebalanceConfig {
|
||||
|
||||
public enum ProtocolType {
|
||||
CONSUMER,
|
||||
CONNECT;
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return super.toString().toLowerCase(Locale.ROOT);
|
||||
}
|
||||
}
|
||||
|
||||
public final int sessionTimeoutMs;
|
||||
public final int rebalanceTimeoutMs;
|
||||
public final int heartbeatIntervalMs;
|
||||
public final String groupId;
|
||||
public final Optional<String> groupInstanceId;
|
||||
public final long retryBackoffMs;
|
||||
public final boolean leaveGroupOnClose;
|
||||
|
||||
public GroupRebalanceConfig(AbstractConfig config, ProtocolType protocolType) {
|
||||
this.sessionTimeoutMs = config.getInt(CommonClientConfigs.SESSION_TIMEOUT_MS_CONFIG);
|
||||
|
||||
// Consumer and Connect use different config names for defining rebalance timeout
|
||||
if (protocolType == ProtocolType.CONSUMER) {
|
||||
this.rebalanceTimeoutMs = config.getInt(CommonClientConfigs.MAX_POLL_INTERVAL_MS_CONFIG);
|
||||
} else {
|
||||
this.rebalanceTimeoutMs = config.getInt(CommonClientConfigs.REBALANCE_TIMEOUT_MS_CONFIG);
|
||||
}
|
||||
|
||||
this.heartbeatIntervalMs = config.getInt(CommonClientConfigs.HEARTBEAT_INTERVAL_MS_CONFIG);
|
||||
this.groupId = config.getString(CommonClientConfigs.GROUP_ID_CONFIG);
|
||||
|
||||
// Static membership is only introduced in consumer API.
|
||||
if (protocolType == ProtocolType.CONSUMER) {
|
||||
String groupInstanceId = config.getString(CommonClientConfigs.GROUP_INSTANCE_ID_CONFIG);
|
||||
if (groupInstanceId != null) {
|
||||
JoinGroupRequest.validateGroupInstanceId(groupInstanceId);
|
||||
this.groupInstanceId = Optional.of(groupInstanceId);
|
||||
} else {
|
||||
this.groupInstanceId = Optional.empty();
|
||||
}
|
||||
} else {
|
||||
this.groupInstanceId = Optional.empty();
|
||||
}
|
||||
|
||||
this.retryBackoffMs = config.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG);
|
||||
|
||||
// Internal leave group config is only defined in Consumer.
|
||||
if (protocolType == ProtocolType.CONSUMER) {
|
||||
this.leaveGroupOnClose = config.getBoolean("internal.leave.group.on.close");
|
||||
} else {
|
||||
this.leaveGroupOnClose = true;
|
||||
}
|
||||
}
|
||||
|
||||
// For testing purpose.
|
||||
public GroupRebalanceConfig(final int sessionTimeoutMs,
|
||||
final int rebalanceTimeoutMs,
|
||||
final int heartbeatIntervalMs,
|
||||
String groupId,
|
||||
Optional<String> groupInstanceId,
|
||||
long retryBackoffMs,
|
||||
boolean leaveGroupOnClose) {
|
||||
this.sessionTimeoutMs = sessionTimeoutMs;
|
||||
this.rebalanceTimeoutMs = rebalanceTimeoutMs;
|
||||
this.heartbeatIntervalMs = heartbeatIntervalMs;
|
||||
this.groupId = groupId;
|
||||
this.groupInstanceId = groupInstanceId;
|
||||
this.retryBackoffMs = retryBackoffMs;
|
||||
this.leaveGroupOnClose = leaveGroupOnClose;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,185 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import java.util.ArrayDeque;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Deque;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
/**
|
||||
* The set of requests which have been sent or are being sent but haven't yet received a response
|
||||
*/
|
||||
final class InFlightRequests {
|
||||
|
||||
private final int maxInFlightRequestsPerConnection;
|
||||
private final Map<String, Deque<NetworkClient.InFlightRequest>> requests = new HashMap<>();
|
||||
/** Thread safe total number of in flight requests. */
|
||||
private final AtomicInteger inFlightRequestCount = new AtomicInteger(0);
|
||||
|
||||
public InFlightRequests(int maxInFlightRequestsPerConnection) {
|
||||
this.maxInFlightRequestsPerConnection = maxInFlightRequestsPerConnection;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add the given request to the queue for the connection it was directed to
|
||||
*/
|
||||
public void add(NetworkClient.InFlightRequest request) {
|
||||
String destination = request.destination;
|
||||
Deque<NetworkClient.InFlightRequest> reqs = this.requests.get(destination);
|
||||
if (reqs == null) {
|
||||
reqs = new ArrayDeque<>();
|
||||
this.requests.put(destination, reqs);
|
||||
}
|
||||
reqs.addFirst(request);
|
||||
inFlightRequestCount.incrementAndGet();
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the request queue for the given node
|
||||
*/
|
||||
private Deque<NetworkClient.InFlightRequest> requestQueue(String node) {
|
||||
Deque<NetworkClient.InFlightRequest> reqs = requests.get(node);
|
||||
if (reqs == null || reqs.isEmpty())
|
||||
throw new IllegalStateException("There are no in-flight requests for node " + node);
|
||||
return reqs;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the oldest request (the one that will be completed next) for the given node
|
||||
*/
|
||||
public NetworkClient.InFlightRequest completeNext(String node) {
|
||||
NetworkClient.InFlightRequest inFlightRequest = requestQueue(node).pollLast();
|
||||
inFlightRequestCount.decrementAndGet();
|
||||
return inFlightRequest;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the last request we sent to the given node (but don't remove it from the queue)
|
||||
* @param node The node id
|
||||
*/
|
||||
public NetworkClient.InFlightRequest lastSent(String node) {
|
||||
return requestQueue(node).peekFirst();
|
||||
}
|
||||
|
||||
/**
|
||||
* Complete the last request that was sent to a particular node.
|
||||
* @param node The node the request was sent to
|
||||
* @return The request
|
||||
*/
|
||||
public NetworkClient.InFlightRequest completeLastSent(String node) {
|
||||
NetworkClient.InFlightRequest inFlightRequest = requestQueue(node).pollFirst();
|
||||
inFlightRequestCount.decrementAndGet();
|
||||
return inFlightRequest;
|
||||
}
|
||||
|
||||
/**
|
||||
* Can we send more requests to this node?
|
||||
*
|
||||
* @param node Node in question
|
||||
* @return true iff we have no requests still being sent to the given node
|
||||
*/
|
||||
public boolean canSendMore(String node) {
|
||||
Deque<NetworkClient.InFlightRequest> queue = requests.get(node);
|
||||
return queue == null || queue.isEmpty() ||
|
||||
(queue.peekFirst().send.completed() && queue.size() < this.maxInFlightRequestsPerConnection);
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the number of in-flight requests directed at the given node
|
||||
* @param node The node
|
||||
* @return The request count.
|
||||
*/
|
||||
public int count(String node) {
|
||||
Deque<NetworkClient.InFlightRequest> queue = requests.get(node);
|
||||
return queue == null ? 0 : queue.size();
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true if there is no in-flight request directed at the given node and false otherwise
|
||||
*/
|
||||
public boolean isEmpty(String node) {
|
||||
Deque<NetworkClient.InFlightRequest> queue = requests.get(node);
|
||||
return queue == null || queue.isEmpty();
|
||||
}
|
||||
|
||||
/**
|
||||
* Count all in-flight requests for all nodes. This method is thread safe, but may lag the actual count.
|
||||
*/
|
||||
public int count() {
|
||||
return inFlightRequestCount.get();
|
||||
}
|
||||
|
||||
/**
|
||||
* Return true if there is no in-flight request and false otherwise
|
||||
*/
|
||||
public boolean isEmpty() {
|
||||
for (Deque<NetworkClient.InFlightRequest> deque : this.requests.values()) {
|
||||
if (!deque.isEmpty())
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear out all the in-flight requests for the given node and return them
|
||||
*
|
||||
* @param node The node
|
||||
* @return All the in-flight requests for that node that have been removed
|
||||
*/
|
||||
public Iterable<NetworkClient.InFlightRequest> clearAll(String node) {
|
||||
Deque<NetworkClient.InFlightRequest> reqs = requests.get(node);
|
||||
if (reqs == null) {
|
||||
return Collections.emptyList();
|
||||
} else {
|
||||
final Deque<NetworkClient.InFlightRequest> clearedRequests = requests.remove(node);
|
||||
inFlightRequestCount.getAndAdd(-clearedRequests.size());
|
||||
return () -> clearedRequests.descendingIterator();
|
||||
}
|
||||
}
|
||||
|
||||
private Boolean hasExpiredRequest(long now, Deque<NetworkClient.InFlightRequest> deque) {
|
||||
for (NetworkClient.InFlightRequest request : deque) {
|
||||
long timeSinceSend = Math.max(0, now - request.sendTimeMs);
|
||||
if (timeSinceSend > request.requestTimeoutMs)
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a list of nodes with pending in-flight request, that need to be timed out
|
||||
*
|
||||
* @param now current time in milliseconds
|
||||
* @return list of nodes
|
||||
*/
|
||||
public List<String> nodesWithTimedOutRequests(long now) {
|
||||
List<String> nodeIds = new ArrayList<>();
|
||||
for (Map.Entry<String, Deque<NetworkClient.InFlightRequest>> requestEntry : requests.entrySet()) {
|
||||
String nodeId = requestEntry.getKey();
|
||||
Deque<NetworkClient.InFlightRequest> deque = requestEntry.getValue();
|
||||
if (hasExpiredRequest(now, deque))
|
||||
nodeIds.add(nodeId);
|
||||
}
|
||||
return nodeIds;
|
||||
}
|
||||
|
||||
}
|
||||
216
clients/src/main/java/org/apache/kafka/clients/KafkaClient.java
Normal file
216
clients/src/main/java/org/apache/kafka/clients/KafkaClient.java
Normal file
@@ -0,0 +1,216 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import org.apache.kafka.common.Node;
|
||||
import org.apache.kafka.common.errors.AuthenticationException;
|
||||
import org.apache.kafka.common.requests.AbstractRequest;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.util.List;
|
||||
|
||||
/**
 * The interface for {@link NetworkClient}
 */
public interface KafkaClient extends Closeable {

    /**
     * Check if we are currently ready to send another request to the given node but don't attempt to connect if we
     * aren't.
     *
     * @param node The node to check
     * @param now The current timestamp
     * @return true if we are currently ready to send another request to the node
     */
    boolean isReady(Node node, long now);

    /**
     * Initiate a connection to the given node (if necessary), and return true if already connected. The readiness of a
     * node will change only when poll is invoked.
     *
     * @param node The node to connect to.
     * @param now The current time
     * @return true iff we are ready to immediately initiate the sending of another request to the given node.
     */
    boolean ready(Node node, long now);

    /**
     * Return the number of milliseconds to wait, based on the connection state, before attempting to send data. When
     * disconnected, this respects the reconnect backoff time. When connecting or connected, this handles slow/stalled
     * connections.
     *
     * @param node The node to check
     * @param now The current timestamp
     * @return The number of milliseconds to wait.
     */
    long connectionDelay(Node node, long now);

    /**
     * Return the number of milliseconds to wait, based on the connection state and the throttle time, before
     * attempting to send data. If the connection has been established but being throttled, return throttle delay.
     * Otherwise, return connection delay.
     *
     * @param node the connection to check
     * @param now the current time in ms
     * @return The number of milliseconds to wait.
     */
    long pollDelayMs(Node node, long now);

    /**
     * Check if the connection of the node has failed, based on the connection state. Such connection failures are
     * usually transient and can be resumed in the next {@link #ready(org.apache.kafka.common.Node, long)} }
     * call, but there are cases where transient failures need to be caught and re-acted upon.
     *
     * @param node the node to check
     * @return true iff the connection has failed and the node is disconnected
     */
    boolean connectionFailed(Node node);

    /**
     * Check if authentication to this node has failed, based on the connection state. Authentication failures are
     * propagated without any retries.
     *
     * @param node the node to check
     * @return an AuthenticationException iff authentication has failed, null otherwise
     */
    AuthenticationException authenticationException(Node node);

    /**
     * Queue up the given request for sending. Requests can only be sent on ready connections.
     * @param request The request
     * @param now The current timestamp
     */
    void send(ClientRequest request, long now);

    /**
     * Do actual reads and writes from sockets.
     *
     * @param timeout The maximum amount of time to wait for responses in ms, must be non-negative. The implementation
     *                is free to use a lower value if appropriate (common reasons for this are a lower request or
     *                metadata update timeout)
     * @param now The current time in ms
     * @return the responses that completed during this poll
     * @throws IllegalStateException If a request is sent to an unready node
     */
    List<ClientResponse> poll(long timeout, long now);

    /**
     * Disconnects the connection to a particular node, if there is one.
     * Any pending ClientRequests for this connection will receive disconnections.
     *
     * @param nodeId The id of the node
     */
    void disconnect(String nodeId);

    /**
     * Closes the connection to a particular node (if there is one).
     * All requests on the connection will be cleared. ClientRequest callbacks will not be invoked
     * for the cleared requests, nor will they be returned from poll().
     *
     * @param nodeId The id of the node
     */
    void close(String nodeId);

    /**
     * Choose the node with the fewest outstanding requests. This method will prefer a node with an existing connection,
     * but will potentially choose a node for which we don't yet have a connection if all existing connections are in
     * use.
     *
     * @param now The current time in ms
     * @return The node with the fewest in-flight requests.
     */
    Node leastLoadedNode(long now);

    /**
     * The number of currently in-flight requests for which we have not yet returned a response
     */
    int inFlightRequestCount();

    /**
     * Return true if there is at least one in-flight request and false otherwise.
     */
    boolean hasInFlightRequests();

    /**
     * Get the total in-flight requests for a particular node
     *
     * @param nodeId The id of the node
     */
    int inFlightRequestCount(String nodeId);

    /**
     * Return true if there is at least one in-flight request for a particular node and false otherwise.
     */
    boolean hasInFlightRequests(String nodeId);

    /**
     * Return true if there is at least one node with connection in the READY state and not throttled. Returns false
     * otherwise.
     *
     * @param now the current time
     */
    boolean hasReadyNodes(long now);

    /**
     * Wake up the client if it is currently blocked waiting for I/O
     */
    void wakeup();

    /**
     * Create a new ClientRequest.
     *
     * @param nodeId the node to send to
     * @param requestBuilder the request builder to use
     * @param createdTimeMs the time in milliseconds to use as the creation time of the request
     * @param expectResponse true iff we expect a response
     */
    ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder,
                                   long createdTimeMs, boolean expectResponse);

    /**
     * Create a new ClientRequest.
     *
     * @param nodeId the node to send to
     * @param requestBuilder the request builder to use
     * @param createdTimeMs the time in milliseconds to use as the creation time of the request
     * @param expectResponse true iff we expect a response
     * @param requestTimeoutMs Upper bound time in milliseconds to await a response before disconnecting the socket and
     *                         cancelling the request. The request may get cancelled sooner if the socket disconnects
     *                         for any reason including if another pending request to the same node timed out first.
     * @param callback the callback to invoke when we get a response
     */
    ClientRequest newClientRequest(String nodeId,
                                   AbstractRequest.Builder<?> requestBuilder,
                                   long createdTimeMs,
                                   boolean expectResponse,
                                   int requestTimeoutMs,
                                   RequestCompletionHandler callback);


    /**
     * Initiates shutdown of this client. This method may be invoked from another thread while this
     * client is being polled. No further requests may be sent using the client. The current poll()
     * will be terminated using wakeup(). The client should be explicitly shutdown using {@link #close()}
     * after poll returns. Note that {@link #close()} should not be invoked concurrently while polling.
     */
    void initiateClose();

    /**
     * Returns true if the client is still active. Returns false if {@link #initiateClose()} or {@link #close()}
     * was invoked for this client.
     */
    boolean active();

}
|
||||
@@ -0,0 +1,87 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import org.apache.kafka.common.KafkaException;
|
||||
import org.apache.kafka.common.Node;
|
||||
import org.apache.kafka.common.errors.AuthenticationException;
|
||||
import org.apache.kafka.common.requests.MetadataResponse;
|
||||
import org.apache.kafka.common.requests.RequestHeader;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
|
||||
/**
|
||||
* A simple implementation of `MetadataUpdater` that returns the cluster nodes set via the constructor or via
|
||||
* `setNodes`.
|
||||
*
|
||||
* This is useful in cases where automatic metadata updates are not required. An example is controller/broker
|
||||
* communication.
|
||||
*
|
||||
* This class is not thread-safe!
|
||||
*/
|
||||
public class ManualMetadataUpdater implements MetadataUpdater {
|
||||
private List<Node> nodes;
|
||||
|
||||
public ManualMetadataUpdater() {
|
||||
this(new ArrayList<Node>(0));
|
||||
}
|
||||
|
||||
public ManualMetadataUpdater(List<Node> nodes) {
|
||||
this.nodes = nodes;
|
||||
}
|
||||
|
||||
public void setNodes(List<Node> nodes) {
|
||||
this.nodes = nodes;
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Node> fetchNodes() {
|
||||
return new ArrayList<>(nodes);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isUpdateDue(long now) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long maybeUpdate(long now) {
|
||||
return Long.MAX_VALUE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleServerDisconnect(long now, String nodeId, Optional<AuthenticationException> maybeAuthException) {
|
||||
// We don't fail the broker on failures. There should be sufficient information from
|
||||
// the NetworkClient logs to indicate the reason for the failure.
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleFailedRequest(long now, Optional<KafkaException> maybeFatalException) {
|
||||
// Do nothing
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleSuccessfulResponse(RequestHeader requestHeader, long now, MetadataResponse response) {
|
||||
// Do nothing
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
}
|
||||
}
|
||||
603
clients/src/main/java/org/apache/kafka/clients/Metadata.java
Normal file
603
clients/src/main/java/org/apache/kafka/clients/Metadata.java
Normal file
@@ -0,0 +1,603 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import org.apache.kafka.common.Cluster;
|
||||
import org.apache.kafka.common.KafkaException;
|
||||
import org.apache.kafka.common.Node;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.errors.InvalidMetadataException;
|
||||
import org.apache.kafka.common.errors.InvalidTopicException;
|
||||
import org.apache.kafka.common.errors.TopicAuthorizationException;
|
||||
import org.apache.kafka.common.internals.ClusterResourceListeners;
|
||||
import org.apache.kafka.common.protocol.Errors;
|
||||
import org.apache.kafka.common.requests.MetadataRequest;
|
||||
import org.apache.kafka.common.requests.MetadataResponse;
|
||||
import org.apache.kafka.common.utils.LogContext;
|
||||
import org.slf4j.Logger;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
import static org.apache.kafka.common.record.RecordBatch.NO_PARTITION_LEADER_EPOCH;
|
||||
|
||||
/**
|
||||
* A class encapsulating some of the logic around metadata.
|
||||
* <p>
|
||||
* This class is shared by the client thread (for partitioning) and the background sender thread.
|
||||
*
|
||||
* Metadata is maintained for only a subset of topics, which can be added to over time. When we request metadata for a
|
||||
* topic we don't have any metadata for it will trigger a metadata update.
|
||||
* <p>
|
||||
* If topic expiry is enabled for the metadata, any topic that has not been used within the expiry interval
|
||||
* is removed from the metadata refresh set after an update. Consumers disable topic expiry since they explicitly
|
||||
* manage topics while producers rely on topic expiry to limit the refresh set.
|
||||
*/
|
||||
public class Metadata implements Closeable {
    private final Logger log;
    // Minimum wait between two metadata refresh attempts, successful or not.
    private final long refreshBackoffMs;
    // Maximum age of the cached metadata before a full refresh is forced.
    private final long metadataExpireMs;
    private int updateVersion; // bumped on every metadata response
    private int requestVersion; // bumped on every new topic addition
    // Time of the last refresh attempt, whether or not it succeeded.
    private long lastRefreshMs;
    // Time of the last full refresh that completed successfully.
    private long lastSuccessfulRefreshMs;
    // Fatal error (e.g. authentication failure) recorded via fatalError(); rethrown to callers once.
    private KafkaException fatalException;
    // Topics reported invalid / unauthorized by the most recent metadata response.
    private Set<String> invalidTopics;
    private Set<String> unauthorizedTopics;
    private MetadataCache cache = MetadataCache.empty();
    // Whether a full (all active topics) or partial (newly added topics only) update is pending.
    private boolean needFullUpdate;
    private boolean needPartialUpdate;
    private final ClusterResourceListeners clusterResourceListeners;
    private boolean isClosed;
    // Highest leader epoch observed per partition; used to reject stale metadata.
    private final Map<TopicPartition, Integer> lastSeenLeaderEpochs;

    /**
     * Create a new Metadata instance
     *
     * @param refreshBackoffMs The minimum amount of time that must expire between metadata refreshes to avoid busy
     *                         polling
     * @param metadataExpireMs The maximum amount of time that metadata can be retained without refresh
     * @param logContext Log context corresponding to the containing client
     * @param clusterResourceListeners List of ClusterResourceListeners which will receive metadata updates.
     */
    public Metadata(long refreshBackoffMs,
                    long metadataExpireMs,
                    LogContext logContext,
                    ClusterResourceListeners clusterResourceListeners) {
        this.log = logContext.logger(Metadata.class);
        this.refreshBackoffMs = refreshBackoffMs;
        this.metadataExpireMs = metadataExpireMs;
        this.lastRefreshMs = 0L;
        this.lastSuccessfulRefreshMs = 0L;
        this.requestVersion = 0;
        this.updateVersion = 0;
        this.needFullUpdate = false;
        this.needPartialUpdate = false;
        this.clusterResourceListeners = clusterResourceListeners;
        this.isClosed = false;
        this.lastSeenLeaderEpochs = new HashMap<>();
        this.invalidTopics = Collections.emptySet();
        this.unauthorizedTopics = Collections.emptySet();
    }

    /**
     * Get the current cluster info without blocking
     */
    public synchronized Cluster fetch() {
        return cache.cluster();
    }

    /**
     * Return the next time when the current cluster info can be updated (i.e., backoff time has elapsed).
     *
     * @param nowMs current time in ms
     * @return remaining time in ms till the cluster info can be updated again
     */
    public synchronized long timeToAllowUpdate(long nowMs) {
        return Math.max(this.lastRefreshMs + this.refreshBackoffMs - nowMs, 0);
    }

    /**
     * The next time to update the cluster info is the maximum of the time the current info will expire and the time the
     * current info can be updated (i.e. backoff time has elapsed); If an update has been requested then the expiry time
     * is now
     *
     * @param nowMs current time in ms
     * @return remaining time in ms till updating the cluster info
     */
    public synchronized long timeToNextUpdate(long nowMs) {
        long timeToExpire = updateRequested() ? 0 : Math.max(this.lastSuccessfulRefreshMs + this.metadataExpireMs - nowMs, 0);
        return Math.max(timeToExpire, timeToAllowUpdate(nowMs));
    }

    public long metadataExpireMs() {
        return this.metadataExpireMs;
    }

    /**
     * Request an update of the current cluster metadata info, return the current updateVersion before the update
     */
    public synchronized int requestUpdate() {
        this.needFullUpdate = true;
        return this.updateVersion;
    }

    /**
     * Request a partial metadata update covering newly added topics, returning the current updateVersion.
     * Bumps requestVersion so responses to older requests can be recognized as stale partial updates.
     */
    public synchronized int requestUpdateForNewTopics() {
        // Override the timestamp of last refresh to let immediate update.
        this.lastRefreshMs = 0;
        this.needPartialUpdate = true;
        this.requestVersion++;
        return this.updateVersion;
    }

    /**
     * Request an update for the partition metadata iff we have seen a newer leader epoch. This is called by the client
     * any time it handles a response from the broker that includes leader epoch, except for UpdateMetadata which
     * follows a different code path ({@link #update}).
     *
     * @param topicPartition the partition whose epoch was observed (must not be null)
     * @param leaderEpoch the newly observed leader epoch (must be non-negative)
     * @return true if we updated the last seen epoch, false otherwise
     */
    public synchronized boolean updateLastSeenEpochIfNewer(TopicPartition topicPartition, int leaderEpoch) {
        Objects.requireNonNull(topicPartition, "TopicPartition cannot be null");
        if (leaderEpoch < 0)
            throw new IllegalArgumentException("Invalid leader epoch " + leaderEpoch + " (must be non-negative)");

        Integer oldEpoch = lastSeenLeaderEpochs.get(topicPartition);
        log.trace("Determining if we should replace existing epoch {} with new epoch {} for partition {}", oldEpoch, leaderEpoch, topicPartition);

        final boolean updated;
        if (oldEpoch == null) {
            // NOTE(review): a first-seen epoch is deliberately not recorded here; epochs enter the
            // map via updateLatestMetadata() when a metadata response is processed — confirm.
            log.debug("Not replacing null epoch with new epoch {} for partition {}", leaderEpoch, topicPartition);
            updated = false;
        } else if (leaderEpoch > oldEpoch) {
            log.debug("Updating last seen epoch from {} to {} for partition {}", oldEpoch, leaderEpoch, topicPartition);
            lastSeenLeaderEpochs.put(topicPartition, leaderEpoch);
            updated = true;
        } else {
            log.debug("Not replacing existing epoch {} with new epoch {} for partition {}", oldEpoch, leaderEpoch, topicPartition);
            updated = false;
        }

        // A newer epoch implies our cached metadata is stale, so schedule a full refresh.
        this.needFullUpdate = this.needFullUpdate || updated;
        return updated;
    }

    public Optional<Integer> lastSeenLeaderEpoch(TopicPartition topicPartition) {
        return Optional.ofNullable(lastSeenLeaderEpochs.get(topicPartition));
    }

    /**
     * Check whether an update has been explicitly requested.
     *
     * @return true if an update was requested, false otherwise
     */
    public synchronized boolean updateRequested() {
        return this.needFullUpdate || this.needPartialUpdate;
    }

    /**
     * Return the cached partition info if it exists and a newer leader epoch isn't known about.
     */
    synchronized Optional<MetadataResponse.PartitionMetadata> partitionMetadataIfCurrent(TopicPartition topicPartition) {
        Integer epoch = lastSeenLeaderEpochs.get(topicPartition);
        Optional<MetadataResponse.PartitionMetadata> partitionMetadata = cache.partitionMetadata(topicPartition);
        if (epoch == null) {
            // old cluster format (no epochs)
            return partitionMetadata;
        } else {
            // Only return the cached entry if its epoch matches the newest epoch we have seen.
            return partitionMetadata.filter(metadata ->
                    metadata.leaderEpoch.orElse(NO_PARTITION_LEADER_EPOCH).equals(epoch));
        }
    }

    /**
     * Return the currently known leader node and epoch for the given partition. Either component
     * may be absent when the cached metadata is stale or incomplete.
     */
    public synchronized LeaderAndEpoch currentLeader(TopicPartition topicPartition) {
        Optional<MetadataResponse.PartitionMetadata> maybeMetadata = partitionMetadataIfCurrent(topicPartition);
        if (!maybeMetadata.isPresent())
            return new LeaderAndEpoch(Optional.empty(), Optional.ofNullable(lastSeenLeaderEpochs.get(topicPartition)));

        MetadataResponse.PartitionMetadata partitionMetadata = maybeMetadata.get();
        Optional<Integer> leaderEpochOpt = partitionMetadata.leaderEpoch;
        Optional<Node> leaderNodeOpt = partitionMetadata.leaderId.flatMap(cache::nodeById);
        return new LeaderAndEpoch(leaderNodeOpt, leaderEpochOpt);
    }

    /**
     * Seed the cache with the bootstrap addresses and schedule a full metadata update.
     */
    public synchronized void bootstrap(List<InetSocketAddress> addresses) {
        this.needFullUpdate = true;
        this.updateVersion += 1;
        this.cache = MetadataCache.bootstrap(addresses);
    }

    /**
     * Update metadata assuming the current request version. This is mainly for convenience in testing.
     */
    public synchronized void updateWithCurrentRequestVersion(MetadataResponse response, boolean isPartialUpdate, long nowMs) {
        this.update(this.requestVersion, response, isPartialUpdate, nowMs);
    }

    /**
     * Updates the cluster metadata. If topic expiry is enabled, expiry time
     * is set for topics if required and expired topics are removed from the metadata.
     *
     * @param requestVersion The request version corresponding to the update response, as provided by
     *     {@link #newMetadataRequestAndVersion(long)}.
     * @param response metadata response received from the broker
     * @param isPartialUpdate whether the metadata request was for a subset of the active topics
     * @param nowMs current time in milliseconds
     * @throws IllegalStateException if this instance has already been closed
     */
    public synchronized void update(int requestVersion, MetadataResponse response, boolean isPartialUpdate, long nowMs) {
        Objects.requireNonNull(response, "Metadata response cannot be null");
        if (isClosed())
            throw new IllegalStateException("Update requested after metadata close");

        // If topics were added since this request was issued, another partial update is still needed.
        this.needPartialUpdate = requestVersion < this.requestVersion;
        this.lastRefreshMs = nowMs;
        this.updateVersion += 1;
        if (!isPartialUpdate) {
            this.needFullUpdate = false;
            this.lastSuccessfulRefreshMs = nowMs;
        }

        String previousClusterId = cache.clusterResource().clusterId();

        this.cache = handleMetadataResponse(response, isPartialUpdate, nowMs);

        Cluster cluster = cache.cluster();
        maybeSetMetadataError(cluster);

        // Drop epoch tracking for topics the subclass no longer retains.
        this.lastSeenLeaderEpochs.keySet().removeIf(tp -> !retainTopic(tp.topic(), false, nowMs));

        String newClusterId = cache.clusterResource().clusterId();
        if (!Objects.equals(previousClusterId, newClusterId)) {
            log.info("Cluster ID: {}", newClusterId);
        }
        clusterResourceListeners.onUpdate(cache.clusterResource());

        log.debug("Updated cluster metadata updateVersion {} to {}", this.updateVersion, this.cache);
    }

    // Reset the recoverable-error sets, then repopulate them from the new cluster view.
    private void maybeSetMetadataError(Cluster cluster) {
        clearRecoverableErrors();
        checkInvalidTopics(cluster);
        checkUnauthorizedTopics(cluster);
    }

    // Record invalid topics reported by the latest metadata response.
    private void checkInvalidTopics(Cluster cluster) {
        if (!cluster.invalidTopics().isEmpty()) {
            log.error("Metadata response reported invalid topics {}", cluster.invalidTopics());
            invalidTopics = new HashSet<>(cluster.invalidTopics());
        }
    }

    // Record unauthorized topics reported by the latest metadata response.
    private void checkUnauthorizedTopics(Cluster cluster) {
        if (!cluster.unauthorizedTopics().isEmpty()) {
            log.error("Topic authorization failed for topics {}", cluster.unauthorizedTopics());
            unauthorizedTopics = new HashSet<>(cluster.unauthorizedTopics());
        }
    }

    /**
     * Transform a MetadataResponse into a new MetadataCache instance.
     */
    private MetadataCache handleMetadataResponse(MetadataResponse metadataResponse, boolean isPartialUpdate, long nowMs) {
        // All encountered topics.
        Set<String> topics = new HashSet<>();

        // Retained topics to be passed to the metadata cache.
        Set<String> internalTopics = new HashSet<>();
        Set<String> unauthorizedTopics = new HashSet<>();
        Set<String> invalidTopics = new HashSet<>();

        List<MetadataResponse.PartitionMetadata> partitions = new ArrayList<>();
        for (MetadataResponse.TopicMetadata metadata : metadataResponse.topicMetadata()) {
            topics.add(metadata.topic());

            if (!retainTopic(metadata.topic(), metadata.isInternal(), nowMs))
                continue;

            if (metadata.isInternal())
                internalTopics.add(metadata.topic());

            if (metadata.error() == Errors.NONE) {
                for (MetadataResponse.PartitionMetadata partitionMetadata : metadata.partitionMetadata()) {
                    // Even if the partition's metadata includes an error, we need to handle
                    // the update to catch new epochs
                    updateLatestMetadata(partitionMetadata, metadataResponse.hasReliableLeaderEpochs())
                            .ifPresent(partitions::add);

                    if (partitionMetadata.error.exception() instanceof InvalidMetadataException) {
                        log.debug("Requesting metadata update for partition {} due to error {}",
                                partitionMetadata.topicPartition, partitionMetadata.error);
                        requestUpdate();
                    }
                }
            } else {
                if (metadata.error().exception() instanceof InvalidMetadataException) {
                    log.debug("Requesting metadata update for topic {} due to error {}", metadata.topic(), metadata.error());
                    requestUpdate();
                }

                if (metadata.error() == Errors.INVALID_TOPIC_EXCEPTION)
                    invalidTopics.add(metadata.topic());
                else if (metadata.error() == Errors.TOPIC_AUTHORIZATION_FAILED)
                    unauthorizedTopics.add(metadata.topic());
            }
        }

        Map<Integer, Node> nodes = metadataResponse.brokersById();
        if (isPartialUpdate)
            // Merge with the existing cache, keeping old topics the response did not cover.
            return this.cache.mergeWith(metadataResponse.clusterId(), nodes, partitions,
                unauthorizedTopics, invalidTopics, internalTopics, metadataResponse.controller(),
                (topic, isInternal) -> !topics.contains(topic) && retainTopic(topic, isInternal, nowMs));
        else
            return new MetadataCache(metadataResponse.clusterId(), nodes, partitions,
                unauthorizedTopics, invalidTopics, internalTopics, metadataResponse.controller());
    }

    /**
     * Compute the latest partition metadata to cache given ordering by leader epochs (if both
     * available and reliable).
     */
    private Optional<MetadataResponse.PartitionMetadata> updateLatestMetadata(
            MetadataResponse.PartitionMetadata partitionMetadata,
            boolean hasReliableLeaderEpoch) {
        TopicPartition tp = partitionMetadata.topicPartition;
        if (hasReliableLeaderEpoch && partitionMetadata.leaderEpoch.isPresent()) {
            int newEpoch = partitionMetadata.leaderEpoch.get();
            // If the received leader epoch is at least the same as the previous one, update the metadata
            Integer currentEpoch = lastSeenLeaderEpochs.get(tp);
            if (currentEpoch == null || newEpoch >= currentEpoch) {
                log.debug("Updating last seen epoch for partition {} from {} to epoch {} from new metadata", tp, currentEpoch, newEpoch);
                lastSeenLeaderEpochs.put(tp, newEpoch);
                return Optional.of(partitionMetadata);
            } else {
                // Otherwise ignore the new metadata and use the previously cached info
                log.debug("Got metadata for an older epoch {} (current is {}) for partition {}, not updating", newEpoch, currentEpoch, tp);
                return cache.partitionMetadata(tp);
            }
        } else {
            // Handle old cluster formats as well as error responses where leader and epoch are missing
            lastSeenLeaderEpochs.remove(tp);
            return Optional.of(partitionMetadata.withoutLeaderEpoch());
        }
    }

    /**
     * If any non-retriable exceptions were encountered during metadata update, clear and throw the exception.
     * This is used by the consumer to propagate any fatal exceptions or topic exceptions for any of the topics
     * in the consumer's Metadata.
     */
    public synchronized void maybeThrowAnyException() {
        clearErrorsAndMaybeThrowException(this::recoverableException);
    }

    /**
     * If any fatal exceptions were encountered during metadata update, throw the exception. This is used by
     * the producer to abort waiting for metadata if there were fatal exceptions (e.g. authentication failures)
     * in the last metadata update.
     */
    public synchronized void maybeThrowFatalException() {
        KafkaException metadataException = this.fatalException;
        if (metadataException != null) {
            // Clear before throwing so the same fatal error is reported only once.
            fatalException = null;
            throw metadataException;
        }
    }

    /**
     * If any non-retriable exceptions were encountered during metadata update, throw exception if the exception
     * is fatal or related to the specified topic. All exceptions from the last metadata update are cleared.
     * This is used by the producer to propagate topic metadata errors for send requests.
     */
    public synchronized void maybeThrowExceptionForTopic(String topic) {
        clearErrorsAndMaybeThrowException(() -> recoverableExceptionForTopic(topic));
    }

    // Clear all recorded errors, then throw the fatal exception if present, otherwise whatever the
    // supplier produces (may be null, in which case nothing is thrown).
    private void clearErrorsAndMaybeThrowException(Supplier<KafkaException> recoverableExceptionSupplier) {
        KafkaException metadataException = Optional.ofNullable(fatalException).orElseGet(recoverableExceptionSupplier);
        fatalException = null;
        clearRecoverableErrors();
        if (metadataException != null)
            throw metadataException;
    }

    // We may be able to recover from this exception if metadata for this topic is no longer needed
    private KafkaException recoverableException() {
        if (!unauthorizedTopics.isEmpty())
            return new TopicAuthorizationException(unauthorizedTopics);
        else if (!invalidTopics.isEmpty())
            return new InvalidTopicException(invalidTopics);
        else
            return null;
    }

    // Like recoverableException(), but scoped to a single topic.
    private KafkaException recoverableExceptionForTopic(String topic) {
        if (unauthorizedTopics.contains(topic))
            return new TopicAuthorizationException(Collections.singleton(topic));
        else if (invalidTopics.contains(topic))
            return new InvalidTopicException(Collections.singleton(topic));
        else
            return null;
    }

    // Forget per-topic errors from the last metadata update.
    private void clearRecoverableErrors() {
        invalidTopics = Collections.emptySet();
        unauthorizedTopics = Collections.emptySet();
    }

    /**
     * Record an attempt to update the metadata that failed. We need to keep track of this
     * to avoid retrying immediately.
     */
    public synchronized void failedUpdate(long now) {
        this.lastRefreshMs = now;
    }

    /**
     * Propagate a fatal error which affects the ability to fetch metadata for the cluster.
     * Two examples are authentication and unsupported version exceptions.
     *
     * @param exception The fatal exception
     */
    public synchronized void fatalError(KafkaException exception) {
        this.fatalException = exception;
    }

    /**
     * @return The current metadata updateVersion
     */
    public synchronized int updateVersion() {
        return this.updateVersion;
    }

    /**
     * The last time metadata was successfully updated.
     */
    public synchronized long lastSuccessfulUpdate() {
        return this.lastSuccessfulRefreshMs;
    }

    /**
     * Close this metadata instance to indicate that metadata updates are no longer possible.
     */
    @Override
    public synchronized void close() {
        this.isClosed = true;
    }

    /**
     * Check if this metadata instance has been closed. See {@link #close()} for more information.
     *
     * @return True if this instance has been closed; false otherwise
     */
    public synchronized boolean isClosed() {
        return this.isClosed;
    }

    /**
     * Build the next metadata request, preferring a partial (new-topics-only) request when a full
     * update is not pending and the cached metadata has not yet expired.
     *
     * @param nowMs current time in milliseconds
     * @return the request builder together with the request version and partial-update flag
     */
    public synchronized MetadataRequestAndVersion newMetadataRequestAndVersion(long nowMs) {
        MetadataRequest.Builder request = null;
        boolean isPartialUpdate = false;

        // Perform a partial update only if a full update hasn't been requested, and the last successful
        // update hasn't exceeded the metadata refresh time.
        if (!this.needFullUpdate && this.lastSuccessfulRefreshMs + this.metadataExpireMs > nowMs) {
            request = newMetadataRequestBuilderForNewTopics();
            isPartialUpdate = true;
        }
        if (request == null) {
            // Fall back to a full update (also covers subclasses that don't support partial updates).
            request = newMetadataRequestBuilder();
            isPartialUpdate = false;
        }
        return new MetadataRequestAndVersion(request, requestVersion, isPartialUpdate);
    }

    /**
     * Constructs and returns a metadata request builder for fetching cluster data and all active topics.
     *
     * @return the constructed non-null metadata builder
     */
    protected MetadataRequest.Builder newMetadataRequestBuilder() {
        return MetadataRequest.Builder.allTopics();
    }

    /**
     * Constructs and returns a metadata request builder for fetching cluster data and any uncached topics,
     * otherwise null if the functionality is not supported.
     *
     * @return the constructed metadata builder, or null if not supported
     */
    protected MetadataRequest.Builder newMetadataRequestBuilderForNewTopics() {
        return null;
    }

    /**
     * Whether metadata for the given topic should be kept. Subclasses override this to implement
     * topic expiry; the base implementation retains everything.
     */
    protected boolean retainTopic(String topic, boolean isInternal, long nowMs) {
        return true;
    }

    /**
     * A metadata request builder paired with the request version it was created under and whether
     * it represents a partial (new-topics-only) update.
     */
    public static class MetadataRequestAndVersion {
        public final MetadataRequest.Builder requestBuilder;
        public final int requestVersion;
        public final boolean isPartialUpdate;

        private MetadataRequestAndVersion(MetadataRequest.Builder requestBuilder,
                                          int requestVersion,
                                          boolean isPartialUpdate) {
            this.requestBuilder = requestBuilder;
            this.requestVersion = requestVersion;
            this.isPartialUpdate = isPartialUpdate;
        }
    }

    /**
     * Represents current leader state known in metadata. It is possible that we know the leader, but not the
     * epoch if the metadata is received from a broker which does not support a sufficient Metadata API version.
     * It is also possible that we know of the leader epoch, but not the leader when it is derived
     * from an external source (e.g. a committed offset).
     */
    public static class LeaderAndEpoch {
        private static final LeaderAndEpoch NO_LEADER_OR_EPOCH = new LeaderAndEpoch(Optional.empty(), Optional.empty());

        public final Optional<Node> leader;
        public final Optional<Integer> epoch;

        public LeaderAndEpoch(Optional<Node> leader, Optional<Integer> epoch) {
            this.leader = Objects.requireNonNull(leader);
            this.epoch = Objects.requireNonNull(epoch);
        }

        public static LeaderAndEpoch noLeaderOrEpoch() {
            return NO_LEADER_OR_EPOCH;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;

            LeaderAndEpoch that = (LeaderAndEpoch) o;

            if (!leader.equals(that.leader)) return false;
            return epoch.equals(that.epoch);
        }

        @Override
        public int hashCode() {
            int result = leader.hashCode();
            result = 31 * result + epoch.hashCode();
            return result;
        }

        @Override
        public String toString() {
            return "LeaderAndEpoch{" +
                    "leader=" + leader +
                    ", epoch=" + epoch.map(Number::toString).orElse("absent") +
                    '}';
        }
    }
}
|
||||
@@ -0,0 +1,210 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import org.apache.kafka.common.Cluster;
|
||||
import org.apache.kafka.common.ClusterResource;
|
||||
import org.apache.kafka.common.Node;
|
||||
import org.apache.kafka.common.PartitionInfo;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.requests.MetadataResponse;
|
||||
import org.apache.kafka.common.requests.MetadataResponse.PartitionMetadata;
|
||||
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.function.BiPredicate;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* An internal mutable cache of nodes, topics, and partitions in the Kafka cluster. This keeps an up-to-date Cluster
|
||||
* instance which is optimized for read access.
|
||||
*/
|
||||
public class MetadataCache {
|
||||
private final String clusterId;
|
||||
private final Map<Integer, Node> nodes;
|
||||
private final Set<String> unauthorizedTopics;
|
||||
private final Set<String> invalidTopics;
|
||||
private final Set<String> internalTopics;
|
||||
private final Node controller;
|
||||
private final Map<TopicPartition, PartitionMetadata> metadataByPartition;
|
||||
|
||||
private Cluster clusterInstance;
|
||||
|
||||
MetadataCache(String clusterId,
|
||||
Map<Integer, Node> nodes,
|
||||
Collection<PartitionMetadata> partitions,
|
||||
Set<String> unauthorizedTopics,
|
||||
Set<String> invalidTopics,
|
||||
Set<String> internalTopics,
|
||||
Node controller) {
|
||||
this(clusterId, nodes, partitions, unauthorizedTopics, invalidTopics, internalTopics, controller, null);
|
||||
}
|
||||
|
||||
private MetadataCache(String clusterId,
|
||||
Map<Integer, Node> nodes,
|
||||
Collection<PartitionMetadata> partitions,
|
||||
Set<String> unauthorizedTopics,
|
||||
Set<String> invalidTopics,
|
||||
Set<String> internalTopics,
|
||||
Node controller,
|
||||
Cluster clusterInstance) {
|
||||
this.clusterId = clusterId;
|
||||
this.nodes = nodes;
|
||||
this.unauthorizedTopics = unauthorizedTopics;
|
||||
this.invalidTopics = invalidTopics;
|
||||
this.internalTopics = internalTopics;
|
||||
this.controller = controller;
|
||||
|
||||
this.metadataByPartition = new HashMap<>(partitions.size());
|
||||
for (PartitionMetadata p : partitions) {
|
||||
this.metadataByPartition.put(p.topicPartition, p);
|
||||
}
|
||||
|
||||
if (clusterInstance == null) {
|
||||
computeClusterView();
|
||||
} else {
|
||||
this.clusterInstance = clusterInstance;
|
||||
}
|
||||
}
|
||||
|
||||
Optional<PartitionMetadata> partitionMetadata(TopicPartition topicPartition) {
|
||||
return Optional.ofNullable(metadataByPartition.get(topicPartition));
|
||||
}
|
||||
|
||||
Optional<Node> nodeById(int id) {
|
||||
return Optional.ofNullable(nodes.get(id));
|
||||
}
|
||||
|
||||
Cluster cluster() {
|
||||
if (clusterInstance == null) {
|
||||
throw new IllegalStateException("Cached Cluster instance should not be null, but was.");
|
||||
} else {
|
||||
return clusterInstance;
|
||||
}
|
||||
}
|
||||
|
||||
ClusterResource clusterResource() {
|
||||
return new ClusterResource(clusterId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Merges the metadata cache's contents with the provided metadata, returning a new metadata cache. The provided
|
||||
* metadata is presumed to be more recent than the cache's metadata, and therefore all overlapping metadata will
|
||||
* be overridden.
|
||||
*
|
||||
* @param newClusterId the new cluster Id
|
||||
* @param newNodes the new set of nodes
|
||||
* @param addPartitions partitions to add
|
||||
* @param addUnauthorizedTopics unauthorized topics to add
|
||||
* @param addInternalTopics internal topics to add
|
||||
* @param newController the new controller node
|
||||
* @param retainTopic returns whether a topic's metadata should be retained
|
||||
* @return the merged metadata cache
|
||||
*/
|
||||
MetadataCache mergeWith(String newClusterId,
|
||||
Map<Integer, Node> newNodes,
|
||||
Collection<PartitionMetadata> addPartitions,
|
||||
Set<String> addUnauthorizedTopics,
|
||||
Set<String> addInvalidTopics,
|
||||
Set<String> addInternalTopics,
|
||||
Node newController,
|
||||
BiPredicate<String, Boolean> retainTopic) {
|
||||
|
||||
Predicate<String> shouldRetainTopic = topic -> retainTopic.test(topic, internalTopics.contains(topic));
|
||||
|
||||
Map<TopicPartition, PartitionMetadata> newMetadataByPartition = new HashMap<>(addPartitions.size());
|
||||
for (PartitionMetadata partition : addPartitions) {
|
||||
newMetadataByPartition.put(partition.topicPartition, partition);
|
||||
}
|
||||
for (Map.Entry<TopicPartition, PartitionMetadata> entry : metadataByPartition.entrySet()) {
|
||||
if (shouldRetainTopic.test(entry.getKey().topic())) {
|
||||
newMetadataByPartition.putIfAbsent(entry.getKey(), entry.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
Set<String> newUnauthorizedTopics = fillSet(addUnauthorizedTopics, unauthorizedTopics, shouldRetainTopic);
|
||||
Set<String> newInvalidTopics = fillSet(addInvalidTopics, invalidTopics, shouldRetainTopic);
|
||||
Set<String> newInternalTopics = fillSet(addInternalTopics, internalTopics, shouldRetainTopic);
|
||||
|
||||
return new MetadataCache(newClusterId, newNodes, newMetadataByPartition.values(), newUnauthorizedTopics,
|
||||
newInvalidTopics, newInternalTopics, newController);
|
||||
}
|
||||
|
||||
/**
|
||||
* Copies {@code baseSet} and adds all non-existent elements in {@code fillSet} such that {@code predicate} is true.
|
||||
* In other words, all elements of {@code baseSet} will be contained in the result, with additional non-overlapping
|
||||
* elements in {@code fillSet} where the predicate is true.
|
||||
*
|
||||
* @param baseSet the base elements for the resulting set
|
||||
* @param fillSet elements to be filled into the resulting set
|
||||
* @param predicate tested against the fill set to determine whether elements should be added to the base set
|
||||
*/
|
||||
private static <T> Set<T> fillSet(Set<T> baseSet, Set<T> fillSet, Predicate<T> predicate) {
|
||||
Set<T> result = new HashSet<>(baseSet);
|
||||
for (T element : fillSet) {
|
||||
if (predicate.test(element)) {
|
||||
result.add(element);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
 * Recomputes the derived {@link Cluster} view ({@code clusterInstance}) from the state
 * currently held by this cache: the per-partition metadata map, the node map, the topic
 * sets and the controller.
 */
private void computeClusterView() {
    // Materialize a PartitionInfo for every cached partition, resolving node ids via `nodes`.
    List<PartitionInfo> partitionInfos = metadataByPartition.values()
        .stream()
        .map(metadata -> MetadataResponse.toPartitionInfo(metadata, nodes))
        .collect(Collectors.toList());
    this.clusterInstance = new Cluster(clusterId, nodes.values(), partitionInfos, unauthorizedTopics,
        invalidTopics, internalTopics, controller);
}
|
||||
|
||||
static MetadataCache bootstrap(List<InetSocketAddress> addresses) {
|
||||
Map<Integer, Node> nodes = new HashMap<>();
|
||||
int nodeId = -1;
|
||||
for (InetSocketAddress address : addresses) {
|
||||
nodes.put(nodeId, new Node(nodeId, address.getHostString(), address.getPort()));
|
||||
nodeId--;
|
||||
}
|
||||
return new MetadataCache(null, nodes, Collections.emptyList(),
|
||||
Collections.emptySet(), Collections.emptySet(), Collections.emptySet(),
|
||||
null, Cluster.bootstrap(addresses));
|
||||
}
|
||||
|
||||
/**
 * Returns a cache with no cluster id, no nodes, no partitions, no topics and no controller,
 * backed by {@code Cluster.empty()}.
 */
static MetadataCache empty() {
    return new MetadataCache(null, Collections.emptyMap(), Collections.emptyList(),
        Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Cluster.empty());
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "MetadataCache{" +
|
||||
"clusterId='" + clusterId + '\'' +
|
||||
", nodes=" + nodes +
|
||||
", partitions=" + metadataByPartition.values() +
|
||||
", controller=" + controller +
|
||||
'}';
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,93 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import org.apache.kafka.common.KafkaException;
|
||||
import org.apache.kafka.common.Node;
|
||||
import org.apache.kafka.common.errors.AuthenticationException;
|
||||
import org.apache.kafka.common.errors.UnsupportedVersionException;
|
||||
import org.apache.kafka.common.requests.MetadataResponse;
|
||||
import org.apache.kafka.common.requests.RequestHeader;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
|
||||
/**
 * The interface used by `NetworkClient` to request cluster metadata info to be updated and to retrieve the cluster nodes
 * from such metadata. This is an internal class.
 * <p>
 * This class is not thread-safe!
 */
public interface MetadataUpdater extends Closeable {

    /**
     * Gets the current cluster info without blocking.
     */
    List<Node> fetchNodes();

    /**
     * Returns true if an update to the cluster metadata info is due.
     *
     * @param now current time in milliseconds
     */
    boolean isUpdateDue(long now);

    /**
     * Starts a cluster metadata update if needed and possible. Returns the time until the metadata update (which would
     * be 0 if an update has been started as a result of this call).
     *
     * If the implementation relies on `NetworkClient` to send requests, `handleSuccessfulResponse` will be
     * invoked after the metadata response is received.
     *
     * The semantics of `needed` and `possible` are implementation-dependent and may take into account a number of
     * factors like node availability, how long since the last metadata update, etc.
     *
     * @param now current time in milliseconds
     * @return milliseconds until the next metadata update (0 when an update was just started)
     */
    long maybeUpdate(long now);

    /**
     * Handle a server disconnect.
     *
     * This provides a mechanism for the `MetadataUpdater` implementation to use the NetworkClient instance for its own
     * requests with special handling for disconnections of such requests.
     *
     * @param now Current time in milliseconds
     * @param nodeId The id of the node that disconnected
     * @param maybeAuthException Optional authentication error
     */
    void handleServerDisconnect(long now, String nodeId, Optional<AuthenticationException> maybeAuthException);

    /**
     * Handle a metadata request failure.
     *
     * @param now Current time in milliseconds
     * @param maybeFatalException Optional fatal error (e.g. {@link UnsupportedVersionException})
     */
    void handleFailedRequest(long now, Optional<KafkaException> maybeFatalException);

    /**
     * Handle responses for metadata requests.
     *
     * This provides a mechanism for the `MetadataUpdater` implementation to use the NetworkClient instance for its own
     * requests with special handling for completed receives of such requests.
     *
     * @param requestHeader    header of the metadata request that produced this response
     * @param now              current time in milliseconds
     * @param metadataResponse the received metadata response
     */
    void handleSuccessfulResponse(RequestHeader requestHeader, long now, MetadataResponse metadataResponse);

    /**
     * Close this updater.
     */
    @Override
    void close();
}
|
||||
1265
clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
Normal file
1265
clients/src/main/java/org/apache/kafka/clients/NetworkClient.java
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,118 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import org.apache.kafka.common.Node;
|
||||
import org.apache.kafka.common.errors.DisconnectException;
|
||||
import org.apache.kafka.common.utils.Time;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Provides additional utilities for {@link NetworkClient} (e.g. to implement blocking behaviour).
|
||||
*/
|
||||
public final class NetworkClientUtils {
|
||||
|
||||
private NetworkClientUtils() {}
|
||||
|
||||
/**
|
||||
* Checks whether the node is currently connected, first calling `client.poll` to ensure that any pending
|
||||
* disconnects have been processed.
|
||||
*
|
||||
* This method can be used to check the status of a connection prior to calling the blocking version to be able
|
||||
* to tell whether the latter completed a new connection.
|
||||
*/
|
||||
public static boolean isReady(KafkaClient client, Node node, long currentTime) {
|
||||
client.poll(0, currentTime);
|
||||
return client.isReady(node, currentTime);
|
||||
}
|
||||
|
||||
/**
|
||||
* Invokes `client.poll` to discard pending disconnects, followed by `client.ready` and 0 or more `client.poll`
|
||||
* invocations until the connection to `node` is ready, the timeoutMs expires or the connection fails.
|
||||
*
|
||||
* It returns `true` if the call completes normally or `false` if the timeoutMs expires. If the connection fails,
|
||||
* an `IOException` is thrown instead. Note that if the `NetworkClient` has been configured with a positive
|
||||
* connection timeoutMs, it is possible for this method to raise an `IOException` for a previous connection which
|
||||
* has recently disconnected. If authentication to the node fails, an `AuthenticationException` is thrown.
|
||||
*
|
||||
* This method is useful for implementing blocking behaviour on top of the non-blocking `NetworkClient`, use it with
|
||||
* care.
|
||||
*/
|
||||
public static boolean awaitReady(KafkaClient client, Node node, Time time, long timeoutMs) throws IOException {
|
||||
if (timeoutMs < 0) {
|
||||
throw new IllegalArgumentException("Timeout needs to be greater than 0");
|
||||
}
|
||||
long startTime = time.milliseconds();
|
||||
long expiryTime = startTime + timeoutMs;
|
||||
|
||||
if (isReady(client, node, startTime) || client.ready(node, startTime))
|
||||
return true;
|
||||
|
||||
long attemptStartTime = time.milliseconds();
|
||||
while (!client.isReady(node, attemptStartTime) && attemptStartTime < expiryTime) {
|
||||
if (client.connectionFailed(node)) {
|
||||
throw new IOException("Connection to " + node + " failed.");
|
||||
}
|
||||
long pollTimeout = expiryTime - attemptStartTime;
|
||||
client.poll(pollTimeout, attemptStartTime);
|
||||
if (client.authenticationException(node) != null)
|
||||
throw client.authenticationException(node);
|
||||
attemptStartTime = time.milliseconds();
|
||||
}
|
||||
return client.isReady(node, attemptStartTime);
|
||||
}
|
||||
|
||||
/**
|
||||
* Invokes `client.send` followed by 1 or more `client.poll` invocations until a response is received or a
|
||||
* disconnection happens (which can happen for a number of reasons including a request timeout).
|
||||
*
|
||||
* In case of a disconnection, an `IOException` is thrown.
|
||||
* If shutdown is initiated on the client during this method, an IOException is thrown.
|
||||
*
|
||||
* This method is useful for implementing blocking behaviour on top of the non-blocking `NetworkClient`, use it with
|
||||
* care.
|
||||
*/
|
||||
public static ClientResponse sendAndReceive(KafkaClient client, ClientRequest request, Time time) throws IOException {
|
||||
try {
|
||||
client.send(request, time.milliseconds());
|
||||
while (client.active()) {
|
||||
List<ClientResponse> responses = client.poll(Long.MAX_VALUE, time.milliseconds());
|
||||
for (ClientResponse response : responses) {
|
||||
if (response.requestHeader().correlationId() == request.correlationId()) {
|
||||
if (response.wasDisconnected()) {
|
||||
throw new IOException("Connection to " + response.destination() + " was disconnected before the response was read");
|
||||
}
|
||||
if (response.versionMismatch() != null) {
|
||||
throw response.versionMismatch();
|
||||
}
|
||||
return response;
|
||||
}
|
||||
}
|
||||
}
|
||||
throw new IOException("Client was shutdown before response was read");
|
||||
} catch (DisconnectException e) {
|
||||
if (client.active())
|
||||
throw e;
|
||||
else
|
||||
throw new IOException("Client was shutdown before response was read");
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,233 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.LinkedList;
|
||||
import org.apache.kafka.common.errors.UnsupportedVersionException;
|
||||
import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersionsResponseKey;
|
||||
import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersionsResponseKeyCollection;
|
||||
import org.apache.kafka.common.protocol.ApiKeys;
|
||||
import org.apache.kafka.common.utils.Utils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.EnumMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
|
||||
/**
 * An internal class which represents the API versions supported by a particular node.
 */
public class NodeApiVersions {
    // NOTE(review): `log` appears unused within this class — confirm before removing.
    private static final Logger log = LoggerFactory.getLogger(NodeApiVersions.class);

    // A map of the usable versions of each API, keyed by the ApiKeys instance
    private final Map<ApiKeys, ApiVersion> supportedVersions = new EnumMap<>(ApiKeys.class);

    // List of APIs which the broker supports, but which are unknown to the client
    private final List<ApiVersion> unknownApis = new ArrayList<>();

    /**
     * Create a NodeApiVersions object with the current ApiVersions.
     *
     * @return A new NodeApiVersions object.
     */
    public static NodeApiVersions create() {
        return create(Collections.<ApiVersion>emptyList());
    }

    /**
     * Create a NodeApiVersions object.
     *
     * @param overrides API versions to override. Any ApiVersion not specified here will be set to the current client
     *                  value.
     * @return A new NodeApiVersions object.
     */
    public static NodeApiVersions create(Collection<ApiVersion> overrides) {
        List<ApiVersion> apiVersions = new LinkedList<>(overrides);
        // Fill in a default ApiVersion for every known ApiKey not present in the overrides.
        for (ApiKeys apiKey : ApiKeys.values()) {
            boolean exists = false;
            for (ApiVersion apiVersion : apiVersions) {
                if (apiVersion.apiKey == apiKey.id) {
                    exists = true;
                    break;
                }
            }
            if (!exists) {
                apiVersions.add(new ApiVersion(apiKey));
            }
        }
        return new NodeApiVersions(apiVersions);
    }


    /**
     * Create a NodeApiVersions object with a single ApiKey. It is mainly used in tests.
     *
     * @param apiKey ApiKey's id.
     * @param minVersion ApiKey's minimum version.
     * @param maxVersion ApiKey's maximum version.
     * @return A new NodeApiVersions object.
     */
    public static NodeApiVersions create(short apiKey, short minVersion, short maxVersion) {
        return create(Collections.singleton(new ApiVersion(apiKey, minVersion, maxVersion)));
    }

    /**
     * Builds the version map from an ApiVersions response collection, partitioning entries into
     * APIs known to this client ({@code supportedVersions}) and unknown ones ({@code unknownApis}).
     */
    public NodeApiVersions(ApiVersionsResponseKeyCollection nodeApiVersions) {
        for (ApiVersionsResponseKey nodeApiVersion : nodeApiVersions) {
            // NOTE(review): the `forId(...) != null` clause looks redundant when hasId(...) is
            // true — confirm against ApiKeys before simplifying.
            if (ApiKeys.hasId(nodeApiVersion.apiKey()) && ApiKeys.forId(nodeApiVersion.apiKey()) != null) {
                ApiKeys nodeApiKey = ApiKeys.forId(nodeApiVersion.apiKey());
                supportedVersions.put(nodeApiKey, new ApiVersion(nodeApiVersion));
            } else {
                // Newer brokers may support ApiKeys we don't know about
                unknownApis.add(new ApiVersion(nodeApiVersion));
            }
        }
    }

    /**
     * Builds the version map from a plain collection of ApiVersion entries, partitioning them into
     * APIs known to this client ({@code supportedVersions}) and unknown ones ({@code unknownApis}).
     */
    public NodeApiVersions(Collection<ApiVersion> nodeApiVersions) {
        for (ApiVersion nodeApiVersion : nodeApiVersions) {
            if (ApiKeys.hasId(nodeApiVersion.apiKey)) {
                ApiKeys nodeApiKey = ApiKeys.forId(nodeApiVersion.apiKey);
                supportedVersions.put(nodeApiKey, nodeApiVersion);
            } else {
                // Newer brokers may support ApiKeys we don't know about
                unknownApis.add(nodeApiVersion);
            }
        }
    }

    /**
     * Return the most recent version supported by both the node and the local software.
     */
    public short latestUsableVersion(ApiKeys apiKey) {
        return latestUsableVersion(apiKey, apiKey.oldestVersion(), apiKey.latestVersion());
    }

    /**
     * Get the latest version supported by the broker within an allowed range of versions
     *
     * @throws UnsupportedVersionException if the broker does not support the API at all, or no
     *         version in the allowed range overlaps the broker's supported range
     */
    public short latestUsableVersion(ApiKeys apiKey, short oldestAllowedVersion, short latestAllowedVersion) {
        ApiVersion usableVersion = supportedVersions.get(apiKey);
        if (usableVersion == null)
            throw new UnsupportedVersionException("The broker does not support " + apiKey);
        return latestUsableVersion(apiKey, usableVersion, oldestAllowedVersion, latestAllowedVersion);
    }

    // Intersects the allowed [min, max] range with the broker's supported range and returns the
    // highest version in the intersection; throws when the ranges do not overlap.
    private short latestUsableVersion(ApiKeys apiKey, ApiVersion supportedVersions,
        short minAllowedVersion, short maxAllowedVersion) {
        short minVersion = (short) Math.max(minAllowedVersion, supportedVersions.minVersion);
        short maxVersion = (short) Math.min(maxAllowedVersion, supportedVersions.maxVersion);
        if (minVersion > maxVersion)
            throw new UnsupportedVersionException("The broker does not support " + apiKey +
                " with version in range [" + minAllowedVersion + "," + maxAllowedVersion + "]. The supported" +
                " range is [" + supportedVersions.minVersion + "," + supportedVersions.maxVersion + "].");
        return maxVersion;
    }

    /**
     * Convert the object to a string with no linebreaks.<p/>
     * <p>
     * This toString method is relatively expensive, so avoid calling it unless debug logging is turned on.
     */
    @Override
    public String toString() {
        return toString(false);
    }

    /**
     * Convert the object to a string.
     *
     * @param lineBreaks True if we should add a linebreak after each api.
     */
    public String toString(boolean lineBreaks) {
        // The apiVersion collection may not be in sorted order. We put it into
        // a TreeMap before printing it out to ensure that we always print in
        // ascending order.
        TreeMap<Short, String> apiKeysText = new TreeMap<>();
        for (ApiVersion supportedVersion : this.supportedVersions.values())
            apiKeysText.put(supportedVersion.apiKey, apiVersionToText(supportedVersion));
        for (ApiVersion apiVersion : unknownApis)
            apiKeysText.put(apiVersion.apiKey, apiVersionToText(apiVersion));

        // Also handle the case where some apiKey types are not specified at all in the given ApiVersions,
        // which may happen when the remote is too old.
        for (ApiKeys apiKey : ApiKeys.values()) {
            if (!apiKeysText.containsKey(apiKey.id)) {
                StringBuilder bld = new StringBuilder();
                bld.append(apiKey.name).append("(").
                    append(apiKey.id).append("): ").append("UNSUPPORTED");
                apiKeysText.put(apiKey.id, bld.toString());
            }
        }
        String separator = lineBreaks ? ",\n\t" : ", ";
        StringBuilder bld = new StringBuilder();
        bld.append("(");
        if (lineBreaks)
            bld.append("\n\t");
        bld.append(Utils.join(apiKeysText.values(), separator));
        if (lineBreaks)
            bld.append("\n");
        bld.append(")");
        return bld.toString();
    }

    // Renders one ApiVersion as "Name(id): min to max [usable: v]" (or UNKNOWN/unusable variants).
    private String apiVersionToText(ApiVersion apiVersion) {
        StringBuilder bld = new StringBuilder();
        ApiKeys apiKey = null;
        if (ApiKeys.hasId(apiVersion.apiKey)) {
            apiKey = ApiKeys.forId(apiVersion.apiKey);
            bld.append(apiKey.name).append("(").append(apiKey.id).append("): ");
        } else {
            bld.append("UNKNOWN(").append(apiVersion.apiKey).append("): ");
        }

        if (apiVersion.minVersion == apiVersion.maxVersion) {
            bld.append(apiVersion.minVersion);
        } else {
            bld.append(apiVersion.minVersion).append(" to ").append(apiVersion.maxVersion);
        }

        // For known APIs, also report whether a usable version overlap exists with this client.
        if (apiKey != null) {
            ApiVersion supportedVersion = supportedVersions.get(apiKey);
            if (apiKey.latestVersion() < supportedVersion.minVersion) {
                bld.append(" [unusable: node too new]");
            } else if (supportedVersion.maxVersion < apiKey.oldestVersion()) {
                bld.append(" [unusable: node too old]");
            } else {
                short latestUsableVersion = Utils.min(apiKey.latestVersion(), supportedVersion.maxVersion);
                bld.append(" [usable: ").append(latestUsableVersion).append("]");
            }
        }
        return bld.toString();
    }

    /**
     * Get the version information for a given API.
     *
     * @param apiKey The api key to lookup
     * @return The api version information from the broker or null if it is unsupported
     */
    public ApiVersion apiVersion(ApiKeys apiKey) {
        return supportedVersions.get(apiKey);
    }

}
|
||||
@@ -0,0 +1,27 @@
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.kafka.clients;
|
||||
|
||||
/**
|
||||
* A callback interface for attaching an action to be executed when a request is complete and the corresponding response
|
||||
* has been received. This handler will also be invoked if there is a disconnection while handling the request.
|
||||
*/
|
||||
public interface RequestCompletionHandler {
|
||||
|
||||
public void onComplete(ClientResponse response);
|
||||
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user