diff --git a/.gitignore b/.gitignore index 9bdbddbe..045ec395 100644 --- a/.gitignore +++ b/.gitignore @@ -1,114 +1,112 @@ -### Intellij ### -# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm - -*.iml - -## Directory-based project format: -.idea/ -# if you remove the above rule, at least ignore the following: - -# User-specific stuff: -# .idea/workspace.xml -# .idea/tasks.xml -# .idea/dictionaries -# .idea/shelf - -# Sensitive or high-churn files: -.idea/dataSources.ids -.idea/dataSources.xml -.idea/sqlDataSources.xml -.idea/dynamic.xml -.idea/uiDesigner.xml - - -# Mongo Explorer plugin: -.idea/mongoSettings.xml - -## File-based project format: -*.ipr -*.iws - -## Plugin-specific files: - -# IntelliJ -/out/ - -# mpeltonen/sbt-idea plugin -.idea_modules/ - -# JIRA plugin -atlassian-ide-plugin.xml - -# Crashlytics plugin (for Android Studio and IntelliJ) -com_crashlytics_export_strings.xml -crashlytics.properties -crashlytics-build.properties -fabric.properties - - -### Java ### -*.class - -# Mobile Tools for Java (J2ME) -.mtj.tmp/ - -# Package Files # -*.jar -*.war -*.ear -*.tar.gz - -# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml -hs_err_pid* - - -### OSX ### -.DS_Store -.AppleDouble -.LSOverride - -# Icon must end with two \r -Icon - - -# Thumbnails -._* - -# Files that might appear in the root of a volume -.DocumentRevisions-V100 -.fseventsd -.Spotlight-V100 -.TemporaryItems -.Trashes -.VolumeIcon.icns - -# Directories potentially created on remote AFP share -.AppleDB -.AppleDesktop -Network Trash Folder -Temporary Items -.apdisk - -/target -target/ -*.log -*.log.* -*.bak -*.vscode -*/.vscode/* -*/.vscode -*/velocity.log* -*/*.log -*/*.log.* -node_modules/ -node_modules/* -workspace.xml -/output/* -.gitversion -node_modules/* -out/* -dist/ -dist/* -kafka-manager-web/src/main/resources/templates/ -.DS_Store -kafka-manager-console/package-lock.json +### Intellij ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm + +*.iml + +## Directory-based project format: +.idea/ +# if you remove the above rule, at least ignore the following: + +# User-specific stuff: +# .idea/workspace.xml +# .idea/tasks.xml +# .idea/dictionaries +# .idea/shelf + +# Sensitive or high-churn files: +.idea/dataSources.ids +.idea/dataSources.xml +.idea/sqlDataSources.xml +.idea/dynamic.xml +.idea/uiDesigner.xml + + +# Mongo Explorer plugin: +.idea/mongoSettings.xml + +## File-based project format: +*.ipr +*.iws + +## Plugin-specific files: + +# IntelliJ +/out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + + +### Java ### +*.class + +# Mobile Tools for Java (J2ME) +.mtj.tmp/ + +# Package Files # +*.jar +*.war +*.ear +*.tar.gz + +# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml +hs_err_pid* + + +### OSX ### +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon + + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +/target +target/ +*.log +*.log.* 
+*.bak +*.vscode +*/.vscode/* +*/.vscode +*/velocity.log* +*/*.log +*/*.log.* +node_modules/ +node_modules/* +workspace.xml +/output/* +.gitversion +out/* +dist/ +dist/* +km-rest/src/main/resources/templates/ +*dependency-reduced-pom* \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1e2de8dc..ef4ad6a7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -7,7 +7,7 @@ Thanks for considering to contribute this project. All issues and pull requests Before sending pull request to this project, please read and follow guidelines below. 1. Branch: We only accept pull request on `dev` branch. -2. Coding style: Follow the coding style used in kafka-manager. +2. Coding style: Follow the coding style used in LogiKM. 3. Commit message: Use English and be aware of your spell. 4. Test: Make sure to test your code. diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index af7ffa1c..00000000 --- a/Dockerfile +++ /dev/null @@ -1,41 +0,0 @@ -ARG MAVEN_VERSION=3.8.4-openjdk-8-slim -ARG JAVA_VERSION=8-jdk-alpine3.9 -FROM maven:${MAVEN_VERSION} AS builder -ARG CONSOLE_ENABLE=true - -WORKDIR /opt -COPY . . -COPY distribution/conf/settings.xml /root/.m2/settings.xml - -# whether to build console -RUN set -eux; \ - if [ $CONSOLE_ENABLE = 'false' ]; then \ - sed -i "/kafka-manager-console/d" pom.xml; \ - fi \ - && mvn -Dmaven.test.skip=true clean install -U - -FROM openjdk:${JAVA_VERSION} - -RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories && apk add --no-cache tini - -ENV TZ=Asia/Shanghai -ENV AGENT_HOME=/opt/agent/ - -COPY --from=builder /opt/kafka-manager-web/target/kafka-manager.jar /opt -COPY --from=builder /opt/container/dockerfiles/docker-depends/config.yaml $AGENT_HOME -COPY --from=builder /opt/container/dockerfiles/docker-depends/jmx_prometheus_javaagent-0.15.0.jar $AGENT_HOME -COPY --from=builder /opt/distribution/conf/application-docker.yml /opt - -WORKDIR /opt - -ENV JAVA_AGENT="-javaagent:$AGENT_HOME/jmx_prometheus_javaagent-0.15.0.jar=9999:$AGENT_HOME/config.yaml" -ENV JAVA_HEAP_OPTS="-Xms1024M -Xmx1024M -Xmn100M " -ENV JAVA_OPTS="-verbose:gc \ - -XX:MaxMetaspaceSize=256M -XX:+DisableExplicitGC -XX:+UseStringDeduplication \ - -XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:-UseContainerSupport" - -EXPOSE 8080 9999 - -ENTRYPOINT ["tini", "--"] - -CMD [ "sh", "-c", "java -jar $JAVA_AGENT $JAVA_HEAP_OPTS $JAVA_OPTS kafka-manager.jar --spring.config.location=application-docker.yml"] diff --git a/LICENSE b/LICENSE index 148493e0..74484e25 100644 --- a/LICENSE +++ b/LICENSE @@ -1,433 +1,661 @@ - Apache License - - Version 2.0, January 2004 - - http://www.apache.org/licenses/ - - - - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - - - -1. Definitions. - - - - - "License" shall mean the terms and conditions for use, reproduction, - - and distribution as defined by Sections 1 through 9 of this document. - - - - - "Licensor" shall mean the copyright owner or entity authorized by - - the copyright owner that is granting the License. - - - - - "Legal Entity" shall mean the union of the acting entity and all - - other entities that control, are controlled by, or are under common - - control with that entity. For the purposes of this definition, - - "control" means (i) the power, direct or indirect, to cause the - - direction or management of such entity, whether by contract or - - otherwise, or (ii) ownership of fifty percent (50%) or more of the - - outstanding shares, or (iii) beneficial ownership of such entity. 
- - - - - "You" (or "Your") shall mean an individual or Legal Entity - - exercising permissions granted by this License. - - - - - "Source" form shall mean the preferred form for making modifications, - - including but not limited to software source code, documentation - - source, and configuration files. - - - - - "Object" form shall mean any form resulting from mechanical - - transformation or translation of a Source form, including but - - not limited to compiled object code, generated documentation, - - and conversions to other media types. - - - - - "Work" shall mean the work of authorship, whether in Source or - - Object form, made available under the License, as indicated by a - - copyright notice that is included in or attached to the work - - (an example is provided in the Appendix below). - - - - - "Derivative Works" shall mean any work, whether in Source or Object - - form, that is based on (or derived from) the Work and for which the - - editorial revisions, annotations, elaborations, or other modifications - - represent, as a whole, an original work of authorship. For the purposes - - of this License, Derivative Works shall not include works that remain - - separable from, or merely link (or bind by name) to the interfaces of, - - the Work and Derivative Works thereof. - - - - - "Contribution" shall mean any work of authorship, including - - the original version of the Work and any modifications or additions - - to that Work or Derivative Works thereof, that is intentionally - - submitted to Licensor for inclusion in the Work by the copyright owner - - or by an individual or Legal Entity authorized to submit on behalf of - - the copyright owner. For the purposes of this definition, "submitted" - - means any form of electronic, verbal, or written communication sent - - to the Licensor or its representatives, including but not limited to - - communication on electronic mailing lists, source code control systems, - - and issue tracking systems that are managed by, or on behalf of, the - - Licensor for the purpose of discussing and improving the Work, but - - excluding communication that is conspicuously marked or otherwise - - designated in writing by the copyright owner as "Not a Contribution." - - - - - "Contributor" shall mean Licensor and any individual or Legal Entity - - on behalf of whom a Contribution has been received by Licensor and - - subsequently incorporated within the Work. - - - - -2. Grant of Copyright License. Subject to the terms and conditions of - - this License, each Contributor hereby grants to You a perpetual, - - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - - copyright license to reproduce, prepare Derivative Works of, - - publicly display, publicly perform, sublicense, and distribute the - - Work and such Derivative Works in Source or Object form. - - - - -3. Grant of Patent License. Subject to the terms and conditions of - - this License, each Contributor hereby grants to You a perpetual, - - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - - (except as stated in this section) patent license to make, have made, - - use, offer to sell, sell, import, and otherwise transfer the Work, - - where such license applies only to those patent claims licensable - - by such Contributor that are necessarily infringed by their - - Contribution(s) alone or by combination of their Contribution(s) - - with the Work to which such Contribution(s) was submitted. 
If You - - institute patent litigation against any entity (including a - - cross-claim or counterclaim in a lawsuit) alleging that the Work - - or a Contribution incorporated within the Work constitutes direct - - or contributory patent infringement, then any patent licenses - - granted to You under this License for that Work shall terminate - - as of the date such litigation is filed. - - - - -4. Redistribution. You may reproduce and distribute copies of the - - Work or Derivative Works thereof in any medium, with or without - - modifications, and in Source or Object form, provided that You - - meet the following conditions: - - - - - (a) You must give any other recipients of the Work or - - Derivative Works a copy of this License; and - - - - - (b) You must cause any modified files to carry prominent notices - - stating that You changed the files; and - - - - - (c) You must retain, in the Source form of any Derivative Works - - that You distribute, all copyright, patent, trademark, and - - attribution notices from the Source form of the Work, - - excluding those notices that do not pertain to any part of - - the Derivative Works; and - - - - - (d) If the Work includes a "NOTICE" text file as part of its - - distribution, then any Derivative Works that You distribute must - - include a readable copy of the attribution notices contained - - within such NOTICE file, excluding those notices that do not - - pertain to any part of the Derivative Works, in at least one - - of the following places: within a NOTICE text file distributed - - as part of the Derivative Works; within the Source form or - - documentation, if provided along with the Derivative Works; or, - - within a display generated by the Derivative Works, if and - - wherever such third-party notices normally appear. The contents - - of the NOTICE file are for informational purposes only and - - do not modify the License. You may add Your own attribution - - notices within Derivative Works that You distribute, alongside - - or as an addendum to the NOTICE text from the Work, provided - - that such additional attribution notices cannot be construed - - as modifying the License. - - - - - You may add Your own copyright statement to Your modifications and - - may provide additional or different license terms and conditions - - for use, reproduction, or distribution of Your modifications, or - - for any such Derivative Works as a whole, provided Your use, - - reproduction, and distribution of the Work otherwise complies with - - the conditions stated in this License. - - - - -5. Submission of Contributions. Unless You explicitly state otherwise, - - any Contribution intentionally submitted for inclusion in the Work - - by You to the Licensor shall be under the terms and conditions of - - this License, without any additional terms or conditions. - - Notwithstanding the above, nothing herein shall supersede or modify - - the terms of any separate license agreement you may have executed - - with Licensor regarding such Contributions. - - - - -6. Trademarks. This License does not grant permission to use the trade - - names, trademarks, service marks, or product names of the Licensor, - - except as required for reasonable and customary use in describing the - - origin of the Work and reproducing the content of the NOTICE file. - - - - -7. Disclaimer of Warranty. 
Unless required by applicable law or - - agreed to in writing, Licensor provides the Work (and each - - Contributor provides its Contributions) on an "AS IS" BASIS, - - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - - implied, including, without limitation, any warranties or conditions - - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - - PARTICULAR PURPOSE. You are solely responsible for determining the - - appropriateness of using or redistributing the Work and assume any - - risks associated with Your exercise of permissions under this License. - - - - -8. Limitation of Liability. In no event and under no legal theory, - - whether in tort (including negligence), contract, or otherwise, - - unless required by applicable law (such as deliberate and grossly - - negligent acts) or agreed to in writing, shall any Contributor be - - liable to You for damages, including any direct, indirect, special, - - incidental, or consequential damages of any character arising as a - - result of this License or out of the use or inability to use the - - Work (including but not limited to damages for loss of goodwill, - - work stoppage, computer failure or malfunction, or any and all - - other commercial damages or losses), even if such Contributor - - has been advised of the possibility of such damages. - - - - -9. Accepting Warranty or Additional Liability. While redistributing - - the Work or Derivative Works thereof, You may choose to offer, - - and charge a fee for, acceptance of support, warranty, indemnity, - - or other liability obligations and/or rights consistent with this - - License. However, in accepting such obligations, You may act only - - on Your own behalf and on Your sole responsibility, not on behalf - - of any other Contributor, and only if You agree to indemnify, - - defend, and hold each Contributor harmless for any liability - - incurred by, or claims asserted against, such Contributor by reason - - of your accepting any such warranty or additional liability. - - - - -END OF TERMS AND CONDITIONS - - - - -APPENDIX: How to apply the Apache License to your work. - - - - - To apply the Apache License to your work, attach the following - - boilerplate notice, with the fields enclosed by brackets "{}" - - replaced with your own identifying information. (Don't include - - the brackets!) The text should be enclosed in the appropriate - - comment syntax for the file format. We also recommend that a - - file or class name and description of purpose be included on the - - same "printed page" as the copyright notice for easier - - identification within third-party archives. - - - - -Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd. All rights reserved. - - - - -Licensed under the Apache License, Version 2.0 (the "License"); - -you may not use this file except in compliance with the License. - -You may obtain a copy of the License at - - - - - http://www.apache.org/licenses/LICENSE-2.0 - - - - -Unless required by applicable law or agreed to in writing, software - -distributed under the License is distributed on an "AS IS" BASIS, - -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - -See the License for the specific language governing permissions and - -limitations under the License. + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. 
+ Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 
+ + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. 
\ No newline at end of file diff --git a/README.md b/README.md index f685b311..c3017fc2 100644 --- a/README.md +++ b/README.md @@ -1,121 +1,138 @@ - ---- -![KnowStreaing](https://user-images.githubusercontent.com/71620349/183546097-71451983-d00e-4ad4-afb0-43fb597c69a9.png) - -**一站式`Apache Kafka`管控平台** - -`LogiKM开源至今备受关注,考虑到开源项目应该更贴合Apache Kafka未来发展方向,经项目组慎重考虑,我们将其品牌升级成Know Streaming,新的大版本更新马上就绪,感谢大家一如既往的支持!也欢迎Kafka爱好者一起共建社区` - -阅读本README文档,您可以了解到滴滴Know Streaming的用户群体、产品定位等信息,并通过体验地址,快速体验Kafka集群指标监控与运维管控的全流程。 - - -## 1 产品简介 -滴滴Know Streaming脱胎于滴滴内部多年的Kafka运营实践经验,是面向Kafka用户、Kafka运维人员打造的共享多租户Kafka云平台。专注于Kafka运维管控、监控告警、资源治理等核心场景,经历过大规模集群、海量大数据的考验。内部满意度高达90%的同时,还与多家知名企业达成商业化合作。 - -### 1.1 快速体验地址 - -- 体验地址(新的体验地址马上就来) http://117.51.150.133:8080 账号密码 admin/admin - -### 1.2 体验地图 -相比较于同类产品的用户视角单一(大多为管理员视角),滴滴Logi-KafkaManager建立了基于分角色、多场景视角的体验地图。分别是:**用户体验地图、运维体验地图、运营体验地图** - -#### 1.2.1 用户体验地图 -- 平台租户申请  :申请应用(App)作为Kafka中的用户名,并用 AppID+password作为身份验证 -- 集群资源申请  :按需申请、按需使用。可使用平台提供的共享集群,也可为应用申请独立的集群 -- Topic   申   请  :可根据应用(App)创建Topic,或者申请其他topic的读写权限 -- Topic   运   维  :Topic数据采样、调整配额、申请分区等操作 -- 指   标  监   控  :基于Topic生产消费各环节耗时统计,监控不同分位数性能指标 -- 消 费 组 运 维 :支持将消费偏移重置至指定时间或指定位置 - -#### 1.2.2 运维体验地图 -- 多版本集群管控  :支持从`0.10.2`到`2.x`版本 -- 集    群    监   控  :集群Topic、Broker等多维度历史与实时关键指标查看,建立健康分体系 -- 集    群    运   维  :划分部分Broker作为Region,使用Region定义资源划分单位,并按照业务、保障能力区分逻辑集群 -- Broker    运    维  :包括优先副本选举等操作 -- Topic      运    维  :包括创建、查询、扩容、修改属性、迁移、下线等 - - -#### 1.2.3 运营体验地图 -- 资  源  治  理  :沉淀资源治理方法。针对Topic分区热点、分区不足等高频常见问题,沉淀资源治理方法,实现资源治理专家化 -- 资  源  审  批  :工单体系。Topic创建、调整配额、申请分区等操作,由专业运维人员审批,规范资源使用,保障平台平稳运行 -- 账  单  体  系  :成本控制。Topic资源、集群资源按需申请、按需使用。根据流量核算费用,帮助企业建设大数据成本核算体系 - -### 1.3 核心优势 -- 高 效 的 问 题 定 位  :监控多项核心指标,统计不同分位数据,提供种类丰富的指标监控报表,帮助用户、运维人员快速高效定位问题 -- 便 捷 的 集 群 运 维  :按照Region定义集群资源划分单位,将逻辑集群根据保障等级划分。在方便资源隔离、提高扩展能力的同时,实现对服务端的强管控 -- 专 业 的 资 源 治 理  :基于滴滴内部多年运营实践,沉淀资源治理方法,建立健康分体系。针对Topic分区热点、分区不足等高频常见问题,实现资源治理专家化 -- 友 好 的 运 维 生 态  :与Prometheus、Grafana、滴滴夜莺监控告警系统打通,集成指标分析、监控告警、集群部署、集群升级等能力。形成运维生态,凝练专家服务,使运维更高效 - -### 1.4 滴滴Logi-KafkaManager架构图 - -![kafka-manager-arch](https://img-ys011.didistatic.com/static/dicloudpub/do1_xgDHNDLj2ChKxctSuf72) - - -## 2 相关文档 - -### 2.1 产品文档 -- [滴滴Know Streaming 安装手册](docs/install_guide/install_guide_cn.md) -- [滴滴Know Streaming 接入集群](docs/user_guide/add_cluster/add_cluster.md) -- [滴滴Know Streaming 用户使用手册](docs/user_guide/user_guide_cn.md) -- [滴滴Know Streaming FAQ](docs/user_guide/faq.md) - -### 2.2 社区文章 -- [滴滴云官网产品介绍](https://www.didiyun.com/production/logi-KafkaManager.html) -- [7年沉淀之作--滴滴Logi日志服务套件](https://mp.weixin.qq.com/s/-KQp-Qo3WKEOc9wIR2iFnw) -- [滴滴Know Streaming 一站式Kafka管控平台](https://mp.weixin.qq.com/s/9qSZIkqCnU6u9nLMvOOjIQ) -- [滴滴Know Streaming 开源之路](https://xie.infoq.cn/article/0223091a99e697412073c0d64) -- [滴滴Know Streaming 系列视频教程](https://space.bilibili.com/442531657/channel/seriesdetail?sid=571649) -- [kafka最强最全知识图谱](https://www.szzdzhp.com/kafka/) -- [滴滴Know Streaming新用户入门系列文章专栏 --石臻臻](https://www.szzdzhp.com/categories/LogIKM/) -- [kafka实践(十五):滴滴开源Kafka管控平台 Know Streaming研究--A叶子叶来](https://blog.csdn.net/yezonggang/article/details/113106244) -- [基于云原生应用管理平台Rainbond安装 滴滴Know Streaming](https://www.rainbond.com/docs/opensource-app/logikm/?channel=logikm) - -## 3 Know Streaming开源用户交流群 - -![image](https://user-images.githubusercontent.com/5287750/111266722-e531d800-8665-11eb-9242-3484da5a3099.png) - -想跟各个大佬交流Kafka Es 等中间件/大数据相关技术请 加微信进群。 - -微信加群:添加mike_zhangliangPenceXie的微信号备注Know Streaming加群或关注公众号 云原生可观测性 回复 "Know Streaming加群" - -## 4 知识星球 - -image - -
-
- ✅我们正在组建国内最大最权威的 -
-
-
-【Kafka中文社区】 -
- -在这里你可以结交各大互联网Kafka大佬以及3000+Kafka爱好者,一起实现知识共享,实时掌控最新行业资讯,期待您的加入中~https://z.didi.cn/5gSF9 - -有问必答~! - -互动有礼~! - -PS:提问请尽量把问题一次性描述清楚,并告知环境信息情况哦~!如使用版本、操作步骤、报错/警告信息等,方便大V们快速解答~ - -## 5 项目成员 - -### 5.1 内部核心人员 - -`iceyuhui`、`liuyaguang`、`limengmonty`、`zhangliangmike`、`zhaoqingrong`、`xiepeng`、`nullhuangyiming`、`zengqiao`、`eilenexuzhe`、`huangjiaweihjw`、`zhaoyinrui`、`marzkonglingxu`、`joysunchao`、`石臻臻` - - -### 5.2 外部贡献者 - -`fangjunyu`、`zhoutaiyang` - - -## 6 协议 - -`Know Streaming`基于`Apache-2.0`协议进行分发和使用,更多信息参见[协议文件](./LICENSE) - -## 7 Star History - -[![Star History Chart](https://api.star-history.com/svg?repos=didi/KnowStreaming&type=Date)](https://star-history.com/#didi/KnowStreaming&Date) - + +
+ +
+ +
+ 产品官网 | + 下载地址 | + 文档资源 | + 体验环境 +
+ +
+ + + LastCommit + + + + + License + + + + + License + + + + + Issues + + + + + Slack + + +
+ + +--- + + +## `Know Streaming` 简介 + + +`Know Streaming`专注于Kafka运维管控、监控告警、资源治理、多活容灾等核心场景,经历大规模集群、海量大数据考验,在用户体验、监控、运维管控上进行了平台化、可视化、智能化的建设,提供一系列特色的功能,极大地方便了用户和运维人员的日常使用。与其他Kafka管控产品相比,`Know Streaming` 具有以下特点: + +- 👀  **简单易用**:提炼高频的 CLI 能力,设计合理的产品使用路径,绘制清新美观的 GUI 页面,始终将简单易用作为产品的主要目标。 + + +- 🌪️  **功能丰富**:主要包含`多集群管理`和`系统管理`两大块,具体包含: + 1. 多集群管理:包括集群管理、Broker管理、Topic管理、Group管理、Security管理、Jobs管理等六大功能模块,几乎涵盖 CLI 的所有高频能力。 + 2. 系统管理:包括配置管理、用户管理、审计日志等3大功能模块,基本满足开源用户的使用需要。 + + +- 👏  **版本兼容**:支持 0.10 及以上,**`ZK`** 或 **`Raft`** 运行模式的Kafka版本,此外在兼容架构上具备良好的扩展性。 + + +- 🚀  **专业能力**:不仅是 CLI 到 GUI 的优秀翻译,更是涵盖一系列专业能力的产品,包括但不限于: + 1. 观测提升: **`多维度指标观测大盘`**、**`观测指标最佳实践`** 等功能。 + 2. 异常巡检:**`集群多维度健康巡检`**、 **`集群多维度健康分`** 等功能。 + 3. 能力增强:**`Topic扩缩副本`**、**`Topic副本迁移`** 等功能。 + + +- ⚡️  **支持分布式**:具备水平扩展能力,只需要增加节点即可获取更强的采集及对外服务能力。 + +  +**产品图** + +

+ + + +
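For readers coming from the command line, the capabilities listed above (for example **`Topic扩缩副本`** and **`Topic副本迁移`**) correspond to operations that are otherwise driven with the stock Kafka admin scripts. The sketch below is only an illustration of that manual workflow, assuming Apache Kafka 2.4+ shell scripts on the `PATH`, a broker reachable at `localhost:9092`, brokers with IDs 1–3, and a hypothetical topic named `demo-topic`; Know Streaming drives the same plan/execute/verify cycle from its GUI.

```bash
# Manual CLI equivalent of a replica migration (illustrative only).
# Assumptions: Kafka 2.4+ scripts on PATH, broker at localhost:9092,
# broker IDs 1/2/3 exist, and a topic named "demo-topic".

# 1. Inspect the current partition and replica layout.
kafka-topics.sh --bootstrap-server localhost:9092 --describe --topic demo-topic

# 2. Describe the target layout in the standard reassignment JSON format.
cat > /tmp/reassign.json <<'EOF'
{
  "version": 1,
  "partitions": [
    { "topic": "demo-topic", "partition": 0, "replicas": [1, 2, 3] }
  ]
}
EOF

# 3. Execute the reassignment with a 10 MB/s replication throttle.
kafka-reassign-partitions.sh --bootstrap-server localhost:9092 \
  --reassignment-json-file /tmp/reassign.json --throttle 10000000 --execute

# 4. Check progress; --verify also clears the throttle once the move completes.
kafka-reassign-partitions.sh --bootstrap-server localhost:9092 \
  --reassignment-json-file /tmp/reassign.json --verify
```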

+ + + + +## 文档资源 + +**`开发相关手册`** + +- [打包编译手册](docs/install_guide/源码编译打包手册.md) +- [单机部署手册](docs/install_guide/单机部署手册.md) +- [版本升级手册](docs/install_guide/版本升级手册.md) +- [本地源码启动手册](docs/dev_guide/本地源码启动手册.md) + +**`产品相关手册`** + +- [产品使用指南](docs/user_guide/用户使用手册.md) +- [2.x与3.x新旧对比手册](docs/user_guide/新旧对比手册.md) +- [FAQ](docs/user_guide/faq.md) + + +**点击 [这里](https://doc.knowstreaming.com/product/1-quick-start),也可以从官网获取到更多文档** + + + + + +## 成为社区贡献者 + +点击 [这里](CONTRIBUTING.md),了解如何成为 Know Streaming 的贡献者 + + + +## 加入技术交流群 + +**`1、知识星球`** + +

+ +

+ +
+
+
+
+
+
+
+
+ +👍 我们正在组建国内最大,最权威的 **[Kafka中文社区](https://z.didi.cn/5gSF9)** + +在这里你可以结交各大互联网的 Kafka大佬 以及 3000+ Kafka爱好者,一起实现知识共享,实时掌控最新行业资讯,期待 👏   您的加入中~ https://z.didi.cn/5gSF9 + +有问必答~! 互动有礼~! + +PS: 提问请尽量把问题一次性描述清楚,并告知环境信息情况~!如使用版本、操作步骤、报错/警告信息等,方便大V们快速解答~ + +  + +**`2、微信群`** + +微信加群:添加`mike_zhangliang`、`danke-x`的微信号备注Logi加群。 \ No newline at end of file diff --git a/Releases_Notes.md b/Releases_Notes.md index c1d75147..b991fe7e 100644 --- a/Releases_Notes.md +++ b/Releases_Notes.md @@ -1,12 +1,13 @@ --- -![kafka-manager-logo](./docs/assets/images/common/logo_name.png) +![Logo](docs/assets/KnowStreamingLogo.png) **一站式`Apache Kafka`集群指标监控与运维管控平台** --- + ## v2.6.0 版本上线时间:2022-01-24 @@ -40,6 +41,16 @@ - 修复Dockerfile执行时提示缺少application.yml文件的问题 - 修复逻辑集群更新时,会报空指针的问题 + +## v2.5.0 + +版本上线时间:2021-07-10 + +### 体验优化 +- 更改产品名为LogiKM +- 更新产品图标 + + ## v2.4.1+ 版本上线时间:2021-05-21 @@ -49,7 +60,7 @@ - 增加接口调用可绕过登录的功能(v2.4.1) ### 体验优化 -- tomcat 版本提升至8.5.66(v2.4.2) +- Tomcat 版本提升至8.5.66(v2.4.2) - op接口优化,拆分util接口为topic、leader两类接口(v2.4.1) - 简化Gateway配置的Key长度(v2.4.1) diff --git a/bin/init_es_template.sh b/bin/init_es_template.sh new file mode 100644 index 00000000..e6beba96 --- /dev/null +++ b/bin/init_es_template.sh @@ -0,0 +1,655 @@ +esaddr=127.0.0.1 +port=8060 +curl -s --connect-timeout 10 -o /dev/null http://${esaddr}:${port}/_cat/nodes >/dev/null 2>&1 +if [ "$?" != "0" ];then + echo "Elasticserach 访问失败, 请安装完后检查并重新执行该脚本 " + exit +fi + +curl -s --connect-timeout 10 -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_broker_metric -d '{ + "order" : 10, + "index_patterns" : [ + "ks_kafka_broker_metric*" + ], + "settings" : { + "index" : { + "number_of_shards" : "10" + } + }, + "mappings" : { + "properties" : { + "brokerId" : { + "type" : "long" + }, + "routingValue" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "clusterPhyId" : { + "type" : "long" + }, + "metrics" : { + "properties" : { + "NetworkProcessorAvgIdle" : { + "type" : "float" + }, + "UnderReplicatedPartitions" : { + "type" : "float" + }, + "BytesIn_min_15" : { + "type" : "float" + }, + "HealthCheckTotal" : { + "type" : "float" + }, + "RequestHandlerAvgIdle" : { + "type" : "float" + }, + "connectionsCount" : { + "type" : "float" + }, + "BytesIn_min_5" : { + "type" : "float" + }, + "HealthScore" : { + "type" : "float" + }, + "BytesOut" : { + "type" : "float" + }, + "BytesOut_min_15" : { + "type" : "float" + }, + "BytesIn" : { + "type" : "float" + }, + "BytesOut_min_5" : { + "type" : "float" + }, + "TotalRequestQueueSize" : { + "type" : "float" + }, + "MessagesIn" : { + "type" : "float" + }, + "TotalProduceRequests" : { + "type" : "float" + }, + "HealthCheckPassed" : { + "type" : "float" + }, + "TotalResponseQueueSize" : { + "type" : "float" + } + } + }, + "key" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "timestamp" : { + "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis", + "index" : true, + "type" : "date", + "doc_values" : true + } + } + }, + "aliases" : { } + }' + +curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_cluster_metric -d '{ + "order" : 10, + "index_patterns" : [ + 
"ks_kafka_cluster_metric*" + ], + "settings" : { + "index" : { + "number_of_shards" : "10" + } + }, + "mappings" : { + "properties" : { + "routingValue" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "clusterPhyId" : { + "type" : "long" + }, + "metrics" : { + "properties" : { + "Connections" : { + "type" : "double" + }, + "BytesIn_min_15" : { + "type" : "double" + }, + "PartitionURP" : { + "type" : "double" + }, + "HealthScore_Topics" : { + "type" : "double" + }, + "EventQueueSize" : { + "type" : "double" + }, + "ActiveControllerCount" : { + "type" : "double" + }, + "GroupDeads" : { + "type" : "double" + }, + "BytesIn_min_5" : { + "type" : "double" + }, + "HealthCheckTotal_Topics" : { + "type" : "double" + }, + "Partitions" : { + "type" : "double" + }, + "BytesOut" : { + "type" : "double" + }, + "Groups" : { + "type" : "double" + }, + "BytesOut_min_15" : { + "type" : "double" + }, + "TotalRequestQueueSize" : { + "type" : "double" + }, + "HealthCheckPassed_Groups" : { + "type" : "double" + }, + "TotalProduceRequests" : { + "type" : "double" + }, + "HealthCheckPassed" : { + "type" : "double" + }, + "TotalLogSize" : { + "type" : "double" + }, + "GroupEmptys" : { + "type" : "double" + }, + "PartitionNoLeader" : { + "type" : "double" + }, + "HealthScore_Brokers" : { + "type" : "double" + }, + "Messages" : { + "type" : "double" + }, + "Topics" : { + "type" : "double" + }, + "PartitionMinISR_E" : { + "type" : "double" + }, + "HealthCheckTotal" : { + "type" : "double" + }, + "Brokers" : { + "type" : "double" + }, + "Replicas" : { + "type" : "double" + }, + "HealthCheckTotal_Groups" : { + "type" : "double" + }, + "GroupRebalances" : { + "type" : "double" + }, + "MessageIn" : { + "type" : "double" + }, + "HealthScore" : { + "type" : "double" + }, + "HealthCheckPassed_Topics" : { + "type" : "double" + }, + "HealthCheckTotal_Brokers" : { + "type" : "double" + }, + "PartitionMinISR_S" : { + "type" : "double" + }, + "BytesIn" : { + "type" : "double" + }, + "BytesOut_min_5" : { + "type" : "double" + }, + "GroupActives" : { + "type" : "double" + }, + "MessagesIn" : { + "type" : "double" + }, + "GroupReBalances" : { + "type" : "double" + }, + "HealthCheckPassed_Brokers" : { + "type" : "double" + }, + "HealthScore_Groups" : { + "type" : "double" + }, + "TotalResponseQueueSize" : { + "type" : "double" + }, + "Zookeepers" : { + "type" : "double" + }, + "LeaderMessages" : { + "type" : "double" + }, + "HealthScore_Cluster" : { + "type" : "double" + }, + "HealthCheckPassed_Cluster" : { + "type" : "double" + }, + "HealthCheckTotal_Cluster" : { + "type" : "double" + } + } + }, + "key" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "timestamp" : { + "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis", + "type" : "date" + } + } + }, + "aliases" : { } + }' + +curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_group_metric -d '{ + "order" : 10, + "index_patterns" : [ + "ks_kafka_group_metric*" + ], + "settings" : { + "index" : { + "number_of_shards" : "10" + } + }, + "mappings" : { + "properties" : { + "group" : { + "type" : "keyword" + }, + "partitionId" : { + "type" : "long" + }, + "routingValue" : { + "type" : "text", + "fields" : 
{ + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "clusterPhyId" : { + "type" : "long" + }, + "topic" : { + "type" : "keyword" + }, + "metrics" : { + "properties" : { + "HealthScore" : { + "type" : "float" + }, + "Lag" : { + "type" : "float" + }, + "OffsetConsumed" : { + "type" : "float" + }, + "HealthCheckTotal" : { + "type" : "float" + }, + "HealthCheckPassed" : { + "type" : "float" + } + } + }, + "groupMetric" : { + "type" : "keyword" + }, + "key" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "timestamp" : { + "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis", + "index" : true, + "type" : "date", + "doc_values" : true + } + } + }, + "aliases" : { } + }' +
+curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_partition_metric -d '{ + "order" : 10, + "index_patterns" : [ + "ks_kafka_partition_metric*" + ], + "settings" : { + "index" : { + "number_of_shards" : "10" + } + }, + "mappings" : { + "properties" : { + "brokerId" : { + "type" : "long" + }, + "partitionId" : { + "type" : "long" + }, + "routingValue" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "clusterPhyId" : { + "type" : "long" + }, + "topic" : { + "type" : "keyword" + }, + "metrics" : { + "properties" : { + "LogStartOffset" : { + "type" : "float" + }, + "Messages" : { + "type" : "float" + }, + "LogEndOffset" : { + "type" : "float" + } + } + }, + "key" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "timestamp" : { + "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis", + "index" : true, + "type" : "date", + "doc_values" : true + } + } + }, + "aliases" : { } + }' +
+curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_replication_metric -d '{ + "order" : 10, + "index_patterns" : [ + "ks_kafka_replication_metric*" + ], + "settings" : { + "index" : { + "number_of_shards" : "10" + } + }, + "mappings" : { + "properties" : { + "brokerId" : { + "type" : "long" + }, + "partitionId" : { + "type" : "long" + }, + "routingValue" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "clusterPhyId" : { + "type" : "long" + }, + "topic" : { + "type" : "keyword" + }, + "metrics" : { + "properties" : { + "LogStartOffset" : { + "type" : "float" + }, + "Messages" : { + "type" : "float" + }, + "LogEndOffset" : { + "type" : "float" + } + } + }, + "key" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "timestamp" : { + "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis", + "index" : true, + "type" : "date", + "doc_values" : true + } + } + }, + "aliases" : { } + }' +
+curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_topic_metric -d '{ + "order" : 10, + "index_patterns" : [ + "ks_kafka_topic_metric*" + ], + "settings" : { + "index" : { + "number_of_shards" : "10" + } + }, + "mappings" : { + "properties" : { + "brokerId" : { + "type" : "long" + }, + "routingValue" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "topic" : { + "type" : "keyword" + }, + "clusterPhyId" : { + "type" : "long" + }, + "metrics" : { + "properties" : { + "BytesIn_min_15" : { + "type" : "float" + }, + "Messages" : { + "type" : "float" + }, + "BytesRejected" : { + "type" : "float" + }, + "PartitionURP" : { + "type" : "float" + }, + "HealthCheckTotal" : { + "type" : "float" + }, + "ReplicationCount" : { + "type" : "float" + }, + "ReplicationBytesOut" : { + "type" : "float" + }, + "ReplicationBytesIn" : { + "type" : "float" + }, + "FailedFetchRequests" : { + "type" : "float" + }, + "BytesIn_min_5" : { + "type" : "float" + }, + "HealthScore" : { + "type" : "float" + }, + "LogSize" : { + "type" : "float" + }, + "BytesOut" : { + "type" : "float" + }, + "BytesOut_min_15" : { + "type" : "float" + }, + "FailedProduceRequests" : { + "type" : "float" + }, + "BytesIn" : { + "type" : "float" + }, + "BytesOut_min_5" : { + "type" : "float" + }, + "MessagesIn" : { + "type" : "float" + }, + "TotalProduceRequests" : { + "type" : "float" + }, + "HealthCheckPassed" : { + "type" : "float" + } + } + }, + "brokerAgg" : { + "type" : "keyword" + }, + "key" : { + "type" : "text", + "fields" : { + "keyword" : { + "ignore_above" : 256, + "type" : "keyword" + } + } + }, + "timestamp" : { + "format" : "yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis", + "index" : true, + "type" : "date", + "doc_values" : true + } + } + }, + "aliases" : { } + }' +
+for i in {0..6}; +do + logdate=_$(date -d "${i} day ago" +%Y-%m-%d) + curl -s --connect-timeout 10 -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_broker_metric${logdate} && \ + curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_cluster_metric${logdate} && \ + curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_group_metric${logdate} && \ + curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_partition_metric${logdate} && \ + curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_replication_metric${logdate} && \ + curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_topic_metric${logdate} || \ + exit 2 +done \ No newline at end of file diff --git a/bin/shutdown.sh b/bin/shutdown.sh new file mode 100644 index 00000000..c5317df8 --- /dev/null +++ b/bin/shutdown.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +cd `dirname $0`/../libs +target_dir=`pwd` + +pid=`ps ax | 
grep -i 'ks-km' | grep ${target_dir} | grep java | grep -v grep | awk '{print $1}'` +if [ -z "$pid" ] ; then + echo "No ks-km running." + exit -1; +fi + +echo "The ks-km (${pid}) is running..." + +kill ${pid} + +echo "Send shutdown request to ks-km (${pid}) OK" diff --git a/bin/standalone-deploy.sh b/bin/standalone-deploy.sh new file mode 100644 index 00000000..d45f412c --- /dev/null +++ b/bin/standalone-deploy.sh @@ -0,0 +1,86 @@ +#!/bin/bash +set -x + +function Install_Java(){ + cd $dir + wget https://s3-gzpu.didistatic.com/pub/jdk11.tar.gz + tar -zxf $dir/jdk11.tar.gz -C /usr/local/ + mv -f /usr/local/jdk-11.0.2 /usr/local/java11 >/dev/null 2>&1 + echo "export JAVA_HOME=/usr/local/java11" >> ~/.bashrc + echo "export CLASSPATH=/usr/java/java11/lib" >> ~/.bashrc + echo "export PATH=\$JAVA_HOME/bin:\$PATH:\$HOME/bin" >> ~/.bashrc + source ~/.bashrc +} + +function Install_Mysql(){ + cd $dir + wget https://s3-gzpu.didistatic.com/pub/mysql5.7.tar.gz + rpm -qa | grep -E "mariadb|mysql" | xargs yum -y remove >/dev/null 2>&1 + mv -f /var/lib/mysql/ /var/lib/mysqlbak$(date "+%s") >/dev/null 2>&1 + mkdir -p $dir/mysql/ && cd $dir/mysql/ + tar -zxf $dir/mysql5.7.tar.gz -C $dir/mysql/ + yum -y localinstall mysql* libaio* + systemctl start mysqld + systemctl enable mysqld >/dev/null 2>&1 + old_pass=`grep 'temporary password' /var/log/mysqld.log | awk '{print $NF}' | tail -n 1` + mysql -NBe "alter user USER() identified by '$mysql_pass';" --connect-expired-password -uroot -p$old_pass + if [ $? -eq 0 ];then + echo "Mysql database installation completed" + else + echo "Mysql database configuration failed. The script exits" + exit + fi +} + +function Install_ElasticSearch(){ + kill -9 $(ps -ef | grep elasticsearch | grep -v "grep" | awk '{print $2}') >/dev/null 2>&1 + id esuser >/dev/null 2>&1 + if [ "$?" != "0" ];then + useradd esuser + echo "esuser soft nofile 655350" >>/etc/security/limits.conf + echo "esuser hard nofile 655350" >>/etc/security/limits.conf + echo "vm.max_map_count = 655360" >>/etc/sysctl.conf + sysctl -p >/dev/null 2>&1 + fi + mkdir -p /km_es/es_data && cd /km_es/ >/dev/null 2>&1 + wget https://s3-gzpu.didistatic.com/pub/elasticsearch.tar.gz + tar -zxf elasticsearch.tar.gz -C /km_es/ + chown -R esuser:esuser /km_es/ + su - esuser <<-EOF + export JAVA_HOME=/usr/local/java11 + sh /km_es/elasticsearch/control.sh start + EOF + sleep 5 + es_status=`sh /km_es/elasticsearch/control.sh status | grep -o "started"` + if [ "$es_status" = "started" ];then + echo "elasticsearch started successfully~ " + else + echo "Elasticsearch failed to start. 
The script exited" + exit + fi +} + +function Install_KnowStreaming(){ + cd $dir + wget https://s3-gzpu.didistatic.com/pub/knowstreaming/KnowStreaming-3.0.0-beta.tar.gz + tar -zxf KnowStreaming-3.0.0-beta.tar.gz -C $dir/ + mysql -uroot -p$mysql_pass -e "create database know_streaming;" + mysql -uroot -p$mysql_pass know_streaming < ./KnowStreaming/init/sql/ddl-ks-km.sql + mysql -uroot -p$mysql_pass know_streaming < ./KnowStreaming/init/sql/ddl-logi-job.sql + mysql -uroot -p$mysql_pass know_streaming < ./KnowStreaming/init/sql/ddl-logi-security.sql + mysql -uroot -p$mysql_pass know_streaming < ./KnowStreaming/init/sql/dml-ks-km.sql + mysql -uroot -p$mysql_pass know_streaming < ./KnowStreaming/init/sql/dml-logi.sql + sh ./KnowStreaming/init/template/template.sh + sed -i "s/mysql_pass/"$mysql_pass"/g" ./KnowStreaming/conf/application.yml + cd $dir/KnowStreaming/bin/ && sh startup.sh + +} + +dir=`pwd` +mysql_pass=`date +%s |sha256sum |base64 |head -c 10 ;echo`"_Di2" +echo "$mysql_pass" > $dir/mysql.password + +Install_Java +Install_Mysql +Install_ElasticSearch +Install_KnowStreaming \ No newline at end of file diff --git a/distribution/bin/startup.sh b/bin/startup.sh similarity index 96% rename from distribution/bin/startup.sh rename to bin/startup.sh index ead6cde9..8081f2dd 100644 --- a/distribution/bin/startup.sh +++ b/bin/startup.sh @@ -31,7 +31,7 @@ fi -export WEB_SERVER="kafka-manager" +export WEB_SERVER="ks-km" export JAVA_HOME export JAVA="$JAVA_HOME/bin/java" export BASE_DIR=`cd $(dirname $0)/..; pwd` @@ -55,7 +55,7 @@ else fi -JAVA_OPT="${JAVA_OPT} -jar ${BASE_DIR}/target/${WEB_SERVER}.jar" +JAVA_OPT="${JAVA_OPT} -jar ${BASE_DIR}/libs/${WEB_SERVER}.jar" JAVA_OPT="${JAVA_OPT} --spring.config.additional-location=${CUSTOM_SEARCH_LOCATIONS}" JAVA_OPT="${JAVA_OPT} --logging.config=${BASE_DIR}/conf/logback-spring.xml" JAVA_OPT="${JAVA_OPT} --server.max-http-header-size=524288" @@ -72,6 +72,7 @@ echo "$JAVA ${JAVA_OPT}" if [ ! 
-f "${BASE_DIR}/logs/start.out" ]; then touch "${BASE_DIR}/logs/start.out" fi + # start echo -e "---- 启动脚本 ------\n $JAVA ${JAVA_OPT}" > ${BASE_DIR}/logs/start.out 2>&1 & diff --git a/container/dockerfiles/docker-depends/config.yaml b/container/dockerfiles/docker-depends/config.yaml deleted file mode 100644 index d4b7b547..00000000 --- a/container/dockerfiles/docker-depends/config.yaml +++ /dev/null @@ -1,5 +0,0 @@ ---- - startDelaySeconds: 0 - ssl: false - lowercaseOutputName: false - lowercaseOutputLabelNames: false diff --git a/container/dockerfiles/docker-depends/jmx_prometheus_javaagent-0.15.0.jar b/container/dockerfiles/docker-depends/jmx_prometheus_javaagent-0.15.0.jar deleted file mode 100644 index d896a217..00000000 Binary files a/container/dockerfiles/docker-depends/jmx_prometheus_javaagent-0.15.0.jar and /dev/null differ diff --git a/container/dockerfiles/mysql/Dockerfile b/container/dockerfiles/mysql/Dockerfile deleted file mode 100644 index 9cae4b56..00000000 --- a/container/dockerfiles/mysql/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM mysql:5.7.37 - -COPY mysqld.cnf /etc/mysql/mysql.conf.d/ -ENV TZ=Asia/Shanghai -ENV MYSQL_ROOT_PASSWORD=root - -RUN apt-get update \ - && apt -y install wget \ - && wget https://ghproxy.com/https://raw.githubusercontent.com/didi/LogiKM/master/distribution/conf/create_mysql_table.sql -O /docker-entrypoint-initdb.d/create_mysql_table.sql - -EXPOSE 3306 - -VOLUME ["/var/lib/mysql"] \ No newline at end of file diff --git a/container/dockerfiles/mysql/mysqld.cnf b/container/dockerfiles/mysql/mysqld.cnf deleted file mode 100644 index cbe35f27..00000000 --- a/container/dockerfiles/mysql/mysqld.cnf +++ /dev/null @@ -1,24 +0,0 @@ -[client] -default-character-set = utf8 - -[mysqld] -character_set_server = utf8 -pid-file = /var/run/mysqld/mysqld.pid -socket = /var/run/mysqld/mysqld.sock -datadir = /var/lib/mysql -symbolic-links=0 - -max_allowed_packet = 10M -sort_buffer_size = 1M -read_rnd_buffer_size = 2M -max_connections=2000 - -lower_case_table_names=1 -character-set-server=utf8 - -max_allowed_packet = 1G -sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION -group_concat_max_len = 102400 -default-time-zone = '+08:00' -[mysql] -default-character-set = utf8 \ No newline at end of file diff --git a/container/helm/Chart.lock b/container/helm/Chart.lock deleted file mode 100644 index 04958b2d..00000000 --- a/container/helm/Chart.lock +++ /dev/null @@ -1,6 +0,0 @@ -dependencies: -- name: mysql - repository: https://charts.bitnami.com/bitnami - version: 8.6.3 -digest: sha256:d250c463c1d78ba30a24a338a06a551503c7a736621d974fe4999d2db7f6143e -generated: "2021-06-24T11:34:54.625217+08:00" diff --git a/container/helm/Chart.yaml b/container/helm/Chart.yaml deleted file mode 100644 index 088abfb0..00000000 --- a/container/helm/Chart.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: v2 -name: didi-km -description: Logi-KafkaManager - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. 
This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.0 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. -appVersion: "2.4.2" -dependencies: - - condition: mysql.enabled - name: mysql - repository: https://charts.bitnami.com/bitnami - version: 8.x.x diff --git a/container/helm/charts/mysql-8.6.3.tgz b/container/helm/charts/mysql-8.6.3.tgz deleted file mode 100644 index c5fde140..00000000 Binary files a/container/helm/charts/mysql-8.6.3.tgz and /dev/null differ diff --git a/container/helm/templates/NOTES.txt b/container/helm/templates/NOTES.txt deleted file mode 100644 index e9c3e7e8..00000000 --- a/container/helm/templates/NOTES.txt +++ /dev/null @@ -1,22 +0,0 @@ -1. Get the application URL by running these commands: -{{- if .Values.ingress.enabled }} -{{- range $host := .Values.ingress.hosts }} - {{- range .paths }} - http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} - {{- end }} -{{- end }} -{{- else if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "didi-km.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "didi-km.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "didi-km.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - echo http://$SERVICE_IP:{{ .Values.service.port }} -{{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "didi-km.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") - echo "Visit http://127.0.0.1:8080 to use your application" - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT -{{- end }} diff --git a/container/helm/templates/_helpers.tpl b/container/helm/templates/_helpers.tpl deleted file mode 100644 index 23314fd4..00000000 --- a/container/helm/templates/_helpers.tpl +++ /dev/null @@ -1,62 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "didi-km.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "didi-km.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "didi-km.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "didi-km.labels" -}} -helm.sh/chart: {{ include "didi-km.chart" . }} -{{ include "didi-km.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "didi-km.selectorLabels" -}} -app.kubernetes.io/name: {{ include "didi-km.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "didi-km.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "didi-km.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} diff --git a/container/helm/templates/configmap.yaml b/container/helm/templates/configmap.yaml deleted file mode 100644 index 1428cf11..00000000 --- a/container/helm/templates/configmap.yaml +++ /dev/null @@ -1,110 +0,0 @@ -{{- define "datasource.mysql" -}} -{{- if .Values.mysql.enabled }} - {{- printf "%s-mysql" (include "didi-km.fullname" .) -}} -{{- else -}} - {{- printf "%s" .Values.externalDatabase.host -}} -{{- end -}} -{{- end -}} - -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "didi-km.fullname" . }}-configs - labels: - {{- include "didi-km.labels" . | nindent 4 }} -data: - application.yml: | - server: - port: 8080 - tomcat: - accept-count: 1000 - max-connections: 10000 - max-threads: 800 - min-spare-threads: 100 - - spring: - application: - name: kafkamanager - datasource: - kafka-manager: - jdbc-url: jdbc:mysql://{{ include "datasource.mysql" . 
}}:3306/{{ .Values.mysql.auth.database }}?characterEncoding=UTF-8&serverTimezone=GMT%2B8&useSSL=false - username: {{ .Values.mysql.auth.username }} - password: {{ .Values.mysql.auth.password }} - driver-class-name: com.mysql.jdbc.Driver - main: - allow-bean-definition-overriding: true - - profiles: - active: dev - servlet: - multipart: - max-file-size: 100MB - max-request-size: 100MB - - logging: - config: classpath:logback-spring.xml - - custom: - idc: cn - jmx: - max-conn: 20 - store-metrics-task: - community: - broker-metrics-enabled: true - topic-metrics-enabled: true - didi: - app-topic-metrics-enabled: false - topic-request-time-metrics-enabled: false - topic-throttled-metrics-enabled: false - save-days: 7 - - # 任务相关的开关 - task: - op: - sync-topic-enabled: false # 未落盘的Topic定期同步到DB中 - - account: - # ldap settings - ldap: - enabled: false - url: ldap://127.0.0.1:389/ - basedn: dc=tsign,dc=cn - factory: com.sun.jndi.ldap.LdapCtxFactory - filter: sAMAccountName - security: - authentication: simple - principal: cn=admin,dc=tsign,dc=cn - credentials: admin - auth-user-registration: false - auth-user-registration-role: normal - - kcm: - enabled: false - storage: - base-url: http://127.0.0.1 - n9e: - base-url: http://127.0.0.1:8004 - user-token: 12345678 - timeout: 300 - account: root - script-file: kcm_script.sh - - monitor: - enabled: false - n9e: - nid: 2 - user-token: 1234567890 - mon: - base-url: http://127.0.0.1:8032 - sink: - base-url: http://127.0.0.1:8006 - rdb: - base-url: http://127.0.0.1:80 - - notify: - kafka: - cluster-id: 95 - topic-name: didi-kafka-notify - order: - detail-url: http://127.0.0.1 - diff --git a/container/helm/templates/deployment.yaml b/container/helm/templates/deployment.yaml deleted file mode 100644 index 80ab2c29..00000000 --- a/container/helm/templates/deployment.yaml +++ /dev/null @@ -1,64 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "didi-km.fullname" . }} - labels: - {{- include "didi-km.labels" . | nindent 4 }} -spec: - {{- if not .Values.autoscaling.enabled }} - replicas: {{ .Values.replicaCount }} - {{- end }} - selector: - matchLabels: - {{- include "didi-km.selectorLabels" . | nindent 6 }} - template: - metadata: - {{- with .Values.podAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "didi-km.selectorLabels" . | nindent 8 }} - spec: - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 8 }} - {{- end }} - serviceAccountName: {{ include "didi-km.serviceAccountName" . }} - securityContext: - {{- toYaml .Values.podSecurityContext | nindent 8 }} - containers: - - name: {{ .Chart.Name }} - securityContext: - {{- toYaml .Values.securityContext | nindent 12 }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - ports: - - name: http - containerPort: 8080 - protocol: TCP - - name: jmx-metrics - containerPort: 9999 - protocol: TCP - resources: - {{- toYaml .Values.resources | nindent 12 }} - volumeMounts: - - name: configs - mountPath: /tmp/application.yml - subPath: application.yml - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} - volumes: - - name: configs - configMap: - name: {{ include "didi-km.fullname" . 
}}-configs diff --git a/container/helm/templates/hpa.yaml b/container/helm/templates/hpa.yaml deleted file mode 100644 index 209d7ae4..00000000 --- a/container/helm/templates/hpa.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.autoscaling.enabled }} -apiVersion: autoscaling/v2beta1 -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "didi-km.fullname" . }} - labels: - {{- include "didi-km.labels" . | nindent 4 }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "didi-km.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} - - type: Resource - resource: - name: cpu - targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} - {{- end }} - {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} - - type: Resource - resource: - name: memory - targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} - {{- end }} -{{- end }} diff --git a/container/helm/templates/ingress.yaml b/container/helm/templates/ingress.yaml deleted file mode 100644 index 47aec7f2..00000000 --- a/container/helm/templates/ingress.yaml +++ /dev/null @@ -1,41 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $fullName := include "didi-km.fullname" . -}} -{{- $svcPort := .Values.service.port -}} -{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} -apiVersion: networking.k8s.io/v1beta1 -{{- else -}} -apiVersion: extensions/v1beta1 -{{- end }} -kind: Ingress -metadata: - name: {{ $fullName }} - labels: - {{- include "didi-km.labels" . | nindent 4 }} - {{- with .Values.ingress.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if .Values.ingress.tls }} - tls: - {{- range .Values.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} - {{- end }} - rules: - {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ .path }} - backend: - serviceName: {{ $fullName }} - servicePort: {{ $svcPort }} - {{- end }} - {{- end }} - {{- end }} diff --git a/container/helm/templates/service.yaml b/container/helm/templates/service.yaml deleted file mode 100644 index 7fcbc5ba..00000000 --- a/container/helm/templates/service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "didi-km.fullname" . }} - labels: - {{- include "didi-km.labels" . | nindent 4 }} -spec: - type: {{ .Values.service.type }} - ports: - - port: {{ .Values.service.port }} - targetPort: http - protocol: TCP - name: http - selector: - {{- include "didi-km.selectorLabels" . | nindent 4 }} diff --git a/container/helm/templates/serviceaccount.yaml b/container/helm/templates/serviceaccount.yaml deleted file mode 100644 index 4f2676ee..00000000 --- a/container/helm/templates/serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if .Values.serviceAccount.create -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "didi-km.serviceAccountName" . }} - labels: - {{- include "didi-km.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -{{- end }} diff --git a/container/helm/templates/tests/test-connection.yaml b/container/helm/templates/tests/test-connection.yaml deleted file mode 100644 index b5b41d4f..00000000 --- a/container/helm/templates/tests/test-connection.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: "{{ include "didi-km.fullname" . }}-test-connection" - labels: - {{- include "didi-km.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test -spec: - containers: - - name: wget - image: busybox - command: ['wget'] - args: ['{{ include "didi-km.fullname" . }}:{{ .Values.service.port }}'] - restartPolicy: Never diff --git a/container/helm/values.yaml b/container/helm/values.yaml deleted file mode 100644 index cbb6f3d4..00000000 --- a/container/helm/values.yaml +++ /dev/null @@ -1,93 +0,0 @@ -# Default values for didi-km. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -replicaCount: 1 - -image: - repository: docker.io/fengxsong/logi-kafka-manager - pullPolicy: IfNotPresent - # Overrides the image tag whose default is the chart appVersion. - tag: "v2.4.2" - -imagePullSecrets: [] -nameOverride: "" -# fullnameOverride must set same as release name -fullnameOverride: "km" - -serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. - # If not set and create is true, a name is generated using the fullname template - name: "" - -podAnnotations: {} - -podSecurityContext: {} - # fsGroup: 2000 - -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -service: - type: ClusterIP - port: 8080 - -ingress: - enabled: false - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: chart-example.local - paths: [] - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: - cpu: 500m - memory: 2048Mi - requests: - cpu: 100m - memory: 200Mi - -autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 100 - targetCPUUtilizationPercentage: 80 - # targetMemoryUtilizationPercentage: 80 - -nodeSelector: {} - -tolerations: [] - -affinity: {} - -# more configurations are set with configmap in file template/configmap.yaml -externalDatabase: - host: "" -mysql: - # if enabled is set to false, then you should manually specified externalDatabase.host - enabled: true - architecture: standalone - auth: - rootPassword: "s3cretR00t" - database: "logi_kafka_manager" - username: "logi_kafka_manager" - password: "n0tp@55w0rd" diff --git a/distribution/bin/shutdown.sh b/distribution/bin/shutdown.sh deleted file mode 100644 index fdf2d01c..00000000 --- a/distribution/bin/shutdown.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -cd `dirname $0`/../target -target_dir=`pwd` - -pid=`ps ax | grep -i 'kafka-manager' | grep ${target_dir} | grep java | grep -v grep | awk '{print $1}'` -if [ -z "$pid" ] ; then - echo "No kafka-manager running." 
- exit -1; -fi - -echo "The kafka-manager (${pid}) is running..." - -kill ${pid} - -echo "Send shutdown request to kafka-manager (${pid}) OK" diff --git a/distribution/conf/application-docker.yml b/distribution/conf/application-docker.yml deleted file mode 100644 index a0178344..00000000 --- a/distribution/conf/application-docker.yml +++ /dev/null @@ -1,28 +0,0 @@ - -## kafka-manager的配置文件,该文件中的配置会覆盖默认配置 -## 下面的配置信息基本就是jar中的 application.yml默认配置了; -## 可以只修改自己变更的配置,其他的删除就行了; 比如只配置一下mysql - - -server: - port: 8080 - tomcat: - accept-count: 1000 - max-connections: 10000 - max-threads: 800 - min-spare-threads: 100 - -spring: - application: - name: kafkamanager - version: 2.6.0 - profiles: - active: dev - datasource: - kafka-manager: - jdbc-url: jdbc:mysql://${LOGI_MYSQL_HOST:mysql}:${LOGI_MYSQL_PORT:3306}/${LOGI_MYSQL_DATABASE:logi_kafka_manager}?characterEncoding=UTF-8&useSSL=false&serverTimezone=GMT%2B8 - username: ${LOGI_MYSQL_USER:root} - password: ${LOGI_MYSQL_PASSWORD:root} - driver-class-name: com.mysql.cj.jdbc.Driver - main: - allow-bean-definition-overriding: true \ No newline at end of file diff --git a/distribution/conf/application.yml b/distribution/conf/application.yml deleted file mode 100644 index 6b78c104..00000000 --- a/distribution/conf/application.yml +++ /dev/null @@ -1,28 +0,0 @@ - -## kafka-manager的配置文件,该文件中的配置会覆盖默认配置 -## 下面的配置信息基本就是jar中的 application.yml默认配置了; -## 可以只修改自己变更的配置,其他的删除就行了; 比如只配置一下mysql - - -server: - port: 8080 - tomcat: - accept-count: 1000 - max-connections: 10000 - max-threads: 800 - min-spare-threads: 100 - -spring: - application: - name: kafkamanager - profiles: - active: dev - datasource: - kafka-manager: - jdbc-url: jdbc:mysql://localhost:3306/logi_kafka_manager?characterEncoding=UTF-8&useSSL=false&serverTimezone=GMT%2B8 - username: root - password: 123456 - driver-class-name: com.mysql.cj.jdbc.Driver - main: - allow-bean-definition-overriding: true - diff --git a/distribution/conf/application.yml.example b/distribution/conf/application.yml.example deleted file mode 100644 index 7804efd3..00000000 --- a/distribution/conf/application.yml.example +++ /dev/null @@ -1,135 +0,0 @@ - -## kafka-manager的配置文件,该文件中的配置会覆盖默认配置 -## 下面的配置信息基本就是jar中的 application.yml默认配置了; -## 可以只修改自己变更的配置,其他的删除就行了; 比如只配置一下mysql - - -server: - port: 8080 - tomcat: - accept-count: 1000 - max-connections: 10000 - max-threads: 800 - min-spare-threads: 100 - -spring: - application: - name: kafkamanager - profiles: - active: dev - datasource: - kafka-manager: - jdbc-url: jdbc:mysql://localhost:3306/logi_kafka_manager?characterEncoding=UTF-8&useSSL=false&serverTimezone=GMT%2B8 - username: root - password: 123456 - driver-class-name: com.mysql.cj.jdbc.Driver - main: - allow-bean-definition-overriding: true - - servlet: - multipart: - max-file-size: 100MB - max-request-size: 100MB - -logging: - config: classpath:logback-spring.xml - -custom: - idc: cn - store-metrics-task: - community: - topic-metrics-enabled: true - didi: # 滴滴Kafka特有的指标 - app-topic-metrics-enabled: false - topic-request-time-metrics-enabled: false - topic-throttled-metrics-enabled: false - -# 任务相关的配置 -task: - op: - sync-topic-enabled: false # 未落盘的Topic定期同步到DB中 - order-auto-exec: # 工单自动化审批线程的开关 - topic-enabled: false # Topic工单自动化审批开关, false:关闭自动化审批, true:开启 - app-enabled: false # App工单自动化审批开关, false:关闭自动化审批, true:开启 - metrics: - collect: # 收集指标 - broker-metrics-enabled: true # 收集Broker指标 - sink: # 上报指标 - cluster-metrics: # 上报cluster指标 - sink-db-enabled: true # 上报到db - broker-metrics: # 上报broker指标 - sink-db-enabled: true # 
上报到db - delete: # 删除指标 - delete-limit-size: 1000 # 单次删除的批大小 - cluster-metrics-save-days: 14 # 集群指标保存天数 - broker-metrics-save-days: 14 # Broker指标保存天数 - topic-metrics-save-days: 7 # Topic指标保存天数 - topic-request-time-metrics-save-days: 7 # Topic请求耗时指标保存天数 - topic-throttled-metrics-save-days: 7 # Topic限流指标保存天数 - app-topic-metrics-save-days: 7 # App+Topic指标保存天数 - -thread-pool: - collect-metrics: - thread-num: 256 # 收集指标线程池大小 - queue-size: 5000 # 收集指标线程池的queue大小 - api-call: - thread-num: 16 # api服务线程池大小 - queue-size: 5000 # api服务线程池的queue大小 - -client-pool: - kafka-consumer: - min-idle-client-num: 24 # 最小空闲客户端数 - max-idle-client-num: 24 # 最大空闲客户端数 - max-total-client-num: 24 # 最大客户端数 - borrow-timeout-unit-ms: 3000 # 租借超时时间,单位毫秒 - -account: - jump-login: - gateway-api: false # 网关接口 - third-part-api: false # 第三方接口 - ldap: - enabled: false - url: ldap://127.0.0.1:389/ - basedn: dc=tsign,dc=cn - factory: com.sun.jndi.ldap.LdapCtxFactory - filter: sAMAccountName - security: - authentication: simple - principal: cn=admin,dc=tsign,dc=cn - credentials: admin - auth-user-registration: true - auth-user-registration-role: normal - -kcm: # 集群安装部署,仅安装broker - enabled: false # 是否开启 - s3: # s3 存储服务 - endpoint: s3.didiyunapi.com - access-key: 1234567890 - secret-key: 0987654321 - bucket: logi-kafka - n9e: # 夜莺 - base-url: http://127.0.0.1:8004 # 夜莺job服务地址 - user-token: 12345678 # 用户的token - timeout: 300 # 当台操作的超时时间 - account: root # 操作时使用的账号 - script-file: kcm_script.sh # 脚本,已内置好,在源码的kcm模块内,此处配置无需修改 - logikm-url: http://127.0.0.1:8080 # logikm部署地址,部署时kcm_script.sh会调用logikm检查部署中的一些状态 - -monitor: - enabled: false - n9e: - nid: 2 - user-token: 1234567890 - mon: - base-url: http://127.0.0.1:8000 # 夜莺v4版本,默认端口统一调整为了8000 - sink: - base-url: http://127.0.0.1:8000 # 夜莺v4版本,默认端口统一调整为了8000 - rdb: - base-url: http://127.0.0.1:8000 # 夜莺v4版本,默认端口统一调整为了8000 - -notify: - kafka: - cluster-id: 95 - topic-name: didi-kafka-notify - order: - detail-url: http://127.0.0.1 diff --git a/distribution/conf/create_mysql_table.sql b/distribution/conf/create_mysql_table.sql deleted file mode 100644 index f859d752..00000000 --- a/distribution/conf/create_mysql_table.sql +++ /dev/null @@ -1,594 +0,0 @@ --- create database -CREATE DATABASE logi_kafka_manager; - -USE logi_kafka_manager; - --- --- Table structure for table `account` --- - --- DROP TABLE IF EXISTS `account`; -CREATE TABLE `account` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `username` varchar(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '用户名', - `password` varchar(128) NOT NULL DEFAULT '' COMMENT '密码', - `role` tinyint(8) NOT NULL DEFAULT '0' COMMENT '角色类型, 0:普通用户 1:研发 2:运维', - `department` varchar(256) DEFAULT '' COMMENT '部门名', - `display_name` varchar(256) DEFAULT '' COMMENT '用户姓名', - `mail` varchar(256) DEFAULT '' COMMENT '邮箱', - `status` int(16) NOT NULL DEFAULT '0' COMMENT '0标识使用中,-1标识已废弃', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_username` (`username`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='账号表'; -INSERT INTO account(username, password, role) VALUES ('admin', '21232f297a57a5a743894a0e4a801fc3', 2); - --- --- Table structure for table `app` --- - --- DROP TABLE IF EXISTS `app`; -CREATE TABLE `app` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', - `app_id` varchar(128) NOT NULL DEFAULT '' COMMENT '应用id', - `name` 
varchar(192) NOT NULL DEFAULT '' COMMENT '应用名称', - `password` varchar(256) NOT NULL DEFAULT '' COMMENT '应用密码', - `type` int(11) NOT NULL DEFAULT '0' COMMENT '类型, 0:普通用户, 1:超级用户', - `applicant` varchar(64) NOT NULL DEFAULT '' COMMENT '申请人', - `principals` text COMMENT '应用负责人', - `description` text COMMENT '应用描述', - `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_name` (`name`), - UNIQUE KEY `uniq_app_id` (`app_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='应用信息'; - - --- --- Table structure for table `authority` --- - --- DROP TABLE IF EXISTS `authority`; -CREATE TABLE `authority` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', - `app_id` varchar(128) NOT NULL DEFAULT '' COMMENT '应用id', - `cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id', - `topic_name` varchar(192) NOT NULL DEFAULT '' COMMENT 'topic名称', - `access` int(11) NOT NULL DEFAULT '0' COMMENT '0:无权限, 1:读, 2:写, 3:读写', - `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_app_id_cluster_id_topic_name` (`app_id`,`cluster_id`,`topic_name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='权限信息(kafka-manager)'; - --- --- Table structure for table `broker` --- - --- DROP TABLE IF EXISTS `broker`; -CREATE TABLE `broker` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id', - `broker_id` int(16) NOT NULL DEFAULT '-1' COMMENT 'brokerid', - `host` varchar(128) NOT NULL DEFAULT '' COMMENT 'broker主机名', - `port` int(16) NOT NULL DEFAULT '-1' COMMENT 'broker端口', - `timestamp` bigint(20) NOT NULL DEFAULT '-1' COMMENT '启动时间', - `max_avg_bytes_in` bigint(20) NOT NULL DEFAULT '-1' COMMENT '峰值的均值流量', - `version` varchar(128) NOT NULL DEFAULT '' COMMENT 'broker版本', - `status` int(16) NOT NULL DEFAULT '0' COMMENT '状态: 0有效,-1无效', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_cluster_id_broker_id` (`cluster_id`,`broker_id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='broker信息表'; - --- --- Table structure for table `broker_metrics` --- - --- DROP TABLE IF EXISTS `broker_metrics`; -CREATE TABLE `broker_metrics` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', - `cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id', - `broker_id` int(16) NOT NULL DEFAULT '-1' COMMENT 'brokerid', - `metrics` text COMMENT '指标', - `messages_in` double(53,2) NOT NULL DEFAULT '0.00' COMMENT '每秒消息数流入', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - PRIMARY KEY (`id`), - KEY `idx_cluster_id_broker_id_gmt_create` (`cluster_id`,`broker_id`,`gmt_create`), - KEY `idx_gmt_create` (`gmt_create`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='broker-metric信息表'; - --- --- Table structure for table `cluster` --- - --- DROP TABLE IF EXISTS `cluster`; -CREATE TABLE `cluster` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '集群id', - `cluster_name` varchar(128) NOT NULL DEFAULT '' COMMENT '集群名称', - `zookeeper` varchar(512) NOT NULL DEFAULT '' COMMENT 'zk地址', - `bootstrap_servers` varchar(512) NOT NULL 
DEFAULT '' COMMENT 'server地址', - `kafka_version` varchar(32) NOT NULL DEFAULT '' COMMENT 'kafka版本', - `security_properties` text COMMENT 'Kafka安全认证参数', - `jmx_properties` text COMMENT 'JMX配置', - `status` tinyint(4) NOT NULL DEFAULT '1' COMMENT ' 监控标记, 0表示未监控, 1表示监控中', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_cluster_name` (`cluster_name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='cluster信息表'; - --- --- Table structure for table `cluster_metrics` --- - --- DROP TABLE IF EXISTS `cluster_metrics`; -CREATE TABLE `cluster_metrics` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', - `cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id', - `metrics` text COMMENT '指标', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - PRIMARY KEY (`id`), - KEY `idx_cluster_id_gmt_create` (`cluster_id`,`gmt_create`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='clustermetrics信息'; - --- --- Table structure for table `cluster_tasks` --- - --- DROP TABLE IF EXISTS `cluster_tasks`; -CREATE TABLE `cluster_tasks` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', - `uuid` varchar(128) NOT NULL DEFAULT '' COMMENT '任务UUID', - `cluster_id` bigint(128) NOT NULL DEFAULT '-1' COMMENT '集群id', - `task_type` varchar(128) NOT NULL DEFAULT '' COMMENT '任务类型', - `kafka_package` text COMMENT 'kafka包', - `kafka_package_md5` varchar(128) NOT NULL DEFAULT '' COMMENT 'kafka包的md5', - `server_properties` text COMMENT 'kafkaserver配置', - `server_properties_md5` varchar(128) NOT NULL DEFAULT '' COMMENT '配置文件的md5', - `agent_task_id` bigint(128) NOT NULL DEFAULT '-1' COMMENT '任务id', - `agent_rollback_task_id` bigint(128) NOT NULL DEFAULT '-1' COMMENT '回滚任务id', - `host_list` text COMMENT '升级的主机', - `pause_host_list` text COMMENT '暂停点', - `rollback_host_list` text COMMENT '回滚机器列表', - `rollback_pause_host_list` text COMMENT '回滚暂停机器列表', - `operator` varchar(64) NOT NULL DEFAULT '' COMMENT '操作人', - `task_status` int(11) NOT NULL DEFAULT '0' COMMENT '任务状态', - `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='集群任务(集群升级部署)'; - --- --- Table structure for table `config` --- - --- DROP TABLE IF EXISTS `config`; -CREATE TABLE `config` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `config_key` varchar(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '配置key', - `config_value` text COMMENT '配置value', - `config_description` text COMMENT '备注说明', - `status` int(16) NOT NULL DEFAULT '0' COMMENT '0标识使用中,-1标识已废弃', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_config_key` (`config_key`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='配置表'; - --- --- Table structure for table `controller` --- - --- DROP TABLE IF EXISTS `controller`; -CREATE TABLE `controller` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', - `cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id', - `broker_id` int(16) NOT NULL DEFAULT '-1' COMMENT 'brokerid', - `host` varchar(256) NOT NULL DEFAULT '' COMMENT '主机名', 
- `timestamp` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'controller变更时间', - `version` int(16) NOT NULL DEFAULT '-1' COMMENT 'controller格式版本', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_cluster_id_broker_id_timestamp` (`cluster_id`,`broker_id`,`timestamp`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='controller记录表'; - --- --- Table structure for table `gateway_config` --- - --- DROP TABLE IF EXISTS `gateway_config`; -CREATE TABLE `gateway_config` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `type` varchar(128) NOT NULL DEFAULT '' COMMENT '配置类型', - `name` varchar(128) NOT NULL DEFAULT '' COMMENT '配置名称', - `value` text COMMENT '配置值', - `version` bigint(20) unsigned NOT NULL DEFAULT '1' COMMENT '版本信息', - `description` text COMMENT '描述信息', - `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_type_name` (`type`,`name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='gateway配置'; -INSERT INTO gateway_config(type, name, value, `version`, `description`) values('SD_QUEUE_SIZE', 'SD_QUEUE_SIZE', 100000000, 1, '任意集群队列大小'); -INSERT INTO gateway_config(type, name, value, `version`, `description`) values('SD_APP_RATE', 'SD_APP_RATE', 100000000, 1, '任意一个App限速'); -INSERT INTO gateway_config(type, name, value, `version`, `description`) values('SD_IP_RATE', 'SD_IP_RATE', 100000000, 1, '任意一个IP限速'); -INSERT INTO gateway_config(type, name, value, `version`, `description`) values('SD_SP_RATE', 'app_01234567', 100000000, 1, '指定App限速'); -INSERT INTO gateway_config(type, name, value, `version`, `description`) values('SD_SP_RATE', '192.168.0.1', 100000000, 1, '指定IP限速'); - --- --- Table structure for table `heartbeat` --- - --- DROP TABLE IF EXISTS `heartbeat`; -CREATE TABLE `heartbeat` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `ip` varchar(128) NOT NULL DEFAULT '' COMMENT '主机ip', - `hostname` varchar(256) NOT NULL DEFAULT '' COMMENT '主机名', - `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_ip` (`ip`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='心跳信息'; - --- --- Table structure for table `kafka_acl` --- - --- DROP TABLE IF EXISTS `kafka_acl`; -CREATE TABLE `kafka_acl` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', - `app_id` varchar(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '用户id', - `cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id', - `topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称', - `access` int(11) NOT NULL DEFAULT '0' COMMENT '0:无权限, 1:读, 2:写, 3:读写', - `operation` int(11) NOT NULL DEFAULT '0' COMMENT '0:创建, 1:更新 2:删除, 以最新的一条数据为准', - `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='权限信息(kafka-broker)'; - --- --- Table structure for table `kafka_bill` --- - --- DROP TABLE IF EXISTS `kafka_bill`; -CREATE TABLE `kafka_bill` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id', - `topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称', - 
`principal` varchar(64) NOT NULL DEFAULT '' COMMENT '负责人', - `quota` double(53,2) NOT NULL DEFAULT '0.00' COMMENT '配额, 单位mb/s', - `cost` double(53,2) NOT NULL DEFAULT '0.00' COMMENT '成本, 单位元', - `cost_type` int(16) NOT NULL DEFAULT '0' COMMENT '成本类型, 0:共享集群, 1:独享集群, 2:独立集群', - `gmt_day` varchar(64) NOT NULL DEFAULT '' COMMENT '计价的日期, 例如2019-02-02的计价结果', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_cluster_id_topic_name_gmt_day` (`cluster_id`,`topic_name`,`gmt_day`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='kafka账单'; - --- --- Table structure for table `kafka_file` --- - --- DROP TABLE IF EXISTS `kafka_file`; -CREATE TABLE `kafka_file` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', - `cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id', - `storage_name` varchar(128) NOT NULL DEFAULT '' COMMENT '存储位置', - `file_name` varchar(128) NOT NULL DEFAULT '' COMMENT '文件名', - `file_md5` varchar(256) NOT NULL DEFAULT '' COMMENT '文件md5', - `file_type` int(16) NOT NULL DEFAULT '-1' COMMENT '0:kafka压缩包, 1:kafkaserver配置', - `description` text COMMENT '备注信息', - `operator` varchar(64) NOT NULL DEFAULT '' COMMENT '创建用户', - `status` int(16) NOT NULL DEFAULT '0' COMMENT '状态, 0:正常, -1:删除', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_cluster_id_file_name_storage_name` (`cluster_id`,`file_name`,`storage_name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='文件管理'; - --- --- Table structure for table `kafka_user` --- - --- DROP TABLE IF EXISTS `kafka_user`; -CREATE TABLE `kafka_user` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', - `app_id` varchar(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '应用id', - `password` varchar(256) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT '密码', - `user_type` int(11) NOT NULL DEFAULT '0' COMMENT '0:普通用户, 1:超级用户', - `operation` int(11) NOT NULL DEFAULT '0' COMMENT '0:创建, 1:更新 2:删除, 以最新一条的记录为准', - `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='kafka用户表'; -INSERT INTO app(app_id, name, password, type, applicant, principals, description) VALUES ('dkm_admin', 'KM管理员', 'km_kMl4N8as1Kp0CCY', 1, 'admin', 'admin', 'KM管理员应用-谨慎对外提供'); -INSERT INTO kafka_user(app_id, password, user_type, operation) VALUES ('dkm_admin', 'km_kMl4N8as1Kp0CCY', 1, 0); - - --- --- Table structure for table `logical_cluster` --- - -CREATE TABLE `logical_cluster` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `name` varchar(192) NOT NULL DEFAULT '' COMMENT '逻辑集群名称', - `identification` varchar(192) NOT NULL DEFAULT '' COMMENT '逻辑集群标识', - `mode` int(16) NOT NULL DEFAULT '0' COMMENT '逻辑集群类型, 0:共享集群, 1:独享集群, 2:独立集群', - `app_id` varchar(64) NOT NULL DEFAULT '' COMMENT '所属应用', - `cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id', - `region_list` varchar(256) NOT NULL DEFAULT '' COMMENT 'regionid列表', - `description` text COMMENT '备注说明', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_name` (`name`), - UNIQUE KEY `uniq_identification` (`identification`) -) ENGINE=InnoDB AUTO_INCREMENT=7 
DEFAULT CHARSET=utf8 COMMENT='逻辑集群信息表'; - - --- --- Table structure for table `monitor_rule` --- - --- DROP TABLE IF EXISTS `monitor_rule`; -CREATE TABLE `monitor_rule` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', - `name` varchar(192) NOT NULL DEFAULT '' COMMENT '告警名称', - `strategy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '监控id', - `app_id` varchar(64) NOT NULL DEFAULT '' COMMENT 'appid', - `operator` varchar(64) NOT NULL DEFAULT '' COMMENT '操作人', - `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_name` (`name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='监控规则'; - --- --- Table structure for table `operate_record` --- - --- DROP TABLE IF EXISTS `operate_record`; -CREATE TABLE `operate_record` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `module_id` int(16) NOT NULL DEFAULT '-1' COMMENT '模块类型, 0:topic, 1:应用, 2:配额, 3:权限, 4:集群, -1:未知', - `operate_id` int(16) NOT NULL DEFAULT '-1' COMMENT '操作类型, 0:新增, 1:删除, 2:修改', - `resource` varchar(256) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称、app名称', - `content` text COMMENT '操作内容', - `operator` varchar(64) NOT NULL DEFAULT '' COMMENT '操作人', - `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - KEY `idx_module_id_operate_id_operator` (`module_id`,`operate_id`,`operator`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='操作记录'; - --- --- Table structure for table `reassign_task` --- - --- DROP TABLE IF EXISTS `reassign_task`; -CREATE TABLE `reassign_task` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', - `task_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '任务ID', - `name` varchar(256) NOT NULL DEFAULT '' COMMENT '任务名称', - `cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id', - `topic_name` varchar(192) NOT NULL DEFAULT '' COMMENT 'Topic名称', - `partitions` text COMMENT '分区', - `reassignment_json` text COMMENT '任务参数', - `real_throttle` bigint(20) NOT NULL DEFAULT '0' COMMENT '限流值', - `max_throttle` bigint(20) NOT NULL DEFAULT '0' COMMENT '限流上限', - `min_throttle` bigint(20) NOT NULL DEFAULT '0' COMMENT '限流下限', - `begin_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '开始时间', - `operator` varchar(64) NOT NULL DEFAULT '' COMMENT '操作人', - `description` varchar(256) NOT NULL DEFAULT '' COMMENT '备注说明', - `status` int(16) NOT NULL DEFAULT '0' COMMENT '任务状态', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '任务创建时间', - `gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '任务修改时间', - `original_retention_time` bigint(20) NOT NULL DEFAULT '86400000' COMMENT 'Topic存储时间', - `reassign_retention_time` bigint(20) NOT NULL DEFAULT '86400000' COMMENT '迁移时的存储时间', - `src_brokers` text COMMENT '源Broker', - `dest_brokers` text COMMENT '目标Broker', - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic迁移信息'; - --- --- Table structure for table `region` --- - --- DROP TABLE IF EXISTS `region`; -CREATE TABLE `region` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `name` varchar(192) NOT NULL DEFAULT '' COMMENT 'region名称', - `cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id', - `broker_list` varchar(256) NOT NULL DEFAULT '' COMMENT 
'broker列表', - `capacity` bigint(20) NOT NULL DEFAULT '0' COMMENT '容量(B/s)', - `real_used` bigint(20) NOT NULL DEFAULT '0' COMMENT '实际使用量(B/s)', - `estimate_used` bigint(20) NOT NULL DEFAULT '0' COMMENT '预估使用量(B/s)', - `description` text COMMENT '备注说明', - `status` int(16) NOT NULL DEFAULT '0' COMMENT '状态,0正常,1已满', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_name` (`name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='region信息表'; - --- --- Table structure for table `topic` --- - --- DROP TABLE IF EXISTS `topic`; -CREATE TABLE `topic` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id', - `topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称', - `app_id` varchar(64) NOT NULL DEFAULT '' COMMENT 'topic所属appid', - `peak_bytes_in` bigint(20) NOT NULL DEFAULT '0' COMMENT '峰值流量', - `description` text COMMENT '备注信息', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_cluster_id_topic_name` (`cluster_id`,`topic_name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic信息表'; - --- --- Table structure for table `topic_app_metrics` --- - --- DROP TABLE IF EXISTS `topic_app_metrics`; -CREATE TABLE `topic_app_metrics` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id', - `topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称', - `app_id` varchar(64) NOT NULL DEFAULT '' COMMENT 'appid', - `metrics` text COMMENT '指标', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - PRIMARY KEY (`id`), - KEY `idx_cluster_id_topic_name_app_id_gmt_create` (`cluster_id`,`topic_name`,`app_id`,`gmt_create`), - KEY `idx_gmt_create` (`gmt_create`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic app metrics'; - --- --- Table structure for table `topic_connections` --- - --- DROP TABLE IF EXISTS `topic_connections`; -CREATE TABLE `topic_connections` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `app_id` varchar(64) NOT NULL DEFAULT '' COMMENT '应用id', - `cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id', - `topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称', - `type` varchar(16) NOT NULL DEFAULT '' COMMENT 'producer or consumer', - `ip` varchar(32) NOT NULL DEFAULT '' COMMENT 'ip地址', - `client_version` varchar(8) NOT NULL DEFAULT '' COMMENT '客户端版本', - `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_app_id_cluster_id_topic_name_type_ip_client_version` (`app_id`,`cluster_id`,`topic_name`,`type`,`ip`,`client_version`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic连接信息表'; - --- --- Table structure for table `topic_expired` --- - --- DROP TABLE IF EXISTS `topic_expired`; -CREATE TABLE `topic_expired` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id', - `topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称', - `produce_connection_num` bigint(20) NOT 
NULL DEFAULT '0' COMMENT '发送连接数', - `fetch_connection_num` bigint(20) NOT NULL DEFAULT '0' COMMENT '消费连接数', - `expired_day` bigint(20) NOT NULL DEFAULT '0' COMMENT '过期天数', - `gmt_retain` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '保留截止时间', - `status` int(16) NOT NULL DEFAULT '0' COMMENT '-1:可下线, 0:过期待通知, 1+:已通知待反馈', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_cluster_id_topic_name` (`cluster_id`,`topic_name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic过期信息表'; - --- --- Table structure for table `topic_metrics` --- - --- DROP TABLE IF EXISTS `topic_metrics`; -CREATE TABLE `topic_metrics` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', - `cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id', - `topic_name` varchar(192) NOT NULL DEFAULT '' COMMENT 'topic名称', - `metrics` text COMMENT '指标数据JSON', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - PRIMARY KEY (`id`), - KEY `idx_cluster_id_topic_name_gmt_create` (`cluster_id`,`topic_name`,`gmt_create`), - KEY `idx_gmt_create` (`gmt_create`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topicmetrics表'; - --- --- Table structure for table `topic_report` --- - --- DROP TABLE IF EXISTS `topic_report`; -CREATE TABLE `topic_report` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `cluster_id` bigint(20) NOT NULL DEFAULT '0' COMMENT '集群id', - `topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称', - `start_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '开始上报时间', - `end_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '结束上报时间', - `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `modify_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_cluster_id_topic_name` (`cluster_id`,`topic_name`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='开启jmx采集的topic'; - --- --- Table structure for table `topic_request_time_metrics` --- - --- DROP TABLE IF EXISTS `topic_request_time_metrics`; -CREATE TABLE `topic_request_time_metrics` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id', - `topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称', - `metrics` text COMMENT '指标', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - PRIMARY KEY (`id`), - KEY `idx_cluster_id_topic_name_gmt_create` (`cluster_id`,`topic_name`,`gmt_create`), - KEY `idx_gmt_create` (`gmt_create`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic请求耗时信息'; - --- --- Table structure for table `topic_statistics` --- - --- DROP TABLE IF EXISTS `topic_statistics`; -CREATE TABLE `topic_statistics` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT '自增id', - `cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id', - `topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic名称', - `offset_sum` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'offset和', - `max_avg_bytes_in` double(53,2) NOT NULL DEFAULT '-1.00' COMMENT '峰值的均值流量', - `gmt_day` varchar(64) NOT NULL DEFAULT '' COMMENT '日期2020-03-30的形式', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - 
`max_avg_messages_in` double(53,2) NOT NULL DEFAULT '-1.00' COMMENT '峰值的均值消息条数', - PRIMARY KEY (`id`), - UNIQUE KEY `uniq_cluster_id_topic_name_gmt_day` (`cluster_id`,`topic_name`,`gmt_day`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic统计信息表'; - --- --- Table structure for table `topic_throttled_metrics` --- - --- DROP TABLE IF EXISTS `topic_throttled_metrics`; -CREATE TABLE `topic_throttled_metrics` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT '集群id', - `topic_name` varchar(192) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'topic name', - `app_id` varchar(64) NOT NULL DEFAULT '' COMMENT 'app', - `produce_throttled` tinyint(8) NOT NULL DEFAULT '0' COMMENT '是否是生产耗时', - `fetch_throttled` tinyint(8) NOT NULL DEFAULT '0' COMMENT '是否是消费耗时', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - PRIMARY KEY (`id`), - KEY `idx_cluster_id_topic_name_app_id` (`cluster_id`,`topic_name`,`app_id`), - KEY `idx_gmt_create` (`gmt_create`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='topic限流信息'; - --- --- Table structure for table `work_order` --- - --- DROP TABLE IF EXISTS `work_order`; -CREATE TABLE `work_order` ( - `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'id', - `type` int(16) NOT NULL DEFAULT '-1' COMMENT '工单类型', - `title` varchar(512) NOT NULL DEFAULT '' COMMENT '工单标题', - `applicant` varchar(64) NOT NULL DEFAULT '' COMMENT '申请人', - `description` text COMMENT '备注信息', - `approver` varchar(64) NOT NULL DEFAULT '' COMMENT '审批人', - `gmt_handle` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '审批时间', - `opinion` varchar(256) NOT NULL DEFAULT '' COMMENT '审批信息', - `extensions` text COMMENT '扩展信息', - `status` int(16) NOT NULL DEFAULT '0' COMMENT '工单状态, 0:待审批, 1:通过, 2:拒绝, 3:取消', - `gmt_create` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间', - `gmt_modify` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '修改时间', - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='工单表'; \ No newline at end of file diff --git a/distribution/conf/logback-spring.xml b/distribution/conf/logback-spring.xml deleted file mode 100644 index c1c16136..00000000 --- a/distribution/conf/logback-spring.xml +++ /dev/null @@ -1,215 +0,0 @@ - - - logback - - - - - - - - - - - - - - info - - - ${CONSOLE_LOG_PATTERN} - UTF-8 - - - - - - - - - ${log.path}/log_debug.log - - - %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n - UTF-8 - - - - - ${log.path}/log_debug_%d{yyyy-MM-dd}.%i.log - - 100MB - - - 7 - - - - debug - ACCEPT - DENY - - - - - - - ${log.path}/log_info.log - - - %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n - UTF-8 - - - - - ${log.path}/log_info_%d{yyyy-MM-dd}.%i.log - - 100MB - - - 7 - - - - info - ACCEPT - DENY - - - - - - - ${log.path}/log_warn.log - - - %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n - UTF-8 - - - - ${log.path}/log_warn_%d{yyyy-MM-dd}.%i.log - - 100MB - - - 7 - - - - warn - ACCEPT - DENY - - - - - - - - ${log.path}/log_error.log - - - %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n - UTF-8 - - - - ${log.path}/log_error_%d{yyyy-MM-dd}.%i.log - - 100MB - - - 7 - - - - ERROR - ACCEPT - DENY - - - - - - ${log.path}/metrics/collector_metrics.log - - %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n - UTF-8 - - - ${log.path}/metrics/collector_metrics_%d{yyyy-MM-dd}.%i.log - - 100MB - - 3 - - - - - - 
${log.path}/metrics/api_metrics.log - - %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n - UTF-8 - - - ${log.path}/metrics/api_metrics_%d{yyyy-MM-dd}.%i.log - - 100MB - - 3 - - - - - - ${log.path}/metrics/scheduled_tasks.log - - %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n - UTF-8 - - - ${log.path}/metrics/scheduled_tasks_%d{yyyy-MM-dd}.%i.log - - 100MB - - 5 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/distribution/conf/settings.xml b/distribution/conf/settings.xml deleted file mode 100644 index 9b72cb5f..00000000 --- a/distribution/conf/settings.xml +++ /dev/null @@ -1,10 +0,0 @@ - - - - aliyunmaven - * - 阿里云公共仓库 - https://maven.aliyun.com/repository/public - - - \ No newline at end of file diff --git a/distribution/pom.xml b/distribution/pom.xml deleted file mode 100644 index 6b61525c..00000000 --- a/distribution/pom.xml +++ /dev/null @@ -1,64 +0,0 @@ - - - - - - kafka-manager - com.xiaojukeji.kafka - ${kafka-manager.revision} - - - 4.0.0 - - distribution - distribution - pom - - - - ${project.groupId} - kafka-manager-web - ${kafka-manager.revision} - - - - - - - release-kafka-manager - - - ${project.groupId} - kafka-manager-web - ${kafka-manager.revision} - - - - - - org.apache.maven.plugins - maven-assembly-plugin - - - release-km.xml - - posix - - - - make-assembly - install - - single - - - - - - kafka-manager - - - - diff --git a/distribution/readme.md b/distribution/readme.md deleted file mode 100644 index 9d40efa4..00000000 --- a/distribution/readme.md +++ /dev/null @@ -1,22 +0,0 @@ -## 说明 - -### 1.创建mysql数据库文件 -> conf/create_mysql_table.sql - -### 2. 修改配置文件 -> conf/application.yml.example -> 请将application.yml.example 复制一份改名为application.yml; -> 并放在同级目录下(conf/); 并修改成自己的配置 -> 这里的优先级比jar包内配置文件的默认值高; -> - -### 3.启动/关闭kafka-manager -> sh bin/startup.sh 启动 -> -> sh shutdown.sh 关闭 -> - - -### 4.升级jar包 -> 如果是升级, 可以看看文件 `upgrade_config.md` 的配置变更历史; -> \ No newline at end of file diff --git a/distribution/release-km.xml b/distribution/release-km.xml deleted file mode 100755 index d1b0db1c..00000000 --- a/distribution/release-km.xml +++ /dev/null @@ -1,51 +0,0 @@ - - - - ${project.version} - true - - dir - tar.gz - zip - - - - - conf/** - - - - - - bin/* - - 0755 - - - - - - - readme.md - readme.md - - - upgrade_config.md - upgrade_config.md - - - - ../kafka-manager-web/target/kafka-manager.jar - target/ - - - - - - true - - com.xiaojukeji.kafka:kafka-manager-web - - - - diff --git a/distribution/upgrade_config.md b/distribution/upgrade_config.md deleted file mode 100644 index 06eb01b1..00000000 --- a/distribution/upgrade_config.md +++ /dev/null @@ -1,52 +0,0 @@ - -## 版本升级配置变更 -> 本文件 从 V2.2.0 开始记录; 如果配置有变更则会填写到下文中; 如果没有,则表示无变更; -> 当您从一个很低的版本升级时候,应该依次执行中间有过变更的sql脚本 - - - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - -### 1.升级至`V2.2.0`版本 - -#### 1.mysql变更 - -`2.2.0`版本在`cluster`表及`logical_cluster`各增加了一个字段,因此需要执行下面的sql进行字段的增加。 - -```sql -# 往cluster表中增加jmx_properties字段, 这个字段会用于存储jmx相关的认证以及配置信息 -ALTER TABLE `cluster` ADD COLUMN `jmx_properties` TEXT NULL COMMENT 'JMX配置' AFTER `security_properties`; - -# 往logical_cluster中增加identification字段, 同时数据和原先name数据相同, 最后增加一个唯一键. -# 此后, name字段还是表示集群名称, 而identification字段表示的是集群标识, 只能是字母数字及下划线组成, -# 数据上报到监控系统时, 集群这个标识采用的字段就是identification字段, 之前使用的是name字段. 
-ALTER TABLE `logical_cluster` ADD COLUMN `identification` VARCHAR(192) NOT NULL DEFAULT '' COMMENT '逻辑集群标识' AFTER `name`; - -UPDATE `logical_cluster` SET `identification`=`name` WHERE id>=0; - -ALTER TABLE `logical_cluster` ADD INDEX `uniq_identification` (`identification` ASC); -``` - -### 升级至`2.3.0`版本 - -#### 1.mysql变更 -`2.3.0`版本在`gateway_config`表增加了一个描述说明的字段,因此需要执行下面的sql进行字段的增加。 - -```sql -ALTER TABLE `gateway_config` -ADD COLUMN `description` TEXT NULL COMMENT '描述信息' AFTER `version`; -``` - -### 升级至`2.6.0`版本 - -#### 1.mysql变更 -`2.6.0`版本在`account`表增加用户姓名,部门名,邮箱三个字段,因此需要执行下面的sql进行字段的增加。 - -```sql -ALTER TABLE `account` -ADD COLUMN `display_name` VARCHAR(256) NOT NULL DEFAULT '' COMMENT '用户名' AFTER `role`, -ADD COLUMN `department` VARCHAR(256) NOT NULL DEFAULT '' COMMENT '部门名' AFTER `display_name`, -ADD COLUMN `mail` VARCHAR(256) NOT NULL DEFAULT '' COMMENT '邮箱' AFTER `department`; -``` diff --git a/docs/assets/KnowStreamingLogo.png b/docs/assets/KnowStreamingLogo.png new file mode 100644 index 00000000..206c2b6a Binary files /dev/null and b/docs/assets/KnowStreamingLogo.png differ diff --git a/docs/assets/images/common/arch.png b/docs/assets/images/common/arch.png deleted file mode 100644 index 14ec5936..00000000 Binary files a/docs/assets/images/common/arch.png and /dev/null differ diff --git a/docs/assets/images/common/logo_name.png b/docs/assets/images/common/logo_name.png deleted file mode 100644 index b8f40ae3..00000000 Binary files a/docs/assets/images/common/logo_name.png and /dev/null differ diff --git a/docs/assets/readme/KnowStreamingPageDemo.jpg b/docs/assets/readme/KnowStreamingPageDemo.jpg new file mode 100644 index 00000000..a8d97df1 Binary files /dev/null and b/docs/assets/readme/KnowStreamingPageDemo.jpg differ diff --git a/docs/assets/readme/WeChat.png b/docs/assets/readme/WeChat.png new file mode 100644 index 00000000..262d0aae Binary files /dev/null and b/docs/assets/readme/WeChat.png differ diff --git a/docs/assets/readme/ZSXQ.jpg b/docs/assets/readme/ZSXQ.jpg new file mode 100644 index 00000000..ff73c44c Binary files /dev/null and b/docs/assets/readme/ZSXQ.jpg differ diff --git a/docs/dev_guide/LogiKM单元测试和集成测试.md b/docs/dev_guide/LogiKM单元测试和集成测试.md deleted file mode 100644 index 2a23d44b..00000000 --- a/docs/dev_guide/LogiKM单元测试和集成测试.md +++ /dev/null @@ -1,47 +0,0 @@ - ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - - -# LogiKM单元测试和集成测试 - -## 1、单元测试 -### 1.1 单元测试介绍 -单元测试又称模块测试,是针对软件设计的最小单位——程序模块进行正确性检验的测试工作。 -其目的在于检查每个程序单元能否正确实现详细设计说明中的模块功能、性能、接口和设计约束等要求, -发现各模块内部可能存在的各种错误。单元测试需要从程序的内部结构出发设计测试用例。 -多个模块可以平行地独立进行单元测试。 - -### 1.2 LogiKM单元测试思路 -LogiKM单元测试思路主要是测试Service层的方法,通过罗列方法的各种参数, -判断方法返回的结果是否符合预期。单元测试的基类加了@SpringBootTest注解,即每次运行单测用例都启动容器 - -### 1.3 LogiKM单元测试注意事项 -1. 单元测试用例在kafka-manager-core以及kafka-manager-extends下的test包中 -2. 配置在resources/application.yml,包括运行单元测试用例启用的数据库配置等等 -3. 编译打包项目时,加上参数-DskipTests可不执行测试用例,例如使用命令行mvn -DskipTests进行打包 - - - - -## 2、集成测试 -### 2.1 集成测试介绍 -集成测试又称组装测试,是一种黑盒测试。通常在单元测试的基础上,将所有的程序模块进行有序的、递增的测试。 -集成测试是检验程序单元或部件的接口关系,逐步集成为符合概要设计要求的程序部件或整个系统。 - -### 2.2 LogiKM集成测试思路 -LogiKM集成测试主要思路是对Controller层的接口发送Http请求。 -通过罗列测试用例,模拟用户的操作,对接口发送Http请求,判断结果是否达到预期。 -本地运行集成测试用例时,无需加@SpringBootTest注解(即无需每次运行测试用例都启动容器) - -### 2.3 LogiKM集成测试注意事项 -1. 集成测试用例在kafka-manager-web的test包下 -2. 因为对某些接口发送Http请求需要先登陆,比较麻烦,可以绕过登陆,方法可见教程见docs -> user_guide -> call_api_bypass_login -3. 集成测试的配置在resources/integrationTest-settings.properties文件下,包括集群地址,zk地址的配置等等 -4. 如果需要运行集成测试用例,需要本地先启动LogiKM项目 -5. 
编译打包项目时,加上参数-DskipTests可不执行测试用例,例如使用命令行mvn -DskipTests进行打包 \ No newline at end of file diff --git a/docs/dev_guide/assets/dynamic_config_manager/sync_topic_to_db.jpg b/docs/dev_guide/assets/dynamic_config_manager/sync_topic_to_db.jpg deleted file mode 100644 index 460e2756..00000000 Binary files a/docs/dev_guide/assets/dynamic_config_manager/sync_topic_to_db.jpg and /dev/null differ diff --git a/docs/dev_guide/assets/increase_the_indicators_reported_to_monitor_system/collect_topic_metrics.jpg b/docs/dev_guide/assets/increase_the_indicators_reported_to_monitor_system/collect_topic_metrics.jpg deleted file mode 100644 index c2613d0a..00000000 Binary files a/docs/dev_guide/assets/increase_the_indicators_reported_to_monitor_system/collect_topic_metrics.jpg and /dev/null differ diff --git a/docs/dev_guide/assets/increase_the_indicators_reported_to_monitor_system/sink_metrcis.png b/docs/dev_guide/assets/increase_the_indicators_reported_to_monitor_system/sink_metrcis.png deleted file mode 100644 index ba27bc1c..00000000 Binary files a/docs/dev_guide/assets/increase_the_indicators_reported_to_monitor_system/sink_metrcis.png and /dev/null differ diff --git a/docs/dev_guide/assets/kcm/kcm_principle.png b/docs/dev_guide/assets/kcm/kcm_principle.png deleted file mode 100644 index d206f57c..00000000 Binary files a/docs/dev_guide/assets/kcm/kcm_principle.png and /dev/null differ diff --git a/docs/dev_guide/assets/monitor_system_integrate_with_self/change_config.jpg b/docs/dev_guide/assets/monitor_system_integrate_with_self/change_config.jpg deleted file mode 100644 index bd03a303..00000000 Binary files a/docs/dev_guide/assets/monitor_system_integrate_with_self/change_config.jpg and /dev/null differ diff --git a/docs/dev_guide/assets/monitor_system_integrate_with_self/integrate_ms.jpg b/docs/dev_guide/assets/monitor_system_integrate_with_self/integrate_ms.jpg deleted file mode 100644 index fde62c49..00000000 Binary files a/docs/dev_guide/assets/monitor_system_integrate_with_self/integrate_ms.jpg and /dev/null differ diff --git a/docs/dev_guide/assets/monitor_system_integrate_with_self/open_sink_schedule.jpg b/docs/dev_guide/assets/monitor_system_integrate_with_self/open_sink_schedule.jpg deleted file mode 100644 index d1b4c270..00000000 Binary files a/docs/dev_guide/assets/monitor_system_integrate_with_self/open_sink_schedule.jpg and /dev/null differ diff --git a/docs/dev_guide/assets/monitor_system_integrate_with_self/sink_metrics.jpg b/docs/dev_guide/assets/monitor_system_integrate_with_self/sink_metrics.jpg deleted file mode 100644 index 119e95b0..00000000 Binary files a/docs/dev_guide/assets/monitor_system_integrate_with_self/sink_metrics.jpg and /dev/null differ diff --git a/docs/dev_guide/assets/multi_version_compatible/registerHandler.png b/docs/dev_guide/assets/multi_version_compatible/registerHandler.png new file mode 100644 index 00000000..f7b040dc Binary files /dev/null and b/docs/dev_guide/assets/multi_version_compatible/registerHandler.png differ diff --git a/docs/dev_guide/assets/startup_using_source_code/IDEA配置.jpg b/docs/dev_guide/assets/startup_using_source_code/IDEA配置.jpg new file mode 100644 index 00000000..237aaa42 Binary files /dev/null and b/docs/dev_guide/assets/startup_using_source_code/IDEA配置.jpg differ diff --git a/docs/dev_guide/connect_jmx_failed.md b/docs/dev_guide/connect_jmx_failed.md deleted file mode 100644 index 0ac57785..00000000 --- a/docs/dev_guide/connect_jmx_failed.md +++ /dev/null @@ -1,107 +0,0 @@ - ---- - 
-![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - -## JMX-连接失败问题解决 - -集群正常接入Logi-KafkaManager之后,即可以看到集群的Broker列表,此时如果查看不了Topic的实时流量,或者是Broker的实时流量信息时,那么大概率就是JMX连接的问题了。 - -下面我们按照步骤来一步一步的检查。 - -### 1、问题&说明 - -**类型一:JMX配置未开启** - -未开启时,直接到`2、解决方法`查看如何开启即可。 - -![check_jmx_opened](./assets/connect_jmx_failed/check_jmx_opened.jpg) - - -**类型二:配置错误** - -`JMX`端口已经开启的情况下,有的时候开启的配置不正确,此时也会导致出现连接失败的问题。这里大概列举几种原因: - -- `JMX`配置错误:见`2、解决方法`。 -- 存在防火墙或者网络限制:网络通的另外一台机器`telnet`试一下看是否可以连接上。 -- 需要进行用户名及密码的认证:见`3、解决方法 —— 认证的JMX`。 -- 当logikm和kafka不在同一台机器上时,kafka的Jmx端口不允许其他机器访问:见`4、解决方法`。 - - -错误日志例子: -``` -# 错误一: 错误提示的是真实的IP,这样的话基本就是JMX配置的有问题了。 -2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:192.168.0.1 port:9999. -java.rmi.ConnectException: Connection refused to host: 192.168.0.1; nested exception is: - - -# 错误二:错误提示的是127.0.0.1这个IP,这个是机器的hostname配置的可能有问题。 -2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:127.0.0.1 port:9999. -java.rmi.ConnectException: Connection refused to host: 127.0.0.1;; nested exception is: -``` - -### 2、解决方法 - -这里仅介绍一下比较通用的解决方式,如若有更好的方式,欢迎大家指导告知一下。 - -修改`kafka-server-start.sh`文件: -``` -# 在这个下面增加JMX端口的配置 -if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then - export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G" - export JMX_PORT=9999 # 增加这个配置, 这里的数值并不一定是要9999 -fi -``` - -  - -修改`kafka-run-class.sh`文件 -``` -# JMX settings -if [ -z "$KAFKA_JMX_OPTS" ]; then - KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=${当前机器的IP}" -fi - -# JMX port to use -if [ $JMX_PORT ]; then - KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT" -fi -``` - - -### 3、解决方法 —— 认证的JMX - -如果您是直接看的这个部分,建议先看一下上一节:`2、解决方法`以确保`JMX`的配置没有问题了。 - -在JMX的配置等都没有问题的情况下,如果是因为认证的原因导致连接不了的,此时可以使用下面介绍的方法进行解决。 - -**当前这块后端刚刚开发完成,可能还不够完善,有问题随时沟通。** - -`Logi-KafkaManager 2.2.0+`之后的版本后端已经支持`JMX`认证方式的连接,但是还没有界面,此时我们可以往`cluster`表的`jmx_properties`字段写入`JMX`的认证信息。 - -这个数据是`json`格式的字符串,例子如下所示: - -```json -{ - "maxConn": 10, # KM对单台Broker的最大JMX连接数 - "username": "xxxxx", # 用户名 - "password": "xxxx", # 密码 - "openSSL": true, # 开启SSL, true表示开启ssl, false表示关闭 -} -``` - -  - -SQL的例子: -```sql -UPDATE cluster SET jmx_properties='{ "maxConn": 10, "username": "xxxxx", "password": "xxxx", "openSSL": false }' where id={xxx}; -``` -### 4、解决方法 —— 不允许其他机器访问 -![1971b46243fe1d547063ee55b1505ed](https://user-images.githubusercontent.com/2869938/154413486-f6531946-8c4c-447e-aa2e-b112e5e623d6.png) - -该图中的127.0.0.1表明该端口只允许本机访问. 
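A quick way to confirm the binding yourself (a minimal sketch — the JMX port 9999 and the use of netstat/ss are assumptions, substitute whatever port your broker actually exposes):

```
# List the listening socket for the JMX port (9999 assumed here).
# 127.0.0.1:9999 means only local connections are accepted;
# 0.0.0.0:9999 or <broker-ip>:9999 means a remote LogiKM instance can connect.
netstat -tlnp | grep 9999
# or, on hosts without netstat:
ss -tlnp | grep 9999
```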
-在cdh中可以点击配置->搜索jmx->寻找broker_java_opts 修改com.sun.management.jmxremote.host和java.rmi.server.hostname为本机ip diff --git a/docs/dev_guide/drawio/KCM实现原理.drawio b/docs/dev_guide/drawio/KCM实现原理.drawio deleted file mode 100644 index d2742a80..00000000 --- a/docs/dev_guide/drawio/KCM实现原理.drawio +++ /dev/null @@ -1,89 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/docs/dev_guide/dynamic_config_manager.md b/docs/dev_guide/dynamic_config_manager.md deleted file mode 100644 index c3365138..00000000 --- a/docs/dev_guide/dynamic_config_manager.md +++ /dev/null @@ -1,169 +0,0 @@ - ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - -# 动态配置管理 - -## 0、目录 - -- 1、Topic定时同步任务 -- 2、专家服务——Topic分区热点 -- 3、专家服务——Topic分区不足 -- 4、专家服务——Topic资源治理 -- 5、账单配置 - - -## 1、Topic定时同步任务 - -### 1.1、配置的用途 -`Logi-KafkaManager`在设计上,所有的资源都是挂在应用(app)下面。 如果接入的Kafka集群已经存在Topic了,那么会导致这些Topic不属于任何的应用,从而导致很多管理上的不便。 - -因此,需要有一个方式将这些无主的Topic挂到某个应用下面。 - -这里提供了一个配置,会定时自动将集群无主的Topic挂到某个应用下面下面。 - -### 1.2、相关实现 - -就是一个定时任务,该任务会定期做同步的工作。具体代码的位置在`com.xiaojukeji.kafka.manager.task.dispatch.op`包下面的`SyncTopic2DB`类。 - -### 1.3、配置说明 - -**步骤一:开启该功能** - -在application.yml文件中,增加如下配置,已经有该配置的话,直接把false修改为true即可 -```yml -# 任务相关的开关 -task: - op: - sync-topic-enabled: true # 无主的Topic定期同步到DB中 -``` - -**步骤二:配置管理中指定挂在那个应用下面** - -配置的位置: - -![sync_topic_to_db](./assets/dynamic_config_manager/sync_topic_to_db.jpg) - -配置键:`SYNC_TOPIC_2_DB_CONFIG_KEY` - -配置值(JSON数组): -- clusterId:需要进行定时同步的集群ID -- defaultAppId:该集群无主的Topic将挂在哪个应用下面 -- addAuthority:是否需要加上权限, 默认是false。因为考虑到这个挂载只是临时的,我们不希望用户使用这个App,同时后续可能移交给真正的所属的应用,因此默认是不加上权限。 - -**注意,这里的集群ID,或者是应用ID不存在的话,会导致配置不生效。该任务对已经在DB中的Topic不会进行修改** -```json -[ - { - "clusterId": 1234567, - "defaultAppId": "ANONYMOUS", - "addAuthority": false - }, - { - "clusterId": 7654321, - "defaultAppId": "ANONYMOUS", - "addAuthority": false - } -] -``` - ---- - -## 2、专家服务——Topic分区热点 - -在`Region`所圈定的Broker范围内,某个Topic的Leader数在这些圈定的Broker上分布不均衡时,我们认为该Topic是存在热点的Topic。 - -备注:单纯的查看Leader数的分布,确实存在一定的局限性,这块欢迎贡献更多的热点定义于代码。 - - -Topic分区热点相关的动态配置(页面在运维管控->平台管理->配置管理): - -配置Key: -``` -REGION_HOT_TOPIC_CONFIG -``` - -配置Value: -```json -{ - "maxDisPartitionNum": 2, # Region内Broker间的leader数差距超过2时,则认为是存在热点的Topic - "minTopicBytesInUnitB": 1048576, # 流量低于该值的Topic不做统计 - "ignoreClusterIdList": [ # 忽略的集群 - 50 - ] -} -``` - ---- - -## 3、专家服务——Topic分区不足 - -总流量除以分区数,超过指定值时,则我们认为存在Topic分区不足。 - -Topic分区不足相关的动态配置(页面在运维管控->平台管理->配置管理): - -配置Key: -``` -TOPIC_INSUFFICIENT_PARTITION_CONFIG -``` - -配置Value: -```json -{ - "maxBytesInPerPartitionUnitB": 3145728, # 单分区流量超过该值, 则认为分区不去 - "minTopicBytesInUnitB": 1048576, # 流量低于该值的Topic不做统计 - "ignoreClusterIdList": [ # 忽略的集群 - 50 - ] -} -``` -## 4、专家服务——Topic资源治理 - -首先,我们认为在一定的时间长度内,Topic的分区offset没有任何变化的Topic,即没有数据写入的Topic,为过期的Topic。 - -Topic分区不足相关的动态配置(页面在运维管控->平台管理->配置管理): - -配置Key: -``` -EXPIRED_TOPIC_CONFIG -``` - -配置Value: -```json -{ - "minExpiredDay": 30, #过期时间大于此值才显示, - "filterRegex": ".*XXX\\s+", #忽略符合此正则规则的Topic - "ignoreClusterIdList": [ # 忽略的集群 - 50 - ] -} -``` - -## 5、账单配置 - -Logi-KafkaManager除了作为Kafka运维管控平台之外,实际上还会有一些资源定价相关的功能。 - -当前定价方式:当月Topic的maxAvgDay天的峰值的均值流量作为Topic的使用额度。使用的额度 * 单价 * 溢价(预留buffer) 就等于当月的费用。 -详细的计算逻辑见:com.xiaojukeji.kafka.manager.task.dispatch.biz.CalKafkaTopicBill; 和 com.xiaojukeji.kafka.manager.task.dispatch.biz.CalTopicStatistics; - -这块在计算Topic的费用的配置如下所示: - 
-配置Key: -``` -KAFKA_TOPIC_BILL_CONFIG -``` - -配置Value: - -```json -{ - "maxAvgDay": 10, # 使用额度的计算规则 - "quotaRatio": 1.5, # 溢价率 - "priseUnitMB": 100 # 单价,即单MB/s流量多少钱 -} -``` diff --git a/docs/dev_guide/gateway_config_manager.md b/docs/dev_guide/gateway_config_manager.md deleted file mode 100644 index 8c656531..00000000 --- a/docs/dev_guide/gateway_config_manager.md +++ /dev/null @@ -1,10 +0,0 @@ - ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - -# Kafka-Gateway 配置说明 \ No newline at end of file diff --git a/docs/dev_guide/monitor_system_integrate_with_n9e.md b/docs/dev_guide/monitor_system_integrate_with_n9e.md deleted file mode 100644 index e5456ce3..00000000 --- a/docs/dev_guide/monitor_system_integrate_with_n9e.md +++ /dev/null @@ -1,42 +0,0 @@ - ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - -# 监控系统集成——夜莺 - -- `Kafka-Manager`通过将 监控的数据 以及 监控的规则 都提交给夜莺,然后依赖夜莺的监控系统从而实现监控告警功能。 - -- 监控数据上报 & 告警规则的创建等能力已经具备。但类似查看告警历史,告警触发时的监控数据等正在集成中(暂时可以到夜莺系统进行查看),欢迎有兴趣的同学进行共建 或 贡献代码。 - -## 1、配置说明 - -```yml -# 配置文件中关于监控部分的配置 -monitor: - enabled: false - n9e: - nid: 2 - user-token: 123456 - # 夜莺 mon监控服务 地址 - mon: - base-url: http://127.0.0.1:8006 - # 夜莺 transfer上传服务 地址 - sink: - base-url: http://127.0.0.1:8008 - # 夜莺 rdb资源服务 地址 - rdb: - base-url: http://127.0.0.1:80 - -# enabled: 表示是否开启监控告警的功能, true: 开启, false: 不开启 -# n9e.nid: 夜莺的节点ID -# n9e.user-token: 用户的密钥,在夜莺的个人设置中 -# n9e.mon.base-url: 监控地址 -# n9e.sink.base-url: 数据上报地址 -# n9e.rdb.base-url: 用户资源中心地址 -``` - diff --git a/docs/dev_guide/monitor_system_integrate_with_self.md b/docs/dev_guide/monitor_system_integrate_with_self.md deleted file mode 100644 index ed3fe30c..00000000 --- a/docs/dev_guide/monitor_system_integrate_with_self.md +++ /dev/null @@ -1,54 +0,0 @@ - ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - -# 监控系统集成 - -- 监控系统默认与 [夜莺] (https://github.com/didi/nightingale) 进行集成; -- 对接自有的监控系统需要进行简单的二次开发,即实现部分监控告警模块的相关接口即可; -- 集成会有两块内容,一个是指标数据上报的集成,还有一个是监控告警规则的集成; - -## 1、指标数据上报集成 - -仅完成这一步的集成之后,即可将监控数据上报到监控系统中,此时已能够在自己的监控系统进行监控告警规则的配置了。 - -**步骤一:实现指标上报的接口** - -- 按照自己内部监控系统的数据格式要求,将数据进行组装成符合自己内部监控系统要求的数据进行上报,具体的可以参考夜莺集成的实现代码。 -- 至于会上报哪些指标,可以查看有哪些地方调用了该接口。 - -![sink_metrics](./assets/monitor_system_integrate_with_self/sink_metrics.jpg) - -**步骤二:相关配置修改** - -![change_config](./assets/monitor_system_integrate_with_self/change_config.jpg) - -**步骤三:开启上报任务** - -![open_sink_schedule](./assets/monitor_system_integrate_with_self/open_sink_schedule.jpg) - - -## 2、监控告警规则集成 - -完成**1、指标数据上报集成**之后,即可在自己的监控系统进行监控告警规则的配置了。完成该步骤的集成之后,可以在`Logi-KafkaManager`中进行监控告警规则的增删改查等等。 - -大体上和**1、指标数据上报集成**一致, - -**步骤一:实现相关接口** - -![integrate_ms](./assets/monitor_system_integrate_with_self/integrate_ms.jpg) - -实现完成步骤一之后,接下来的步骤和**1、指标数据上报集成**中的步骤二、步骤三一致,都需要进行相关配置的修改即可。 - - -## 3、总结 - -简单介绍了一下监控告警的集成,嫌麻烦的同学可以仅做 **1、指标数据上报集成** 这一节的内容即可满足一定场景下的需求。 - - -**集成过程中,有任何觉得文档没有说清楚的地方或者建议,欢迎入群交流,也欢迎贡献代码,觉得好也辛苦给个star。** diff --git a/docs/dev_guide/use_mysql_8.md b/docs/dev_guide/use_mysql_8.md deleted file mode 100644 index 6c8f6b38..00000000 --- a/docs/dev_guide/use_mysql_8.md +++ /dev/null @@ -1,41 +0,0 @@ - ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - -# 使用`MySQL 8` - -感谢 [herry-hu](https://github.com/herry-hu) 提供的方案。 - - -当前因为无法同时兼容`MySQL 8`与`MySQL 5.7`,因此代码中默认的版本还是`MySQL 5.7`。 - - -当前如需使用`MySQL 8`,则需按照下述流程进行简单修改代码。 - - -- 
Step1. 修改application.yml中的MySQL驱动类 -```shell - -# 将driver-class-name后面的驱动类修改为: -# driver-class-name: com.mysql.jdbc.Driver -driver-class-name: com.mysql.cj.jdbc.Driver -``` - - -- Step2. 修改MySQL依赖包 -```shell -# 将根目录下面的pom.xml文件依赖的`MySQL`依赖包版本调整为 - - - mysql - mysql-connector-java -# 5.1.41 - 8.0.20 - -``` - diff --git a/docs/dev_guide/健康巡检.md b/docs/dev_guide/健康巡检.md new file mode 100644 index 00000000..937344df --- /dev/null +++ b/docs/dev_guide/健康巡检.md @@ -0,0 +1,98 @@ + +![Logo](../assets/KnowStreamingLogo.png) + + + + +# 健康巡检 + +## 1、前言 + + + +--- + +## 2、已有巡检 + +### 2.1、Cluster健康巡检(1个) + +#### 2.1.1、集群Controller数错误 + +**说明** + +- 集群Controller数不等于1,表明集群集群无Controller或者出现了多个Controller,该 + + +**配置** + +--- + +### 2.2、Broker健康巡检(2个) + +#### 2.2.1、Broker-RequestQueueSize被打满 + +**说明** + +- Broker的RequestQueueSize,被打满; + + +**配置** + +--- + + +#### 2.2.2、Broker-NetworkProcessorAvgIdle过低 + +**说明** + +- Broker的NetworkProcessorAvgIdle指标,当前过低; + + +**配置** + +--- + +### 2.3、Topic健康巡检(2个) + + +#### 2.3.1、Topic 无Leader数 + +**说明** + +- 当前Topic的无Leader分区数超过一定值; + + +**配置** + + +#### 2.3.1、Topic 长期处于未同步状态 + +**说明** + +- 指定的一段时间内,Topic一直处于未同步的状态; + + +**配置** + +--- + +### 2.4、Group健康巡检(1个) + + +#### 2.4.1、Group Re-Balance太频繁 + +**说明** + +- 指定的一段时间内,Group Re-Balance的次数是否过多; + + +**配置** + + + +--- + +## 3、自定义增强 + +如何增加想要的巡检? + diff --git a/docs/dev_guide/免登录调用接口.md b/docs/dev_guide/免登录调用接口.md new file mode 100644 index 00000000..cfaaf688 --- /dev/null +++ b/docs/dev_guide/免登录调用接口.md @@ -0,0 +1,43 @@ + +![Logo](../assets/KnowStreamingLogo.png) + +## 登录绕过 + +### 背景 + +现在除了开放出来的第三方接口,其他接口都需要走登录认证。 + +但是第三方接口不多,开放出来的能力有限,但是登录的接口又需要登录,非常的麻烦。 + +因此,新增了一个登录绕过的功能,为一些紧急临时的需求,提供一个调用不需要登录的能力。 + +### 使用方式 + +步骤一:接口调用时,在header中,增加如下信息: +```shell +# 表示开启登录绕过 +Trick-Login-Switch : on + +# 登录绕过的用户, 这里可以是admin, 或者是其他的, 但是必须在运维管控->平台管理->用户管理中设置了该用户。 +Trick-Login-User : admin +``` + +  + +步骤二:在运维管控->平台管理->平台配置上,设置允许了该用户以绕过的方式登录 +```shell +# 设置的key,必须是这个 +SECURITY.TRICK_USERS + +# 设置的value,是json数组的格式,例如 +[ "admin", "logi"] +``` + +  + +步骤三:解释说明 + +设置完成上面两步之后,就可以直接调用需要登录的接口了。 + +但是还有一点需要注意,绕过的用户仅能调用他有权限的接口,比如一个普通用户,那么他就只能调用普通的接口,不能去调用运维人员的接口。 + diff --git a/docs/dev_guide/周期任务说明文档.md b/docs/dev_guide/周期任务说明文档.md deleted file mode 100644 index 51587eb4..00000000 --- a/docs/dev_guide/周期任务说明文档.md +++ /dev/null @@ -1,39 +0,0 @@ ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - - -| 定时任务名称或方法名 | 所在类 | 详细说明 | cron | cron说明 | 线程数量 | -| -------------------------------------- | -------------------------------------- | ------------------------------------------ | --------------- | --------------------------------------- | -------- | -| calKafkaBill | CalKafkaTopicBill | 计算Kafka使用账单 | 0 0 1 * * ? | 每天凌晨1点执行一次 | 1 | -| calRegionCapacity | CalRegionCapacity | 计算Region容量 | 0 0 0/12 * * ? | 每隔12小时执行一次,在0分钟0秒时触发 | 1 | -| calTopicStatistics | CalTopicStatistics | 定时计算Topic统计数据 | 0 0 0/4 * * ? | 每隔4小时执行一次,在0分钟0秒时触发 | 5 | -| flushBrokerTable | FlushBrokerTable | 定时刷新BrokerTable数据 | 0 0 0/1 * * ? | 每隔1小时执行一次,在0分钟0秒时触发 | 1 | -| flushExpiredTopic | FlushExpiredTopic | 定期更新过期Topic | 0 0 0/5 * * ? | 每隔5小时执行一次,在0分钟0秒时触发 | 1 | -| syncClusterTaskState | SyncClusterTaskState | 同步更新集群任务状态 | 0 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的0秒时触发 | 1 | -| newCollectAndPublishCGData | CollectAndPublishCGData | 收集并发布消费者指标数据 | 30 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的30秒时触发 | 10 | -| collectAndPublishCommunityTopicMetrics | CollectAndPublishCommunityTopicMetrics | Topic社区指标收集 | 31 0/1 * * * ? 
| 每隔1分钟执行一次,在每分钟的30秒时触发 | 5 | -| collectAndPublishTopicThrottledMetrics | CollectAndPublishTopicThrottledMetrics | 收集和发布Topic限流信息 | 11 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的11秒时触发 | 5 | -| deleteMetrics | DeleteMetrics | 定期删除Metrics信息 | 0 0/2 * * * ? | 每隔2分钟执行一次,在每分钟的0秒时触发 | 1 | -| storeDiDiAppTopicMetrics | StoreDiDiAppTopicMetrics | JMX中获取appId维度的流量信息存DB | 41 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的41秒时触发 | 5 | -| storeDiDiTopicRequestTimeMetrics | StoreDiDiTopicRequestTimeMetrics | JMX中获取的TopicRequestTimeMetrics信息存DB | 51 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的51秒时触发 | 5 | -| autoHandleTopicOrder | AutoHandleTopicOrder | 定时自动处理Topic相关工单 | 0 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的0秒时触发 | 1 | -| automatedHandleOrder | AutomatedHandleOrder | 工单自动化审批 | 0 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的0秒时触发 | 1 | -| flushReassignment | FlushReassignment | 定时处理分区迁移任务 | 0 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的0秒时触发 | 1 | -| syncTopic2DB | SyncTopic2DB | 定期将未落盘的Topic刷新到DB中 | 0 0/2 * * * ? | 每隔2分钟执行一次,在每分钟的0秒时触发 | 1 | -| sinkCommunityTopicMetrics2Monitor | SinkCommunityTopicMetrics2Monitor | 定时上报Topic监控指标 | 1 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的1秒时触发 | 5 | -| flush方法 | LogicalClusterMetadataManager | 定时刷新逻辑集群元数据到缓存中 | 0/30 * * * * ? | 每隔30秒执行一次 | 1 | -| flush方法 | AccountServiceImpl | 定时刷新account信息到缓存中 | 0/5 * * * * ? | 每隔5秒执行一次 | 1 | -| ipFlush方法 | HeartBeat | 定时获取管控平台所在机器IP等信息到DB | 0/10 * * * * ? | 每隔10秒执行一次 | 1 | -| flushTopicMetrics方法 | FlushTopicMetrics | 定时刷新topic指标到缓存中 | 5 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的5秒时触发 | 1 | -| schedule方法 | FlushBKConsumerGroupMetadata | 定时刷新broker上消费组信息到缓存中 | 15 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的15秒时触发 | 1 | -| flush方法 | FlushClusterMetadata | 定时刷新物理集群元信息到缓存中 | 0/30 * * * * ? | 每隔30秒执行一次 | 1 | -| flush方法 | FlushTopicProperties | 定时刷新物理集群配置到缓存中 | 25 0/1 * * * ? | 每隔1分钟执行一次,在每分钟的25秒时触发 | 1 | -| schedule方法 | FlushZKConsumerGroupMetadata | 定时刷新zk上的消费组信息到缓存中 | 35 0/1 * * * ? 
| 每隔1分钟执行一次,在每分钟的35秒时触发 | 1 | - - - diff --git a/docs/dev_guide/多版本兼容方案.md b/docs/dev_guide/多版本兼容方案.md new file mode 100644 index 00000000..acbde789 --- /dev/null +++ b/docs/dev_guide/多版本兼容方案.md @@ -0,0 +1,42 @@ +## 3.2、Kafka 多版本兼容方案 + +  当前 KnowStreaming 支持纳管多个版本的 kafka 集群,由于不同版本的 kafka 在指标采集、接口查询、行为操作上有些不一致,因此 KnowStreaming 需要一套机制来解决多 kafka 版本的纳管兼容性问题。 + +### 3.2.1、整体思路 + +  由于需要纳管多个 kafka 版本,而且未来还可能会纳管非 kafka 官方的版本,kafka 的版本号会存在着多种情况,所以首先要明确一个核心思想:KnowStreaming 提供尽可能多的纳管能力,但是不提供无限的纳管能力,每一个版本的 KnowStreaming 只纳管其自身声明的 kafka 版本,后续随着 KnowStreaming 自身版本的迭代,会逐步支持更多 kafka 版本的纳管接入。 + +### 3.2.2、构建版本兼容列表 + +  每一个版本的 KnowStreaming 都声明一个自身支持纳管的 kafka 版本列表,并且对 kafka 的版本号进行归一化处理,后续所有 KnowStreaming 对不同 kafka 集群的操作都和这个集群对应的版本号严格相关。 + +  KnowStreaming 对外提供自身所支持的 kafka 版本兼容列表,用以声明自身支持的版本范围。 + +  对于在集群接入过程中,如果希望接入当前 KnowStreaming 不支持的 kafka 版本的集群,KnowStreaming 建议在于的过程中选择相近的版本号接入。 + +### 3.2.3、构建版本兼容性字典 + +  在构建了 KnowStreaming 支持的 kafka 版本列表的基础上,KnowStreaming 在实现过程中,还会声明自身支持的所有兼容性,构建兼容性字典。 + +  当前 KnowStreaming 支持的 kafka 版本兼容性字典包括三个维度: + +- 指标采集:同一个指标在不同 kafka 版本下可能获取的方式不一样,不同版本的 kafka 可能会有不同的指标,因此对于指标采集的处理需要构建兼容性字典。 +- kafka api:同一个 kafka 的操作处理的方式在不同 kafka 版本下可能存在不一致,如:topic 的创建,因此 KnowStreaming 针对不同 kafka-api 的处理需要构建兼容性字典。 +- 平台操作:KnowStreaming 在接入不同版本的 kafka 集群的时候,在平台页面上会根据不同的 kafka 版。 + +兼容性字典的核心设计字段如下: + +| 兼容性维度 | 兼容项名称 | 最小 Kafka 版本号(归一化) | 最大 Kafka 版本号(归一化) | 处理器 | +| ---------- | ---------- | --------------------------- | --------------------------- | ------ | + +KS-KM 根据其需要纳管的 kafka 版本,按照上述三个维度构建了完善了兼容性字典。 + +### 3.2.4、兼容性问题 + +  KS-KM 的每个版本针对需要纳管的 kafka 版本列表,事先分析各个版本的差异性和产品需求,同时 KS-KM 构建了一套专门处理兼容性的服务,来进行兼容性的注册、字典构建、处理器分发等操作,其中版本兼容性处理器是来具体处理不同 kafka 版本差异性的地方。 + +​ ![registerHandler](./assets/multi_version_compatible/registerHandler.png) + +  如上图所示,KS-KM 的 topic 服务在面对不同 kafka 版本时,其 topic 的创建、删除、扩容由于 kafka 版本自身的差异,导致 KnowStreaming 的处理也不一样,所以需要根据不同的 kafka 版本来实现不同的兼容性处理器,同时向 KnowStreaming 的兼容服务进行兼容性的注册,构建兼容性字典,后续在 KnowStreaming 的运行过程中,针对不同的 kafka 版本即可分发到不同的处理器中执行。 + +  后续随着 KnowStreaming 产品的发展,如果有新的兼容性的地方需要增加,只需要实现新版本的处理器,增加注册项即可。 diff --git a/docs/dev_guide/如何使用集群安装部署功能.md b/docs/dev_guide/如何使用集群安装部署功能.md deleted file mode 100644 index 5e5f0101..00000000 --- a/docs/dev_guide/如何使用集群安装部署功能.md +++ /dev/null @@ -1,89 +0,0 @@ - ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - -# 如何使用集群安装部署功能? 
- -[TOC] - -## 1、实现原理 - -![KCM实现原理](./assets/kcm/kcm_principle.png) - -- LogiKM上传安装包到S3服务; -- LogiKM调用夜莺-Job服务接口,创建执行[kcm_script.sh](https://github.com/didi/LogiKM/blob/master/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh)脚本的任务,kcm_script.sh脚本是安装部署Kafka集群的脚本; -- 夜莺将任务脚本下发到具体的机器上,通过夜莺Agent执行该脚本; -- kcm_script.sh脚本会进行Kafka-Broker的安装部署; - - ---- - -## 2、使用方式 - -### 2.1、第一步:修改配置 - -**配置application.yml文件** -```yaml -# -kcm: - enabled: false # 是否开启,将其修改为true - s3: # s3 存储服务 - endpoint: s3.didiyunapi.com - access-key: 1234567890 - secret-key: 0987654321 - bucket: logi-kafka - n9e: # 夜莺 - base-url: http://127.0.0.1:8004 # 夜莺job服务地址 - user-token: 12345678 # 用户的token - timeout: 300 # 单台操作的超时时间 - account: root # 操作时使用的账号 - script-file: kcm_script.sh # 脚本,已内置好,在源码的kcm模块内,此处配置无需修改 - logikm-url: http://127.0.0.1:8080 # logikm部署地址,部署时kcm_script.sh会调用logikm检查部署中的一些状态,这里只需要填写 http://IP:PORT 就可以了 - - -account: - jump-login: - gateway-api: false # 网关接口 - third-part-api: false # 第三方接口,将其修改为true,即允许未登录情况下调用开放的第三方接口 -``` - -### 2.2、第二步:检查服务 - -**检查s3服务** -- 测试 "运维管控-》集群运维-》版本管理" 页面的上传,查看等功能是否都正常。如果存在不正常,则需要查看s3的配置是否正确; -- 如果都没有问题,则上传Kafka的以.tgz结尾的安装包以及server.properties文件; - -**检查夜莺Job服务** -- 创建一个job任务,机器选择需要安装Kafka集群的机器,然后执行的命令是echo "Hello LogiKM",看能否被成功执行。如果不行,则需要检查夜莺的安装; -- 如果没有问题则表示夜莺和所需部署的机器之间的交互是没有问题的; - -### 2.3、第三步:接入集群 - -在LogiKM的 “运维管控-》集群列表” 中接入需要安装部署的集群,**PS:此时是允许接入一个没有任何Broker的空的Kafka集群**,其中对的bootstrapServers配置搭建完成后的Kafka集群地址就可以了,而ZK地址必须和集群的server.properties中的ZK地址保持一致; - -### 2.4、第四步:部署集群 - -- 打开LogiKM的 “运维管控-》集群运维-》集群任务” 页面,点击 “新建集群任务” 按钮; -- 选择集群、任务类型、包版本、server配置及填写主机列表,然后点击确认,即可在夜莺的Job服务中心中创建一个任务出来。**PS:如果创建失败,可以看一下日志我为什么创建失败**; -- 随后可以点击详情及状态对任务进行操作; - -### 2.5、可能问题 - -#### 2.5.1、问题一:任务执行超时、失败等 - -进入夜莺Job服务中心,查看对应的任务的相关日志; - -- 提示安装包下载失败,则需要查看对应的s3服务是否可以直接wget下载安装包,如果不可以则需要对kcm_script.sh脚本进行修改; -- 提示调用LogiKM失败,则可以使用postman手动测试一下kcm_script.sh脚本调用LogiKM的那个接口是否有问题,如果存在问题则进行相应的修改;PS:具体接口见kcm_script.sh脚本 - - -## 3、备注说明 - -- 集群安装部署,仅安装部署Kafka-Broker,不安装Kafka的ZK服务; -- 安装部署中,有任何定制化的需求,例如修改安装的目录等,可以通过修改kcm_script.sh脚本实现; -- kcm_script.sh脚本位置:[kcm_script.sh](https://github.com/didi/LogiKM/blob/master/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh); diff --git a/docs/dev_guide/如何增加上报监控系统指标.md b/docs/dev_guide/如何增加上报监控系统指标.md deleted file mode 100644 index f1ed9260..00000000 --- a/docs/dev_guide/如何增加上报监控系统指标.md +++ /dev/null @@ -1,53 +0,0 @@ - ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - -# 如何增加上报监控系统指标? 
- -## 0、前言 - -LogiKM是 **一站式`Apache Kafka`集群指标监控与运维管控平台** ,当前会将消费Lag,Topic流量等指标上报到监控系统中,从而方便用户在监控系统中对这些指标配置监控告警规则,进而达到监控自身客户端是否正常的目的。 - -那么,如果我们想增加一个新的监控指标,应该如何做呢,比如我们想监控Broker的流量,监控Broker的存活信息,监控集群Controller个数等等。 - -在具体介绍之前,我们大家都知道,Kafka监控相关的信息,基本都存储于Broker、Jmx以及ZK中。当前LogiKM也已经具备从这三个地方获取数据的基本能力,因此基于LogiKM我们再获取其他指标,总体上还是非常方便的。 - -这里我们就以已经获取到的Topic流量信息为例,看LogiKM如何实现Topic指标的获取并上报的。 - ---- - -## 1、确定指标位置 - -基于对Kafka的了解,我们知道Topic流量信息这个指标是存储于Jmx中的,因此我们需要从Jmx中获取。大家如果对于自己所需要获取的指标存储在何处不太清楚的,可以加入我们维护的Kafka中文社区(README中有二维码)中今天沟通交流。 - ---- - -## 2、指标获取 - -Topic流量指标的获取详细见图中说明。 - -![Topic流量指标采集说明](./assets/increase_the_indicators_reported_to_monitor_system/collect_topic_metrics.jpg) - ---- - -## 3、指标上报 - -上一步我们已经采集到Topic流量指标了,下一步就是将该指标上报到监控系统,这块只需要按照监控系统要求的格式,将数据上报即可。 - -LogiKM中有一个monitor模块,具体的如下图所示: - -![指标上报](./assets/increase_the_indicators_reported_to_monitor_system/sink_metrcis.png) - - -## 4、补充说明 - -监控系统对接的相关内容见: - -[监控系统集成](./monitor_system_integrate_with_self.md) - -[监控系统集成例子——集成夜莺](./monitor_system_integrate_with_n9e.md) diff --git a/docs/dev_guide/指标说明.md b/docs/dev_guide/指标说明.md new file mode 100644 index 00000000..fe342d5f --- /dev/null +++ b/docs/dev_guide/指标说明.md @@ -0,0 +1,152 @@ +## 2.3、指标说明 + +  当前 KnowStreaming 支持针对 kafka 集群的多维度指标的采集和展示,同时也支持多个 kafka 版本的指标进行兼容,以下是 KnowStreaming 支持的指标说明。 + +  现在对当前 KnowStreaming 支持的指标从指标名称、指标单位、指标说明、kafka 版本四个维度进行说明。 + +### 2.3.1、Cluster 指标 + +| 指标名称 | 指标单位 | 指标含义 | kafka 版本 | 企业/开源版指标 | +| ------------------------- | -------- | ------------------------------------ | ---------------- | --------------- | +| HealthScore | 分 | 集群总体的健康分 | 全部版本 | 开源版 | +| HealthCheckPassed | 个 | 集群总体健康检查通过数 | 全部版本 | 开源版 | +| HealthCheckTotal | 个 | 集群总体健康检查总数 | 全部版本 | 开源版 | +| HealthScore_Topics | 分 | 集群 Topics 的健康分 | 全部版本 | 开源版 | +| HealthCheckPassed_Topics | 个 | 集群 Topics 健康检查通过数 | 全部版本 | 开源版 | +| HealthCheckTotal_Topics | 个 | 集群 Topics 健康检查总数 | 全部版本 | 开源版 | +| HealthScore_Brokers | 分 | 集群 Brokers 的健康分 | 全部版本 | 开源版 | +| HealthCheckPassed_Brokers | 个 | 集群 Brokers 健康检查通过数 | 全部版本 | 开源版 | +| HealthCheckTotal_Brokers | 个 | 集群 Brokers 健康检查总数 | 全部版本 | 开源版 | +| HealthScore_Groups | 分 | 集群 Groups 的健康分 | 全部版本 | 开源版 | +| HealthCheckPassed_Groups | 个 | 集群 Groups 健康检查总数 | 全部版本 | 开源版 | +| HealthCheckTotal_Groups | 个 | 集群 Groups 健康检查总数 | 全部版本 | 开源版 | +| HealthScore_Cluster | 分 | 集群自身的健康分 | 全部版本 | 开源版 | +| HealthCheckPassed_Cluster | 个 | 集群自身健康检查通过数 | 全部版本 | 开源版 | +| HealthCheckTotal_Cluster | 个 | 集群自身健康检查总数 | 全部版本 | 开源版 | +| TotalRequestQueueSize | 个 | 集群中总的请求队列数 | 全部版本 | 开源版 | +| TotalResponseQueueSize | 个 | 集群中总的响应队列数 | 全部版本 | 开源版 | +| EventQueueSize | 个 | 集群中 Controller 的 EventQueue 大小 | 2.0.0 及以上版本 | 开源版 | +| ActiveControllerCount | 个 | 集群中存活的 Controller 数 | 全部版本 | 开源版 | +| TotalProduceRequests | 个 | 集群中的 Produce 每秒请求数 | 全部版本 | 开源版 | +| TotalLogSize | byte | 集群总的已使用的磁盘大小 | 全部版本 | 开源版 | +| ConnectionsCount | 个 | 集群的连接(Connections)个数 | 全部版本 | 开源版 | +| Zookeepers | 个 | 集群中存活的 zk 节点个数 | 全部版本 | 开源版 | +| ZookeepersAvailable | 是/否 | ZK 地址是否合法 | 全部版本 | 开源版 | +| Brokers | 个 | 集群的 broker 的总数 | 全部版本 | 开源版 | +| BrokersAlive | 个 | 集群的 broker 的存活数 | 全部版本 | 开源版 | +| BrokersNotAlive | 个 | 集群的 broker 的未存活数 | 全部版本 | 开源版 | +| Replicas | 个 | 集群中 Replica 的总数 | 全部版本 | 开源版 | +| Topics | 个 | 集群中 Topic 的总数 | 全部版本 | 开源版 | +| Partitions | 个 | 集群的 Partitions 总数 | 全部版本 | 开源版 | +| PartitionNoLeader | 个 | 集群中的 PartitionNoLeader 总数 | 全部版本 | 开源版 | +| PartitionMinISR_S | 个 | 集群中的小于 PartitionMinISR 总数 | 全部版本 | 开源版 | +| PartitionMinISR_E | 个 | 集群中的等于 PartitionMinISR 总数 | 全部版本 | 开源版 | +| PartitionURP | 个 | 集群中的未同步的 
Partition 总数 | 全部版本 | 开源版 | +| MessagesIn | 条/s | 集群每条消息写入条数 | 全部版本 | 开源版 | +| Messages | 条 | 集群总的消息条数 | 全部版本 | 开源版 | +| LeaderMessages | 条 | 集群中 leader 总的消息条数 | 全部版本 | 开源版 | +| BytesIn | byte/s | 集群的每秒写入字节数 | 全部版本 | 开源版 | +| BytesIn_min_5 | byte/s | 集群的每秒写入字节数,5 分钟均值 | 全部版本 | 开源版 | +| BytesIn_min_15 | byte/s | 集群的每秒写入字节数,15 分钟均值 | 全部版本 | 开源版 | +| BytesOut | byte/s | 集群的每秒流出字节数 | 全部版本 | 开源版 | +| BytesOut_min_5 | byte/s | 集群的每秒流出字节数,5 分钟均值 | 全部版本 | 开源版 | +| BytesOut_min_15 | byte/s | 集群的每秒流出字节数,15 分钟均值 | 全部版本 | 开源版 | +| Groups | 个 | 集群中 Group 的总数 | 全部版本 | 开源版 | +| GroupActives | 个 | 集群中 ActiveGroup 的总数 | 全部版本 | 开源版 | +| GroupEmptys | 个 | 集群中 EmptyGroup 的总数 | 全部版本 | 开源版 | +| GroupRebalances | 个 | 集群中 RebalanceGroup 的总数 | 全部版本 | 开源版 | +| GroupDeads | 个 | 集群中 DeadGroup 的总数 | 全部版本 | 开源版 | +| Alive | 是/否 | 集群是否存活,1:存活;0:没有存活 | 全部版本 | 开源版 | +| AclEnable | 是/否 | 集群是否开启 Acl,1:是;0:否 | 全部版本 | 开源版 | +| Acls | 个 | ACL 数 | 全部版本 | 开源版 | +| AclUsers | 个 | ACL-KafkaUser 数 | 全部版本 | 开源版 | +| AclTopics | 个 | ACL-Topic 数 | 全部版本 | 开源版 | +| AclGroups | 个 | ACL-Group 数 | 全部版本 | 开源版 | +| Jobs | 个 | 集群任务总数 | 全部版本 | 开源版 | +| JobsRunning | 个 | 集群 running 任务总数 | 全部版本 | 开源版 | +| JobsWaiting | 个 | 集群 waiting 任务总数 | 全部版本 | 开源版 | +| JobsSuccess | 个 | 集群 success 任务总数 | 全部版本 | 开源版 | +| JobsFailed | 个 | 集群 failed 任务总数 | 全部版本 | 开源版 | +| LoadReBalanceEnable | 是/否 | 是否开启均衡, 1:是;0:否 | 全部版本 | 企业版 | +| LoadReBalanceCpu | 是/否 | CPU 是否均衡, 1:是;0:否 | 全部版本 | 企业版 | +| LoadReBalanceNwIn | 是/否 | BytesIn 是否均衡, 1:是;0:否 | 全部版本 | 企业版 | +| LoadReBalanceNwOut | 是/否 | BytesOut 是否均衡, 1:是;0:否 | 全部版本 | 企业版 | +| LoadReBalanceDisk | 是/否 | Disk 是否均衡, 1:是;0:否 | 全部版本 | 企业版 | + +### 2.3.2、Broker 指标 + +| 指标名称 | 指标单位 | 指标含义 | kafka 版本 | 企业/开源版指标 | +| ----------------------- | -------- | ------------------------------------- | ---------- | --------------- | +| HealthScore | 分 | Broker 健康分 | 全部版本 | 开源版 | +| HealthCheckPassed | 个 | Broker 健康检查通过数 | 全部版本 | 开源版 | +| HealthCheckTotal | 个 | Broker 健康检查总数 | 全部版本 | 开源版 | +| TotalRequestQueueSize | 个 | Broker 的请求队列大小 | 全部版本 | 开源版 | +| TotalResponseQueueSize | 个 | Broker 的应答队列大小 | 全部版本 | 开源版 | +| ReplicationBytesIn | byte/s | Broker 的副本流入流量 | 全部版本 | 开源版 | +| ReplicationBytesOut | byte/s | Broker 的副本流出流量 | 全部版本 | 开源版 | +| MessagesIn | 条/s | Broker 的每秒消息流入条数 | 全部版本 | 开源版 | +| TotalProduceRequests | 个/s | Broker 上 Produce 的每秒请求数 | 全部版本 | 开源版 | +| NetworkProcessorAvgIdle | % | Broker 的网络处理器的空闲百分比 | 全部版本 | 开源版 | +| RequestHandlerAvgIdle | % | Broker 上请求处理器的空闲百分比 | 全部版本 | 开源版 | +| PartitionURP | 个 | Broker 上的未同步的副本的个数 | 全部版本 | 开源版 | +| ConnectionsCount | 个 | Broker 上网络链接的个数 | 全部版本 | 开源版 | +| BytesIn | byte/s | Broker 的每秒数据写入量 | 全部版本 | 开源版 | +| BytesIn_min_5 | byte/s | Broker 的每秒数据写入量,5 分钟均值 | 全部版本 | 开源版 | +| BytesIn_min_15 | byte/s | Broker 的每秒数据写入量,15 分钟均值 | 全部版本 | 开源版 | +| BytesOut | byte/s | Broker 的每秒数据流出量 | 全部版本 | 开源版 | +| BytesOut_min_5 | byte/s | Broker 的每秒数据流出量,5 分钟均值 | 全部版本 | 开源版 | +| BytesOut_min_15 | byte/s | Broker 的每秒数据流出量,15 分钟均值 | 全部版本 | 开源版 | +| ReassignmentBytesIn | byte/s | Broker 的每秒数据迁移写入量 | 全部版本 | 开源版 | +| ReassignmentBytesOut | byte/s | Broker 的每秒数据迁移流出量 | 全部版本 | 开源版 | +| Partitions | 个 | Broker 上的 Partition 个数 | 全部版本 | 开源版 | +| PartitionsSkew | % | Broker 上的 Partitions 倾斜度 | 全部版本 | 开源版 | +| Leaders | 个 | Broker 上的 Leaders 个数 | 全部版本 | 开源版 | +| LeadersSkew | % | Broker 上的 Leaders 倾斜度 | 全部版本 | 开源版 | +| LogSize | byte | Broker 上的消息容量大小 | 全部版本 | 开源版 | +| Alive | 是/否 | Broker 是否存活,1:存活;0:没有存活 | 全部版本 | 开源版 | + +### 2.3.3、Topic 指标 + +| 指标名称 | 指标单位 | 指标含义 | kafka 版本 | 企业/开源版指标 | +| --------------------- | 
-------- | ------------------------------------- | ---------- | --------------- | +| HealthScore | 分 | 健康分 | 全部版本 | 开源版 | +| HealthCheckPassed | 个 | 健康项检查通过数 | 全部版本 | 开源版 | +| HealthCheckTotal | 个 | 健康项检查总数 | 全部版本 | 开源版 | +| TotalProduceRequests | 条/s | Topic 的 TotalProduceRequests | 全部版本 | 开源版 | +| BytesRejected | 个/s | Topic 的每秒写入拒绝量 | 全部版本 | 开源版 | +| FailedFetchRequests | 个/s | Topic 的 FailedFetchRequests | 全部版本 | 开源版 | +| FailedProduceRequests | 个/s | Topic 的 FailedProduceRequests | 全部版本 | 开源版 | +| ReplicationCount | 个 | Topic 总的副本数 | 全部版本 | 开源版 | +| Messages | 条 | Topic 总的消息数 | 全部版本 | 开源版 | +| MessagesIn | 条/s | Topic 每秒消息条数 | 全部版本 | 开源版 | +| BytesIn | byte/s | Topic 每秒消息写入字节数 | 全部版本 | 开源版 | +| BytesIn_min_5 | byte/s | Topic 每秒消息写入字节数,5 分钟均值 | 全部版本 | 开源版 | +| BytesIn_min_15 | byte/s | Topic 每秒消息写入字节数,15 分钟均值 | 全部版本 | 开源版 | +| BytesOut | byte/s | Topic 每秒消息流出字节数 | 全部版本 | 开源版 | +| BytesOut_min_5 | byte/s | Topic 每秒消息流出字节数,5 分钟均值 | 全部版本 | 开源版 | +| BytesOut_min_15 | byte/s | Topic 每秒消息流出字节数,15 分钟均值 | 全部版本 | 开源版 | +| LogSize | byte | Topic 的大小 | 全部版本 | 开源版 | +| PartitionURP | 个 | Topic 未同步的副本数 | 全部版本 | 开源版 | + +### 2.3.4、Partition 指标 + +| 指标名称 | 指标单位 | 指标含义 | kafka 版本 | 企业/开源版指标 | +| -------------- | -------- | ----------------------------------------- | ---------- | --------------- | +| LogEndOffset | 条 | Partition 中 leader 副本的 LogEndOffset | 全部版本 | 开源版 | +| LogStartOffset | 条 | Partition 中 leader 副本的 LogStartOffset | 全部版本 | 开源版 | +| Messages | 条 | Partition 总的消息数 | 全部版本 | 开源版 | +| BytesIn | byte/s | Partition 的每秒消息流入字节数 | 全部版本 | 开源版 | +| BytesOut | byte/s | Partition 的每秒消息流出字节数 | 全部版本 | 开源版 | +| LogSize | byte | Partition 的大小 | 全部版本 | 开源版 | + +### 2.3.5、Group 指标 + +| 指标名称 | 指标单位 | 指标含义 | kafka 版本 | 企业/开源版指标 | +| ----------------- | -------- | -------------------------- | ---------- | --------------- | +| HealthScore | 分 | 健康分 | 全部版本 | 开源版 | +| HealthCheckPassed | 个 | 健康检查通过数 | 全部版本 | 开源版 | +| HealthCheckTotal | 个 | 健康检查总数 | 全部版本 | 开源版 | +| OffsetConsumed | 条 | Consumer 的 CommitedOffset | 全部版本 | 开源版 | +| LogEndOffset | 条 | Consumer 的 LogEndOffset | 全部版本 | 开源版 | +| Lag | 条 | Group 消费者的 Lag 数 | 全部版本 | 开源版 | +| State | 个 | Group 组的状态 | 全部版本 | 开源版 | diff --git a/docs/dev_guide/本地源码启动手册.md b/docs/dev_guide/本地源码启动手册.md new file mode 100644 index 00000000..1ee63a13 --- /dev/null +++ b/docs/dev_guide/本地源码启动手册.md @@ -0,0 +1,87 @@ +## 6.1、本地源码启动手册 + +### 6.1.1、打包方式 + +`Know Streaming` 采用前后端分离的开发模式,使用 Maven 对项目进行统一的构建管理。maven 在打包构建过程中,会将前后端代码一并打包生成最终的安装包。 + +`Know Streaming` 除了使用安装包启动之外,还可以通过本地源码启动完整的带前端页面的项目,下面我们正式开始介绍本地源码如何启动 `Know Streaming`。 + +### 6.1.2、环境要求 + +**系统支持** + +`windows7+`、`Linux`、`Mac` + +**环境依赖** + +- Maven 3.6.3 +- Node v12.20.0 +- Java 8+ +- MySQL 5.7 +- Idea +- Elasticsearch 7.6 + +### 6.1.3、环境初始化 + +安装好环境信息之后,还需要初始化 MySQL 与 Elasticsearch 信息,包括: + +- 初始化 MySQL 表及数据 +- 初始化 Elasticsearch 索引 + +具体见:[快速开始](./1-quick-start.md) 中的最后一步,部署 KnowStreaming 服务中的初始化相关工作。 + +### 6.1.4、本地启动 + +**第一步:本地打包** + +执行 `mvn install` 可对项目进行前后端同时进行打包,通过该命令,除了可以对后端进行打包之外,还可以将前端相关的静态资源文件也一并打包出来。 + +**第二步:修改配置** + +```yaml +# 修改 km-rest/src/main/resources/application.yml 中相关的配置 + +# 修改MySQL的配置,中间省略了一些非必需修改的配置 +spring: + datasource: + know-streaming: + jdbc-url: 修改为实际MYSQL地址 + username: 修改为实际MYSQL用户名 + password: 修改为实际MYSQL密码 + logi-job: + jdbc-url: 修改为实际MYSQL地址 + username: 修改为实际MYSQL用户名 + password: 修改为实际MYSQL密码 + logi-security: + jdbc-url: 修改为实际MYSQL地址 + username: 修改为实际MYSQL用户名 + password: 修改为实际MYSQL密码 + +# 修改ES的配置,中间省略了一些非必需修改的配置 +es.client.address: 修改为实际ES地址 +``` + +**第三步:配置 IDEA** + +`Know streaming`的 
Main 方法在: + +```java +km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/KnowStreaming.java +``` + +IDEA 更多具体的配置如下图所示: + +![IDEA配置](./assets/startup_using_source_code/IDEA配置.jpg) + +**第四步:启动项目** + +最后就是启动项目,在本地 console 中输出了 `KnowStreaming-KM started` 则表示我们已经成功启动 `Know streaming` 了。 + +### 6.1.5、本地访问 + +`Know streaming` 启动之后,可以访问一些信息,包括: + +- 产品页面:http://localhost:8080 ,默认账号密码:`admin` / `admin2022_` 进行登录。 +- 接口地址:http://localhost:8080/swagger-ui.html 查看后端提供的相关接口。 + +更多信息,详见:[KnowStreaming 官网](http://116.85.24.211/) diff --git a/docs/dev_guide/登录系统对接.md b/docs/dev_guide/登录系统对接.md new file mode 100644 index 00000000..85046ac0 --- /dev/null +++ b/docs/dev_guide/登录系统对接.md @@ -0,0 +1,114 @@ + +![Logo](../assets/KnowStreamingLogo.png) + + + + + +## 登录系统对接 + +### 前言 + +KnowStreaming 除了实现基于本地MySQL的用户登录认证方式外,还实现了基于Ldap的登录认证。 + +但是,登录认证系统并非仅此两种,因此本文将介绍 KnowStreaming 如何对接自有的用户登录认证系统。 + +下面我们正式开始介绍登录系统的对接。 + +### 如何对接? + +- 实现Log-Common中的LoginService的三个接口即可; + +```Java +// LoginService三个方法 +public interface LoginService { + /** + * 验证登录信息,同时记住登录状态 + */ + UserBriefVO verifyLogin(AccountLoginDTO loginDTO, HttpServletRequest request, HttpServletResponse response) throws LogiSecurityException; + + /** + * 登出接口,清楚登录状态 + */ + Result logout(HttpServletRequest request, HttpServletResponse response); + + /** + * 检查是否已经登录 + */ + boolean interceptorCheck(HttpServletRequest request, HttpServletResponse response, + String requestMappingValue, + List whiteMappingValues) throws IOException; +} + +``` + +没错,登录就是如此的简单,仅仅只需要实现上述的三个接口即可。说了半天,具体如何做呢,能不能给个例子? + + +### 有没有例子? + +我们以Ldap对接为例,说明KnowStreaming如何对接登录认证系统。 + +```Java +// 继承 LoginService 接口 +public class LdapLoginServiceImpl implements LoginService { + private static final Logger LOGGER = LoggerFactory.getLogger(LdapLoginServiceImpl.class); + + // Ldap校验 + @Autowired + private LdapAuthentication ldapAuthentication; + + @Override + public UserBriefVO verifyLogin(AccountLoginDTO loginDTO, + HttpServletRequest request, + HttpServletResponse response) throws LogiSecurityException { + String decodePasswd = AESUtils.decrypt(loginDTO.getPw()); + + // 去LDAP验证账密 + LdapPrincipal ldapAttrsInfo = ldapAuthentication.authenticate(loginDTO.getUserName(), decodePasswd); + if (ldapAttrsInfo == null) { + // 用户不存在,正常来说上如果有问题,上一步会直接抛出异常 + throw new LogiSecurityException(ResultCode.USER_NOT_EXISTS); + } + + // 进行业务相关操作 + + // 记录登录状态,Ldap因为无法记录登录状态,因此有KnowStreaming进行记录 + initLoginContext(request, response, loginDTO.getUserName(), user.getId()); + return CopyBeanUtil.copy(user, UserBriefVO.class); + } + + @Override + public Result logout(HttpServletRequest request, HttpServletResponse response) { + request.getSession().invalidate(); + response.setStatus(REDIRECT_CODE); + return Result.buildSucc(Boolean.TRUE); + } + + @Override + public boolean interceptorCheck(HttpServletRequest request, HttpServletResponse response, String requestMappingValue, List whiteMappingValues) throws IOException { + // 其他处理 + + // 检查是否已经登录 + String userName = HttpRequestUtil.getOperator(request); + if (StringUtils.isEmpty(userName)) { + // 未登录,则进行登出 + logout(request, response); + return Boolean.FALSE; + } + + // 其他业务处理 + + return Boolean.TRUE; + } +} + +``` + + +### 背后原理是? 
+ +- KnowStreaming 会拦截所有的接口请求; +- 拦截到请求之后,如果是登录的请求,则调用LoginService.verifyLogin(); +- 拦截到请求之后,如果是登出的请求,则调用LoginService.logout(); +- 拦截到请求之后,如果是其他请求,则调用LoginService.interceptorCheck(); \ No newline at end of file diff --git a/docs/dev_guide/解决连接JMX失败.md b/docs/dev_guide/解决连接JMX失败.md new file mode 100644 index 00000000..f66a5ab0 --- /dev/null +++ b/docs/dev_guide/解决连接JMX失败.md @@ -0,0 +1,101 @@ + +![Logo](../assets/KnowStreamingLogo.png) + + +## JMX-连接失败问题解决 + +- [JMX-连接失败问题解决](#jmx-连接失败问题解决) + - [1、问题&说明](#1问题说明) + - [2、解决方法](#2解决方法) + - [3、解决方法 —— 认证的JMX](#3解决方法--认证的jmx) + +集群正常接入Logi-KafkaManager之后,即可以看到集群的Broker列表,此时如果查看不了Topic的实时流量,或者是Broker的实时流量信息时,那么大概率就是JMX连接的问题了。 + +下面我们按照步骤来一步一步的检查。 + +### 1、问题&说明 + +**类型一:JMX配置未开启** + +未开启时,直接到`2、解决方法`查看如何开启即可。 + +![check_jmx_opened](./assets/connect_jmx_failed/check_jmx_opened.jpg) + + +**类型二:配置错误** + +`JMX`端口已经开启的情况下,有的时候开启的配置不正确,此时也会导致出现连接失败的问题。这里大概列举几种原因: + +- `JMX`配置错误:见`2、解决方法`。 +- 存在防火墙或者网络限制:网络通的另外一台机器`telnet`试一下看是否可以连接上。 +- 需要进行用户名及密码的认证:见`3、解决方法 —— 认证的JMX`。 + + +错误日志例子: +``` +# 错误一: 错误提示的是真实的IP,这样的话基本就是JMX配置的有问题了。 +2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:192.168.0.1 port:9999. +java.rmi.ConnectException: Connection refused to host: 192.168.0.1; nested exception is: + + +# 错误二:错误提示的是127.0.0.1这个IP,这个是机器的hostname配置的可能有问题。 +2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:127.0.0.1 port:9999. +java.rmi.ConnectException: Connection refused to host: 127.0.0.1;; nested exception is: +``` + +### 2、解决方法 + +这里仅介绍一下比较通用的解决方式,如若有更好的方式,欢迎大家指导告知一下。 + +修改`kafka-server-start.sh`文件: +``` +# 在这个下面增加JMX端口的配置 +if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then + export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G" + export JMX_PORT=9999 # 增加这个配置, 这里的数值并不一定是要9999 +fi +``` + +  + +修改`kafka-run-class.sh`文件 +``` +# JMX settings +if [ -z "$KAFKA_JMX_OPTS" ]; then + KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=${当前机器的IP}" +fi + +# JMX port to use +if [ $JMX_PORT ]; then + KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT" +fi +``` + + +### 3、解决方法 —— 认证的JMX + +如果您是直接看的这个部分,建议先看一下上一节:`2、解决方法`以确保`JMX`的配置没有问题了。 + +在JMX的配置等都没有问题的情况下,如果是因为认证的原因导致连接不了的,此时可以使用下面介绍的方法进行解决。 + +**当前这块后端刚刚开发完成,可能还不够完善,有问题随时沟通。** + +`Logi-KafkaManager 2.2.0+`之后的版本后端已经支持`JMX`认证方式的连接,但是还没有界面,此时我们可以往`cluster`表的`jmx_properties`字段写入`JMX`的认证信息。 + +这个数据是`json`格式的字符串,例子如下所示: + +```json +{ + "maxConn": 10, # KM对单台Broker的最大JMX连接数 + "username": "xxxxx", # 用户名 + "password": "xxxx", # 密码 + "openSSL": true, # 开启SSL, true表示开启ssl, false表示关闭 +} +``` + +  + +SQL的例子: +```sql +UPDATE cluster SET jmx_properties='{ "maxConn": 10, "username": "xxxxx", "password": "xxxx", "openSSL": false }' where id={xxx}; +``` \ No newline at end of file diff --git a/docs/install_guide/config_description.md b/docs/install_guide/config_description.md deleted file mode 100644 index 04335e29..00000000 --- a/docs/install_guide/config_description.md +++ /dev/null @@ -1,107 +0,0 @@ - ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - -# 配置说明 - -```yaml -server: - port: 8080 # 服务端口 - tomcat: - accept-count: 1000 - max-connections: 10000 - max-threads: 800 - min-spare-threads: 100 - -spring: - application: - name: kafkamanager - datasource: - 
kafka-manager: # 数据库连接配置 - jdbc-url: jdbc:mysql://127.0.0.1:3306/kafka_manager?characterEncoding=UTF-8&serverTimezone=GMT%2B8 #数据库的地址 - username: admin # 用户名 - password: admin # 密码 - driver-class-name: com.mysql.jdbc.Driver - main: - allow-bean-definition-overriding: true - - profiles: - active: dev # 启用的配置 - servlet: - multipart: - max-file-size: 100MB - max-request-size: 100MB - -logging: - config: classpath:logback-spring.xml - -custom: - idc: cn # 部署的数据中心, 忽略该配置, 后续会进行删除 - jmx: - max-conn: 10 # 和单台 broker 的最大JMX连接数 - store-metrics-task: - community: - broker-metrics-enabled: true # 社区部分broker metrics信息收集开关, 关闭之后metrics信息将不会进行收集及写DB - topic-metrics-enabled: true # 社区部分topic的metrics信息收集开关, 关闭之后metrics信息将不会进行收集及写DB - didi: - app-topic-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标,因此默认关闭 - topic-request-time-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标,因此默认关闭 - topic-throttled-metrics-enabled: false # 滴滴埋入的指标, 社区AK不存在该指标,因此默认关闭 - save-days: 7 #指标在DB中保持的天数,-1表示永久保存,7表示保存近7天的数据 - -# 任务相关的开关 -task: - op: - sync-topic-enabled: false # 未落盘的Topic定期同步到DB中 - order-auto-exec: # 工单自动化审批线程的开关 - topic-enabled: false # Topic工单自动化审批开关, false:关闭自动化审批, true:开启 - app-enabled: false # App工单自动化审批开关, false:关闭自动化审批, true:开启 - -account: # ldap相关的配置, 社区版本暂时支持不够完善,可以先忽略,欢迎贡献代码对这块做优化 - ldap: - -kcm: # 集群升级部署相关的功能,需要配合夜莺及S3进行使用,这块我们后续专门补充一个文档细化一下,牵扯到kcm_script.sh脚本的修改 - enabled: false # 默认关闭 - storage: - base-url: http://127.0.0.1 # 存储地址 - n9e: - base-url: http://127.0.0.1:8004 # 夜莺任务中心的地址 - user-token: 12345678 # 夜莺用户的token - timeout: 300 # 集群任务的超时时间,单位秒 - account: root # 集群任务使用的账号 - script-file: kcm_script.sh # 集群任务的脚本 - -monitor: # 监控告警相关的功能,需要配合夜莺进行使用 - enabled: false # 默认关闭,true就是开启 - n9e: - nid: 2 - user-token: 1234567890 - mon: - # 夜莺 mon监控服务 地址 - base-url: http://127.0.0.1:8032 - sink: - # 夜莺 transfer上传服务 地址 - base-url: http://127.0.0.1:8006 - rdb: - # 夜莺 rdb资源服务 地址 - base-url: http://127.0.0.1:80 - -# enabled: 表示是否开启监控告警的功能, true: 开启, false: 不开启 -# n9e.nid: 夜莺的节点ID -# n9e.user-token: 用户的密钥,在夜莺的个人设置中 -# n9e.mon.base-url: 监控地址 -# n9e.sink.base-url: 数据上报地址 -# n9e.rdb.base-url: 用户资源中心地址 - -notify: # 通知的功能 - kafka: # 默认通知发送到kafka的指定Topic中 - cluster-id: 95 # Topic的集群ID - topic-name: didi-kafka-notify # Topic名称 - order: # 部署的KM的地址 - detail-url: http://127.0.0.1 -``` diff --git a/docs/install_guide/install_guide_cn.md b/docs/install_guide/install_guide_cn.md deleted file mode 100644 index 0130bd55..00000000 --- a/docs/install_guide/install_guide_cn.md +++ /dev/null @@ -1,93 +0,0 @@ - ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - -# 安装手册 - -## 1、环境依赖 - -如果是以Release包进行安装的,则仅安装`Java`及`MySQL`即可。如果是要先进行源码包进行打包,然后再使用,则需要安装`Maven`及`Node`环境。 - -- `Java 8+`(运行环境需要) -- `MySQL 5.7`(数据存储) -- `Maven 3.5+`(后端打包依赖) -- `Node 10+`(前端打包依赖) - ---- - -## 2、获取安装包 - -**1、Release直接下载** - -这里如果觉得麻烦,然后也不想进行二次开发,则可以直接下载Release包,下载地址:[Github Release包下载地址](https://github.com/didi/Logi-KafkaManager/releases) - -如果觉得Github的下载地址太慢了,也可以进入`Logi-KafkaManager`的用户群获取,群地址在README中。 - - -**2、源代码进行打包** - -下载好代码之后,进入`Logi-KafkaManager`的主目录,执行`mvn -Prelease-kafka-manager -Dmaven.test.skip=true clean install -U `命令即可, -执行完成之后会在`distribution/target`目录下面生成一个`kafka-manager-*.tar.gz`。 -和一个`kafka-manager-*.zip` 文件,随便任意一个压缩包都可以; -当然此时同级目录有一个已经解压好的文件夹; - - - ---- - -## 3. 
解压安装包 -解压完成后; 在文件目录中可以看到有`kafka-manager/conf/create_mysql_table.sql` 有个mysql初始化文件 -先初始化DB - - -## 4、MySQL-DB初始化 - -执行[create_mysql_table.sql](../../distribution/conf/create_mysql_table.sql)中的SQL命令,从而创建所需的MySQL库及表,默认创建的库名是`logi_kafka_manager`。 - -``` -# 示例: -mysql -uXXXX -pXXX -h XXX.XXX.XXX.XXX -PXXXX < ./create_mysql_table.sql -``` - ---- - -## 5.修该配置 -请将`conf/application.yml.example` 文件复制一份出来命名为`application.yml` 放在同级目录:conf/application.yml ; -并且修改配置; 当然不修改的话 就会用默认的配置; -至少 mysql配置成自己的吧 - - -## 6、启动/关闭 -解压包中有启动和关闭脚本 -`kafka-manager/bin/shutdown.sh` -`kafka-manager/bin/startup.sh` - -执行 sh startup.sh 启动 -执行 sh shutdown.sh 关闭 - - - -### 6、使用 - -本地启动的话,访问`http://localhost:8080`,输入帐号及密码(默认`admin/admin`)进行登录。更多参考:[kafka-manager 用户使用手册](../user_guide/user_guide_cn.md) - -### 7. 升级 - -如果是升级版本,请查看文件 [kafka-manager 升级手册](../../distribution/upgrade_config.md) - 在您下载的启动包(V2.5及其后)中也有记录,在 kafka-manager/upgrade_config.md 中 - - -### 8. 在IDE中启动 -> 如果想参与开发或者想在IDE中启动的话 -> 先执行 `mvn -Dmaven.test.skip=true clean install -U ` -> -> 然后这个时候可以选择去 [pom.xml](../../pom.xml) 中将`kafka-manager-console`模块注释掉; -> 注释是因为每次install的时候都会把前端文件`kafka-manager-console`重新打包进`kafka-manager-web` -> -> 完事之后,只需要直接用IDE启动运行`kafka-manager-web`模块中的 -> com.xiaojukeji.kafka.manager.web.MainApplication main方法就行了 \ No newline at end of file diff --git a/docs/install_guide/install_guide_docker_cn.md b/docs/install_guide/install_guide_docker_cn.md deleted file mode 100644 index 85617867..00000000 --- a/docs/install_guide/install_guide_docker_cn.md +++ /dev/null @@ -1,132 +0,0 @@ ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - - -## 基于Docker部署Logikm - -为了方便用户快速的在自己的环境搭建Logikm,可使用docker快速搭建 - -### 部署Mysql - -```shell -docker run --name mysql -p 3306:3306 -d registry.cn-hangzhou.aliyuncs.com/zqqq/logikm-mysql:5.7.37 -``` - -可选变量参考[文档](https://hub.docker.com/_/mysql) - -默认参数 - -* MYSQL_ROOT_PASSWORD:root - - - -### 部署Logikm Allinone - -> 前后端部署在一起 - -```shell -docker run --name logikm -p 8080:8080 --link mysql -d registry.cn-hangzhou.aliyuncs.com/zqqq/logikm:2.6.0 -``` - -参数详解: - -* -p 映射容器8080端口至宿主机的8080 -* --link 连接mysql容器 - - - -### 部署前后端分离 - -#### 部署后端 Logikm-backend - -```shell -docker run --name logikm-backend --link mysql -d registry.cn-hangzhou.aliyuncs.com/zqqq/logikm-backend:2.6.0 -``` - -可选参数: - -* -e LOGI_MYSQL_HOST mysql连接地址,默认mysql -* -e LOGI_MYSQL_PORT mysql端口,默认3306 -* -e LOGI_MYSQL_DATABASE 数据库,默认logi_kafka_manager -* -e LOGI_MYSQL_USER mysql用户名,默认root -* -e LOGI_MYSQL_PASSWORD mysql密码,默认root - -#### 部署前端 Logikm-front - -```shell -docker run --name logikm-front -p 8088:80 --link logikm-backend -d registry.cn-hangzhou.aliyuncs.com/zqqq/logikm-front:2.6.0 -``` - - - -### Logi后端可配置参数 - -docker run 运行参数 -e 可指定环境变量如下 - -| 环境变量 | 变量解释 | 默认值 | -| ------------------- | ------------- | ------------------ | -| LOGI_MYSQL_HOST | mysql连接地址 | mysql | -| LOGI_MYSQL_PORT | mysql端口 | 3306 | -| LOGI_MYSQL_DATABASE | 数据库 | logi_kafka_manager | -| LOGI_MYSQL_USER | mysql用户名 | root | -| LOGI_MYSQL_PASSWORD | mysql密码 | root | - - - - -## 基于Docker源码构建 - -根据此文档用户可自行通过Docker 源码构建 Logikm - -### 构建Mysql - -```shell -docker build -t mysql:{TAG} -f container/dockerfiles/mysql/Dockerfile container/dockerfiles/mysql -``` - -### 构建Allinone - -将前后端打包在一起 - -```shell -docker build -t logikm:{TAG} . 
-``` - -可选参数 --build-arg : - -* MAVEN_VERSION maven镜像tag -* JAVA_VERSION java镜像tag - - - -### 构建前后端分离 - -前后端分离打包 - -#### 构建后端 - -```shell -docker build --build-arg CONSOLE_ENABLE=false -t logikm-backend:{TAG} . -``` - -参数: - -* MAVEN_VERSION maven镜像tag -* JAVA_VERSION java镜像tag - -* CONSOLE_ENABLE=false 不构建console模块 - -#### 构建前端 - -```shell -docker build -t logikm-front:{TAG} -f kafka-manager-console/Dockerfile kafka-manager-console -``` - -可选参数: - -* --build-arg:OUTPUT_PATH 修改默认打包输出路径,默认当前目录下的dist \ No newline at end of file diff --git a/docs/install_guide/install_guide_nginx_cn.md b/docs/install_guide/install_guide_nginx_cn.md deleted file mode 100644 index ad55f947..00000000 --- a/docs/install_guide/install_guide_nginx_cn.md +++ /dev/null @@ -1,94 +0,0 @@ ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - -## nginx配置-安装手册 - -# 一、独立部署 - -请参考参考:[kafka-manager 安装手册](install_guide_cn.md) - -# 二、nginx配置 - -## 1、独立部署配置 - -``` - #nginx 根目录访问配置如下 - location / { - proxy_pass http://ip:port; - } -``` - -## 2、前后端分离&配置多个静态资源 - -以下配置解决`nginx代理多个静态资源`,实现项目前后端分离,版本更新迭代。 - -### 1、源码下载 - -根据所需版本下载对应代码,下载地址:[Github 下载地址](https://github.com/didi/Logi-KafkaManager) - -### 2、修改webpack.config.js 配置文件 - -修改`kafka-manager-console`模块 `webpack.config.js` -以下所有xxxx为nginx代理路径和打包静态文件加载前缀,xxxx可根据需求自行更改。 - -``` - cd kafka-manager-console - vi webpack.config.js - - # publicPath默认打包方式根目录下,修改为nginx代理访问路径。 - let publicPath = '/xxxx'; -``` - -### 3、打包 - -``` - - npm cache clean --force && npm install - -``` - -ps:如果打包过程中报错,运行`npm install clipboard@2.0.6`,相反请忽略! - -### 4、部署 - -#### 1、前段静态文件部署 - -静态资源 `../kafka-manager-web/src/main/resources/templates` - -上传到指定目录,目前以`root目录`做demo - -#### 2、上传jar包并启动,请参考:[kafka-manager 安装手册](install_guide_cn.md) - -#### 3、修改nginx 配置 - -``` - location /xxxx { - # 静态文件存放位置 - alias /root/templates; - try_files $uri $uri/ /xxxx/index.html; - index index.html; - } - - location /api { - proxy_pass http://ip:port; - } - #后代端口建议使用/api,如果冲突可以使用以下配置 - #location /api/v2 { - # proxy_pass http://ip:port; - #} - #location /api/v1 { - # proxy_pass http://ip:port; - #} -``` - - - - - - - diff --git a/docs/install_guide/单机部署手册.md b/docs/install_guide/单机部署手册.md new file mode 100644 index 00000000..40c9b26e --- /dev/null +++ b/docs/install_guide/单机部署手册.md @@ -0,0 +1,237 @@ +## 前言 + +- 本文以 Centos7 系统为例,系统基础配置要求:4 核 8G +- 按照本文可以快速部署一套单机模式的 KnowStreaming 环境 +- 本文以 v3.0.0-bete 版本为例进行部署,如需其他版本请关注[官网](https://knowstreaming.com/) +- 部署完成后可以通过浏览器输入 IP:PORT 进行访问,默认用户名密码: admin/admin2022\_ +- KnowStreaming 同样支持分布式集群模式,如需部署高可用集群,[请联系我们](https://knowstreaming.com/support-center) + +## 1.1、软件版本及依赖 + +| 软件名 | 版本要求 | 默认端口 | +| ------------- | -------- | -------- | +| Mysql | v5.7+ | 3306 | +| Elasticsearch | v6+ | 8060 | +| JDK | v8+ | - | +| Centos | v6+ | - | +| Ubantu | v16+ | - | + +## 1.2、部署方式选择 + +- Shell 部署(单机版本) + +- 容器化部署(需准备 K8S 环境) + +- 根据操作手册进行手动部署 + +## 1.3、Shell 部署 + +### 1.3.1、在线方式安装 + + #在服务器中下载安装脚本,脚本中会重新安装Mysql + wget https://s3-gzpu.didistatic.com/pub/knowstreaming/deploy_KnowStreaming.sh + + #执行脚本 + sh deploy_KnowStreaming.sh + + #访问测试 + 127.0.0.1:8080 + +### 1.3.2、离线方式安装 + + #将安装包下载到本地且传输到目标服务器 + wget https://s3-gzpu.didistatic.com/pub/knowstreaming/KnowStreaming-3.0.0-beta—offline.tar.gz + + #解压安装包 + tar -zxf KnowStreaming-3.0.0-beta—offline.tar.gz + + #执行安装脚本 + sh deploy_KnowStreaming-offline.sh + + #访问测试 + 127.0.0.1:8080 + +## 1.4、容器化部署 + +### 1.4.1、环境依赖及版本要求 + +- Kubernetes >= 1.14 ,Helm >= 2.17.0 + +- 默认配置为全部安装(elasticsearch + mysql + 
knowstreaming) + +- 如果使用已有的 elasticsearch(7.6.x) 和 mysql(5.7) 只需调整 values.yaml 部分参数即可 + +### 1.4.2、安装方式 + + #下载安装包 + wget https://s3-gzpu.didistatic.com/pub/knowstreaming/knowstreaming-3.0.0-hlem.tgz + + #解压安装包 + tar -zxf knowstreaming-3.0.0-hlem.tgz + + #执行命令(NAMESPACE需要更改为已存在的) + helm install -n [NAMESPACE] knowstreaming knowstreaming-manager/ + + #获取KnowStreaming前端ui的service. 默认nodeport方式.(http://nodeIP:nodeport,默认用户名密码:admin/admin2022_) + +## 1.5、手动部署 + +### 1.5.1、部署流程 + +基础依赖服务部署 ——> KnowStreaming 模块 + +### 1.5.2、基础依赖服务部署 + +#### 如现有环境中已经有相关服务,可跳过对其的安装 + +#### 基础依赖:JAVA11、Mysql、Elasticsearch + +#### 1.5.2.1、安装 Mysql 服务 + +##### 1.5.2.1.1 yum 方式安装 + + #配置yum源 + wget https://dev.mysql.com/get/mysql57-community-release-el7-9.noarch.rpm + rpm -ivh mysql57-community-release-el7-9.noarch.rpm + + #执行安装 + yum -y install mysql-server mysql-client + + #服务启动 + systemctl start mysqld + + #获取初始密码并修改 + old_pass=`grep 'temporary password' /var/log/mysqld.log | awk '{print $NF}' | tail -n 1` + + mysql -NBe "alter user USER() identified by 'Didi_km_678';" --connect-expired-password -uroot -p$old_pass + +##### 1.5.2.1.2、rpm 包方式安装 + + #下载安装包 + wget https://s3-gzpu.didistatic.com/knowsearch/mysql5.7.tar.gz + + #解压到指定目录 + tar -zxf mysql5.7.tar.gz -C /tmp/ + + #执行安装 + yum -y localinstall /tmp/libaio-*.rpm /tmp/mysql-*.rpm + + #服务启动 + systemctl start mysqld + + + #获取初始密码并修改 + old_pass=`grep 'temporary password' /var/log/mysqld.log | awk '{print $NF}' | tail -n 1` + + mysql -NBe "alter user USER() identified by 'Didi_km_678';" --connect-expired-pa + ssword -uroot -p$old_pass + +#### 1.5.2.2、配置 JAVA 环境 + +#下载安装包 +wget https://s3-gzpu.didistatic.com/pub/jdk11.tar.gz #解压到指定目录 +tar -zxf jdk11.tar.gz -C /usr/local/ #更改目录名 +mv /usr/local/jdk-11.0.2 /usr/local/java11 #添加到环境变量 +echo "export JAVA_HOME=/usr/local/java11" >> ~/.bashrc +echo "export CLASSPATH=/usr/java/java11/lib" >> ~/.bashrc +echo "export PATH=\$JAVA_HOME/bin:\$PATH:\$HOME/bin" >> ~/.bashrc +source ~/.bashrc + +#### 1.5.2.3、Elasticsearch 实例搭建 + +#### Elasticsearch 元数据集群来支持平台核心指标数据的存储,如集群维度指标、节点维度指标等 + +#### 以下安装示例为单节点模式,如需集群部署可以参考[Elasticsearch 官方文档](https://www.elastic.co/guide/en/elasticsearch/reference/7.6/elasticsearch-intro.html) + + #下载安装包 + wget https://s3-gzpu.didistatic.com/pub/elasticsearch.tar.gz + + #创建ES数据存储目录 + mkdir -p /data/es_data + + #创建ES所属用户 + useradd arius + + #配置用户的打开文件数 + echo "arius soft nofile 655350" >>/etc/security/limits.conf + echo "arius hard nofile 655350" >>/etc/security/limits.conf + echo "vm.max_map_count = 655360" >>/etc/sysctl.conf + sysctl -p + + #解压安装包 + tar -zxf elasticsearch.tar.gz -C /data/ + + #更改目录所属组 + chown -R arius:arius /data/ + + #修改配置文件(参考以下配置) + vim /data/elasticsearch/config/elasticsearch.yml + cluster.name: km_es + node.name: es-node1 + node.master: true + node.data: true + path.data: /data/es_data + http.port: 8060 + discovery.seed_hosts: ["127.0.0.1:9300"] + + #修改内存配置 + vim /data/elasticsearch/config/jvm.options + -Xms2g + -Xmx2g + + #启动服务 + su - arius + export JAVA_HOME=/usr/local/java11 + sh /data/elasticsearch/control.sh start + + #确认状态 + sh /data/elasticsearch/control.sh status + +### 1.5.3、KnowStreaming 服务部署 + +#### 以 KnowStreaming 为例 + + #下载安装包 + wget wget https://s3-gzpu.didistatic.com/pub/knowstreaming/KnowStreaming-3.0.0-beta.tar.gz + + #解压安装包到指定目录 + tar -zxf KnowStreaming-3.0.0-beta.tar.gz -C /data/ + + #修改启动脚本并加入systemd管理 + cd /data/KnowStreaming/ + + #创建相应的库和导入初始化数据 + mysql -uroot -pDidi_km_678 -e "create database know_streaming;" + mysql -uroot -pDidi_km_678 know_streaming < 
./init/sql/ddl-ks-km.sql + mysql -uroot -pDidi_km_678 know_streaming < ./init/sql/ddl-logi-job.sql + mysql -uroot -pDidi_km_678 know_streaming < ./init/sql/ddl-logi-security.sql + mysql -uroot -pDidi_km_678 know_streaming < ./init/sql/dml-ks-km.sql + mysql -uroot -pDidi_km_678 know_streaming < ./init/sql/dml-logi.sql + + #创建elasticsearch初始化数据 + sh ./init/template/template.sh + + #修改配置文件 + vim ./conf/application.yml + + #监听端口 + server: + +port: 8080 # web 服务端口 +tomcat: +accept-count: 1000 +max-connections: 10000 + + #elasticsearch地址 + es.client.address: 127.0.0.1:8060 + + #数据库配置(一共三处地方,修改正确的mysql地址和数据库名称以及用户名密码) + jdbc-url: jdbc:mariadb://127.0.0.1:3306/know-streaming?..... + username: root + password: Didi_km_678 + + #启动服务 + cd /data/KnowStreaming/bin/ + sh startup.sh + +#### 打开浏览器输入 IP 地址+端口测试(默认端口 8080),用户名密码: admin/admin2022\_ diff --git a/docs/install_guide/源码编译打包手册.md b/docs/install_guide/源码编译打包手册.md new file mode 100644 index 00000000..625ce3a7 --- /dev/null +++ b/docs/install_guide/源码编译打包手册.md @@ -0,0 +1,70 @@ + +![Logo](../assets/KnowStreamingLogo.png) + + +# `Know Streaming` 源码编译打包手册 + +## 1、环境信息 + +**系统支持** + +`windows7+`、`Linux`、`Mac` + +**环境依赖** + +- Maven 3.6.3 (后端) +- Node v12.20.0/v14.17.3 (前端) +- Java 8+ (后端) + +## 2、编译打包 + +整个工程中,除了`km-console`为前端模块之外,其他模块都是后端工程相关模块。 + +因此,如果前后端合并打包,则打对整个工程进行打包;如果前端单独打包,则仅打包 `km-console` 中的代码;如果是仅需要后端打包,则在顶层 `pom.xml` 中去掉 `km-console`模块,然后进行打包。 + +具体见下面描述。 + + + +### 2.1、前后端合并打包 + +1. 下载源码; +2. 进入 `KS-KM` 工程目录,执行 `mvn -Prelease-package -Dmaven.test.skip=true clean install -U` 命令; +3. 打包命令执行完成后,会在 `km-dist/target` 目录下面生成一个 `KnowStreaming-*.tar.gz` 的安装包。 + + +### 2.2、前端单独打包 + +1. 下载源码; +2. 进入 `KS-KM/km-console` 工程目录; +3. 执行 `npm run build`命令,会在 `KS-KM/km-console` 目录下生成一个名为 `pub` 的前端静态资源包; + + + +### 2.3、后端单独打包 + +1. 下载源码; +2. 修改顶层 `pom.xml` ,去掉其中的 `km-console` 模块,如下所示; +```xml + + + km-common + km-persistence + km-core + km-biz + km-extends/km-account + km-extends/km-monitor + km-extends/km-license + km-extends/km-rebalance + km-task + km-collector + km-rest + km-dist + + ``` +3. 执行 `mvn -U clean package -Dmaven.test.skip=true`命令; +4. 执行完成之后会在 `KS-KM/km-rest/target` 目录下面生成一个 `ks-km.jar` 即为KS的后端部署的Jar包,也可以执行 `mvn -Prelease-package -Dmaven.test.skip=true clean install -U` 生成的tar包也仅有后端服务的功能; + + + + diff --git a/docs/install_guide/版本升级手册.md b/docs/install_guide/版本升级手册.md new file mode 100644 index 00000000..be117b6f --- /dev/null +++ b/docs/install_guide/版本升级手册.md @@ -0,0 +1,37 @@ +## 版本升级说明 + +### `2.x`版本 升级至 `3.0.0`版本 + +**升级步骤:** + +1. 依旧使用**`2.x 版本的 DB`**,在上面初始化 3.0.0 版本所需数据库表结构及数据; +2. 将 2.x 版本中的集群,在 3.0.0 版本,手动逐一接入; +3. 
将 Topic 业务数据,迁移至 3.0.0 表中,详见下方 SQL; + +**注意事项** + +- 建议升级 3.0.0 版本过程中,保留 2.x 版本的使用,待 3.0.0 版本稳定使用后,再下线 2.x 版本; +- 3.0.0 版本仅需要`集群信息`及`Topic的描述信息`。2.x 版本的 DB 的其他数据 3.0.0 版本都不需要; +- 部署 3.0.0 版本之后,集群、Topic 等指标数据都为空,3.0.0 版本会周期进行采集,运行一段时间之后就会有该数据了,因此不会将 2.x 中的指标数据进行迁移; + +**迁移数据** + +```sql +-- 迁移Topic的备注信息。 +-- 需在 3.0.0 部署完成后,再执行该SQL。 +-- 考虑到 2.x 版本中还存在增量数据,因此建议改SQL周期执行,是的增量数据也能被迁移至 3.0.0 版本中。 + +UPDATE ks_km_topic + INNER JOIN + (SELECT + topic.cluster_id AS cluster_id, + topic.topic_name AS topic_name, + topic.description AS description + FROM topic WHERE description != '' + ) AS t + + ON ks_km_topic.cluster_phy_id = t.cluster_id + AND ks_km_topic.topic_name = t.topic_name + AND ks_km_topic.id > 0 +SET ks_km_topic.description = t.description; +``` diff --git a/docs/user_guide/add_cluster/add_cluster.md b/docs/user_guide/add_cluster/add_cluster.md deleted file mode 100644 index 1774a9be..00000000 --- a/docs/user_guide/add_cluster/add_cluster.md +++ /dev/null @@ -1,49 +0,0 @@ - ---- - -![kafka-manager-logo](../../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - - ---- - -# 集群接入 - -## 主要概念讲解 -面对大规模集群、业务场景复杂的情况,引入Region、逻辑集群的概念 -- Region:划分部分Broker作为一个 Region,用Region定义资源划分的单位,提高扩展性和隔离性。如果部分Topic异常也不会影响大面积的Broker -- 逻辑集群:逻辑集群由部分Region组成,便于对大规模集群按照业务划分、保障能力进行管理 -![op_cluster_arch](assets/op_cluster_arch.png) - -集群的接入总共需要三个步骤,分别是: -1. 接入物理集群:填写机器地址、安全协议等配置信息,接入真实的物理集群 -2. 创建Region:将部分Broker划分为一个Region -3. 创建逻辑集群:逻辑集群由部分Region组成,可根据业务划分、保障等级来创建相应的逻辑集群 - -![op_cluster_flow](assets/op_cluster_flow.png) - - -**备注:接入集群需要2、3两步是因为普通用户的视角下,看到的都是逻辑集群,如果没有2、3两步,那么普通用户看不到任何信息。** - - -## 1、接入物理集群 - -![op_add_cluster](assets/op_add_cluster.jpg) - -如上图所示,填写集群信息,然后点击确定,即可完成集群的接入。因为考虑到分布式部署,添加集群之后,需要稍等**`1分钟`**才可以在界面上看到集群的详细信息。 - -## 2、创建Region - -![op_add_region](assets/op_add_region.jpg) - -如上图所示,填写Region信息,然后点击确定,即可完成Region的创建。 - -备注:Region即为Broker的集合,可以按照业务需要,将Broker归类,从而创建相应的Region。 - -## 3、创建逻辑集群 - -![op_add_logical_cluster](assets/op_add_logical_cluster.jpg) - - -如上图所示,填写逻辑集群信息,然后点击确定,即可完成逻辑集群的创建。 diff --git a/docs/user_guide/add_cluster/assets/op_add_cluster.jpg b/docs/user_guide/add_cluster/assets/op_add_cluster.jpg deleted file mode 100644 index 98d239ae..00000000 Binary files a/docs/user_guide/add_cluster/assets/op_add_cluster.jpg and /dev/null differ diff --git a/docs/user_guide/add_cluster/assets/op_add_logical_cluster.jpg b/docs/user_guide/add_cluster/assets/op_add_logical_cluster.jpg deleted file mode 100644 index 8650fa94..00000000 Binary files a/docs/user_guide/add_cluster/assets/op_add_logical_cluster.jpg and /dev/null differ diff --git a/docs/user_guide/add_cluster/assets/op_add_region.jpg b/docs/user_guide/add_cluster/assets/op_add_region.jpg deleted file mode 100644 index fe2669a6..00000000 Binary files a/docs/user_guide/add_cluster/assets/op_add_region.jpg and /dev/null differ diff --git a/docs/user_guide/add_cluster/assets/op_cluster_arch.png b/docs/user_guide/add_cluster/assets/op_cluster_arch.png deleted file mode 100644 index aa972d9e..00000000 Binary files a/docs/user_guide/add_cluster/assets/op_cluster_arch.png and /dev/null differ diff --git a/docs/user_guide/add_cluster/assets/op_cluster_flow.png b/docs/user_guide/add_cluster/assets/op_cluster_flow.png deleted file mode 100644 index 283f2676..00000000 Binary files a/docs/user_guide/add_cluster/assets/op_cluster_flow.png and /dev/null differ diff --git a/docs/user_guide/assets/LeaderRebalance.png b/docs/user_guide/assets/LeaderRebalance.png deleted file mode 100644 index 66bcfa1e..00000000 Binary files 
a/docs/user_guide/assets/LeaderRebalance.png and /dev/null differ diff --git a/docs/user_guide/assets/Versionmanagement.png b/docs/user_guide/assets/Versionmanagement.png deleted file mode 100644 index 865ebd1b..00000000 Binary files a/docs/user_guide/assets/Versionmanagement.png and /dev/null differ diff --git a/docs/user_guide/assets/alarmhistory.png b/docs/user_guide/assets/alarmhistory.png deleted file mode 100644 index c73216d0..00000000 Binary files a/docs/user_guide/assets/alarmhistory.png and /dev/null differ diff --git a/docs/user_guide/assets/alarmruledetail.png b/docs/user_guide/assets/alarmruledetail.png deleted file mode 100644 index 2a908ef6..00000000 Binary files a/docs/user_guide/assets/alarmruledetail.png and /dev/null differ diff --git a/docs/user_guide/assets/alarmruleex.png b/docs/user_guide/assets/alarmruleex.png deleted file mode 100644 index a3d80742..00000000 Binary files a/docs/user_guide/assets/alarmruleex.png and /dev/null differ diff --git a/docs/user_guide/assets/alarmruleforbidden.png b/docs/user_guide/assets/alarmruleforbidden.png deleted file mode 100644 index 59c65780..00000000 Binary files a/docs/user_guide/assets/alarmruleforbidden.png and /dev/null differ diff --git a/docs/user_guide/assets/alarmruleforbiddenhistory.png b/docs/user_guide/assets/alarmruleforbiddenhistory.png deleted file mode 100644 index 1e7bab38..00000000 Binary files a/docs/user_guide/assets/alarmruleforbiddenhistory.png and /dev/null differ diff --git a/docs/user_guide/assets/alarmrulesent.png b/docs/user_guide/assets/alarmrulesent.png deleted file mode 100644 index 565d5e54..00000000 Binary files a/docs/user_guide/assets/alarmrulesent.png and /dev/null differ diff --git a/docs/user_guide/assets/alarmruletime.png b/docs/user_guide/assets/alarmruletime.png deleted file mode 100644 index 3a1f79bf..00000000 Binary files a/docs/user_guide/assets/alarmruletime.png and /dev/null differ diff --git a/docs/user_guide/assets/appdetailop.png b/docs/user_guide/assets/appdetailop.png deleted file mode 100644 index 9042b6dd..00000000 Binary files a/docs/user_guide/assets/appdetailop.png and /dev/null differ diff --git a/docs/user_guide/assets/applyapp.png b/docs/user_guide/assets/applyapp.png deleted file mode 100644 index 0e3d2f03..00000000 Binary files a/docs/user_guide/assets/applyapp.png and /dev/null differ diff --git a/docs/user_guide/assets/applycluster.png b/docs/user_guide/assets/applycluster.png deleted file mode 100644 index e06c58fa..00000000 Binary files a/docs/user_guide/assets/applycluster.png and /dev/null differ diff --git a/docs/user_guide/assets/applylocated.png b/docs/user_guide/assets/applylocated.png deleted file mode 100644 index 88b7aefb..00000000 Binary files a/docs/user_guide/assets/applylocated.png and /dev/null differ diff --git a/docs/user_guide/assets/applytopicright.png b/docs/user_guide/assets/applytopicright.png deleted file mode 100644 index a7abbf23..00000000 Binary files a/docs/user_guide/assets/applytopicright.png and /dev/null differ diff --git a/docs/user_guide/assets/appmanager.png b/docs/user_guide/assets/appmanager.png deleted file mode 100644 index 9e627607..00000000 Binary files a/docs/user_guide/assets/appmanager.png and /dev/null differ diff --git a/docs/user_guide/assets/appmanagerop.png b/docs/user_guide/assets/appmanagerop.png deleted file mode 100644 index 048fe6ce..00000000 Binary files a/docs/user_guide/assets/appmanagerop.png and /dev/null differ diff --git a/docs/user_guide/assets/appoffline.png b/docs/user_guide/assets/appoffline.png deleted 
file mode 100644 index ae21670f..00000000 Binary files a/docs/user_guide/assets/appoffline.png and /dev/null differ diff --git a/docs/user_guide/assets/apprighttopic.png b/docs/user_guide/assets/apprighttopic.png deleted file mode 100644 index c43921b4..00000000 Binary files a/docs/user_guide/assets/apprighttopic.png and /dev/null differ diff --git a/docs/user_guide/assets/apptopic.png b/docs/user_guide/assets/apptopic.png deleted file mode 100644 index eefce262..00000000 Binary files a/docs/user_guide/assets/apptopic.png and /dev/null differ diff --git a/docs/user_guide/assets/billdata.png b/docs/user_guide/assets/billdata.png deleted file mode 100644 index 075002a4..00000000 Binary files a/docs/user_guide/assets/billdata.png and /dev/null differ diff --git a/docs/user_guide/assets/brokerinfo.png b/docs/user_guide/assets/brokerinfo.png deleted file mode 100644 index 517924c1..00000000 Binary files a/docs/user_guide/assets/brokerinfo.png and /dev/null differ diff --git a/docs/user_guide/assets/brokerinfolist.png b/docs/user_guide/assets/brokerinfolist.png deleted file mode 100644 index af78a657..00000000 Binary files a/docs/user_guide/assets/brokerinfolist.png and /dev/null differ diff --git a/docs/user_guide/assets/brokerpartition.png b/docs/user_guide/assets/brokerpartition.png deleted file mode 100644 index 356c20dd..00000000 Binary files a/docs/user_guide/assets/brokerpartition.png and /dev/null differ diff --git a/docs/user_guide/assets/brokerpartitionop.png b/docs/user_guide/assets/brokerpartitionop.png deleted file mode 100644 index b15653cf..00000000 Binary files a/docs/user_guide/assets/brokerpartitionop.png and /dev/null differ diff --git a/docs/user_guide/assets/brokerrask.png b/docs/user_guide/assets/brokerrask.png deleted file mode 100644 index 92b3cf34..00000000 Binary files a/docs/user_guide/assets/brokerrask.png and /dev/null differ diff --git a/docs/user_guide/assets/brokerraskop.png b/docs/user_guide/assets/brokerraskop.png deleted file mode 100644 index 489fe4e0..00000000 Binary files a/docs/user_guide/assets/brokerraskop.png and /dev/null differ diff --git a/docs/user_guide/assets/brokerregion.png b/docs/user_guide/assets/brokerregion.png deleted file mode 100644 index 661cd3e6..00000000 Binary files a/docs/user_guide/assets/brokerregion.png and /dev/null differ diff --git a/docs/user_guide/assets/brokertable.png b/docs/user_guide/assets/brokertable.png deleted file mode 100644 index 24c60b08..00000000 Binary files a/docs/user_guide/assets/brokertable.png and /dev/null differ diff --git a/docs/user_guide/assets/brokertopic.png b/docs/user_guide/assets/brokertopic.png deleted file mode 100644 index 66de19dc..00000000 Binary files a/docs/user_guide/assets/brokertopic.png and /dev/null differ diff --git a/docs/user_guide/assets/brokertopicana.png b/docs/user_guide/assets/brokertopicana.png deleted file mode 100644 index c9207071..00000000 Binary files a/docs/user_guide/assets/brokertopicana.png and /dev/null differ diff --git a/docs/user_guide/assets/cancelright.png b/docs/user_guide/assets/cancelright.png deleted file mode 100644 index b24df843..00000000 Binary files a/docs/user_guide/assets/cancelright.png and /dev/null differ diff --git a/docs/user_guide/assets/clusterbroker.png b/docs/user_guide/assets/clusterbroker.png deleted file mode 100644 index cbfadd74..00000000 Binary files a/docs/user_guide/assets/clusterbroker.png and /dev/null differ diff --git a/docs/user_guide/assets/clusterbrokerdetail.png b/docs/user_guide/assets/clusterbrokerdetail.png deleted file mode 
100644 index 5b8956d0..00000000 Binary files a/docs/user_guide/assets/clusterbrokerdetail.png and /dev/null differ diff --git a/docs/user_guide/assets/clusterbrokerdetailop.png b/docs/user_guide/assets/clusterbrokerdetailop.png deleted file mode 100644 index 7e974b42..00000000 Binary files a/docs/user_guide/assets/clusterbrokerdetailop.png and /dev/null differ diff --git a/docs/user_guide/assets/clusterbrokermo.png b/docs/user_guide/assets/clusterbrokermo.png deleted file mode 100644 index c9b7ce96..00000000 Binary files a/docs/user_guide/assets/clusterbrokermo.png and /dev/null differ diff --git a/docs/user_guide/assets/clusterbrokerop.png b/docs/user_guide/assets/clusterbrokerop.png deleted file mode 100644 index 7c68cb58..00000000 Binary files a/docs/user_guide/assets/clusterbrokerop.png and /dev/null differ diff --git a/docs/user_guide/assets/clusterdetail.png b/docs/user_guide/assets/clusterdetail.png deleted file mode 100644 index f13dab3f..00000000 Binary files a/docs/user_guide/assets/clusterdetail.png and /dev/null differ diff --git a/docs/user_guide/assets/clusterinfobrief.png b/docs/user_guide/assets/clusterinfobrief.png deleted file mode 100644 index aad69bf5..00000000 Binary files a/docs/user_guide/assets/clusterinfobrief.png and /dev/null differ diff --git a/docs/user_guide/assets/clustertask.png b/docs/user_guide/assets/clustertask.png deleted file mode 100644 index 0eeeac21..00000000 Binary files a/docs/user_guide/assets/clustertask.png and /dev/null differ diff --git a/docs/user_guide/assets/clustertaskdetail.png b/docs/user_guide/assets/clustertaskdetail.png deleted file mode 100644 index e61a8c37..00000000 Binary files a/docs/user_guide/assets/clustertaskdetail.png and /dev/null differ diff --git a/docs/user_guide/assets/clustertopic.png b/docs/user_guide/assets/clustertopic.png deleted file mode 100644 index 178dc042..00000000 Binary files a/docs/user_guide/assets/clustertopic.png and /dev/null differ diff --git a/docs/user_guide/assets/clustertopicop.png b/docs/user_guide/assets/clustertopicop.png deleted file mode 100644 index 18c9def1..00000000 Binary files a/docs/user_guide/assets/clustertopicop.png and /dev/null differ diff --git a/docs/user_guide/assets/configuremanager.png b/docs/user_guide/assets/configuremanager.png deleted file mode 100644 index b9132d4e..00000000 Binary files a/docs/user_guide/assets/configuremanager.png and /dev/null differ diff --git a/docs/user_guide/assets/consumergroup.png b/docs/user_guide/assets/consumergroup.png deleted file mode 100644 index 3d3c2136..00000000 Binary files a/docs/user_guide/assets/consumergroup.png and /dev/null differ diff --git a/docs/user_guide/assets/consumeroffset.png b/docs/user_guide/assets/consumeroffset.png deleted file mode 100644 index c4b855fa..00000000 Binary files a/docs/user_guide/assets/consumeroffset.png and /dev/null differ diff --git a/docs/user_guide/assets/consumertopic.png b/docs/user_guide/assets/consumertopic.png deleted file mode 100644 index 64676f6d..00000000 Binary files a/docs/user_guide/assets/consumertopic.png and /dev/null differ diff --git a/docs/user_guide/assets/createalarmrule.png b/docs/user_guide/assets/createalarmrule.png deleted file mode 100644 index 5e2a77a2..00000000 Binary files a/docs/user_guide/assets/createalarmrule.png and /dev/null differ diff --git a/docs/user_guide/assets/createclustertask.png b/docs/user_guide/assets/createclustertask.png deleted file mode 100644 index 8101e8a1..00000000 Binary files a/docs/user_guide/assets/createclustertask.png and /dev/null 
differ diff --git a/docs/user_guide/assets/createregion.png b/docs/user_guide/assets/createregion.png deleted file mode 100644 index 4d1c5e1e..00000000 Binary files a/docs/user_guide/assets/createregion.png and /dev/null differ diff --git a/docs/user_guide/assets/createtask.png b/docs/user_guide/assets/createtask.png deleted file mode 100644 index 6f40bdb4..00000000 Binary files a/docs/user_guide/assets/createtask.png and /dev/null differ diff --git a/docs/user_guide/assets/createusers.png b/docs/user_guide/assets/createusers.png deleted file mode 100644 index 185c66ed..00000000 Binary files a/docs/user_guide/assets/createusers.png and /dev/null differ diff --git a/docs/user_guide/assets/datacenter.png b/docs/user_guide/assets/datacenter.png deleted file mode 100644 index 6f2a72db..00000000 Binary files a/docs/user_guide/assets/datacenter.png and /dev/null differ diff --git a/docs/user_guide/assets/dealtask.png b/docs/user_guide/assets/dealtask.png deleted file mode 100644 index f0a66066..00000000 Binary files a/docs/user_guide/assets/dealtask.png and /dev/null differ diff --git a/docs/user_guide/assets/deletcluster.png b/docs/user_guide/assets/deletcluster.png deleted file mode 100644 index d1a2c33f..00000000 Binary files a/docs/user_guide/assets/deletcluster.png and /dev/null differ diff --git a/docs/user_guide/assets/deleteconfigure.png b/docs/user_guide/assets/deleteconfigure.png deleted file mode 100644 index ecc64311..00000000 Binary files a/docs/user_guide/assets/deleteconfigure.png and /dev/null differ diff --git a/docs/user_guide/assets/deleteregion.png b/docs/user_guide/assets/deleteregion.png deleted file mode 100644 index 4caa0620..00000000 Binary files a/docs/user_guide/assets/deleteregion.png and /dev/null differ diff --git a/docs/user_guide/assets/editapp.png b/docs/user_guide/assets/editapp.png deleted file mode 100644 index 74357d49..00000000 Binary files a/docs/user_guide/assets/editapp.png and /dev/null differ diff --git a/docs/user_guide/assets/editcluster.png b/docs/user_guide/assets/editcluster.png deleted file mode 100644 index 00961240..00000000 Binary files a/docs/user_guide/assets/editcluster.png and /dev/null differ diff --git a/docs/user_guide/assets/editconfigure.png b/docs/user_guide/assets/editconfigure.png deleted file mode 100644 index 06f5bea2..00000000 Binary files a/docs/user_guide/assets/editconfigure.png and /dev/null differ diff --git a/docs/user_guide/assets/editregion.png b/docs/user_guide/assets/editregion.png deleted file mode 100644 index 31740d46..00000000 Binary files a/docs/user_guide/assets/editregion.png and /dev/null differ diff --git a/docs/user_guide/assets/editroom.png b/docs/user_guide/assets/editroom.png deleted file mode 100644 index c35c5d5f..00000000 Binary files a/docs/user_guide/assets/editroom.png and /dev/null differ diff --git a/docs/user_guide/assets/edittopic.png b/docs/user_guide/assets/edittopic.png deleted file mode 100644 index 94b1c06a..00000000 Binary files a/docs/user_guide/assets/edittopic.png and /dev/null differ diff --git a/docs/user_guide/assets/edituser.png b/docs/user_guide/assets/edituser.png deleted file mode 100644 index dd6dbc23..00000000 Binary files a/docs/user_guide/assets/edituser.png and /dev/null differ diff --git a/docs/user_guide/assets/errordiagnosis.png b/docs/user_guide/assets/errordiagnosis.png deleted file mode 100644 index 16e8a83f..00000000 Binary files a/docs/user_guide/assets/errordiagnosis.png and /dev/null differ diff --git a/docs/user_guide/assets/expiredtopic.png 
b/docs/user_guide/assets/expiredtopic.png deleted file mode 100644 index e73dcc30..00000000 Binary files a/docs/user_guide/assets/expiredtopic.png and /dev/null differ diff --git a/docs/user_guide/assets/faq/jmx_check.jpg b/docs/user_guide/assets/faq/jmx_check.jpg deleted file mode 100644 index d2088660..00000000 Binary files a/docs/user_guide/assets/faq/jmx_check.jpg and /dev/null differ diff --git a/docs/user_guide/assets/helpcenter.png b/docs/user_guide/assets/helpcenter.png deleted file mode 100644 index d510c09b..00000000 Binary files a/docs/user_guide/assets/helpcenter.png and /dev/null differ diff --git a/docs/user_guide/assets/hotpointtopic.png b/docs/user_guide/assets/hotpointtopic.png deleted file mode 100644 index ce6339d3..00000000 Binary files a/docs/user_guide/assets/hotpointtopic.png and /dev/null differ diff --git a/docs/user_guide/assets/limit.png b/docs/user_guide/assets/limit.png deleted file mode 100644 index 0e7292c4..00000000 Binary files a/docs/user_guide/assets/limit.png and /dev/null differ diff --git a/docs/user_guide/assets/logicclusterdele.png b/docs/user_guide/assets/logicclusterdele.png deleted file mode 100644 index cfd68250..00000000 Binary files a/docs/user_guide/assets/logicclusterdele.png and /dev/null differ diff --git a/docs/user_guide/assets/migrationtask.png b/docs/user_guide/assets/migrationtask.png deleted file mode 100644 index 79ff358c..00000000 Binary files a/docs/user_guide/assets/migrationtask.png and /dev/null differ diff --git a/docs/user_guide/assets/migrationtaskdetail.png b/docs/user_guide/assets/migrationtaskdetail.png deleted file mode 100644 index 915b86f7..00000000 Binary files a/docs/user_guide/assets/migrationtaskdetail.png and /dev/null differ diff --git a/docs/user_guide/assets/migrationtasklist.png b/docs/user_guide/assets/migrationtasklist.png deleted file mode 100644 index 080adb87..00000000 Binary files a/docs/user_guide/assets/migrationtasklist.png and /dev/null differ diff --git a/docs/user_guide/assets/migrationtaskset.png b/docs/user_guide/assets/migrationtaskset.png deleted file mode 100644 index 8485a933..00000000 Binary files a/docs/user_guide/assets/migrationtaskset.png and /dev/null differ diff --git a/docs/user_guide/assets/myapplication.png b/docs/user_guide/assets/myapplication.png deleted file mode 100644 index 66b5efe7..00000000 Binary files a/docs/user_guide/assets/myapplication.png and /dev/null differ diff --git a/docs/user_guide/assets/mytopic.png b/docs/user_guide/assets/mytopic.png deleted file mode 100644 index 038a0e89..00000000 Binary files a/docs/user_guide/assets/mytopic.png and /dev/null differ diff --git a/docs/user_guide/assets/offlinecluster.png b/docs/user_guide/assets/offlinecluster.png deleted file mode 100644 index f39074fd..00000000 Binary files a/docs/user_guide/assets/offlinecluster.png and /dev/null differ diff --git a/docs/user_guide/assets/opapplycluster.png b/docs/user_guide/assets/opapplycluster.png deleted file mode 100644 index 366a4945..00000000 Binary files a/docs/user_guide/assets/opapplycluster.png and /dev/null differ diff --git a/docs/user_guide/assets/opcluster.png b/docs/user_guide/assets/opcluster.png deleted file mode 100644 index e398bbc9..00000000 Binary files a/docs/user_guide/assets/opcluster.png and /dev/null differ diff --git a/docs/user_guide/assets/resource_apply/production_consumption_flow.png b/docs/user_guide/assets/resource_apply/production_consumption_flow.png deleted file mode 100644 index 36187c83..00000000 Binary files 
a/docs/user_guide/assets/resource_apply/production_consumption_flow.png and /dev/null differ diff --git a/docs/user_guide/assets/startclustermo.png b/docs/user_guide/assets/startclustermo.png deleted file mode 100644 index d3ae0146..00000000 Binary files a/docs/user_guide/assets/startclustermo.png and /dev/null differ diff --git a/docs/user_guide/assets/stopclustermo.png b/docs/user_guide/assets/stopclustermo.png deleted file mode 100644 index e49a8c1a..00000000 Binary files a/docs/user_guide/assets/stopclustermo.png and /dev/null differ diff --git a/docs/user_guide/assets/strategy.png b/docs/user_guide/assets/strategy.png deleted file mode 100644 index ef63aebe..00000000 Binary files a/docs/user_guide/assets/strategy.png and /dev/null differ diff --git a/docs/user_guide/assets/tasklog.png b/docs/user_guide/assets/tasklog.png deleted file mode 100644 index cba738d0..00000000 Binary files a/docs/user_guide/assets/tasklog.png and /dev/null differ diff --git a/docs/user_guide/assets/taskprogress.png b/docs/user_guide/assets/taskprogress.png deleted file mode 100644 index b1aa68a7..00000000 Binary files a/docs/user_guide/assets/taskprogress.png and /dev/null differ diff --git a/docs/user_guide/assets/ticketdetail.png b/docs/user_guide/assets/ticketdetail.png deleted file mode 100644 index a30f9e7d..00000000 Binary files a/docs/user_guide/assets/ticketdetail.png and /dev/null differ diff --git a/docs/user_guide/assets/topicaccount.png b/docs/user_guide/assets/topicaccount.png deleted file mode 100644 index db969985..00000000 Binary files a/docs/user_guide/assets/topicaccount.png and /dev/null differ diff --git a/docs/user_guide/assets/topicapp.png b/docs/user_guide/assets/topicapp.png deleted file mode 100644 index 4ef7dcdc..00000000 Binary files a/docs/user_guide/assets/topicapp.png and /dev/null differ diff --git a/docs/user_guide/assets/topicapply.png b/docs/user_guide/assets/topicapply.png deleted file mode 100644 index 703dc860..00000000 Binary files a/docs/user_guide/assets/topicapply.png and /dev/null differ diff --git a/docs/user_guide/assets/topicbasicinfo.png b/docs/user_guide/assets/topicbasicinfo.png deleted file mode 100644 index 69e425fe..00000000 Binary files a/docs/user_guide/assets/topicbasicinfo.png and /dev/null differ diff --git a/docs/user_guide/assets/topicconsumerinfo.png b/docs/user_guide/assets/topicconsumerinfo.png deleted file mode 100644 index 465b0aff..00000000 Binary files a/docs/user_guide/assets/topicconsumerinfo.png and /dev/null differ diff --git a/docs/user_guide/assets/topicinfoconnect.png b/docs/user_guide/assets/topicinfoconnect.png deleted file mode 100644 index f84e09b8..00000000 Binary files a/docs/user_guide/assets/topicinfoconnect.png and /dev/null differ diff --git a/docs/user_guide/assets/topicinfoconsumer.png b/docs/user_guide/assets/topicinfoconsumer.png deleted file mode 100644 index c5082ef5..00000000 Binary files a/docs/user_guide/assets/topicinfoconsumer.png and /dev/null differ diff --git a/docs/user_guide/assets/topicoffline.png b/docs/user_guide/assets/topicoffline.png deleted file mode 100644 index eecd34cf..00000000 Binary files a/docs/user_guide/assets/topicoffline.png and /dev/null differ diff --git a/docs/user_guide/assets/topicpartition.png b/docs/user_guide/assets/topicpartition.png deleted file mode 100644 index bbb7fa8c..00000000 Binary files a/docs/user_guide/assets/topicpartition.png and /dev/null differ diff --git a/docs/user_guide/assets/topicresource.png b/docs/user_guide/assets/topicresource.png deleted file mode 100644 index 
8cc6ffb6..00000000 Binary files a/docs/user_guide/assets/topicresource.png and /dev/null differ diff --git a/docs/user_guide/assets/topictable.png b/docs/user_guide/assets/topictable.png deleted file mode 100644 index 39957650..00000000 Binary files a/docs/user_guide/assets/topictable.png and /dev/null differ diff --git a/docs/user_guide/assets/topicunenough.png b/docs/user_guide/assets/topicunenough.png deleted file mode 100644 index 0a28bbd7..00000000 Binary files a/docs/user_guide/assets/topicunenough.png and /dev/null differ diff --git a/docs/user_guide/assets/uploadversion.png b/docs/user_guide/assets/uploadversion.png deleted file mode 100644 index dd667f9a..00000000 Binary files a/docs/user_guide/assets/uploadversion.png and /dev/null differ diff --git a/docs/user_guide/assets/userbill.png b/docs/user_guide/assets/userbill.png deleted file mode 100644 index 4a0a4f66..00000000 Binary files a/docs/user_guide/assets/userbill.png and /dev/null differ diff --git a/docs/user_guide/assets/usercenter.png b/docs/user_guide/assets/usercenter.png deleted file mode 100644 index 386b81e4..00000000 Binary files a/docs/user_guide/assets/usercenter.png and /dev/null differ diff --git a/docs/user_guide/assets/usersmanager.png b/docs/user_guide/assets/usersmanager.png deleted file mode 100644 index eccade00..00000000 Binary files a/docs/user_guide/assets/usersmanager.png and /dev/null differ diff --git a/docs/user_guide/assets/zoomapply.png b/docs/user_guide/assets/zoomapply.png deleted file mode 100644 index 68f37bb9..00000000 Binary files a/docs/user_guide/assets/zoomapply.png and /dev/null differ diff --git a/docs/user_guide/auto-number-title.css b/docs/user_guide/auto-number-title.css deleted file mode 100644 index 2229f03d..00000000 --- a/docs/user_guide/auto-number-title.css +++ /dev/null @@ -1,36 +0,0 @@ -h1 { counter-reset: h2counter; } -h2 { counter-reset: h3counter; } -h3 { counter-reset: h4counter; } -h4 { counter-reset: h5counter; } -h5 { counter-reset: h6counter; } -h6 { } -h2:before { - counter-increment: h2counter; - content: counter(h2counter) ".\0000a0\0000a0"; -} -h3:before { - counter-increment: h3counter; - content: counter(h2counter) "." - counter(h3counter) ".\0000a0\0000a0"; -} -h4:before { - counter-increment: h4counter; - content: counter(h2counter) "." - counter(h3counter) "." - counter(h4counter) ".\0000a0\0000a0"; -} -h5:before { - counter-increment: h5counter; - content: counter(h2counter) "." - counter(h3counter) "." - counter(h4counter) "." - counter(h5counter) ".\0000a0\0000a0"; -} -h6:before { - counter-increment: h6counter; - content: counter(h2counter) "." - counter(h3counter) "." - counter(h4counter) "." - counter(h5counter) "." 
- counter(h6counter) ".\0000a0\0000a0"; -} \ No newline at end of file diff --git a/docs/user_guide/call_api_bypass_login.md b/docs/user_guide/call_api_bypass_login.md deleted file mode 100644 index 7a2feac8..00000000 --- a/docs/user_guide/call_api_bypass_login.md +++ /dev/null @@ -1,49 +0,0 @@ - ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - -# 登录绕过 - -## 背景 - -现在除了开放出来的第三方接口,其他接口都需要走登录认证。 - -但是第三方接口不多,开放出来的能力有限,但是登录的接口又需要登录,非常的麻烦。 - -因此,新增了一个登录绕过的功能,为一些紧急临时的需求,提供一个调用不需要登录的能力。 - -## 使用方式 - -步骤一:接口调用时,在header中,增加如下信息: -```shell -# 表示开启登录绕过 -Trick-Login-Switch : on - -# 登录绕过的用户, 这里可以是admin, 或者是其他的, 但是必须在运维管控->平台管理->用户管理中设置了该用户。 -Trick-Login-User : admin -``` - -  - -步骤二:在运维管控->平台管理->平台配置上,设置允许了该用户以绕过的方式登录 -```shell -# 设置的key,必须是这个 -SECURITY.TRICK_USERS - -# 设置的value,是json数组的格式,例如 -[ "admin", "logi"] -``` - -  - -步骤三:解释说明 - -设置完成上面两步之后,就可以直接调用需要登录的接口了。 - -但是还有一点需要注意,绕过的用户仅能调用他有权限的接口,比如一个普通用户,那么他就只能调用普通的接口,不能去调用运维人员的接口。 - diff --git a/docs/user_guide/faq.md b/docs/user_guide/faq.md index 287cf46a..471595cc 100644 --- a/docs/user_guide/faq.md +++ b/docs/user_guide/faq.md @@ -1,219 +1,90 @@ +![Logo](../assets/KnowStreamingLogo.png) + + +# FAQ + +- [FAQ](#faq) + - [1、支持哪些Kafka版本?](#1支持哪些kafka版本) + - [2、页面流量信息等无数据?](#2页面流量信息等无数据) + - [3、`Jmx`连接失败如何解决?](#3jmx连接失败如何解决) + - [4、有没有 API 文档?](#4有没有-api-文档) + - [5、删除Topic成功后,为何过段时间又出现了?](#5删除topic成功后为何过段时间又出现了) + - [6、如何在不登录的情况下,调用接口?](#6如何在不登录的情况下调用接口) + --- -![kafka-manager-logo](../assets/images/common/logo_name.png) +## 1、支持哪些Kafka版本? -**一站式`Apache Kafka`集群指标监控与运维管控平台** +- 支持 0.10+ 的Kafka版本; +- 支持 ZK 及 Raft 运行模式的Kafka版本; ---- -# FAQ +  -- 0、支持哪些Kafka版本? -- 1、Topic申请、新建监控告警等操作时没有可选择的集群? -- 2、逻辑集群 & Region的用途? -- 3、登录失败? -- 4、页面流量信息等无数据? -- 5、如何对接夜莺的监控告警功能? -- 6、如何使用`MySQL 8`? -- 7、`Jmx`连接失败如何解决? -- 8、`topic biz data not exist`错误及处理方式 -- 9、进程启动后,如何查看API文档 -- 10、如何创建告警组? -- 11、连接信息、耗时信息、磁盘信息为什么没有数据? -- 12、逻辑集群申请审批通过之后为什么看不到逻辑集群? -- 13、heartbeat表关联业务和使用场景是什么? -- 14、集群的删除,是否会真正的删除集群? -- 15、APP(应用)如何被使用起来? -- 16、为什么下线应用提示operation forbidden? -- 17、删除Topic成功,为什么过一会儿之后又出现了? -- 18、如何在不登录的情况下,调用一些需要登录的接口? -- 19、为什么无法看到连接信息、耗时信息等指标? -- 20、AppID鉴权、生产消费配额不起作用 -- 21、如何查看周期任务说明文档 ---- +## 2、页面流量信息等无数据? -### 0、支持哪些Kafka版本? +- 1、`Broker JMX`未正确开启 -基本上只要所使用的Kafka还依赖于Zookeeper,那么该版本的主要功能基本上应该就是支持的。 +可以参看:[Jmx连接配置&问题解决说明文档](../dev_guide/解决连接JMX失败.md) ---- +- 2、`ES` 存在问题 -### 1、Topic申请、新建监控告警等操作时没有可选择的集群? +建议使用`ES 7.6`版本,同时创建近7天的索引,具体见:[单机部署手册](../install_guide/单机部署手册.md) 中的ES索引模版及索引创建。 -缺少逻辑集群导致的,在Topic管理、监控告警、集群管理这三个Tab下面都是普通用户视角,普通用户看到的集群都是逻辑集群,因此在这三个Tab下进行操作时,都需要有逻辑集群。 -逻辑集群的创建参看: +  -- [kafka-manager 接入集群](add_cluster/add_cluster.md) 手册,这里的Region和逻辑集群都必须添加。 ---- +## 3、`Jmx`连接失败如何解决? -### 2、逻辑集群 & Region的用途? +- 参看 [Jmx连接配置&问题解决说明文档](../dev_guide/解决连接JMX失败.md) 说明。 -主要用途是进行大集群的管理 & 集群细节的屏蔽。 -- 逻辑集群:通过逻辑集群概念,将集群Broker按业务进行归类,方便管理; -- Region:通过引入Region,同时Topic按Region纬度创建,减少Broker间的连接; +  ---- -### 3、登录失败? +## 4、有没有 API 文档? -- 检查使用的MySQL版本,8.0等版本暂不支持,具体版本辛苦查看 README 。 +`KnowStreaming` 采用 Swagger 进行 API 说明,在启动 KnowStreaming 服务之后,就可以从下面地址看到。 ---- +Swagger-API地址: [http://IP:PORT/swagger-ui.html#/](http://IP:PORT/swagger-ui.html#/) -### 4、页面流量信息等无数据? -- 1、检查`Broker JMX`是否正确开启。 +  -如若还未开启,具体可百度一下看如何开启,或者参看:[Jmx连接配置&问题解决说明文档](../dev_guide/connect_jmx_failed.md) -![helpcenter](./assets/faq/jmx_check.jpg) - -- 2、`MySQL`的版本是否过高。 - -建议使用`MySQL 5.7`版本。 - -- 3、数据库时区问题。 - -检查MySQL的cluster表的gmt_modify字段,做一个update动作,看这个gmt_modify时间是否是当前时间,如果不是,那么就是时区问题了。时区不对问题具体可以搜索一下看如何解决。 - ---- - -### 5、如何对接夜莺的监控告警功能? 
- -- 参看 [kafka-manager 对接夜莺监控](../dev_guide/monitor_system_integrate_with_n9e.md) 说明。 - ---- - -### 6、如何使用`MySQL 8`? - -- 参看 [kafka-manager 使用`MySQL 8`](../dev_guide/use_mysql_8.md) 说明。 - ---- - -### 7、`Jmx`连接失败如何解决? - -- 参看 [Jmx连接配置&问题解决](../dev_guide/connect_jmx_failed.md) 说明。 - ---- - -### 8、`topic biz data not exist`错误及处理方式 - -**错误原因** - -在进行权限审批的时候,可能会出现这个错误,出现这个错误的原因是因为Topic相关的业务信息没有在DB中存储,或者更具体的说就是该Topic不属于任何应用导致的,只需要将这些无主的Topic挂在某个应用下面即可。 - -**解决方式** - -可以在`运维管控->集群列表->Topic信息`下面,编辑申请权限的Topic,为Topic选择一个应用即可。 - -以上仅仅只是针对单个Topic的场景,如果你有非常多的Topic需要进行初始化的,那么此时可以在配置管理中增加一个配置,来定时的对无主的Topic进行同步,具体见:[动态配置管理 - 1、Topic定时同步任务](../dev_guide/dynamic_config_manager.md) - ---- - -### 9、进程启动后,如何查看API文档 - -- 滴滴Logi-KafkaManager采用Swagger-API工具记录API文档。Swagger-API地址: [http://IP:PORT/swagger-ui.html#/](http://IP:PORT/swagger-ui.html#/) - - -### 10、如何创建告警组? - -告警组的创建需要到Logi-KM对接的监控系统中创建,比如我们现在默认是对接了夜莺,那么告警组需要到夜莺中创建,如果没有安装夜莺,那么需要安装一下夜莺并进行对接。当然,这里也可以对接自己内部的监控系统,不过需要实现一些接口。 - -具体的文档可见:[监控功能对接夜莺](../dev_guide/monitor_system_integrate_with_n9e.md)、[监控功能对接其他系统](../dev_guide/monitor_system_integrate_with_self.md) - -那么在夜莺中,如何创建告警组呢? -需要前往夜莺平台-用户资源中心-团队管理中新建团队。新建过团队之后再次回到Logi-KM中刷新页面就可以在该下拉框中选择告警接收组了。 - -### 11、连接信息、耗时信息、磁盘信息为什么没有数据? - -这块需要结合滴滴内部的kafka-gateway一同使用才会有数据,滴滴kafka-gateway暂未开源。 - -### 12、逻辑集群申请审批通过之后为什么看不到逻辑集群? - -逻辑集群的申请与审批仅仅只是一个工单流程,并不会去实际创建逻辑集群,逻辑集群的创建还需要手动去创建。 - -具体的操作可见:[kafka-manager 接入集群](add_cluster/add_cluster.md)。 - - -### 13、heartbeat表关联业务和使用场景是什么? - -做任务抢占用的。 - -KM支持HA的方式部署,那么部署多台的时候,就会出现每一台都可能去做指标收集的事情,这块就使用heartbeat表做KM的存活性判断,然后进行任务的抢占或者是均衡。 - -更多详细的内容,可以看一下源码中,heartbeat表在哪里被使用了。 - - -### 14、集群的删除,是否会真正的删除集群? - -Logi-KM的运维管控,集群列表中的集群删除,仅仅只是将该集群从Logi-KM中进行删除,并不会对真正的物理集群做什么操作。 - - -### 15、APP(应用)如何被使用起来? - -app在Logi-KM中可以近似理解为租户,或者是kafka里面的一个账号的概念。 - -界面中显示的app信息、权限信息等,在平台层面仅仅只是控制Topic或集群在平台上的可见性,如果使用的是社区版本的Kafka,那么实际上是不能真正的管控到客户端对Topic的生产和消费。 - -但是如果是使用的滴滴的Kafka-Gateway,那么是可以做到对客户端的生产和消费的权限管控。滴滴的Kafka-Gateway暂未开源,属于企业服务,具体的可以入群交流,群地址在README中。 - - -### 16、为什么下线应用提示operation forbidden? - -**原因一:** - -该应用还存在对Topic的权限,因此导致下线失败。具体查看的位置在"Topic管理-》应用管理-》详情",可以看到应用对哪些Topic还有权限。 - -只有当权限全部去除之后,才可以下线应用。 - -**原因二:** - -使用的是2.4.0之前的旧版本,旧版本存在缓存更新的BUG,建议升级至最新的版本,或者简单粗暴的就是重启一下KM。 - - -### 17、删除Topic成功,为什么过一会儿之后又出现了? +## 5、删除Topic成功后,为何过段时间又出现了? **原因说明:** -Logi-KM会去请求Topic的endoffset信息,要获取这个信息就需要发送metadata请求,发送metadata请求的时候,如果集群允许自动创建Topic,那么当Topic不存在时,就会自动将该Topic创建出来。 +`KnowStreaming` 会去请求Topic的endoffset信息,要获取这个信息就需要发送metadata请求,发送metadata请求的时候,如果集群允许自动创建Topic,那么当Topic不存在时,就会自动将该Topic创建出来。 **问题解决:** -因为在Logi-KM上,禁止Kafka客户端内部元信息获取这个动作非常的难做到,因此短时间内这个问题不好从Logi-KM上解决。 +因为在 `KnowStreaming` 上,禁止Kafka客户端内部元信息获取这个动作非常的难做到,因此短时间内这个问题不好从 `KnowStreaming` 上解决。 -当然,对于不存在的Topic,Logi-KM是不会进行元信息请求的,因此也不用担心会莫名其妙的创建一个Topic出来。 +当然,对于不存在的Topic,`KnowStreaming` 是不会进行元信息请求的,因此也不用担心会莫名其妙的创建一个Topic出来。 -但是,另外一点,对于开启允许Topic自动创建的集群,建议是关闭该功能,开启是非常危险的,如果关闭之后,Logi-KM也不会有这个问题。 +但是,另外一点,对于开启允许Topic自动创建的集群,建议是关闭该功能,开启是非常危险的,如果关闭之后,`KnowStreaming` 也不会有这个问题。 最后这里举个开启这个配置后,非常危险的代码例子吧: ```java for (int i= 0; i < 100000; ++i) { // 如果是客户端类似这样写的,那么一启动,那么将创建10万个Topic出来,集群元信息瞬间爆炸,controller可能就不可服务了。 - producer.send(new ProducerRecord("logi_km" + i,"hello logi_km")); + producer.send(new ProducerRecord("know_streaming" + i,"hello logi_km")); } ``` -### 18、如何在不登录的情况下,调用一些需要登录的接口? -具体见:[登录绕过](./call_api_bypass_login.md) +  -### 19、为什么无法看到连接信息、耗时信息等指标? 
-连接信息、耗时信息等指标依赖于滴滴kafka-gateway和滴滴Kafka引擎,通过gateway可获取到连接到该Topic的应用情况,提高对Topic的管控能力。通过滴滴Kafka引擎的自带埋点,可获取到耗时信息,提升Topic生产消费时的可观测性。这部分内容是属于商业版的范畴,暂未开源。如有需要,可进行商业合作。 -具体见:[滴滴Logi-KafkaManager开源版和商业版特性对比](../开源版与商业版特性对比.md) +## 6、如何在不登录的情况下,调用接口? -### 20、AppID鉴权、生产消费配额不起作用? -AppID鉴权、生产消费配额依赖于滴滴kafka-gateway,通过gateway进行身份鉴权和生产消费限流,可避免用户无限制的使用集群的流量,流量大的用户会耗尽系统资源从而影响其他用户的使用,造成集群的节点故障。这部分内容是属于商业版的范畴,暂未开源。如有需要,可进行商业合作。 +具体见:[免登录调用接口](../dev_guide/免登录调用接口.md) -具体见:[滴滴Logi-KafkaManager开源版和商业版特性对比](../开源版与商业版特性对比.md) -### 20、如何查看周期任务说明文档 - -具体见:[周期任务说明文档](../dev_guide/周期任务说明文档.md) \ No newline at end of file diff --git a/docs/user_guide/kafka_metrics_desc.md b/docs/user_guide/kafka_metrics_desc.md deleted file mode 100644 index 2d95b273..00000000 --- a/docs/user_guide/kafka_metrics_desc.md +++ /dev/null @@ -1,72 +0,0 @@ - ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - - - -# Topic 指标说明 - -## 1. 实时流量指标说明 - - -| 指标名称| 单位| 指标含义| -|-- |---- |---| -| messagesIn| 条/s | 每秒发送到kafka的消息条数 | -| byteIn| B/s | 每秒发送到kafka的字节数 | -| byteOut| B/s | 每秒流出kafka的字节数(所有消费组消费的流量,如果是Kafka版本较低,这个还包括副本同步的流量) | -| byteRejected| B/s | 每秒被拒绝的字节数 | -| failedFetchRequest| qps | 每秒拉取失败的请求数 | -| failedProduceRequest| qps | 每秒发送失败的请求数 | -| totalProduceRequest| qps | 每秒总共发送的请求数,与messagesIn的区别是一个是发送请求里面可能会有多条消息 | -| totalFetchRequest| qps | 每秒总共拉取消息的请求数 | - -  - -## 2. 历史流量指标说明 - -| 指标名称| 单位| 指标含义| -|-- |---- |---| -| messagesIn| 条/s | 近一分钟每秒发送到kafka的消息条数 | -| byteIn| B/s | 近一分钟每秒发送到kafka的字节数 | -| byteOut| B/s | 近一分钟每秒流出kafka的字节数(所有消费组消费的流量,如果是Kafka版本较低,副本同步的流量) | -| byteRejected| B/s | 近一分钟每秒被拒绝的字节数 | -| totalProduceRequest| qps | 近一分钟每秒总共发送的请求数,与messagesIn的区别是一个是发送请求里面可能会有多条消息 | - -  - -## 3. 实时耗时指标说明 - -**基于滴滴加强版Kafka引擎的特性,可以获取Broker的实时耗时信息和历史耗时信息** - -| 指标名称| 单位 | 指标含义 | 耗时高原因 | 解决方案| -|-- |-- |-- |-- |--| -| RequestQueueTimeMs| ms | 请求队列排队时间 | 请求多,服务端处理不过来 | 联系运维人员处理 | -| LocalTimeMs| ms | Broker本地处理时间 | 服务端读写数据慢,可能是读写锁竞争 | 联系运维人员处理 | -| RemoteTimeMs| ms | 请求等待远程完成时间,对于发送请求,如果ack=-1,该时间表示副本同步时间,对于消费请求,如果当前没有数据,该时间为等待新数据时间,如果请求的版本与topic存储的版本不同,需要做版本转换,也会拉高该时间 | 对于生产,ack=-1必然会导致该指标耗时高,对于消费,如果topic数据写入很慢,该指标高也正常。如果需要版本转换,该指标耗时也会高 | 对于生产,可以考虑修改ack=1,消费端问题可以联系运维人员具体分析 | -| ThrottleTimeMs| ms | 请求限流时间 | 生产/消费被限流 | 申请提升限流值 | -| ResponseQueueTimeMs| ms | 响应队列排队时间 | 响应多,服务端处理不过来 | 联系运维人员处理 | -| ResponseSendTimeMs| ms | 响应返回客户端时间 | 1:下游消费能力差,导致向consumer发送数据时写网络缓冲区过慢;2:消费lag过大,一直从磁盘读取数据 | 1:提升客户端消费性能;2: 联系运维人员确认是否读取磁盘问题 | -| TotalTimeMs| ms | 接收到请求到完成总时间,理论上该时间等于上述六项时间之和,但由于各时间都是单独统计,总时间只是约等于上述六部分时间之和 | 上面六项有些耗时高 | 具体针对高的指标解决 | - -**备注:由于kafka消费端实现方式,消费端一次会发送多个Fetch请求,在接收到一个Response之后就会开始处理数据,使Broker端返回其他Response等待,因此ResponseSendTimeMs并不完全是服务端发送时间,有时会包含一部分消费端处理数据时间** - -## 4. 
历史耗时指标说明 - -**基于滴滴加强版Kafka引擎的特性,可以获取Broker的实时耗时信息和历史耗时信息** - -| 指标名称| 单位| 指标含义| -|-- | ---- |---| -| produceRequestTime99thPercentile|ms|Topic近一分钟发送99分位耗时| -| fetchRequestTime99thPercentile|ms|Topic近一分钟拉取99分位耗时| -| produceRequestTime95thPercentile|ms|Topic近一分钟发送95分位耗时| -| fetchRequestTime95thPercentile|ms|Topic近一分钟拉取95分位耗时| -| produceRequestTime75thPercentile|ms|Topic近一分钟发送75分位耗时| -| fetchRequestTime75thPercentile|ms|Topic近一分钟拉取75分位耗时| -| produceRequestTime50thPercentile|ms|Topic近一分钟发送50分位耗时| -| fetchRequestTime50thPercentile|ms|Topic近一分钟拉取50分位耗时| - diff --git a/docs/user_guide/monitor_desc.md b/docs/user_guide/monitor_desc.md deleted file mode 100644 index abd06209..00000000 --- a/docs/user_guide/monitor_desc.md +++ /dev/null @@ -1,30 +0,0 @@ -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - -## 报警策略-监控指标说明 - -| 指标 | 含义 |备注 | -| --- | --- | --- | -| online-kafka-consumer-lag | 消费时,按照分区的维度进行监控lag数 | lag表示有多少数据没有被消费,因为按照分区的维度监控,所以告警时一般会有分区信息 | -| online-kafka-consumer-maxLag | 消费时,按照整个Topic的维度,监控Topic所有的分区里面的那个最大的lag | 比如每个分区的lag分别是3、5、7,那么maxLag的值就是max(3,5,7)=7 | -| online-kafka-consumer-maxDelayTime | 消费时,按照Topic维度监控预计的消费延迟 | 这块是按照lag和messagesIn之间的关系计算出来的,可能会有误差 | - -## 报警策略-报警函数介绍 - -| 类别 | 函数 | 含义 |函数文案 |备注 | -| --- | --- | --- | --- | --- | -| 发生次数 |all,n | 最近$n个周期内,全发生 | 连续发生(all) | | -| 发生次数 | happen, n, m | 最近$n个周期内,发生m次 | 出现(happen) | null点也计算在n内 | -| 数学统计 | sum, n | 最近$n个周期取值 的 和 | 求和(sum) | sum_over_time | -| 数学统计 | avg, n | 最近$n个周期取值 的 平均值 | 平均值(avg) | avg_over_time | -| 数学统计 | min, n | 最近$n个周期取值 的 最小值 | 最小值(min) | min_over_time | -| 数学统计 | max, n | 最近$n个周期取值 的 最大值 | 最大值(max | max_over_time | -| 变化率 | pdiff, n | 最近$n个点的变化率, 有一个满足 则触发 | 突增突降率(pdiff) | 假设, 最近3个周期的值分别为 v, v2, v3(v为最新值)那么计算公式为 any( (v-v2)/v2, (v-v3)/v3 )**区分正负** | -| 变化量 | diff, n | 最近$n个点的变化量, 有一个满足 则触发 | 突增突降值(diff) | 假设, 最近3个周期的值分别为 v, v2, v3(v为最新值)那么计算公式为 any( (v-v2), (v-v3) )**区分正负** | -| 变化量 | ndiff | 最近n个周期,发生m次 v(t) - v(t-1) $OP threshold其中 v(t) 为最新值 | 连续变化(区分正负) - ndiff | | -| 数据中断 | nodata, t | 最近 $t 秒内 无数据上报 | 数据上报中断(nodata) | | -| 同环比 | c_avg_rate_abs, n | 最近$n个周期的取值,相比 1天或7天前取值 的变化率 的绝对值 | 同比变化率(c_avg_rate_abs) | 假设最近的n个值为 v1, v2, v3历史取到的对应n'个值为 v1', v2'那么计算公式为abs((avg(v1,v2,v3) / avg(v1',v2') -1)* 100%) | -| 同环比 | c_avg_rate, n | 最近$n个周期的取值,相比 1天或7天前取值 的变化率(**区分正负**) | 同比变化率(c_avg_rate) | 假设最近的n个值为 v1, v2, v3历史取到的对应n'个值为 v1', v2'那么计算公式为(avg(v1,v2,v3) / avg(v1',v2') -1)* 100% | diff --git a/docs/user_guide/resource_apply.md b/docs/user_guide/resource_apply.md deleted file mode 100644 index 87537f95..00000000 --- a/docs/user_guide/resource_apply.md +++ /dev/null @@ -1,32 +0,0 @@ - ---- - -![kafka-manager-logo](../assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - - -# 资源申请文档 - -## 主要名词解释 - -- 应用(App):作为Kafka中的账户,使用AppID+password作为身份标识 -- 集群:可使用平台提供的共享集群,也可为某一应用申请单独的集群 -- Topic:可申请创建Topic或申请其他Topic的生产/消费权限。进行生产/消费时通过Topic+AppID进行身份鉴权 -![production_consumption_flow](assets/resource_apply/production_consumption_flow.png) - -## 应用申请 -应用(App)作为Kafka中的账户,使用AppID+password作为身份标识。对Topic进行生产/消费时通过Topic+AppID进行身份鉴权。 - -用户申请应用,经由运维人员审批,审批通过后获得AppID和密钥 - -## 集群申请 -可使用平台提供的共享集群,若对隔离性、稳定性、生产消费速率有更高的需求,可对某一应用申请单独的集群 - -## Topic申请 -- 用户可根据已申请的应用创建Topic。创建后,应用负责人默认拥有该Topic的生产/消费权限和管理权限 -- 也可申请其他Topic的生产、消费权限。经由Topic所属应用的负责人审批后,即可拥有相应权限。 - - diff --git a/docs/user_guide/user_guide_cn.md b/docs/user_guide/user_guide_cn.md deleted file mode 100644 index caebf2b0..00000000 --- a/docs/user_guide/user_guide_cn.md +++ /dev/null @@ -1,1287 
+0,0 @@ - - - **Kafka Manager 云平台用户使用手册** - -# 文档概述 # - -本文档提供了滴滴出行-Kafka Manager云平台产品的入门操作指导和详细配置指导,希望通过本说明书为平台的使用者提供支持。 - -默认登陆密码: `admin/admin` - -**文档版本** - -01(2020-10-16) - -第一次正式发布。 - - **目录** - - -- [文档概述](#文档概述) -- [功能介绍](#功能介绍) - - [数据中心切换](#数据中心切换) - - [访问帮助中心](#访问帮助中心) - - [个人中心](#个人中心) - - [工单查看-我的申请](#工单查看-我的申请) - - [工单查看-我的审批](#工单查看-我的审批) - - [账单管理](#账单管理) - - [退出系统](#退出系统) - - [Topic管理](#topic管理) - - [我的Topic](#我的topic) - - [查看活跃Topic列表](#查看活跃topic列表) - - [申请新增Topic](#申请新增topic) - - [申请Topic配额](#申请topic配额) - - [申请Topic分区](#申请topic分区) - - [申请Topic下线](#申请topic下线) - - [编辑Topic](#编辑topic) - - [查看Topic基本信息](#查看topic基本信息) - - [采样](#采样) - - [查看Topic状态图](#查看topic状态图) - - [查看Topic与客户端的关联信息](#查看topic与客户端的关联信息) - - [查看Topic消费组信息](#查看topic消费组信息) - - [重置Topic的消费offset(偏移)](#重置topic的消费offset偏移) - - [查看Topic分区信息](#查看topic分区信息) - - [查看Topic的Broker信息](#查看topic的broker信息) - - [查看Topic关联的应用信息](#查看topic关联的应用信息) - - [查看Topic的账单信息](#查看topic的账单信息) - - [查看broker的详细信息](#查看broker的详细信息) - - [查看broker的基础及监控信息](#查看broker的基础及监控信息) - - [查看broker的关联topic信息](#查看broker的关联topic信息) - - [查看broker的磁盘信息](#查看broker的磁盘信息) - - [查看broker的partition信息](#查看broker的partition信息) - - [查看broker的Topic分析情况](#查看broker的topic分析情况) - - [已过期Topic](#已过期topic) - - [全部Topic](#全部topic) - - [查看全部Topic列表](#查看全部topic列表) - - [申请Topic权限](#申请topic权限) - - [申请扩Topic的配额](#申请扩topic的配额) - - [应用管理](#应用管理) - - [申请应用](#申请应用) - - [修改应用](#修改应用) - - [申请下线应用](#申请下线应用) - - [查看应用详情](#查看应用详情) - - [取消应用对topic的权限](#取消应用对topic的权限) - - [集群管理](#集群管理) - - [我的集群](#我的集群) - - [查看我的集群列表](#查看我的集群列表) - - [申请集群](#申请集群) - - [申请集群下线](#申请集群下线) - - [集群扩缩容](#集群扩缩容) - - [查看集群详情](#查看集群详情) - - [查看集群topic列表](#查看集群topic列表) - - [查看集群Broker列表](#查看集群broker列表) - - [查看集群topic的限流情况](#查看集群topic的限流情况) - - [监控告警](#监控告警) - - [告警列表](#告警列表) - - [新建告警规则](#新建告警规则) - - [查看该条规则的历史告警](#查看该条规则的历史告警) - - [告警规则屏蔽](#告警规则屏蔽) - - [运维管控](#运维管控) - - [集群列表](#集群列表) - - [新增集群](#新增集群) - - [修改集群](#修改集群) - - [暂停/开始集群监控](#暂停开始集群监控) - - [删除集群](#删除集群) - - [查看集群详情](#查看集群详情-1) - - [查看集群详情-集群概览](#查看集群详情-集群概览) - - [查看集群详情-Topic信息](#查看集群详情-topic信息) - - [查看集群详情-Broker信息](#查看集群详情-broker信息) - - [查看集群详情-Broker信息-Leader Rebalance](#查看集群详情-broker信息-leader-rebalance) - - [查看集群详情-Broker信息-Broker详情](#查看集群详情-broker信息-broker详情) - - [查看集群详情-消费组信息](#查看集群详情-消费组信息) - - [查看集群详情-Region信息](#查看集群详情-region信息) - - [查看集群详情-新增Region](#查看集群详情-新增region) - - [查看集群详情-编辑Region](#查看集群详情-编辑region) - - [查看集群详情-删除Region](#查看集群详情-删除region) - - [集群运维](#集群运维) - - [迁移任务](#迁移任务) - - [新建迁移任务](#新建迁移任务) - - [迁移任务详情](#迁移任务详情) - - [集群任务](#集群任务) - - [新建集群任务](#新建集群任务) - - [集群任务详情](#集群任务详情) - - [版本管理](#版本管理) - - [上传配置](#上传配置) - - [平台管理](#平台管理) - - [应用管理-查看应用列表](#应用管理-查看应用列表) - - [查看应用详情](#查看应用详情-1) - - [用户管理-用户列表](#用户管理-用户列表) - - [用户管理-编辑用户](#用户管理-编辑用户) - - [用户管理-添加用户](#用户管理-添加用户) - - [配置管理-配置列表](#配置管理-配置列表) - - [配置管理-编辑配置](#配置管理-编辑配置) - - [配置管理-删除配置](#配置管理-删除配置) - - [用户账单](#用户账单) - - [个人账单](#个人账单) - - [专家服务](#专家服务) - - [Topic分区热点](#topic分区热点) - - [数据迁移操作](#数据迁移操作) - - [迁移任务](#迁移任务-1) - - [Topic分区不足](#topic分区不足) - - [Topic资源治理](#topic资源治理) - - [Topic异常诊断](#topic异常诊断) - - -# 功能介绍 # -## 数据中心切换 ## -用户可点击右上方数据中心切换入口,下拉选择需要访问的数据中心,点击完成切换。 - -目前可选择的有:国内 - -![datacenter](./assets/datacenter.png) - -## 访问帮助中心 ## -用户可点击右上方帮助中心,在下拉框中,选择访问:产品介绍、QuickStart、常见问题、联系我们。 - -* 产品介绍:对平台各模块的功能做解释,并介绍相应操作。 - -* QuickStart:选择了部分重要的操作,以实例为例,介绍操作。如:发送、消费数据;申请App;创建Topic;申请集群权限。 - -* 常见问题:记录了对Kafka平台用户的答疑,可以在这里找到一些常见问题的解决方案。 - -* 联系我们:快捷进入Kafka用户群,可以找到Kafka平台的联系人。 - -![helpcenter](./assets/helpcenter.png) -## 个人中心 ## -鼠标悬停导航栏右上方的头像,即可访问用户个人中心。 - -可以实现查看我的申请、我的审批、账单管理和退出系统的操作。 - 
-![usercenter](./assets/usercenter.png) -### 工单查看-我的申请 ### -可以查看到由当前用户申请的工单,可执行撤回、查看详情等操作。 - -**操作步骤** - -步骤1:鼠标悬停导航栏右侧用户头像>“我的申请”。 - -步骤2:可以查看到,由当前用户发起申请的工单。工单的状态共有四种:待审批、已通过、已拒绝、已取消。 - -步骤3:点击对应的tab页,可快速查看:审批中、已通过、全部状态的工单。 - -步骤4:列表展示工单类型、工单ID、工单标题、申请原因、任务状态、操作。点击详情可以查看到工单详情,如由谁审批,当前进度等。 - -![myapplication](./assets/myapplication.png) - -### 工单查看-我的审批 ### - -可以查看到需要由当前用户审批的工单,并处理。 - -**操作步骤** - -步骤1:鼠标悬停导航栏右侧用户头像>“我的审批”。 - -步骤2:可以查看到,需要由当前用户审批的工单。 - -步骤3:点击对应的tab页,可快速查看:审批中、已通过、全部状态的工单。 - -步骤4:列表展示工单类型、工单ID、工单标题、申请原因、任务状态、操作。 - -![dealtask](./assets/dealtask.png) - -步骤5:点击详情,可以查看工单详情并进行操作。 - -审批操作有通过、驳回。 - -![ticketdetail](./assets/ticketdetail.png) - -### 账单管理 ### - -同时,“运维管控”模块也具备账单统筹查看的能力。可访问“运维管控”>"用户账单"。 - -**操作步骤** - -步骤1:鼠标悬停导航栏右侧用户头像>“账单管理”。 - -步骤2:可以看到以列表和图表的形式展示的账单数据。 - -![billdata](./assets/billdata.png) - -### 退出系统 ### - -步骤1:鼠标悬停导航栏右侧用户头像>点击“退出”。 - -## Topic管理 ## - -Topic管理面向普通用户,可提供Topic相关的各操作。 - -### 我的Topic ### -用户可以在“我的Topic”模块查看到其有权限的Topic列表。权限分为可管理、可发送、可消费。并且对于自己有权限的Topic,可以执行配额、分区申请;采样;重置消费offset等等操作(具体可见以下内容) -#### 查看活跃Topic列表 #### - -**操作步骤** - -步骤1:点击“Topic管理”>“我的Topic”>"活跃Topic"。 - -步骤2:点击【关联应用】、【集群】下拉框,可以筛选想要查看目标应用或集群相关的Topic;在【名称】输入框中输入关键字,可以搜索目标Topic。 - -步骤3:列表展示“Topic名称”、“Bytes in”、“Bytes out”、"所属集群"、“权限”、“关联应用”、“操作”等参数。 - -![mytopic](./assets/mytopic.png) - -步骤4:部分属性支持表头排序和筛选。例如“权限”,点击筛选按钮,可以快速选择对应的权限。 - - -#### 申请新增Topic #### - -**操作步骤** - -步骤1:点击“申请Topic”,进入申请Topic弹框。 - -步骤2:正确填写申请参数,提交申请工单。 - -Topic名称:允许数字、字母、下划线。 - -所属应用:选择该Topic属于哪个应用。 - -峰值流量:设置该Topic的流量上限值;运维人员也会根据该值分配Topic配额。 -点击“Kafka计价方式”,可跳转查看费用说明。 - -![topicapply](./assets/topicapply.png) - -#### 申请Topic配额 #### - -**操作步骤** - -步骤1:找到对应Topic,点击“申请配额”,弹出申请配额弹框。 - -步骤2:正确填写申请参数,点击确认,提交申请工单。 - -![applylocated](./assets/applylocated.png) - -#### 申请Topic分区 #### - -**操作步骤** - -步骤1:找到对应Topic,点击“申请分区”,弹出申请分区弹框。 - -分区数:填写需要的分区数。请根据实际情况填写。 - -备注:填写申请理由。 - -注意:如果topic已被限流,则直接申请配额即可,无须申请分区。 - -步骤2:正确填写申请参数,点击确认,提交申请工单。 - -![zoomapply](./assets/zoomapply.png) - -#### 申请Topic下线 #### - -对于有管理权限的Topic,可以申请下线该Topic. - -**操作步骤** - -步骤1:找到对应Topic,点击“申请下线”,弹出申请分区弹框。 - -步骤2:如果连接信息不为空,则表示该Topic当前有客户端在使用;如果下线该Topic可能会影响相关业务。 - -必须先至客户端侧关闭,再下线Topic。 - -步骤3:正确填写申请参数,点击确认,提交下线申请工单。 - -![topicoffline](./assets/topicoffline.png) - -#### 编辑Topic #### - -对于有管理权限的Topic,可以编辑该Topic. 
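The Topic-creation and partition-expansion requests described in the sections above are ticket workflows inside the platform; once approved, what ultimately happens on the cluster is an ordinary Kafka admin operation. The sketch below is illustrative only (the topic name, partition/replica counts and bootstrap address are placeholders, and a recent `kafka-clients` `AdminClient` is assumed), but it shows the two underlying calls:

```java
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.NewTopic;

public class TopicAdminSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092"); // placeholder address

        try (AdminClient admin = AdminClient.create(props)) {
            // Create a topic: 3 partitions, replication factor 2 (placeholder numbers).
            admin.createTopics(Collections.singleton(new NewTopic("example_topic", 3, (short) 2)))
                 .all().get();

            // Expand partitions later on: increaseTo() takes the desired TOTAL count, not a delta.
            admin.createPartitions(
                    Collections.singletonMap("example_topic", NewPartitions.increaseTo(6)))
                 .all().get();
        }
    }
}
```

Note that `NewPartitions.increaseTo()` expects the target total partition count; Kafka never allows the partition count of a topic to be reduced.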
- -**操作步骤** - -步骤1:找到对应Topic,点击操作栏中“更多”>“编辑”,弹出编辑弹框。 - -步骤2:可以编辑备注内容,点击确认,完成编辑。 - -![edittopic](./assets/edittopic.png) - -#### 查看Topic基本信息 #### - -**操作步骤** - -步骤1:点击Topic名称即跳转进入详情页,可查看该topic的详情。 - -步骤2:点击“基本信息”可查看当前Topic的基本信息。见下图。 - -**专业名词介绍**: - -* 健康分:越高表明越健康,用户可以通过该指标评估该Topic所关联的broker的运行状况,是否有异常等。 - -* (物理)集群ID:Topic所关联等物理集群ID;当客户端使用该平台Topic时,可能需要知道集群ID。 - -* 服务地址(Bootstrap Severs):Topic所关联等物理集群的服务地址。 - -![topicbasicinfo](./assets/topicbasicinfo.png) - - -#### 采样 #### - -**操作步骤** - -步骤1:点击Topic名称即跳转进入详情页,可查看该topic的详情。 - -步骤2:点击【采样按钮】,可以通过采样了解该Topic的数据样例。 - - -#### 查看Topic状态图 #### - -**操作步骤** - -步骤1:点击Topic名称即跳转进入详情页,可查看该topic的详情。 - -步骤2:点击“状态图”即可查看当前Topic的各指标图表。 - -步骤3:可查看历史流量和历史耗时信息相关的各指标。点击“指标说明”可跳转查看各指标的含义说明。 - -步骤4:支持下拉切换指标查看,并可自定义选择时间段。 - -![topictable](./assets/topictable.png) - - -#### 查看Topic与客户端的关联信息 #### - -**操作步骤** - -步骤1:点击Topic名称即跳转进入详情页。 - -步骤2:点击“连接信息”即可查看当前Topic,与哪些客户端有关联。 - -步骤3:列表展示“AppID”-客户端使用的App、“主机名”-客户端运行的机器、“客户端版本”、“客户端类型”。部分字段支持表头筛选。 - -![topicinfoconnect](./assets/topicinfoconnect.png) - -#### 查看Topic消费组信息 #### - -消费组不需要申请,由系统自动生成。当客户端正常消费时,自动生成消费组。 - -**操作步骤** - -步骤1:点击Topic名称即跳转进入详情页。 - -步骤2:点击“消费组信息”即可查看当前Topic有哪些消费组。列表展示“消费组名称”、“AppID”、“Location”(broker或zookeeper)。部分字段支持表头筛选。 - -![topicinfoconsumer](./assets/topicinfoconsumer.png) - -步骤3:点击消费组名称可查看该消费组的信息,如"Partition ID"、“Consumer ID”、“Consume Offsset”、“Partition Offsset”、“Lag”. - - **专业名词解释** - -Lag:表示该消费客户端是否有堆积;等于 partition offset-consume offset。 - -![topicconsumerinfo](./assets/topicconsumerinfo.png) - -#### 重置Topic的消费offset(偏移) #### - -**操作步骤** - -步骤1:在上一步的基础上,点击【重置offset】按钮,出现重置消费offset(偏移)的弹框。 - -步骤2:需注意,重置消费offset之前,需要关闭消费客户端,并等待几分钟后再执行重置操作。否则会操作失败。 - -步骤3:可在弹框内选择重置到最新的offset,或某个时间的offset; 或重置至指定的分区伤的某个offset。点击重置,操作成功。 - -![consumeroffset](./assets/consumeroffset.png) - -#### 查看Topic分区信息 #### - -**操作步骤** - -步骤1:点击Topic名称即跳转进入详情页。 - -步骤2:点击“分区信息”即可查看当前Topic各分区的信息。 - -步骤3:列表展示“分区ID”、“BeginingOffset”、“EndOffset”、“MsgNum”、“Leader Broker”、“LogSize”、“优选副本”、“AR”、“ISR”、“是否同步”。部分字段支持表头筛选。 - -![topicpartition](./assets/topicpartition.png) - -#### 查看Topic的Broker信息 #### - -**操作步骤** - -步骤1:点击Topic名称即跳转进入详情页。 - -步骤2:点击“Broker信息”即可查看当前Topic的broker信息。 - -步骤3:列表展示“BrokerID”、“Host”、“Leader个数”、“分区LeaderID”、“分区个数”、“分区ID”。部分字段支持表头筛选及排序。 - -#### 查看Topic关联的应用信息 #### - -**操作步骤** - -步骤1:点击Topic名称即跳转进入详情页。 - -步骤2:点击“应用信息”即可查看当前Topic的应用信息。 - -步骤3:列表展示“应用ID”、“应用名称”、“负责人”、“生产配额”、“分区个数”、“分区ID”。部分字段支持表头筛选及排序。 - - -![topicapp](./assets/topicapp.png) - -#### 查看Topic的账单信息 #### - -**操作步骤** - -步骤1:点击Topic名称即跳转进入详情页。 - -步骤2:点击“账单信息”即可查看当前Topic的账单信息,以图表的形式展示数据走向。 - -![topicaccount](./assets/topicaccount.png) - -### 查看broker的详细信息 ### - -#### 查看broker的基础及监控信息 #### -**操作步骤** - -步骤1:在topic的详情页,点击BrokerID,即可看到Broker的详细信息。 - -![brokerinfo](./assets/brokerinfo.png) - -![brokerinfolis](./assets/brokerinfolist.png) - -步骤2:点击“监控信息”即可查看broker的一些指标图表,以图表的形式展示数据走向。 - -![brokertable](./assets/brokertable.png) - -#### 查看broker的关联topic信息 #### -**操作步骤** - -步骤1:在topic的详情页,点击BrokerID,即可看到Broker的详细信息。 - -步骤2:点击“Topic信息”即可查看与该broker相关的Topic列表。 - -![brokertopic](./assets/brokertopic.png) - - -#### 查看broker的磁盘信息 #### -**操作步骤** - -步骤1:在topic的详情页,点击BrokerID,即可看到Broker的详细信息。 - -步骤2:点击“磁盘信息”即可查看与该broker相关的磁盘列表。 - -![brokerrask](./assets/brokerrask.png) - -#### 查看broker的partition信息 #### -**操作步骤** - -步骤1:在topic的详情页,点击BrokerID,即可看到Broker的详细信息。 - -步骤2:点击“partition信息”即可查看与该broker相关的partition列表。 - -![brokerpartition](./assets/brokerpartition.png) - - -#### 查看broker的Topic分析情况 #### -**操作步骤** - -步骤1:在topic的详情页,点击BrokerID,即可看到Broker的详细信息。 - 
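Returning briefly to the offset-reset operation described earlier in this chapter: the manual's warning to stop the consumer client first matches the behaviour of the underlying Kafka API, which only accepts an offset overwrite while the group has no live members. Below is a minimal sketch (assuming `kafka-clients` 2.5 or newer; the group, topic, partition and bootstrap address are placeholders) of resetting a group to the offset closest to a chosen timestamp:

```java
import java.util.Collections;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class ResetOffsetSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092"); // placeholder address

        try (AdminClient admin = AdminClient.create(props)) {
            TopicPartition tp = new TopicPartition("example_topic", 0);   // placeholder topic/partition
            long targetTs = System.currentTimeMillis() - 10 * 60 * 1000L; // e.g. "10 minutes ago"

            // Find the earliest offset whose timestamp is >= targetTs
            // (offset() is -1 if no such record exists; a real tool would handle that case).
            Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> found =
                    admin.listOffsets(Collections.singletonMap(tp, OffsetSpec.forTimestamp(targetTs)))
                         .all().get();

            // The consumer group must have no live members, exactly as the manual requires.
            admin.alterConsumerGroupOffsets("example_group",               // placeholder group id
                    Collections.singletonMap(tp, new OffsetAndMetadata(found.get(tp).offset())))
                 .all().get();
        }
    }
}
```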
-步骤2:点击“Topic分析”即可查看其使用的Topic情况。 - -![brokertopicana](./assets/brokertopicana.png) - -#### 已过期Topic #### -**操作步骤** - -步骤1:在“我的topic”模块,点击“已过期Topic”可以查看到与自己相关、但已过期的Topic。 - -步骤2:可以通过筛选项,筛选目标Topic。 - -![expiredtopic](./assets/expiredtopic.png) - -### 全部Topic ### -“全部Topic”模块以列表的形式展示所有的Topic,用户可以在这里申请目标Topic的相关权限。 -#### 查看全部Topic列表 #### -**操作步骤** - -步骤1:点击“Topic管理”>“全部Topic”. 列表展示全部Topic,“Topic名称”、“所属集群”、“Topic描述”、“负责人”。 - -步骤2:可以通过筛选项,筛选目标Topic。 - -步骤3:点击Topic名称,可进入详情页。详情内容与查看方式与“我的Topic”模块一致。 - -![expiredtopic](./assets/expiredtopic.png) - -#### 申请Topic权限 #### -**操作步骤** - -步骤1:点击“Topic管理”>“全部Topic”. 对于想申请/增加权限的Topic,点击“申请权限”,出现申请权限弹框。 - -步骤2:正确填写参数,选择需要申请权限的应用,并选择权限类型,点击确认提交工单。 - - -![applytopicright](./assets/applytopicright.png) - -#### 申请扩Topic的配额 #### -**操作步骤** - -步骤1:如果用户当前已有该topic的权限点击Topic名称,进入详情页。详情内容与查看方式与“我的Topic”模块一致。 - -步骤2: - -![applytopicright](./assets/applytopicright.png) -### 应用管理 ### -应用管理可以查看到当前用户关联到应用列表。 - -**操作步骤** - -步骤1:点击“Topic管理”>"应用管理"进入应用管理列表页。 - -步骤2:列表展示“APPID”、“应用名称”、“应用描述”、“负责人”、“操作”。 - -![appmanager](./assets/appmanager.png) - -#### 申请应用 #### - -平台的权限申请、扩分区等操作,均需要先用应用去申请操作的权限。如果用户没有应用,需要先绑定或申请应用。 - -**操作步骤** - -步骤1:点击列表右上方的“申请应用”,打开应用申请弹框。 - -步骤2:需要填写应用名称、负责人(至少选择两人)、应用描述(阐述应用相关用途及申请原因)。填写完成后,点击确认提交申请。 - -![applyapp](./assets/applyapp.png) - -#### 修改应用 #### - -**操作步骤** - -步骤1:点击操作中的"修改",打开修改弹窗。 - -步骤2:可以修改应用名称 -负责人、应用描述。 - - -步骤3:点击【确定】,操作成功。 - -![editapp](./assets/editapp.png) - -#### 申请下线应用 #### - -**操作步骤** - -步骤1:点击操作中的"申请下线",打开下线弹窗。 - -步骤2:弹框内展示该app目前正在使用的的主机信息,需要先关闭对应主机的Kafka发送/消费客户端才能下线。 - -![appoffline](./assets/appoffline.png) - -#### 查看应用详情 #### - -**操作步骤** - -步骤1:点击应用名称,可以查看应用详情。 - -步骤2:点击【创建的Topic】可以查看到该应用创建的Topic列表。 - -![apptopic](./assets/apptopic.png) - -步骤3:点击【有权限的Topic】,可以查看该应用有权限的Topic列表。 - -#### 取消应用对topic的权限 #### - -**操作步骤** - -步骤1:点击【取消权限】,打开取消权限的弹框。 - -步骤2:选择想取消的权限,点击确认,可以取消应用与该topic的权限关系。 - -![cancelright](./assets/cancelright.png) - -## 集群管理 ## -### 我的集群 ### -#### 查看我的集群列表 #### -**操作步骤** - -步骤1:点击“集群管理”>"我的集群",查看与我相关的集群列表。 - -步骤2:选择想取消的权限,点击确认,可以取消应用与该topic的权限关系。 - -![cancelright](./assets/cancelright.png) - -#### 申请集群 #### -**操作步骤** - -步骤1:点击“集群管理”>"我的集群">【申请集群】,打开申请集群弹框。 - -步骤2:数据中心:默认为当前所处的数据中心 - -所属应用:选择在哪个应用下创建集群。 - -集群类型:选择创建的集群为“独享”还是“独立”。 - -* 独享集群意味着, 您拥有一个集群中,部分broker的使用权限。 - -* 独立集群意味着,您独自拥有一个 - 物理集群; - -* 共享集群意味着,大家共用一个集群及其中broker。 - -峰值流量:选择峰值流量限制。并支持自定义输入。 - -申请原因:填写申请集群的原因,用途,稳定性要求等等。 - -步骤3:填写完成后,点击提交即可提交申请。 - -备注说明:集群创建后,还需在此基础上创建region、逻辑集群。具体操作可参照 [集群接入手册](https://github.com/didi/Logi-KafkaManager/blob/master/docs/user_guide/add_cluster/add_cluster.md) - - -![applycluster](./assets/applycluster.png) - -#### 申请集群下线 #### - -对于自己独享的集群,可以申请下线。 - -**操作步骤** - -步骤1:点击“集群管理”>"我的集群",查看与我相关的集群列表。 - -步骤2:点击【申请下线】,打开申请下线弹框。 - -步骤2:弹框内展示是否有与该集群相关的活跃topic,如果有,则无法下线该集群。需要先下线相应topic。 - -![offlinecluster](./assets/offlinecluster.png) - -#### 集群扩缩容 #### - -对于自己独享的集群,可以申请扩缩容。 - -**操作步骤** - -步骤1:点击“集群管理”>"我的集群",查看与我相关的集群列表。 - -步骤2:点击【扩缩容】,打开申请扩缩容的弹框。 - -步骤2:下拉选择申请类型:扩容,或缩容 - -申请原因:填写申请扩缩容的原因。 - -点击确认提交。 - -![editzroom](./assets/editroom.png) - -#### 查看集群详情 #### - -**操作步骤** - -步骤1:点击集群名称,进入集群详情页。 - -步骤2:查看集群的基本信息:集群名称、集群类型、集群名称:逻辑集群名、集群类型:逻辑集群类型, 0:共享集群, 1:独享集群, 2:独立集群 、接入时间、物理集群Id、Kafka版、Bootstrap Severs、Kafka版本:集群版本、Bootstrap Severs:集群服务地址。以及实时流量列表、历史流量图表。 - -![clusterdetail](./assets/clusterdetail.png) - - -#### 查看集群topic列表 #### - -**操作步骤** - -步骤1:点击集群名称,进入集群详情页。 - -步骤2:点击“Topic信息”,查看集群相关的topic列表。展示Topic名称、QPS、Bytes In(KB/s)、所属应用、保存时间(h)、更新时间、Topic说明。 - -![clustertopic](./assets/clustertopic.png) - -步骤3:点击Topic名称可跳转至Topic的详情页。 - -#### 
查看集群Broker列表 #### - -**操作步骤** - -步骤1:点击集群名称,进入集群详情页。 - -步骤2:点击“Broker信息”,查看集群相关的Broker列表。展示ID、主机、Prot、JMX Port、启动时间、Byte In(MB/s)、Byte Out(MB/s)、副本状态、状态. - -![clusterbroker](./assets/clusterbroker.png) - -#### 查看集群topic的限流情况 #### - -**操作步骤** - -步骤1:点击集群名称,进入集群详情页。 - -步骤2:点击“限流信息”,展示Topic名称、应用ID、类型、Broker、类型:客户端类型[Produce|Fetch]、Broker:BrokerId列表。 - -![limit](./assets/limit.png) - -## 监控告警 ## -### 告警列表 ### - -**操作步骤** - -步骤1:以列表的形式展示每个告警规则的告警情况。告警名称、应用名称、操作人、创建时间、操作。 -步骤2:可通过下拉筛选想查看的应用,或输入关键字匹配告警。 - -![limit](./assets/limit.png) - -### 新建告警规则 ### - -**操作步骤** - -步骤1:点击“新建规则”,进入新建告警规则页面。 -![createalarmrule](./assets/createalarmrule.png) -步骤2:填写告警基本信息:规则名称 - -选择指标:可以选择对部分集群、部分topic、部分消费组生效。 - -报警策略:可以选择周期、连续发生、同比变化率、突增突降值、突增突降率、求和等计算方式。 - -![strategy](./assets/strategy.png) - -* 如以下图为例,表示在最近连续3个周期内,如有两次指标值大于等于阈值:90%,则触发告警。 - -![alarmruleex](./assets/alarmruleex.png) - - -步骤3:生效时间:选择告警规则生效的时间段。 - -如下图,则表示在每周一至周日的凌晨-晚23点,如果产生了告警,都会进行上报。 - -![alarmruletime](./assets/alarmruletime.png) - -步骤4:配置发送信息:告警通知时间段。 - -选择指标:可以选择对部分集群、部分topic、部分消费组生效。 - - -步骤5:配置发送信息: - -报警级别:设置告警级别。 - -报警周期(分钟):设置告警通知的周期。 - -周期内报警次数:设置在一个周期内,最多产生多少条告警。 - -报警接受组:设置报警通知的群组。 - -回调地址: 指对于该告警如果有相应处理措施,则可以通过回调该地址执行。 - -* 如下图,则表示本条规则的产生的告警为3级告警,以4分钟为一个周期进行通知,并且一个周期内,最多通知2次。通知对象为pborder群组内的联系人。 - -![alarmrulesent](./assets/alarmrulesent.png) - -步骤6:点击【提交】,完成规则的设置。 - -### 查看该条规则的历史告警 ### - -**操作步骤** - -步骤1:点击规则名称,进入详情页。点击“告警历史”tab页。查看相关历史告警。 - -步骤2:列表展示监控名称、开始时间、结束时间、状态、监控级别。 - -步骤3:可通过点击快捷时间筛选,来选择查看近三天,或近一周的告警情况。 - -![alarmhistory](./assets/alarmhistory.png) - -### 告警规则屏蔽 ### - -**操作步骤** - -步骤1:点击规则名称,进入详情页。点击“屏蔽历史”tab页,查看屏蔽历史。 - -步骤2:列表展示监控名称、开始时间、结束时间、备注、操作。 - -![alarmruleforbiddenhistory](./assets/alarmruleforbiddenhistory.png) - -## 运维管控 ## -### 集群列表 ### -**操作步骤** - -步骤1:点击"运维管控">“集群列表”,以列表的形式向运维侧人员展示集群信息。 - -步骤2:列表展示集群ID、集群名称、Topic数、Broker数、Consumer数、Region数、ControllerID、是否监控、操作。 - -步骤3:可通过列表左上方的搜索框,输入集群名称关键字,搜索对应的集群。 - -![opcluster](./assets/opcluster.png) - -#### 新增集群 #### -**操作步骤** - -步骤1:点击集群列表右上方的【注册集群】,打开集群申请弹框。 - -步骤2:集群名称:设置创建的集群名称。 - -Zookeeper地址:输入Zookeeper地址。 - -bootstrapServers地址:输入bootstrapServers地址。 - -数据中心:默认为当前登录的数据中心。 - -集群类型:设置集群的类型。可选择共享集群、独立集群、独享集群。**(各集群对应的含义可见2.1.2 申请集群)**。 - -安全协议:非必填,请输入安全协议。 - -步骤3:点击【确认】,提交申请单。 - -![opapplycluster](./assets/opapplycluster.png) - -#### 修改集群 #### -**操作步骤** - -步骤1:点击集群列表“操作”栏中【修改】,打开修改弹框。 - -步骤2:仅可修改集群类型和安全协议两项。 - -步骤3:点击【确认】,完成修改。 - -![editcluster](./assets/editcluster.png) - -#### 暂停/开始集群监控 #### -**操作步骤** - -步骤1:点击集群列表“操作”栏中【暂停监控】。 - -步骤2:在气泡确认框中点击确认,可暂停对该集群对监控。 - -![stopclustermo](./assets/stopclustermo.png) - -步骤3:当集群监控停止后,可以通过点击集群列表“操作”栏中【开始监控】,打开对集群的监控。 - -![startclustermo](./assets/startclustermo.png) - -#### 删除集群 #### -**操作步骤** - -步骤1:点击集群列表“操作”栏中【删除】。 - -步骤2:在二次确认弹框中,会显示该物理集群上是否有逻辑集群。如果有,则无法直接删除,需要先将逻辑集群删除后,再删除该物理集群。 - -![deletcluster](./assets/deletcluster.png) - -步骤3:在集群的详情页,选择“逻辑集群信息”tab页,删除逻辑集群。完成后即可进行物理集群删除操作。 - -![logicclusterdele](./assets/logicclusterdele.png) - -#### 查看集群详情 #### - -集群详情包括:集群概览、Topic信息、Broker信息、消费组信息、Region信息、逻辑集群信息、Controller变更历史、限流信息。 - -##### 查看集群详情-集群概览 ##### - -**操作步骤** - -步骤1:点击集群名称,进入详情页。 - -步骤2:点击“集群概览”,查看集群基本信息:集群名称、集群类型、接入时间、kafka版本、Bootstrap Severs、Zookeeper。 - -以及实时流量、历史流量数据的图表。 - -![clusterinfobrief](./assets/clusterinfobrief.png) - -步骤3:点击【指标说明】可以查看所有指标的含义。 - -##### 查看集群详情-Topic信息 ##### - -**操作步骤** - -步骤1:点击集群名称,进入详情页。 - -步骤2:点击“Topic信息”,查看Topic列表。并展示字段:Topic名称、QPS、Bytesin、所属应用、保存时间、更新时间、Topic说明、操作。 - -部分列支持表头排序或筛选。 - -步骤3:点击Topic名称可以查看该Topic详情;并可以对其进行编辑、扩分区、删除操作。 - -![clustertopicop](./assets/clustertopicop.png) - -##### 
查看集群详情-Broker信息 ##### - -**操作步骤** - -步骤1:点击集群名称,进入详情页。 - -步骤2:点击“Broker信息”,显示各Broker峰值使用率情况,和副本同步的情况,以饼图的形式展示分布。 - -查看Broker列表。并展示字段:ID、主机、Port、JMX Port、启动时间、Bytes in、Bytes out、峰值状态、副本状态、regionName、状态、操作。 -部分列支持表头排序或筛选。 - -![clusterbrokerop](./assets/clusterbrokerop.png) - -##### 查看集群详情-Broker信息-Leader Rebalance ##### - -**操作步骤** - -步骤1:点击集群名称,进入详情页。 - -步骤2:在broker列表上方点击【Leader Rebalance】,打开弹框。 - -步骤3:集群名称默认为当前集群,下拉选择要执行的Broker。点击确认,完成操作。 - -![LeaderRebalance](./assets/LeaderRebalance.png) - -##### 查看集群详情-Broker信息-Broker详情 ##### - -**操作步骤** - -步骤1:点击集群名称,进入详情页。点击Broker信息tab页。 - -步骤2:点击ID名称,可跳转至Broker详情页。 - -步骤3:可查看基本信息、监控信息、Topic信息、磁盘信息、partition信息、Topic分析。具体操作介绍可见“1.2 查看broker的详细信息” - -![clusterbrokerdetail](./assets/clusterbrokerdetail.png) - -![clusterbrokermo](./assets/clusterbrokermo.png) - -![brokerraskop](./assets/brokerraskop.png) - -![brokerpartitionop](./assets/brokerpartitionop.png) - -![brokertopicana](./assets/brokertopicana.png) - -##### 查看集群详情-消费组信息 ##### - -**操作步骤** - -步骤1:点击集群名称,进入详情页。 - -步骤2:点击“消费组信息”,查看消费组列表。 - -步骤3:可查看消费组名称、location、操作。点击详情,可查看消费的Topic有哪些。 - -![consumergroup](./assets/consumergroup.png) - -![consumertopic](./assets/consumertopic.png) - - -##### 查看集群详情-Region信息 ##### - -**操作步骤** - -步骤1:点击集群名称,进入详情页。 - -步骤2:点击“Region信息”,查看Region列表。 - -步骤3:可查看regionID、region名称、BrokerIdList、location、预估容量、实际流量、预估流量、修改时间、状态、备注、操作。 - -![brokerregion](./assets/brokerregion.png) - -##### 查看集群详情-新增Region ##### - -**操作步骤** - -步骤1:点击“新增Region”,打开新增region弹框。 - -步骤2:region名称:填写region名称。 - -集群名称:默认为当前集群名称。 - -Broker列表:选择broker。 - -状态:可选择正常、容量已满。 - -备注:可输入申请的原因。 - -![createregion](./assets/createregion.png) - -##### 查看集群详情-编辑Region ##### - -**操作步骤** - -步骤1:点击操作栏中的“编辑”,打开编辑region弹框。 - -步骤2:可编辑region名称、Broker列表、状态、备注。 - -![editregion](./assets/editregion.png) - -##### 查看集群详情-删除Region ##### - -**操作步骤** - -步骤1:点击操作栏中的“删除”,出现二次确认气泡框。 - -步骤2:点击【确认】即可完成删除。 - -![deleteregion](./assets/deleteregion.png) - -### 集群运维 ### -#### 迁移任务 ### - -**操作步骤** - -步骤1:点击“运维管控”>"集群运维">"迁移任务",可查看迁移任务列表 - -步骤2:展示迁移任务名称、创建时间、创建人、Topic数量、任务状态、进度、操作 - -![migrationtask](./assets/migrationtask.png) - -#### 新建迁移任务 ### - -**操作步骤** - -步骤1:点击“运维管控”>"集群运维">"迁移任务",可查看迁移任务列表 - -步骤2:点击【新建迁移任务】打开新建任务的弹框。 - -集群名称:下拉选择目标集群。 - -Topic名称:选择目标集群上的topic。 - -类型:选择region或者broker。 - -分区ID: - -计划开始时间:设置迁移的计划开始时间。 - -原本保存时间:默认24小时。 - -迁移保存时间: - -等等。点击【确认】,完成任务的创建。 -![createtask](./assets/createtask.png) - -#### 迁移任务详情 ### - -**操作步骤** - -步骤1:点击任务名称,可查看任务详情。 - -步骤2:展示了任务名称、创建时间、创建人、计划开始时间、完成时间、任务说明。以及涉及topic列表。 - -![migrationtaskdetail](./assets/migrationtaskdetail.png) - -#### 集群任务 ### - -**操作步骤** - -步骤1:点击“运维管控”>"集群运维">"集群任务",可查看集群任务列表 - -步骤2:展示迁移任务名称、创建时间、创建人、Topic数量、任务状态、进度、操作 - -![clustertask](./assets/clustertask.png) - -#### 新建集群任务 ### - -**操作步骤** - -步骤1:点击“运维管控”>"集群运维">"集群任务",可查看迁移任务列表 - -步骤2:点击【新建集群任务】打开新建集群任务的弹框。 - -集群名称:下拉选择目标集群。 - -任务类型:选择集群升级(按角色)、集群升级(按主机)、集群部署、集群回滚、集群扩容。 - -包版本:选择操作的包版本。 - -server配置:下拉选择server配置。 - -等等(根据以上选择情况的不同,会有不同参数的选择) - -主机列表:输入或粘贴主机列表,以回车键分割。 - -点击【确认】,完成任务的创建。 -![createclustertask](./assets/createclustertask.png) - -#### 集群任务详情 ### - -**操作步骤** - -步骤1:点击【详情】,可查看任务详情。 - -步骤2:展示了任务ID、集群ID、集群名称、创建时间、Kafka包、kafka包 MD5、操作人、server配置名、server配置 MD5。 - -以及升级主机列表、升级主机暂停点。 - -![clustertaskdetail](./assets/clustertaskdetail.png) - - -步骤3:点击【任务进度详情】tab,可切换查看任务进度详情。 - -![taskprogress](./assets/taskprogress.png) - -步骤4:点击列表内的【查看日志】,可查看该任务的详细日志。 - -![tasklog](./assets/tasklog.png) - -#### 版本管理 ### - -**操作步骤** - -步骤1:点击“运维管控”>"集群运维">"版本管理",可查看当前的版本列表。 - -步骤2:展示ID、集群名称、配置类型、文件名称、MD5、更新时间、更新人、备注、操作。 - 
-步骤3:对于配置类文件,点击“文件名称”列,可以查看到该配置的详情。 - - 集群名称是 * - -![Versionmanagement](./assets/Versionmanagement.png) - -#### 上传配置 ### - -**操作步骤** - -步骤1:点击【上传配置】,打开上传配置弹框。 - -步骤2:文件类型:选择上传的文件为Kafka压缩包,或KafkaServer配置。 - -上传:选择需上传的文件。 - -备注:输入备注信息,如输入上传的原因等。 - -步骤3:点击【确认】,完成上传。 - -![uploadversion](./assets/uploadversion.png) -### 平台管理 ### - -#### 应用管理-查看应用列表 #### -**操作步骤** - -步骤1:点击"运维管控">"平台管理">"应用管理"。 - -步骤2:显示平台应用列表,展示AppID、应用名称、应用描述、负责人、操作(修改、详情查看、申请下线)。 - -![appmanager](./assets/appmanager.png) - -#### 查看应用详情 #### -**操作步骤** - -步骤1:点击"运维管控">"平台管理">"应用管理"。 - -步骤2:显示平台应用列表,点击AppID名称,进入App详情页。 - -![appdetailop](./assets/appdetailop.png) -步骤3:点击【创建的Topic】可以查看到该应用创建的Topic列表。 - -步骤4:点击【有权限的Topic】,可以查看该应用有权限的Topic列表。 - -修改、申请下线等操作与用户侧-应用管理一致,可参见“1.4 应用管理” - - -#### 用户管理-用户列表 #### -**操作步骤** - -步骤1:点击"运维管控">"平台管理">"用户管理"。 - -步骤2:显示平台用户列表,展示用户名、角色权限、操作(修改、删除)。 - -![usersmanager](./assets/usersmanager.png) - -#### 用户管理-编辑用户 #### -**操作步骤** -步骤1:点击操作栏内的【修改】,打开修改弹窗。 - -步骤2:可以修改用户名、用户角色。 - -![edituser](./assets/edituser.png) - -#### 用户管理-添加用户 #### -**操作步骤** -步骤1:点击列表上方的【添加用户】,打开添加用户弹框。 - -步骤2:输入用户名称、选择用户角色,即可添加用户。 - -![createusers](./assets/createusers.png) - -#### 配置管理-配置列表 #### -**操作步骤** -步骤1:点击"运维管控">"平台管理">"配置管理"。 - -步骤2:列表展示配置键、配置值、修改时间、描述信息、操作(修改、删除)。部分列支持表头筛选和排序。 - -![configuremanager](./assets/configuremanager.png) - -#### 配置管理-编辑配置 #### - -**操作步骤** -步骤1:点击"运维管控">"平台管理">"配置管理"。 - -步骤2:点击列表操作栏中的【修改】按钮,打开修改弹框。可以修改配置键、配置值、备注信息。 - -![editconfigure](./assets/editconfigure.png) - -#### 配置管理-删除配置 #### - -**操作步骤** -步骤1:点击"运维管控">"平台管理">"配置管理"。 - -步骤2:点击列表操作栏中的【修改】按钮,打开修改弹框。可以修改配置键、配置值、备注信息。 - -![deleteconfigure](./assets/deleteconfigure.png) - -### 用户账单 ### - -#### 个人账单 #### - -**操作步骤** -步骤1:点击"运维管控">"账单管理">"个人账单"。 - -步骤2:展示个人账单列表,显示时间(精确到月)、用户名、Topic数量、时间、Quota、金额情况。 - -步骤3:可根据时间、用户名筛选查看账单。 - -![userbill](./assets/userbill.png) - -## 专家服务 ## - -### Topic分区热点 ### - -**操作步骤** -步骤1:点击"专家服务">"Topic分区热点">"分区热点Topic"。 - -步骤2:展示所有Topic列表,并可以根据物理集群、Topic名称进行筛选。 - -列表显示Topic名称、所在集群、分区热点状态(鼠标悬停可以查看具体的说明)、操作(数据迁移)。 - -### 数据迁移操作 ### - -**操作步骤** -步骤1:点击"专家服务">"Topic分区热点">"分区热点Topic"。 - -步骤2:可选择单个topic、或批量选中多个topic进行数据迁移操作。如图。 - -![hotpointtopic](./assets/hotpointtopic.png) - -步骤2:点击【数据迁移】之后出现迁移任务填写抽屉式弹框, - -![migrationtaskset](./assets/migrationtaskset.png) - -#### 迁移任务 #### - -**操作步骤** -步骤1:点击"专家服务">"Topic分区热点">"迁移任务"。 - -步骤2:可查看迁移任务列表,展示迁移任务名称、创建时间、创建人、Topic数量、任务状态、进度、操作。 - -![migrationtasklist](./assets/migrationtasklist.png) - -步骤2:点击任务名称可以查看到任务详情。 - -![migrationtaskdetail](./assets/migrationtaskdetail.png) - -#### Topic分区不足 #### - -**操作步骤** -步骤1:点击"专家服务">"Topic分区不足" - -步骤2:可查看Topic名称,所在集群、分区个数、分区平均流量、近三天峰值流量、操作。 - -步骤3:可通过选择集群、Topic名称查找目标Topic。 - -![topicunenough](./assets/topicunenough.png) - -#### Topic资源治理 #### - -**操作步骤** -步骤1:点击"专家服务">"Topic资源治理" - -步骤2:可查看Topic名称,所在集群、过期天数、发送连接、消费连接、创建人、状态、操作(通知用户)。 - -步骤3:可通过选择集群、Topic名称查找目标Topic。 - -步骤4:点击通知用户可以通知用户该Topic已过期。 - -![topicresource](./assets/topicresource.png) - -#### Topic异常诊断 #### - -**操作步骤** -步骤1:点击"专家服务">"异常诊断"。 - -步骤2:可查看Topic名称,所在独享集群、IOPS、流量。 - -步骤3:可通过时间筛选,查看时间范围内的异常topic。 - -![errordiagnosis](./assets/errordiagnosis.png) - diff --git a/docs/user_guide/新旧对比手册.md b/docs/user_guide/新旧对比手册.md new file mode 100644 index 00000000..55829033 --- /dev/null +++ b/docs/user_guide/新旧对比手册.md @@ -0,0 +1,92 @@ +## 4.2、新旧版本对比 + +### 4.2.1、全新的设计理念 + +- 在 0 侵入、0 门槛的前提下提供直观 GUI 用于管理和观测 Apache Kafka®,帮助用户降低 Kafka CLI 操作门槛,轻松实现对原生 Kafka 集群的可管、可见、可掌控,提升 Kafka 使用体验和降低管理成本。 +- 支持海量集群一键接入,无需任何改造,即可实现集群深度纳管,真正的 0 侵入、插件化系统设计,覆盖 0.10.x-3.x.x 众多 Kafka 版本无缝纳管。 + +### 4.2.2、产品名称&LOGO + +- 
4.2.2.1、Know Streaming V3.0 + + - 名称:Know Streaming + - Logo:![text](http://img-ys011.didistatic.com/static/dc2img/do1_Y95tuWtZug7kcmAG1JiN) + +- 4.2.2.2、Logi-KM V2.x + + - 名称:Logi-KM + - Logo:![text](http://img-ys011.didistatic.com/static/dc2img/do1_C2DmjkMfqkZFkrMJeYkr) + +### 4.2.3、功能架构 + +- 4.2.3.1、Know Streaming V3.0 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_VQD9ke5jewpjCIWamUKV) + +- 4.2.3.2、Logi-KM V2.x + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_F211q5lVCXQCXQNzWalu) + +### 4.2.4、功能变更 + +- 多集群管理 + + - 增加健康监测体系、关键组件&指标 GUI 展示 + - 增加 2.8.x 以上 Kafka 集群接入,覆盖 0.10.x-3.x + - 删除逻辑集群、共享集群、Region 概念 + +- Cluster 管理 + + - 增加集群概览信息、集群配置变更记录 + - 增加 Cluster 健康分,健康检查规则支持自定义配置 + - 增加 Cluster 关键指标统计和 GUI 展示,支持自定义配置 + - 增加 Cluster 层 I/O、Disk 的 Load Reblance 功能,支持定时均衡任务(企业版) + - 删除限流、鉴权功能 + - 删除 APPID 概念 + +- Broker 管理 + + - 增加 Broker 健康分 + - 增加 Broker 关键指标统计和 GUI 展示,支持自定义配置 + - 增加 Broker 参数配置功能,需重启生效 + - 增加 Controller 变更记录 + - 增加 Broker Datalogs 记录 + - 删除 Leader Rebalance 功能 + - 删除 Broker 优先副本选举 + +- Topic 管理 + + - 增加 Topic 健康分 + - 增加 Topic 关键指标统计和 GUI 展示,支持自定义配置 + - 增加 Topic 参数配置功能,可实时生效 + - 增加 Topic 批量迁移、Topic 批量扩缩副本功能 + - 增加查看系统 Topic 功能 + - 优化 Partition 分布的 GUI 展示 + - 优化 Topic Message 数据采样 + - 删除 Topic 过期概念 + - 删除 Topic 申请配额功能 + +- Consumer 管理 + + - 优化了 ConsumerGroup 展示形式,增加 Consumer Lag 的 GUI 展示 + +- ACL 管理 + + - 增加原生 ACL GUI 配置功能,可配置生产、消费、自定义多种组合权限 + - 增加 KafkaUser 功能,可自定义新增 KafkaUser + +- 消息测试(企业版) + + - 增加生产者消息模拟器,支持 Data、Flow、Header、Options 自定义配置(企业版) + - 增加消费者消息模拟器,支持 Data、Flow、Header、Options 自定义配置(企业版) + +- Job + + - 优化 Job 模块,支持任务进度管理 + +- 系统管理 + + - 优化用户、角色管理体系,支持自定义角色配置页面及操作权限 + - 优化审计日志信息 + - 删除多租户体系 + - 删除工单流程 \ No newline at end of file diff --git a/docs/user_guide/用户使用手册.md b/docs/user_guide/用户使用手册.md new file mode 100644 index 00000000..189b5869 --- /dev/null +++ b/docs/user_guide/用户使用手册.md @@ -0,0 +1,858 @@ + +## 5.1、简介 + +Know Streaming 脱胎于众多互联网内部多年的 Kafka 运营实践经验,是面向 Kafka 用户、Kafka 运维人员打造的共享多租户 Kafka 管控平台。不会对 Apache Kafka 做侵入性改造,就可纳管 0.10.x-3.x 集群版本,帮助您提升集群管理水平;我们屏蔽了 Kafka 的复杂性,让普通运维人员都能成为 Kafka 专家。 + +## 5.2、产品功能架构 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_kBMhwsnLwh5zdFWemHYk) + +## 5.3、产品主要特点介绍 + +### 5.3.1、Kafka 命令行操作转化为 GUI,大大提高易用性 + +- 更多版本适配。支持 0.10.x-3.x.x 众多主流 Kafka 版本统一纳管 + +- 更多组件纳管。支持 Cluster、Broker、Topic、Message、Consumer、ALC 等组件 GUI 管理 + +- 更少成本投入。支持存量集群一键接入,无需任何改造,即可实现集群深度纳管,真正的 0 侵入、插件化系统设计 + +### 5.3.2、指标监控及可视化 + +- 集群配置可视化。通过提供管理界面,便于用户集中查看和控制集群参数配置、Topic、Message... 
+ +- 核心指标趋势分析。通过实时采集集群关键指标,提供直观图表便于用户订阅跟踪集群健康度 + +- 监控告警生态集成。通过基于专家规则和算法的智能警报,确保集群可用性,保持集群平稳运行 + +### 5.3.3、高频的问题和操作沉淀形成特有的专家服务 + +- 分区迁移&扩容。通过提供 Topic 热点迁移和弹性扩容,有效提升集群可用性和稳定性 + +- 集群资源治理。通过分析 Topic 流量和请求,对无效 Topic 定期跟踪,提升集群资源使用率 + +## 5.4、用户体验路径 + +下面是用户第一次使用我们产品的典型体验路径: +![text](http://img-ys011.didistatic.com/static/dc2img/do1_Q1j4PCH6I8haPUFvxuRW) + +## 5.5、典型场景 + +### 5.5.1、用户管理 + +用户管理是提供给管理员进行人员管理和用户角色管理的功能模块,可以进行新增用户和分配角色。下面是一个典型的场景: +eg:团队加入了新成员,需要给这位成员分配一个使用系统的账号,需要以下几个步骤 + +- 步骤 1:点击“系统管理”>“用户管理”>“人员管理”>“新增用户”,输入“账号”、“实名”、“密码”,根据此账号所需要的权限,选择此账号所对应的角色。如果有满足权限的角色,则用户新增成功。如果没有满足权限的角色,则需要新增角色(步骤 2) +- 步骤 2:点击“系统管理”>“用户管理”>“角色管理”>“新增角色”。输入角色名称和描述,给此角色分配权限,点击“确定”,角色新增成功 + +- 步骤 3:根据此新增的角色,参考步骤 1,重新新增用户 + +- 步骤 4:此用户账号新增成功,可以进行登录产品使用 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_DcjHcE7uXnCMNVI1Pg5x) + +### 5.5.2、接入集群 + +- 步骤 1:点击“多集群管理”>“接入集群” + +- 步骤 2:填写相关集群信息 + +- 集群名称:支持中英文、下划线、短划线(-),最长 128 字符。平台内不能重复 + - Bootstrap Servers:输入 Bootstrap Servers 地址,不限制长度和输入内容。例如 192.168.1.1:9092,192.168.1.2:9092,192.168.1.3:9092,输入完成之后会进行连接测试,测试完成之后会给出测试结果连接成功 or 连接失败(以及失败的原因)。【备注:根据填写的 Bootstrap Servers 地址自动获取 zk 信息、mertics 信息、version 信息。若能获取成功,则自动展示。当 zk 已填写时,再次修改 bootstrap server 地址时就不再重新获取 zk 信息,按照用户维护的 zk 信息为准】 + - Zookeeper:输入 zookeeper 地址,例如:192.168.0.1:2181,192.168.0.2:2181,192.168.0.2:2181/ks-kafka,输入完成之后会进行连接测试,测试完成之后会给出测试结果连接成功 or 连接失败(以及失败的原因)【备注:根据填写的 zk 地址自动获取后续的 mertics、version 信息。若能获取成功,则自动展示】 + - Metrics 选填:JMX Port,输入 JMX 端口号;MaxConn,输入服务端最大允许的连接数 + - Security:若集群有账号密码,则输入账号密码 + - Version:下拉选择所支持的 kafka 版本,如果没有匹配则可以选择相近版本 + - 集群配置选填:输入用户创建 kafka 客户端进行信息获取的相关配置 + - 集群描述:输入集群的描述,最多 200 字符 + ![text](http://img-ys011.didistatic.com/static/dc2img/do1_rIqEziDtPjjt3awjn5FF) + +### 5.5.3、新增 Topic + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“新增 Topic”按钮>“创建 Topic“抽屉 + +- 步骤 2:输入“Topic 名称(不能重复)”、“Topic 描述”、“分区数”、“副本数”、“数据保存时间”、“清理策略(删除或压缩)” + +- 步骤 3:展开“更多配置”可以打开高级配置选项,根据自己需要输入相应配置参数 + +- 步骤 4:点击“确定”,创建 Topic 完成 + ![text](http://img-ys011.didistatic.com/static/dc2img/do1_ytcAlCmvJnGIWGpB7Usa) + +### 5.5.4、Topic 扩分区 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“Topic 列表“>操作项”扩分区“>“扩分区”抽屉 + +- 步骤 2:扩分区抽屉展示内容为“流量的趋势图”、“当前分区数及支持的最低消息写入速率”、“扩分区后支持的最低消息写入速率” + +- 步骤 3:输入所需的分区总数,自动计算出扩分区后支持的最低消息写入速率 + +- 步骤 4:点击确定,扩分区完成 + ![text](http://img-ys011.didistatic.com/static/dc2img/do1_XKyp5dNgLfZv1lYWnfcd) + +### 5.5.5、Topic 批量扩缩副本 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“批量操作下拉“>“批量扩缩副本“>“批量扩缩容”抽屉 + +- 步骤 2:选择所需要进行扩缩容的 Topic,可多选,所选择的 Topic 出现在下方 Topic 列表中 + +- 步骤 3:Topic 列表展示 Topic“近三天平均流量”、“近三天峰值流量及时间”、“Partition 数”、”当前副本数“、“新副本数” + +- 步骤 4:扩容时,选择目标节点,新增的副本会在选择的目标节点上;缩容时不需要选择目标节点,自动删除最后一个(或几个)副本 + +- 步骤 5:输入迁移任务配置参数,包含限流值和任务执行时间 + +- 步骤 6:输入任务描述 + +- 步骤 7:点击“确定”,创建 Topic 扩缩副本任务 + +- 步骤 8:去“Job”模块的 Job 列表查看创建的任务,如果已经执行则可以查看执行进度;如果未开始执行则可以编辑任务 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_zdlmdGupUvnbVuPWYWxM) + +### 5.5.6、Topic 批量迁移 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“批量操作下拉“>“批量迁移“>“批量迁移”抽屉 + +- 步骤 2:选择所需要进行迁移的 Topic,可多选,所选择的 Topic 出现在下方 Topic 列表中 + +- 步骤 3:选择所需要迁移的 partition 和迁移数据的时间范围 + +- 步骤 4:选择目标节点(节点数必须不小于最大副本数) + +- 步骤 5:点击“预览任务计划”,打开“任务计划”二次抽屉,可对目标 Broker ID 进行编辑 + +- 步骤 6:输入迁移任务配置参数,包含限流值和任务执行时间 + +- 步骤 7:输入任务描述 + +- 步骤 8:点击“确定”,创建 Topic 迁移任务 + +- 步骤 9:去“Job”模块的 Job 列表查看创建的任务,如果已经执行则可以查看执行进度;如果未开始执行则可以编辑任务 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_lHigQC1HKQX5MqgTxFnd) + +### 5.5.7、设置 Cluster 健康检查规则 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”>“集群健康状态旁边 icon”>“健康度设置抽屉” + +- 步骤 2:健康度设置抽屉展示出了检查项和其对应的权重,可以修改检查项的检查规则 + +- 步骤 
3:检查规则可配置,分别为 + + - Cluster:集群 controller 数不等于 1(数字不可配置)不通过 + - Broker:RequestQueueSize 大于等于 10(默认为 10,可配置数字)不通过 + - Broker:NetworkProcessorAvgIdlePercent 的 Idle 小于等于 0.8%(默认为 0.8%,可配置数字)不通过 + - Topic:无 leader 的 Topic 数量,大于等于 1(默认为 1,数字可配置)不通过 + - Topic:Topic 在 10(默认为 10,数字可配置)个周期内 8(默认为 8,数字可配置)个周期内处于未同步的状态则不通过 + - ConsumerGroup:Group 在 10(默认为 10,数字可配置)个周期内进行 8(默认为 8,数字可配置)次 re-balance 不通过 + +- 步骤 4:设置完成后,点击“确认”,健康检查规则设置成功 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_ze9D1umrndrNT68x6xBP) + +### 5.5.8、图表指标筛选 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”>“指标筛选 icon”>“指标筛选抽屉” + +- 步骤 2:指标筛选抽屉展示信息为以下几类“Health”、“Cluster”、“Broker”、“Consumer”、“Security”、“Job” + +- 步骤 3:默认勾选比较重要的指标进行展示。根据需要选中/取消选中相应指标,点击”确认“,指标筛选成功,展示的图表随之变化 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_JAv7qDETRl27LutfmCjT) + +### 5.5.9、编辑 Broker 配置 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Brokers”>“Broker ID”>“Configuration”TAB>“编辑”按钮 + +- 步骤 2:输入配置项的新配置内容 + +- 步骤 3:(选填)点击“应用于全部 Broker”,将此配置项的修改应用于全部的 Broker + +- 步骤 4:点击“确认”,Broker 配置修改成功 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_6p352iefjkPPkhhJXjIC) + +### 5.5.10、重置 consumer Offset + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Consumer”>“Consumer Group”名称>“Consumer Group 详情”抽屉>“重置 Offset”按钮>“重置 Offset”抽屉 + +- 步骤 2:选择重置 Offset 的类型,可“重置到指定时间”或“重置分区” + +- 步骤 3:重置到指定时间,可选择“最新 Offset”或“自定义时间” + +- 步骤 4:重置分区,可选择 partition 和其重置的 offset + +- 步骤 5:点击“确认”,重置 Offset 开始执行 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_4GbglTfSNqSuqYMyXV37) + +### 5.5.11、新增 ACL + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Security”>“Users”>“新增 ACL” + +- 步骤 2:输入 ACL 配置参数 + + - ACL 用途:生产权限、消费权限、自定义权限 + - 生产权限时:可选择应用于所有 Kafka User 或者特定 Kafka User;可选择应用于所有 Topic 或者特定 Topic + - 消费权限时:可选择应用于所有 Kafka User 或者特定 Kafka User;可选择应用于所有 Topic 或者特定 Topic;可选择应用于所有 Consumer Group 或者特定 Consumer Group + +- 步骤 3:点击“确定”,新增 ACL 成功 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_sF59VTfrpyfAG5kJVZ62) + +## 5.6、页面级别详细功能介绍 + +### 5.6.1、登录/退出登录 + +- 登录:输入账号密码,点击登录 + +- 退出登录:鼠标悬停右上角“头像”或者“用户名”,出现小弹窗“登出”,点击“登出”,退出登录 + +### 5.6.2、系统管理 + +用户登录完成之后,点击页面右上角【系统管理】按钮,切换到系统管理的视角,可以进行配置管理、用户管理、审计日志查看。 +![text](http://img-ys011.didistatic.com/static/dc2img/do1_Grm5K5USIPgXa4w2TRQI) + +#### 5.6.2.1、配置管理 + +配置管理是提供给管理员一个快速配置配置文件的能力,所配置的配置文件将会在对应模块生效。 + +#### 5.6.2.2、查看配置列表 + +- 步骤 1:点击”系统管理“>“配置管理” + +- 步骤 2:列表展示配置所属模块、配置键、配置值、启用状态、更新时间、更新人。列表有操作项编辑、删除,可对配置模块、配置键、配置值、描述、启用状态进行配置,也可删除此条配置 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_gg8SMKKn9N6FrtFgJ2r8) + +#### 5.6.2.3、新增配置 + +- 步骤 1:点击“系统管理”>“配置管理”>“新增配置” + +- 步骤 2:模块:下拉选择所有可配置的模块;配置键:不限制输入内容,500 字以内;配置值:代码编辑器样式,不限内容不限长度;启用状态开关:可以启用/禁用此项配置 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_G8nF2wr1kpadinwRmzi3) + +#### 5.6.2.4、编辑配置 + +可对配置模块、配置键、配置值、描述、启用状态进行配置。 + +#### 5.6.2.5、用户管理 + +用户管理是提供给管理员进行人员管理和用户角色管理的功能模块,可以进行新增用户和分配角色。 + +#### 5.6.2.6、人员管理列表 + +- 步骤 1:点击“系统管理”>“用户管理”>“人员管理” + +- 步骤 2:人员管理列表展示用户角色、用户实名、用户分配的角色、更新时间、编辑操作。 + +- 步骤 3:列表支持”用户账号“、“用户实名”、“角色名”筛选。 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_12nBRBMQ1P5Eb6QrsCEh) + +#### 5.6.2.7、新增用户 + +- 步骤 1:点击“系统管理”>“用户管理”>“人员管理”>“新增用户” + +- 步骤 2:填写“用户账号”、“用户实名”、“用户密码”这些必填参数,可以对此账号分配已经存在的角色。 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_C8l8X2R9shjeGHZM7BAt) + +#### 5.6.2.8、编辑用户 + +- 步骤 1:点击“系统管理”>“用户管理”>“人员管理”>列表操作项“编辑” + +- 步骤 2:用户账号不可编辑;可以编辑“用户实名”,修改“用户密码”,重新分配“用户角色“ + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_l3c7dDFas3ryvvJA9YAB) + +#### 5.6.2.9、角色管理列表 + +- 步骤 1:点击“系统管理”>“用户管理”>“角色管理” + +- 步骤 2:角色列表展示信息为“角色 
ID”、“名称”、“描述”、“分配用户数”、“最后修改人”、“最后更新时间”、操作项“查看详情”、操作项”分配用户“ + +- 步骤 3:列表有筛选框,可对“角色名称”进行筛选 + +- 步骤 4:列表操作项,“查看详情”可查看到角色绑定的权限项,”分配用户“可对此项角色下绑定的用户进行增减 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_kS37Rxfci8JPKy2PzjBi) + +#### 5.6.2.10、新增角色 + +- 步骤 1:点击“系统管理”>“用户管理”>“角色管理”>“新增角色” + +- 步骤 2:输入“角色名称”(角色名称只能由中英文大小写、数字、下划线\_组成,长度限制在 3 ~ 128 字符)、“角色描述“(不能为空)、“分配权限“(至少需要分配一项权限),点击确认,新增角色成功添加到角色列表 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_BeXdSyjLnuCySWZffHM5) + +#### 5.6.2.11、审计日志 + +- 步骤 1:点击“系统管理”>“审计日志“ +- 步骤 2:审计日志包含所有对于系统的操作记录,操作记录列表展示信息为下 + + - “模块”:操作对象所属的功能模块 + - “操作对象”:具体哪一个集群、任务 ID、topic、broker、角色等 + - “行为”:操作记录的行为,包含“新增”、“替换”、“读取”、“禁用”、“修改”、“删除”、“编辑”等 + - “操作内容”:具体操作的内容是什么 + - “操作时间”:操作发生的时间 + - “操作人”:此项操作所属的用户 + +- 步骤 3:操作记录列表可以对“模块“、”操作对象“、“操作内容”、”操作时间“进行筛选 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_x9Rlj4uzgAvNJpf2k7xk) + +### 5.6.3、多集群管理 + +#### 5.6.3.1、多集群列表 + +- 步骤 1:点击顶部导航栏“多集群管理” + +- 步骤 2:多集群管理页面包含的信息为:”集群信息总览“、“集群列表”、“列表筛选项”、“接入集群” + +- 步骤 3:集群列表筛选项为 + + - 集群信息总览:cluster 总数、live 数、down 数 + - 版本筛选:包含所有存在的集群版本 + - 健康分筛选:筛选项为 0、10、20、30、40、50、60、70、80、90、100 + - live、down 筛选:多选 + - 下拉框筛选排序,选项维度为“接入时间”、“健康分“、”Messages“、”MessageSize“、”BytesIn“、”BytesOut“、”Brokers“;可对这些维度进行“升序”、“降序”排序 + +- 步骤 4:每个卡片代表一个集群,其所展示的集群概览信息包括“健康分及健康检查项通过数”、“broker 数量”、“ZK 数量”、“版本号”、“BytesIn 均衡状态”、“BytesOut 均衡状态”、“Disk 均衡状态”、”Messages“、“MessageSize”、“BytesIn”、“BytesOut”、“接入时间” + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_hSBbBKbP4Ussti1ExFFg) + +#### 5.6.3.2、接入集群 + +- 步骤 1:点击“多集群管理”>“接入集群” + +- 步骤 2:填写相关集群信息 + - 集群名称:支持中英文、下划线、短划线(-),最长 128 字符。平台内不能重复 + - Bootstrap Servers:输入 Bootstrap Servers 地址,不限制长度和输入内容。例如 192.168.1.1:9092,192.168.1.2:9092,192.168.1.3:9092,输入完成之后会进行连接测试,测试完成之后会给出测试结果连接成功 or 连接失败(以及失败的原因)。【备注:根据填写的 Bootstrap Servers 地址自动获取 zk 信息、mertics 信息、version 信息。若能获取成功,则自动展示。当 zk 已填写时,再次修改 bootstrap server 地址时就不再重新获取 zk 信息,按照用户维护的 zk 信息为准】 + - Zookeeper:输入 zookeeper 地址,例如:192.168.0.1:2181,192.168.0.2:2181,192.168.0.2:2181/ks-kafka,输入完成之后会进行连接测试,测试完成之后会给出测试结果连接成功 or 连接失败(以及失败的原因)【备注:根据填写的 zk 地址自动获取后续的 mertics、version 信息。若能获取成功,则自动展示】 + - Metrics 选填:JMX Port,输入 JMX 端口号;MaxConn,输入服务端最大允许的连接数 + - Version:下拉选择所支持的 kafka 版本,如果没有匹配则可以选择相近版本 + - 集群配置选填:输入用户创建 kafka 客户端进行信息获取的相关配置 + - 集群描述:输入集群的描述,最多 200 字符 + ![text](http://img-ys011.didistatic.com/static/dc2img/do1_VrDZUw6nQ4MQJHzGgQiB) + +#### 5.6.3.3、删除集群 + +- 步骤 1:点击“多集群管理”>鼠标悬浮集群卡片>点击卡片右上角“删除 icon”>打开“删除弹窗” + +- 步骤 2:在删除弹窗中的“集群名称”输入框,输入所要删除集群的集群名称,点击“删除”,成功删除集群,解除平台的纳管关系(集群资源不会删除) + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_9mlrTis1Nlp9ixNKzgqf) + +### 5.6.4、Cluster 管理 + +#### 5.6.4.1、Cluster Overview + +- 步骤 1:点击“多集群管理”>“集群卡片”>进入单集群管理界面 + +- 步骤 2:左侧导航栏 + + - 一级导航:Cluster;二级导航:Overview、Load Rebalance + - 一级导航:Broker;二级导航:Overview、Brokers、Controller + - 一级导航:Topic;二级导航:Overview、Topics + - 一级导航:Consumer + - 一级导航:Testing;二级导航:Produce、Consume + - 一级导航:Security;二级导航:ACLs、Users + - 一级导航:Job + ![text](http://img-ys011.didistatic.com/static/dc2img/do1_Gl1xkQB9qZNTe7wZ3tSN) + +#### 5.6.4.2、查看 Cluster 概览信息 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview” + +- 步骤 2:cluster 概览信息包括以下内容 + + - 集群健康分,健康检查通过项 + - Cluster 信息:包含名称、版本、均衡状态 + - Broker 信息:Broker 总数、controller 信息、similar config 信息 + - Topic 信息:Topic 总数、No Leader、“集群卡片”>“Cluster”>“Overview”>“集群健康状态旁边 icon”>“健康度设置抽屉” + +- 步骤 2:健康度设置抽屉展示出了检查项和其对应的权重,可以修改检查项的检查规则 + +- 步骤 3:检查规则可配置,分别为 + + - Cluster:集群 controller 数不等于 1(数字不可配置)不通过 + - Broker:RequestQueueSize 大于等于 10(默认为 10,可配置数字)不通过 + - Broker:NetworkProcessorAvgIdlePercent 的 Idle 
小于等于 0.8%(默认为 0.8%,可配置数字)不通过 + - Topic:无 leader 的 Topic 数量,大于等于 1(默认为 1,数字可配置)不通过 + - Topic:Topic 在 10(默认为 10,数字可配置)个周期内 8(默认为 8,数字可配置)个周期内处于未同步的状态 + - ConsumerGroup:Group 在 10(默认为 10,数字可配置)个周期内进行 8(默认为 8,数字可配置)次 re-balance 不通过 + +- 步骤 4:设置完成后,点击“确认”,健康检查规则设置成功 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_iQmEeXMAHVlYmABqcpZH) + +#### 5.6.4.4、查看 Cluster 健康检查详情 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”>“集群健康状态旁边【查看详情】”>“健康检查详情抽屉” + +- 步骤 2:健康检查详情抽屉展示信息为:“检查模块”、“检查项”、“权重”、“得分”、“检查时间”、“检查结果是否通过”,若未通过会展示未通过的对象 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_gITinqPtFjxQGuHwTmn1) + +#### 5.6.4.5、编辑 Cluster 信息 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”>“Cluster 名称旁边编辑 icon”>“编辑集群抽屉” + +- 步骤 2:可编辑的信息包括“集群名称”、“Bootstrap Servers”、“Zookeeper”、“JMX Port”、“Maxconn(最大连接数)”、“Security(认证措施)”、“Version(版本号)”、“集群配置”、“集群描述” + +- 步骤 3:点击“确定”,成功编辑集群信息 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_YAFls3sXy2daQiHSwgw6) + +#### 5.6.4.6、图表指标筛选 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”>“指标筛选 icon”>“指标筛选抽屉” + +- 步骤 2:指标筛选抽屉展示信息为以下几类“Health”、“Cluster”、“Broker”、“Consumer”、“Security”、“Job” + +- 步骤 3:默认勾选比较重要的指标进行展示。根据需要选中/取消选中相应指标,点击”确认“,指标筛选成功,展示的图表随之变化 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_JAv7qDETRl27LutfmCjT) + +#### 5.6.4.7、图表时间筛选 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”>“时间选择下拉框”>“时间选择弹窗” + +- 步骤 2:选择时间“最近 15 分钟”、“最近 1 小时”、“最近 6 小时”、“最近 12 小时”、“最近 1 天”,也可以自定义时间段范围 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_WGprHmq3XD3hiQHIFjTl) + +#### 5.6.4.8、查看集群历史变更记录 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Overview”>“历史变更记录”区域 + +- 步骤 2:历史变更记录区域展示了历史的配置变更,每条记录可展开收起。包含“配置对象”、“变更时间”、“变更内容”、“配置类型” + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_JTHd4fE6fUYDLJQjqLwY) + +### 5.6.5、Load Rebalance(企业版) + +#### 5.6.5.1、查看 Load Rebalance 概览信息 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Load Rebalance” + +- 步骤 2:Load Rebalance 概览信息包含“均衡状态卡片”、“Disk 信息卡片”、“BytesIn 信息卡片”、“BytesOut 信息卡片”、“Broker 均衡状态列表” + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_7cDf2bidHiU3UWpBktGd) + +#### 5.6.5.2、设置集群规格 + +提供对集群的每个节点的 Disk、BytesIn、BytesOut 的规格进行设置的功能 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Load Rebalance”>“State 卡片 icon“>”设置集群规格抽屉“ + +- 步骤 2:穿梭框左侧展示集群中的待选节点,穿梭框右侧展示已经选中的节点,选择自己所需设置规格的节点 + +- 步骤 3:设置“单机核数”、“单机磁盘”、“单机网络”,点击确定,完成设置 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_EyR6J7ZcjT2gaDHbpL2S) + +#### 5.6.5.3、均衡状态列表筛选 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Load Rebalance”>“筛选列表”按钮>筛选弹窗 + +- 步骤 2:可选择“Disk”、“BytesIn”、“BytesOut”三种维度,其各自对应“已均衡”、“未均衡”两种状态,可以组合进行筛选 + +- 步骤 3:点击“确认”,执行筛选操作 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_T9diCUV4VphFUuUd5kX3) + +#### 5.6.5.4、立即均衡 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Load Rebalance”>“立即均衡”按钮>“立即均衡抽屉” + +- 步骤 2:配置均衡策略 + + - 指标计算周期:默认近 10mins,可选择 + - 均衡维度:默认 Disk、BytesIn、BytesOut,可选择 + - 均衡区间:在表格内自定义配置均衡区间范围(单位:%,大于 0,小于 100) + - Topic 黑名单:选择 topic 黑名单。通过穿梭框(支持模糊选择)选出目标 topic(本次均衡,略过已选的 topic) + +- 步骤 3:配置运行参数 + + - 吞吐量优先:并行度 0(无限制), 策略是优先执行大小最大副本 + - 稳定性优先: 并行度 1 ,策略是优先执行大小最小副本 + - 自定义:可以自由设置并行度和优先执行的副本策略 + - 限流值:流量最大值,0-99999 自定义 + +- 步骤 4:点击“预览计划”按钮,打开执行计划弹窗。可以看到计划概览信息、计划明细信息 + +- 步骤 5:点击“预览计划弹窗”的“执行文件”,可以下载 json 格式的执行文件 + +- 步骤 6:点击“预览计划弹窗”的“立即均衡”按钮,开始执行均衡任务 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_I5BzRuHC9m74uE3mrjy8) + +#### 5.6.5.5、周期均衡 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Cluster”>“Load Rebalance”>“周期均衡”按钮>“周期均衡抽屉” + +- 步骤 2:配置均衡策略 + + - 指标计算周期:默认近 10mins,可选择 + - 均衡维度:默认 Disk、BytesIn、BytesOut,可选择 + - 均衡区间:在表格内自定义配置均衡区间范围(单位:%,大于 0,小于 
100) + - Topic 黑名单:选择 topic 黑名单。通过穿梭框(支持模糊选择)选出目标 topic(本次均衡,略过已选的 topic) + +- 步骤 3:配置运行参数 + + - 任务并行度:每个节点同时迁移的副本数量 + - 任务周期:时间选择器,自定义选择运行周期 + - 稳定性优先: 并行度 1 ,策略是优先执行大小最小副本 + - 自定义:可以自由设置并行度和优先执行的副本策略 + - 限流值:流量最大值,0-99999 自定义 + +- 步骤 4:点击“预览计划”按钮,打开执行计划弹窗。可以看到计划概览信息、计划明细信息 + +- 步骤 5:点击“预览计划弹窗”的“执行文件”,可以下载 json 格式的执行文件 + +- 步骤 6:点击“预览计划弹窗”的“立即均衡”按钮,开始执行均衡任务 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_QGfzkR6CM8qICuFqTAuI) + +### 5.6.6、Broker + +#### 5.6.6.1、查看 Broker 概览信息 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Broker”>“Overview” + +- 步骤 2:Broker 概览信息包括以下内容 + + - 集群健康分,健康检查通过项 + - Broker 信息:包含名称、版本、均衡状态 + - Broker 信息:Broker 总数、controller 信息、similar config 信息 + - Topic 信息:Topic 总数、No Leader、“集群卡片”>“Brokers”>“Broker ID”>“Configuration”TAB>“编辑”按钮 + +- 步骤 2:输入配置项的新配置内容 + +- 步骤 3:(选填)点击“应用于全部 Broker”,将此配置项的修改应用于全部的 Broker + +- 步骤 4:点击“确认”,Broker 配置修改成功 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_YVT9IXRcqh3bU8w642wR) + +#### 5.6.6.3、查看 Broker DataLogs + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Brokers”>“Broker ID”>“Data Logs”TAB>“编辑”按钮 + +- 步骤 2:Broker DataLogs 列表展示的信息为“Folder”、“topic”、“Partition”、“Offset Lag”、“Size” + +- 步骤 3:输入框输入”Topic Name“可以筛选结果 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_LfTeBEkwaLsX95Ep1ix3) + +#### 5.6.6.4、查看 Controller 列表 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Broker”>“Controller” + +- 步骤 2:Controller 列表展示的信息为“Change Time”、“Broker ID”、“Broker Host” + +- 步骤 3:输入框输入“Broker Host“可以筛选结果 + +- 步骤 4:点击 Broker ID 可以打开 Broker 详情,进行修改配置或者查看 DataLogs + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_PwqY9cZ1DbIpBRC2mJE9) + +### 5.6.7、Topic + +#### 5.6.7.1、查看 Topic 概览信息 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Overview” + +- 步骤 2:Topic 概览信息包括以下内容 + + - 集群健康分,健康检查通过项 + - Topics:Topic 总数 + - Partitions:Partition 总数 + - PartitionNoLeader:没有 leader 的 partition 个数 + - < Min ISR:同步副本数小于 Min ISR + - =Min ISR:同步副本数等于 Min ISR + - Topic 指标图表 + ![text](http://img-ys011.didistatic.com/static/dc2img/do1_LTYaGiXhE5bI3CAApWwx) + +#### 5.6.7.2、查看 Topic 健康检查详情 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Overview”>“集群健康状态旁边【查看详情】”>“健康检查详情抽屉” + +- 步骤 2:健康检查详情抽屉展示信息为:“检查项”、“权重”、“得分”、“检查时间”、“检查结果是否通过”,若未通过会展示未通过的对象 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_VFSrBI8XMpWPtW7vvAr3) + +#### 5.6.7.3、查看 Topic 列表 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics” + +- 步骤 2:Topic 列表展示内容为“TopicName”、“Partitions”、“Replications”、“健康分”、“BytesIn”、“BytesOut”、“MessageSize”、“保存时间”、“描述”、操作项”扩分区“、操作项”删除“ + +- 步骤 3:筛选框输入“TopicName”可以对列表进行筛选;点击“展示系统 Topic”开关,可以筛选系统 topic + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_kenpn9ijRb2DbPN7wrr1) + +#### 5.6.7.4、新增 Topic + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“新增 Topic”按钮>“创建 Topic“抽屉 + +- 步骤 2:输入“Topic 名称(不能重复)”、“Topic 描述”、“分区数”、“副本数”、“数据保存时间”、“清理策略(删除或压缩)” + +- 步骤 3:展开“更多配置”可以打开高级配置选项,根据自己需要输入相应配置参数 + +- 步骤 4:点击“确定”,创建 Topic 完成 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_ZsaKRRqT69Ugw5yCHpE7) + +#### 5.6.7.5、Topic 扩分区 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“Topic 列表“>操作项”扩分区“>“扩分区”抽屉 + +- 步骤 2:扩分区抽屉展示内容为“流量的趋势图”、“当前分区数及支持的最低消息写入速率”、“扩分区后支持的最低消息写入速率” + +- 步骤 3:输入所需的分区总数,自动计算出扩分区后支持的最低消息写入速率 + +- 步骤 4:点击确定,扩分区完成 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_ifCma3pKlUnGd3UXunNi) + +#### 5.6.7.6、删除 Topic + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“Topic 列表“>操作项”删除“>“删除 Topic”弹窗 + +- 步骤 2:输入“TopicName”进行二次确认 + +- 步骤 3:点击“删除”,删除 Topic 完成 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_xdP42WmnyaK9zZiMWM6s) + +#### 5.6.7.7、Topic 批量扩缩副本 + +- 步骤 
1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“批量操作下拉“>“批量扩缩副本“>“批量扩缩容”抽屉 + +- 步骤 2:选择所需要进行扩缩容的 Topic,可多选,所选择的 Topic 出现在下方 Topic 列表中 + +- 步骤 3:Topic 列表展示 Topic“近三天平均流量”、“近三天峰值流量及时间”、“Partition 数”、”当前副本数“、“新副本数” + +- 步骤 4:扩容时,选择目标节点,新增的副本会在选择的目标节点上;缩容时不需要选择目标节点,自动删除最后一个(或几个)副本 + +- 步骤 5:输入迁移任务配置参数,包含限流值和任务执行时间 + +- 步骤 6:输入任务描述 + +- 步骤 7:点击“确定”,执行 Topic 扩缩容任务 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_DNIdGs7Uym3yppmvGrBd) + +#### 5.6.7.8、Topic 批量迁移 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Topic”>“Topics”>“批量操作下拉“>“批量迁移“>“批量迁移”抽屉 + +- 步骤 2:选择所需要进行迁移的 Topic,可多选,所选择的 Topic 出现在下方 Topic 列表中 + +- 步骤 3:选择所需要迁移的 partition 和迁移数据的时间范围 + +- 步骤 4:选择目标节点(节点数必须不小于最大副本数) + +- 步骤 5:点击“预览任务计划”,打开“任务计划”二次抽屉,可对每个 partition 的目标 Broker ID 进行编辑,目标 broker 应该等于副本数 + +- 步骤 6:输入迁移任务配置参数,包含限流值和任务执行时间 + +- 步骤 7:输入任务描述 + +- 步骤 8:点击“确定”,执行 Topic 迁移任务 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_zIL8ytfUYGBbmalrgZqU) + +### 5.6.8、Consumer + +#### 5.6.8.1、Consumer Overview + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Consumer” + +- 步骤 2:Consumer 概览信息包括以下内容 + + - 集群健康分,健康检查通过项 + - Groups:Consumer Group 总数 + - GroupsActives:活跃的 Group 总数 + - GroupsEmptys:Empty 的 Group 总数 + - GroupRebalance:进行 Rebalance 的 Group 总数 + - GroupDeads:Dead 的 Group 总数 + - Consumer Group 列表 + +- 操作 3:输入“Consumer Group”、“Topic Name‘,可对列表进行筛选 + +- 操作 4:点击列表“Consumer Group”名称,可以查看 Comsuer Group 详情 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_34nlDsafIfgxmv12YlNE) + +#### 5.6.8.2、查看 Consumer 列表 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Consumer”>“Consumer Group”名称>“Consumer Group 详情”抽屉 + +- 步骤 2:Consumer Group 详情有列表视图和图表视图 + +- 操作 3:列表视图展示信息为 Consumer 列表,包含”Topic Partition“、”Member ID“、”Current Offset“、“Log End Offset”、”Lag“、”Host“、”Client ID“ + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_RwRJUPR3rqFFF2ez4n2L) + +#### 5.6.8.3、重置 Offset + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Consumer”>“Consumer Group”名称>“Consumer Group 详情”抽屉>“重置 Offset”按钮>“重置 Offset”抽屉 + +- 步骤 2:选择重置 Offset 的类型,可“重置到指定时间”或“重置分区” + +- 步骤 3:重置到指定时间,可选择“最新 Offset”或“自定义时间” + +- 步骤 4:重置分区,可选择 partition 和其重置的 offset + +- 步骤 5:点击“确认”,重置 Offset 开始执行 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_bflSMxUjzwR5Jq5TrHyH) + +### 5.6.9、Testing + +#### 5.6.9.1、生产测试 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Testing”>“Produce” + +- 步骤 2:生产配置 + + - Data:选择数据写入的 topic,输入写入数据的 key(暂只支持 string 格式),输入写入数据的 value(暂只支持 string 格式)。其中 key 和 value 可以随机生成 + - Flow:输入单次发送的消息数量,默认为 1,可以手动修改。选择手动生产模式,代表每次点击按钮【Run】执行生产;选择周期生产模式,需要填写运行总时间和运行时间间隔。 + - Header:输入 Header 的 key,value + - Options:选择 Froce Partition,代表消息仅发送到这些选择的 Partition。选择数据压缩格式。选择 Acks 参数,none 意思是消息发送了就认为发送成功;leader 意思是 leader 接收到消息(不管 follower 有没有同步成功)认为消息发送成功;all 意思是所有的 follower 消息同步成功认为是消息发送成功 + +- 步骤 3:点击按钮【Run】,生产测试开始,可以从右侧看到生产测试的信息 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_211Ds4VgLnb2AyAygZex) + +#### 5.6.9.2、消费测试 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Testing”>“Consume” + +- 步骤 2:消费配置 + + - Topic:选择数据从哪个 topic 进行消费 + - Start From:选择数据从什么地方开始消费,可以根据时间选择或者根据 Offset 进行选择 + - Until:选择消费截止到什么地方,可以根据时间或者 offset 或者消息数等进行选择 + - Filter:选择过滤器的规则。包含/不包含某【key,value】;等于/大于/小于多少条消息 + +- 步骤 3:点击按钮【Run】,消费测试开始,可以在右边看到消费的明细信息 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_vhmnjv61Ql3M3xIzHHiQ) + +### 5.6.10、Security + +#### 5.6.10.1、查看 ACL 概览信息 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Security”>“ACLs” + +- 步骤 2:ACL 概览信息包括以下内容 + + - Enable:是否可用 + - ACLs:ACL 总数 + - Users:User 总数 + - Topics:Topic 总数 + - Consumer Groups:Consumer Group 总数 + - ACL 列表 + ![text](http://img-ys011.didistatic.com/static/dc2img/do1_vE2GwXmBwlQCtE4HfhBz) + +#### 5.6.10.2、新增 ACl + +- 步骤 
1:点击“多集群管理”>“集群卡片”>“Security”>“Users”>“新增 ACL” + +- 步骤 2:输入 ACL 配置参数 + + - ACL 用途:生产权限、消费权限、自定义权限 + - 生产权限时:可选择应用于所有 Kafka User 或者特定 Kafka User;可选择应用于所有 Topic 或者特定 Topic + - 消费权限时:可选择应用于所有 Kafka User 或者特定 Kafka User;可选择应用于所有 Topic 或者特定 Topic;可选择应用于所有 Consumer Group 或者特定 Consumer Group + +- 步骤 3:点击“确定”,新增 ACL 成功 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_ygNmK5QIQcC8BsskMDy7) + +#### 5.6.10.3、查看 User 信息 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Security”>“ACLs” + +- 步骤 2:User 列表展示内容包括“Kafka User 名称”、“认证方式”、“passwprd”、操作项”修改密码“、”操作项“删除” + +- 步骤 3:筛选框输入“Kafka User”可筛选出列表中相关 Kafka User + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_Whkf1YVj95mtGgCEisgT) + +#### 5.6.10.4、新增 Kafka User + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Security”>“Users”>“新增 Kafka User” + +- 步骤 2:输入 Kafka User 名称、认证方式、密码 + +- 步骤 3:点击“确定”,新增 Kafka User 成功 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_qb1VrYK41sWrEqpRrXDr) + +### 5.6.11、Job + +#### 5.6.11.1、查看 Job 概览信息 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Job“ + +- 步骤 2:Job 概览信息包括以下内容 + + - Jobs:Job 总数 + - Doing:正在运行的 Job 总数 + - Prepare:准备运行的 Job 总数 + - Success:运行成功的 Job 总数 + - Fail:运行失败的 Job 总数 + - Job 列表 + ![text](http://img-ys011.didistatic.com/static/dc2img/do1_cRvE49MTQseHdgztu9cZ) + +#### 5.6.11.2、Job 查看进度 + +Doing 状态下的任务可以查看进度 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Job”>“Job”列表>操作项“查看进度”>“查看进度”抽屉 + +- 步骤 2: + + - 均衡任务:任务基本信息、均衡计划、任务执行明细信息 + - 扩缩副本:任务基本信息、任务执行明细信息、节点流量情况 + - Topic 迁移:任务基本信息、任务执行明细信息、节点流量情况 + ![text](http://img-ys011.didistatic.com/static/dc2img/do1_SLAkBrmpj5HU3zzv5iuc) + +#### 5.6.11.3、Job 编辑任务 + +Prepare 状态下的任务可以进行编辑 + +- 步骤 1:点击“多集群管理”>“集群卡片”>“Job”>“Job”列表>操作项“编辑” + +- 步骤 2:对任务执行的参数进行重新配置 + + - 集群均衡:可以对指标计算周期、均衡维度、topic 黑名单、运行配置等参数重新设置 + - Topic 迁移:可以对 topic 需要迁移的 partition、迁移数据的时间范围、目标 broker 节点、限流值、执行时间、描述等参数重新配置 + - topic 扩缩副本:可以对最终副本数、限流值、任务执行时间、描述等参数重新配置 + +- 步骤 3:点击“确定”,编辑任务成功 + +![text](http://img-ys011.didistatic.com/static/dc2img/do1_375fhB1FgpU7cIvbemki) diff --git a/docs/开源版与商业版特性对比.md b/docs/开源版与商业版特性对比.md deleted file mode 100644 index 853efcdb..00000000 --- a/docs/开源版与商业版特性对比.md +++ /dev/null @@ -1,55 +0,0 @@ - ---- - -![kafka-manager-logo](assets/images/common/logo_name.png) - -**一站式`Apache Kafka`集群指标监控与运维管控平台** - ---- - -**开源版、商业版对比** - -纲要:Logi-KafakManager的商业特性是强依赖于滴滴Kafka Gateway和滴滴kafka引擎。 -滴滴KafkaGateway主要负责:服务发现、安全管控(身份鉴权、生产消费鉴权等)、流量管控(应用配额等)等; -滴滴Kafka引擎主要负责:更丰富的监控指标(broker实时耗时、压缩指标、分区落盘等)、磁盘过载保护等 -备注:两个版本的产品功能页面是一样的。区别在于开源版未使用滴滴KafkaGateway(滴滴Kafka引擎),部分产品功能/功能不起作用或者页面无数据 - - - - -| 模块 |对比指标 |底层依赖 |开源版 |商业版 |备注 | -| --- | --- | --- | --- | --- | --- | -| 服务发现 | bootstrap地址变更对客户端无影响 | Gateway | | 是| | -| 安全管控 | 身份鉴权(appID+password) | Gateway | | 是 | | -| | 权限鉴权(Topic+appID) | Gateway | | 是 | | -| 指标监控 | Topic实时流量、历史流量 | | 是 | 是 | | -| | Broker实时耗时、历史耗时 | 引擎 | | 是 | | -| | 分区落盘 | 引擎 | | 是 | | -| | Topic里的数据压缩格式 | 引擎 | | 是 | | -| | 连接信息(Topic上有哪些应用连接) | Gateway | | 是| | -| | 流量管控(应用配额、生产消费限流等) | Gateway | | 是 | | -| 监控报警 | | | 是 | 是 | 监控指标上报,需对接外部监控系统(夜莺or企业内部监控系统) | -| Topic运维 | 申请分区 | | 是 | 是 | | -| | 调整配额 | Gateway | | | 是 | -| | Topic数据采样 | | 是 | 是 | | -| | 消费组管理(重置消费偏移等) | | 是 | 是 | | -| 集群管理 | 集群接入(部署) | | 是 | 是 | 需手动部署集群,或借助外部的自动化部署系统(夜莺)来部署系统 | -| | 集群指标监控 | | 是 | 是 | | -| | 按照Region、逻辑集群进行管理 | | 是 | 是 | | -| | Topic迁移 | | 是 | 是| | -| | 集群任务(集群版本管理、升级、扩缩容、回滚等) | | 是 | 是 | 需借助夜莺或自动化部署系统来实现 | -| | 磁盘过载保护 | 引擎 | | 是 | | -| | 指定broker作为优选controller | Gateway | | 是 | | -| Gateway管理 | 管理 Gateway的配置文件 | Gateway | | 是 | | -| 资源治理 | 专家服务(Topic分区热点、Topic分区不足、Topic长期未使用、Topic流量异常) | | 是 | 是 | 
开源版:具备问题发现与基础的问题解决能力;商业版:可在此基础上,融入滴滴内部的资源治理经验,提供更加专家化的问题解决方法 | -| | 健康分 | | 是 | 是 | 开源版:具备基础的健康分算法;商业版:可融入更多的指标统计,及定制化的健康分算法 | -| 运营管理 | 资源审批(应用申请、Topic申请、分区申请、配额申请、集群申请等,都需要通过工单进行审批) | |是 | 是 | | -| | 账单体系(根据流量核算Topic、集群费用) | | 是 | 是| | - - -**总结** - -滴滴LogiKM的商业特性体现在滴滴Kafka Gateway、滴滴Kafka引擎、内部沉淀出的资源治理专家经验、可定制化的健康分算法。 -从场景来看,滴滴Logi-KafkaManager的开源版本在kafka集群运维、的Topic管理、监控告警、资源治理等kafka核心场景都充分开源用户的使用需求并且有着出色的表现。而商业版相较于开源版在安全管控、流量管控、更丰富的指标监控、资源治理专家经验的具有明显提升,更加符合企业业务需求。 -除此之外,商业版还可根据企业实际需求对平台源码进行定制化改造,并提供运维保障,稳定性保障,运营保障等服务。 - diff --git a/kafka-manager-common/pom.xml b/kafka-manager-common/pom.xml deleted file mode 100644 index f784bf8d..00000000 --- a/kafka-manager-common/pom.xml +++ /dev/null @@ -1,116 +0,0 @@ - - - 4.0.0 - com.xiaojukeji.kafka - kafka-manager-common - ${kafka-manager.revision} - jar - - - kafka-manager - com.xiaojukeji.kafka - ${kafka-manager.revision} - - - - true - true - 1.8 - 1.8 - UTF-8 - UTF-8 - - - - - org.springframework - spring-web - - - - - org.apache.httpcomponents - httpcore - - - org.apache.httpcomponents - httpclient - - - commons-httpclient - commons-httpclient - - - org.apache.httpcomponents - httpmime - - - - - org.apache.curator - curator-recipes - - - org.apache.zookeeper - zookeeper - - - - - io.springfox - springfox-swagger2 - - - io.springfox - springfox-swagger-ui - - - io.swagger - swagger-annotations - - - - - com.fasterxml.jackson.core - jackson-databind - - - com.alibaba - fastjson - - - - commons-beanutils - commons-beanutils - - - org.apache.kafka - kafka_2.10 - - - commons-lang - commons-lang - - - org.apache.commons - commons-pool2 - - - javax.servlet - javax.servlet-api - - - - junit - junit - - - - org.projectlombok - lombok - compile - - - \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/annotations/ApiLevel.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/annotations/ApiLevel.java deleted file mode 100644 index 1a3e91d7..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/annotations/ApiLevel.java +++ /dev/null @@ -1,24 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.annotations; - -import com.xiaojukeji.kafka.manager.common.constant.ApiLevelContent; - -import java.lang.annotation.Documented; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -/** - * 接口分级限流 - * @author zengqiao - * @date 2020-07-20 - */ -@Target(ElementType.METHOD) -@Retention(RUNTIME) -@Documented -public @interface ApiLevel { - int level() default ApiLevelContent.LEVEL_DEFAULT_4; - - int rateLimit() default Integer.MAX_VALUE; -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/AccountRoleEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/AccountRoleEnum.java deleted file mode 100644 index 55412490..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/AccountRoleEnum.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * 用户角色 - * @author zengqiao_cn@163.com - * @date 19/4/15 - */ -public enum AccountRoleEnum { - UNKNOWN(-1, "unknown"), - - NORMAL(0, "normal"), - - RD(1, "rd"), - - OP(2, "op"); - - private Integer role; - - private String message; - - AccountRoleEnum(Integer role, String message) { - this.role = role; - this.message = message; 
- } - - public Integer getRole() { - return role; - } - - public String getMessage() { - return message; - } - - @Override - public String toString() { - return "AccountRoleEnum{" + - "role=" + role + - ", message='" + message + '\'' + - '}'; - } - - public static AccountRoleEnum getUserRoleEnum(Integer role) { - for (AccountRoleEnum elem: AccountRoleEnum.values()) { - if (elem.role.equals(role)) { - return elem; - } - } - return AccountRoleEnum.UNKNOWN; - } - - public static AccountRoleEnum getUserRoleEnum(String roleName) { - for (AccountRoleEnum elem: AccountRoleEnum.values()) { - if (elem.message.equalsIgnoreCase(roleName)) { - return elem; - } - } - return AccountRoleEnum.UNKNOWN; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/ClusterComboEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/ClusterComboEnum.java deleted file mode 100644 index 06f6b91b..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/ClusterComboEnum.java +++ /dev/null @@ -1,37 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * @author zengqiao - * @date 20/4/21 - */ -public enum ClusterComboEnum { - BYTES_IN_200(200*1024*1024, "200MB/s"), - BYTES_IN_400(400*1024*1024, "400MB/s"), - BYTES_IN_600(600*1024*1024, "600MB/s"), - ; - - private Integer code; - - private String message; - - ClusterComboEnum(Integer code, String message) { - this.code = code; - this.message = message; - } - - public Integer getCode() { - return code; - } - - public String getMessage() { - return message; - } - - @Override - public String toString() { - return "ClusterComboEnum{" + - "code=" + code + - ", message='" + message + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/ClusterModeEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/ClusterModeEnum.java deleted file mode 100644 index 199cc7cd..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/ClusterModeEnum.java +++ /dev/null @@ -1,48 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * 集群模式 - * @author zengqiao - * @date 20/4/1 - */ -public enum ClusterModeEnum { - /** - * 共享模式 - */ - SHARED_MODE(0, "共享集群"), - - /** - * 独享模式 - */ - EXCLUSIVE_MODE(1, "独享集群"), - - /** - * 独立模式 - */ - INDEPENDENT_MODE(2, "独立集群"); - - private Integer code; - - private String message; - - ClusterModeEnum(Integer code, String message) { - this.code = code; - this.message = message; - } - - public Integer getCode() { - return code; - } - - public String getMessage() { - return message; - } - - @Override - public String toString() { - return "ClusterModeEnum{" + - "code=" + code + - ", message='" + message + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/ConsumeHealthEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/ConsumeHealthEnum.java deleted file mode 100644 index f5cda2ed..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/ConsumeHealthEnum.java +++ /dev/null @@ -1,38 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * 消费健康 - * @author zengqiao - * @date 20/5/22 - */ -public enum ConsumeHealthEnum { - UNKNOWN(-1, "unknown"), - HEALTH(0, "health"), - UNHEALTH(1, 
"unhealth"), - ; - - private Integer code; - - private String message; - - ConsumeHealthEnum(Integer code, String message) { - this.code = code; - this.message = message; - } - - public Integer getCode() { - return code; - } - - public String getMessage() { - return message; - } - - @Override - public String toString() { - return "ConsumeHealthEnum{" + - "code=" + code + - ", message='" + message + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/DBStatusEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/DBStatusEnum.java deleted file mode 100644 index 89518f83..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/DBStatusEnum.java +++ /dev/null @@ -1,28 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * @author zengqiao - * @date 20/6/4 - */ -public enum DBStatusEnum { - DEAD(-1), - ALIVE(0) - ; - - private int status; - - DBStatusEnum(int status) { - this.status = status; - } - - public int getStatus() { - return status; - } - - @Override - public String toString() { - return "DBStatusEnum{" + - "status=" + status + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/IDCEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/IDCEnum.java deleted file mode 100644 index 2b3cad7c..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/IDCEnum.java +++ /dev/null @@ -1,35 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * @author zengqiao - * @date 20/5/26 - */ -public enum IDCEnum { - CN("cn", "国内"), - ; - - private String idc; - - private String name; - - IDCEnum(String idc, String name) { - this.idc = idc; - this.name = name; - } - - public String getIdc() { - return idc; - } - - public String getName() { - return name; - } - - @Override - public String toString() { - return "IDCEnum{" + - "idc='" + idc + '\'' + - ", name='" + name + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/KafkaBrokerRoleEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/KafkaBrokerRoleEnum.java deleted file mode 100644 index 246b4b5e..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/KafkaBrokerRoleEnum.java +++ /dev/null @@ -1,30 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * @author zengqiao - * @date 20/5/20 - */ -public enum KafkaBrokerRoleEnum { - NORMAL("NormalBroker"), - - COORDINATOR("Coordinator"), - - CONTROLLER("Controller"), - ; - private String role; - - KafkaBrokerRoleEnum(String role) { - this.role = role; - } - - public String getRole() { - return role; - } - - @Override - public String toString() { - return "KafkaBrokerRoleEnum{" + - "role='" + role + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/KafkaClientEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/KafkaClientEnum.java deleted file mode 100644 index 0b35277e..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/KafkaClientEnum.java +++ /dev/null @@ -1,38 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * 
@author zengqiao - * @date 20/5/29 - */ -public enum KafkaClientEnum { - PRODUCE_CLIENT(0, "Produce"), - - FETCH_CLIENT(1, "Fetch"), - - ; - - private Integer code; - - private String name; - - KafkaClientEnum(Integer code, String name) { - this.code = code; - this.name = name; - } - - public Integer getCode() { - return code; - } - - public String getName() { - return name; - } - - @Override - public String toString() { - return "KafkaClientEnum{" + - "code=" + code + - ", name='" + name + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/KafkaFileEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/KafkaFileEnum.java deleted file mode 100644 index f1196f91..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/KafkaFileEnum.java +++ /dev/null @@ -1,54 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * @author zengqiao - * @date 20/4/26 - */ -public enum KafkaFileEnum { - PACKAGE(0, "Kafka压缩包", ".tgz"), - - SERVER_CONFIG(1, "KafkaServer配置", ".properties"), - ; - - private Integer code; - - private String message; - - private String suffix; - - KafkaFileEnum(Integer code, String message, String suffix) { - this.code = code; - this.message = message; - this.suffix = suffix; - } - - public Integer getCode() { - return code; - } - - public String getMessage() { - return message; - } - - public String getSuffix() { - return suffix; - } - - @Override - public String toString() { - return "KafkaFileEnum{" + - "code=" + code + - ", message='" + message + '\'' + - ", suffix=" + suffix + - '}'; - } - - public static KafkaFileEnum getByCode(Integer code) { - for (KafkaFileEnum elem: KafkaFileEnum.values()) { - if (elem.getCode().equals(code)) { - return elem; - } - } - return null; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/ModuleEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/ModuleEnum.java deleted file mode 100644 index 280506cb..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/ModuleEnum.java +++ /dev/null @@ -1,78 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -import com.google.common.collect.Maps; - -import java.util.Map; - -/** - * @author zhongyuankai_i - * @date 20/09/03 - */ -public enum ModuleEnum { - TOPIC(0, "Topic"), - - APP(1, "应用"), - - QUOTA(2, "配额"), - - AUTHORITY(3, "权限"), - - CLUSTER(4, "集群"), - - PARTITION(5, "分区"), - - GATEWAY_CONFIG(6, "Gateway配置"), - - UNKNOWN(-1, "未知") - ; - ModuleEnum(int code, String message) { - this.code = code; - this.message = message; - } - - private int code; - - private String message; - - public int getCode() { - return code; - } - - public String getMessage() { - return message; - } - - public Map toMap() { - Map map = Maps.newHashMap(); - map.put("code", code); - map.put("message", message); - return map; - } - - public static ModuleEnum valueOf(Integer code) { - if (code == null) { - return ModuleEnum.UNKNOWN; - } - for (ModuleEnum state : ModuleEnum.values()) { - if (state.getCode() == code) { - return state; - } - } - - return ModuleEnum.UNKNOWN; - } - - public static boolean validate(Integer code) { - if (code == null) { - return false; - } - for (ModuleEnum state : ModuleEnum.values()) { - if (state.getCode() == code) { - return true; - } - } - - return false; - } - 
-} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/OffsetLocationEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/OffsetLocationEnum.java deleted file mode 100644 index aeeff95d..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/OffsetLocationEnum.java +++ /dev/null @@ -1,36 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * @author limeng - * @date 2017/11/21 - */ -public enum OffsetLocationEnum { - /** - * 存储于zk - */ - ZOOKEEPER("zookeeper"), - - /** - * 存储于broker - */ - BROKER("broker"); - - public final String location; - - OffsetLocationEnum(String location) { - this.location = location; - } - - public static OffsetLocationEnum getOffsetStoreLocation(String location) { - if (location == null) { - return null; - } - - for (OffsetLocationEnum offsetStoreLocation: OffsetLocationEnum.values()) { - if (offsetStoreLocation.location.equals(location)) { - return offsetStoreLocation; - } - } - return null; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/OffsetPosEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/OffsetPosEnum.java deleted file mode 100644 index 7f5deea2..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/OffsetPosEnum.java +++ /dev/null @@ -1,42 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * offset获取的位置 - * @author zengqiao - * @date 19/5/29 - */ -public enum OffsetPosEnum { - NONE(0), - - BEGINNING(1), - - END(2), - - BOTH(3); - - public final Integer code; - - OffsetPosEnum(Integer code) { - this.code = code; - } - - public Integer getCode() { - return code; - } - - public static OffsetPosEnum getOffsetPosEnum(Integer code) { - for (OffsetPosEnum offsetPosEnum : values()) { - if (offsetPosEnum.getCode().equals(code)) { - return offsetPosEnum; - } - } - return NONE; - } - - @Override - public String toString() { - return "OffsetPosEnum{" + - "code=" + code + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/OffsetResetTypeEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/OffsetResetTypeEnum.java deleted file mode 100644 index 170946e8..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/OffsetResetTypeEnum.java +++ /dev/null @@ -1,28 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * @author zengqiao - * @date 20/10/26 - */ -public enum OffsetResetTypeEnum { - RESET_BY_TIME(0), - - RESET_BY_OFFSET(1); - - private final Integer code; - - OffsetResetTypeEnum(Integer code) { - this.code = code; - } - - public Integer getCode() { - return code; - } - - @Override - public String toString() { - return "OffsetResetTypeEnum{" + - "code=" + code + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/OperateEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/OperateEnum.java deleted file mode 100644 index af69ea50..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/OperateEnum.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * @author zhongyuankai - * @date 20/09/03 - */ -public enum OperateEnum { - ADD(0, "新增"), - - DELETE(1, "删除"), - - 
EDIT(2, "修改"), - - UNKNOWN(-1, "unknown"), - ; - - OperateEnum(int code, String message) { - this.code = code; - this.message = message; - } - - private int code; - - private String message; - - public int getCode() { - return code; - } - - public String getMessage() { - return message; - } - - public static OperateEnum valueOf(Integer code) { - if (code == null) { - return OperateEnum.UNKNOWN; - } - for (OperateEnum state : OperateEnum.values()) { - if (state.getCode() == code) { - return state; - } - } - - return OperateEnum.UNKNOWN; - } - - public static boolean validate(Integer code) { - if (code == null) { - return true; - } - for (OperateEnum state : OperateEnum.values()) { - if (state.getCode() == code) { - return true; - } - } - - return false; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/OperationStatusEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/OperationStatusEnum.java deleted file mode 100644 index b88135a5..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/OperationStatusEnum.java +++ /dev/null @@ -1,38 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * 操作状态类型 - * @author zengqiao - * @date 19/11/21 - */ -public enum OperationStatusEnum { - CREATE(0, "创建"), - UPDATE(1, "更新"), - DELETE(2, "删除"), - ; - - private Integer code; - - private String message; - - OperationStatusEnum(Integer code, String message) { - this.code = code; - this.message = message; - } - - public Integer getCode() { - return code; - } - - public String getMessage() { - return message; - } - - @Override - public String toString() { - return "OperationStatusEnum{" + - "code=" + code + - ", message='" + message + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/PeakFlowStatusEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/PeakFlowStatusEnum.java deleted file mode 100644 index f39ac91a..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/PeakFlowStatusEnum.java +++ /dev/null @@ -1,42 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * 峰值状态枚举 - * @author zengqiao - * @date 20/5/11 - */ -public enum PeakFlowStatusEnum { - BETWEEN_ALL(0, "全部"), - BETWEEN_00_60(1, "使用率0%-60%"), - BETWEEN_60_80(2, "使用率60%-80%"), - BETWEEN_80_100(3, "使用率80%-100%"), - BETWEEN_100_PLUS(4, "使用率大于100%"), - BETWEEN_EXCEPTION(5, "数据获取失败"), - - ; - - private Integer code; - - private String message; - - PeakFlowStatusEnum(Integer code, String message) { - this.code = code; - this.message = message; - } - - public Integer getCode() { - return code; - } - - public String getMessage() { - return message; - } - - @Override - public String toString() { - return "PeakFlowStatusEnum{" + - "code=" + code + - ", message='" + message + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/RebalanceDimensionEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/RebalanceDimensionEnum.java deleted file mode 100644 index c5259461..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/RebalanceDimensionEnum.java +++ /dev/null @@ -1,40 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * 优先副本选举维度 - * @author zengqiao - * @date 20/4/23 - */ 
-public enum RebalanceDimensionEnum { - CLUSTER(0, "Cluster维度"), - REGION(1, "Region维度"), - BROKER(2, "Broker维度"), - TOPIC(3, "Topic维度"), - PARTITION(4, "Partition维度"), - ; - - private Integer code; - - private String message; - - RebalanceDimensionEnum(Integer code, String message) { - this.code = code; - this.message = message; - } - - public Integer getCode() { - return code; - } - - public String getMessage() { - return message; - } - - @Override - public String toString() { - return "RebalanceDimensionEnum{" + - "code=" + code + - ", message='" + message + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TaskStatusEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TaskStatusEnum.java deleted file mode 100644 index a478eafe..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TaskStatusEnum.java +++ /dev/null @@ -1,61 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * 任务状态 - * @author zengqiao - * @date 2017/6/29. - */ -public enum TaskStatusEnum { - UNKNOWN( -1, "未知"), - - NEW( 0, "新建"), - - RUNNABLE( 20, "就绪"), - WAITING( 21, "等待"), - - RUNNING( 30, "运行中"), - KILLING( 31, "杀死中"), - - BLOCKED( 40, "暂停"), - - UNFINISHED( 99, "未完成"), - FINISHED( 100, "完成"), - - SUCCEED( 101, "成功"), - FAILED( 102, "失败"), - CANCELED( 103, "取消"), - IGNORED( 104, "忽略"), - TIMEOUT( 105, "超时"), - KILL_FAILED(106, "杀死失败"), - - ; - - private Integer code; - - private String message; - - TaskStatusEnum(Integer code, String message) { - this.code = code; - this.message = message; - } - - public Integer getCode() { - return code; - } - - public String getMessage() { - return message; - } - - @Override - public String toString() { - return "TaskStatusEnum{" + - "code=" + code + - ", message='" + message + '\'' + - '}'; - } - - public static Boolean isFinished(Integer code) { - return code >= FINISHED.getCode(); - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TaskStatusReassignEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TaskStatusReassignEnum.java deleted file mode 100644 index fc8adcc1..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TaskStatusReassignEnum.java +++ /dev/null @@ -1,50 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * @author zengqiao - * @date 20/6/11 - */ -public enum TaskStatusReassignEnum { - UNKNOWN(TaskStatusEnum.UNKNOWN), - - NEW(TaskStatusEnum.NEW), - - RUNNABLE(TaskStatusEnum.RUNNABLE), - - RUNNING(TaskStatusEnum.RUNNING), - -// FINISHED(TaskStatusEnum.FINISHED), - SUCCEED(TaskStatusEnum.SUCCEED), - FAILED(TaskStatusEnum.FAILED), - CANCELED(TaskStatusEnum.CANCELED), - ; - - private Integer code; - - private String message; - - TaskStatusReassignEnum(TaskStatusEnum taskStatusEnum) { - this.code = taskStatusEnum.getCode(); - this.message = taskStatusEnum.getMessage(); - } - - public Integer getCode() { - return code; - } - - public String getMessage() { - return message; - } - - @Override - public String toString() { - return "TaskStatusReassignEnum{" + - "code=" + code + - ", message='" + message + '\'' + - '}'; - } - - public static Boolean isFinished(Integer code) { - return SUCCEED.getCode().equals(code) || FAILED.getCode().equals(code) || CANCELED.getCode().equals(code); - } -} \ No newline at end of file diff --git 
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TopicAuthorityEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TopicAuthorityEnum.java deleted file mode 100644 index 7abafb8c..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TopicAuthorityEnum.java +++ /dev/null @@ -1,44 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * topic权限 - * @author zhongyuankai - * @date 20/4/29 - */ -public enum TopicAuthorityEnum { - DENY(0, "无"), - - READ(1, "只读"), - - WRITE(2, "只写"), - - READ_WRITE(3, "可读可写"), - - OWNER(4, "可管理"), - ; - - private Integer code; - - private String message; - - TopicAuthorityEnum(Integer code, String message) { - this.code = code; - this.message = message; - } - - public Integer getCode() { - return code; - } - - public String getMessage() { - return message; - } - - @Override - public String toString() { - return "TopicAuthorityEnum{" + - "code=" + code + - ", message='" + message + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TopicExpiredStatusEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TopicExpiredStatusEnum.java deleted file mode 100644 index 6a2f32c1..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TopicExpiredStatusEnum.java +++ /dev/null @@ -1,40 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * 过期Topic状态 - * @author zengqiao - * @date 21/01/25 - */ -public enum TopicExpiredStatusEnum { - ALREADY_NOTIFIED_AND_DELETED(-2, "已通知, 已下线"), - ALREADY_NOTIFIED_AND_CAN_DELETE(-1, "已通知, 可下线"), - ALREADY_EXPIRED_AND_WAIT_NOTIFY(0, "已过期, 待通知"), - ALREADY_NOTIFIED_AND_WAIT_RESPONSE(1, "已通知, 待反馈"), - - ; - - private int status; - - private String message; - - TopicExpiredStatusEnum(int status, String message) { - this.status = status; - this.message = message; - } - - public int getStatus() { - return status; - } - - public String getMessage() { - return message; - } - - @Override - public String toString() { - return "TopicExpiredStatusEnum{" + - "status=" + status + - ", message='" + message + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TopicOffsetChangedEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TopicOffsetChangedEnum.java deleted file mode 100644 index 4c88f25c..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TopicOffsetChangedEnum.java +++ /dev/null @@ -1,37 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * @author zengqiao - * @date 20/8/24 - */ -public enum TopicOffsetChangedEnum { - UNKNOWN(-1, "unknown"), - NO(0, "no"), - YES(1, "yes"), - ; - - private Integer code; - - private String message; - - TopicOffsetChangedEnum(Integer code, String message) { - this.code = code; - this.message = message; - } - - public Integer getCode() { - return code; - } - - public String getMessage() { - return message; - } - - @Override - public String toString() { - return "TopicOffsetChangedEnum{" + - "code=" + code + - ", message='" + message + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TopicReassignActionEnum.java 
b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TopicReassignActionEnum.java deleted file mode 100644 index 2c4361c6..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/TopicReassignActionEnum.java +++ /dev/null @@ -1,39 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum; - -/** - * Topic迁移动作 - * @author zengqiao - * @date 20/4/16 - */ -public enum TopicReassignActionEnum { - START("start"), - MODIFY("modify"), - CANCEL("cancel"), - ; - - private String action; - - TopicReassignActionEnum(String action) { - this.action = action; - } - - public String getAction() { - return action; - } - - @Override - public String toString() { - return "TopicReassignActionEnum{" + - "action='" + action + '\'' + - '}'; - } - - public static TopicReassignActionEnum getByAction(String action) { - for (TopicReassignActionEnum elem: TopicReassignActionEnum.values()) { - if (elem.action.equals(action)) { - return elem; - } - } - return null; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/gateway/GatewayConfigKeyEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/gateway/GatewayConfigKeyEnum.java deleted file mode 100644 index b3403e69..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/bizenum/gateway/GatewayConfigKeyEnum.java +++ /dev/null @@ -1,49 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.bizenum.gateway; - -/** - * @author zengqiao - * @date 20/7/28 - */ -public enum GatewayConfigKeyEnum { - SD_CLUSTER_ID("SD_CLUSTER_ID", "SD_CLUSTER_ID"), - SD_QUEUE_SIZE("SD_QUEUE_SIZE", "SD_QUEUE_SIZE"), - SD_APP_RATE("SD_APP_RATE", "SD_APP_RATE"), - SD_IP_RATE("SD_IP_RATE", "SD_IP_RATE"), - SD_SP_RATE("SD_SP_RATE", "SD_SP_RATE"), - - ; - - private String configType; - - private String configName; - - GatewayConfigKeyEnum(String configType, String configName) { - this.configType = configType; - this.configName = configName; - } - - public String getConfigType() { - return configType; - } - - public String getConfigName() { - return configName; - } - - @Override - public String toString() { - return "GatewayConfigKeyEnum{" + - "configType='" + configType + '\'' + - ", configName='" + configName + '\'' + - '}'; - } - - public static GatewayConfigKeyEnum getByConfigType(String configType) { - for (GatewayConfigKeyEnum configKeyEnum: GatewayConfigKeyEnum.values()) { - if (configKeyEnum.getConfigType().equals(configType)) { - return configKeyEnum; - } - } - return null; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ApiLevelContent.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ApiLevelContent.java deleted file mode 100644 index 8136cd16..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ApiLevelContent.java +++ /dev/null @@ -1,18 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.constant; - -/** - * @author zengqiao - * @date 20/7/28 - */ -public class ApiLevelContent { - public static final int LEVEL_VIP_1 = 1; - - public static final int LEVEL_IMPORTANT_2 = 2; - - public static final int LEVEL_NORMAL_3 = 3; - - public static final int LEVEL_DEFAULT_4 = 4; - - private ApiLevelContent() { - } -} \ No newline at end of file diff --git 
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ApiPrefix.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ApiPrefix.java deleted file mode 100644 index 5422076c..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ApiPrefix.java +++ /dev/null @@ -1,28 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.constant; - -/** - * Api前缀 - * @author zengqiao - * @date 20/4/16 - */ -public class ApiPrefix { - public static final String API_PREFIX = "/api/"; - private static final String API_V1_PREFIX = API_PREFIX + "v1/"; - - // login - public static final String API_V1_SSO_PREFIX = API_V1_PREFIX + "sso/"; - - // console - public static final String API_V1_NORMAL_PREFIX = API_V1_PREFIX + "normal/"; - public static final String API_V1_RD_PREFIX = API_V1_PREFIX + "rd/"; - public static final String API_V1_OP_PREFIX = API_V1_PREFIX + "op/"; - - // open - public static final String API_V1_THIRD_PART_PREFIX = API_V1_PREFIX + "third-part/"; - - // gateway - public static final String GATEWAY_API_V1_PREFIX = "/gateway" + API_V1_PREFIX; - - private ApiPrefix() { - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ConfigConstant.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ConfigConstant.java deleted file mode 100644 index 361c841f..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ConfigConstant.java +++ /dev/null @@ -1,36 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.constant; - -/** - * 配置的常量KEY - * @author zengqiao - * @date 20/7/1 - */ -public class ConfigConstant { - /** - * 专家服务 - */ - public static final String REGION_HOT_TOPIC_CONFIG_KEY = "REGION_HOT_TOPIC_CONFIG"; - public static final String TOPIC_INSUFFICIENT_PARTITION_CONFIG_KEY = "TOPIC_INSUFFICIENT_PARTITION_CONFIG"; - public static final String EXPIRED_TOPIC_CONFIG_KEY = "EXPIRED_TOPIC_CONFIG"; - - /** - * - */ - public static final String PRODUCE_CONSUMER_METRICS_CONFIG_KEY = "PRODUCE_CONSUMER_METRICS_CONFIG_KEY"; - - public static final String PRODUCE_TOPIC_METRICS_CONFIG_KEY = "PRODUCE_TOPIC_METRICS_CONFIG_KEY"; - - public static final long MAX_LIMIT_NUM = 200L; - - /** - * broker 默认最大峰值流量 100M - */ - public static final Long DEFAULT_BROKER_CAPACITY_LIMIT = 100 * 1024 * 1024L; - - public static final String BROKER_CAPACITY_LIMIT_CONFIG_KEY = "BROKER_CAPACITY_LIMIT_CONFIG"; - - public static final String KAFKA_CLUSTER_DO_CONFIG_KEY = "KAFKA_CLUSTER_DO_CONFIG"; - - private ConfigConstant() { - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/Constant.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/Constant.java deleted file mode 100644 index 02331255..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/Constant.java +++ /dev/null @@ -1,51 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.constant; - -/** - * @author zengqiao - * @date 20/2/28 - */ -public class Constant { - public static final Integer SUCCESS = 0; - - public static final Integer MAX_AVG_BYTES_DURATION = 10; - - public static final Integer BATCH_INSERT_SIZE = 30; - - public static final Integer DEFAULT_SESSION_TIMEOUT_UNIT_MS = 30000; - - public static final Integer MAX_TOPIC_OPERATION_SIZE_PER_REQUEST = 10; - - /** - * 不进行过滤的BrokerId - */ - public static final 
Integer NOT_FILTER_BROKER_ID = -1; - - /** - * 默认最近20分钟的连接信息 - */ - public static final Long TOPIC_CONNECTION_LATEST_TIME_MS = 20 * 60 * 1000L; - - /** - * 工单相关 - */ - public static final String HANDLE_APP_APPLY_MAX_NUM = "handle_app_apply_order_num"; - - public static final Integer HANDLE_APP_APPLY_MAX_NUM_DEFAULT = 10; - - public static final String AUTO_HANDLE_USER_NAME = "auto_handle"; - - public static final String AUTO_HANDLE_CHINESE_NAME = "自动审批"; - - public static final String UNKNOWN_VERSION = "unknownVersion"; - - public static final String UNKNOWN_USER = "UNKNOWN_USER"; - - public static final String DEFAULT_USER_NAME = "kafka-admin"; - - public static final Integer DEFAULT_MAX_CAL_TOPIC_EXPIRED_DAY = 90; - - public static final Integer INVALID_CODE = -1; - - private Constant() { - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/KafkaConstant.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/KafkaConstant.java deleted file mode 100644 index 463e9b1a..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/KafkaConstant.java +++ /dev/null @@ -1,26 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.constant; - -/** - * @author zengqiao - * @date 20/5/20 - */ -public class KafkaConstant { - public static final String COORDINATOR_TOPIC_NAME = "__consumer_offsets"; - - public static final String TRANSACTION_TOPIC_NAME = "__transaction_state"; - - public static final String BROKER_HOST_NAME_SUFFIX = ".diditaxi.com"; - - public static final String CLIENT_VERSION_CODE_UNKNOWN = "-1"; - - public static final String CLIENT_VERSION_NAME_UNKNOWN = "unknown"; - - public static final String RETENTION_MS_KEY = "retention.ms"; - - public static final String EXTERNAL_KEY = "EXTERNAL"; - - public static final String INTERNAL_KEY = "INTERNAL"; - - private KafkaConstant() { - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/KafkaMetricsCollections.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/KafkaMetricsCollections.java deleted file mode 100644 index be82317a..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/KafkaMetricsCollections.java +++ /dev/null @@ -1,45 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.constant; - -/** - * - * @author zengqiao - * @date 20/4/22 - */ -public class KafkaMetricsCollections { - public static final int COMMON_DETAIL_METRICS = 0; - - /** - * Broker流量详情 - */ - public static final int BROKER_TO_DB_METRICS = 101; // Broker入DB的Metrics指标 - public static final int BROKER_OVERVIEW_PAGE_METRICS = 103; // Broker状态概览的指标 - public static final int BROKER_ANALYSIS_METRICS = 105; // Broker分析的指标 - public static final int BROKER_TOPIC_ANALYSIS_METRICS = 106; // Broker分析的指标 - public static final int BROKER_BASIC_PAGE_METRICS = 107; // Broker基本信息页面的指标 - public static final int BROKER_STATUS_PAGE_METRICS = 108; // Broker状态 - public static final int BROKER_HEALTH_SCORE_METRICS = 109; // Broker健康分 - - /** - * Topic流量详情 - */ - public static final int TOPIC_FLOW_OVERVIEW = 201; - public static final int TOPIC_METRICS_TO_DB = 202; - public static final int TOPIC_REQUEST_TIME_METRICS_TO_DB = 203; - public static final int TOPIC_BASIC_PAGE_METRICS = 204; - public static final int TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS = 205; - public static final int TOPIC_THROTTLED_METRICS_TO_DB = 206; - - - /** - * 
App+Topic流量详情 - */ - public static final int APP_TOPIC_METRICS_TO_DB = 300; - - /** - * Broker信息 - */ - public static final int BROKER_VERSION = 400; - - private KafkaMetricsCollections() { - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/LoginConstant.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/LoginConstant.java deleted file mode 100644 index 8c9b47aa..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/LoginConstant.java +++ /dev/null @@ -1,17 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.constant; - -/** - * 登录常量 - * @author zengqiao - * @date 20/5/8 - */ -public class LoginConstant { - public static final String SESSION_USERNAME_KEY = "username"; - - public static final String COOKIE_CHINESE_USERNAME_KEY = "chineseName"; - - public static final Integer COOKIE_OR_SESSION_MAX_AGE_UNIT_MS = 24 * 60 * 60 * 1000; - - private LoginConstant() { - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/SystemCodeConstant.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/SystemCodeConstant.java deleted file mode 100644 index 510a90c1..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/SystemCodeConstant.java +++ /dev/null @@ -1,12 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.constant; - -/** - * @author zengqiao - * @date 20/7/28 - */ -public class SystemCodeConstant { - public static final String KAFKA_MANAGER = "kafka-manager"; - - private SystemCodeConstant() { - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/TopicCreationConstant.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/TopicCreationConstant.java deleted file mode 100644 index 4d569907..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/TopicCreationConstant.java +++ /dev/null @@ -1,62 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.constant; - -import java.util.Properties; - -/** - * @author zengqiao - * @date 20/7/28 - */ -public class TopicCreationConstant { - /** - * LogX创建Topic配置KEY - */ - public static final String LOG_X_CREATE_TOPIC_CONFIG_KEY_NAME = "LOG_X_CREATE_TOPIC_CONFIG"; - - /** - * 内部创建Topic配置KEY - */ - public static final String INNER_CREATE_TOPIC_CONFIG_KEY = "INNER_CREATE_TOPIC_CONFIG_KEY"; - - public static final Integer DEFAULT_REPLICA = 3; - - public static final Integer DEFAULT_PARTITION_NUM = 1; - - public static final Integer DEFAULT_RETENTION_TIME_UNIT_HOUR = 24; - - public static final String TOPIC_RETENTION_TIME_KEY_NAME = "retention.ms"; - - public static final String TOPIC_RETENTION_BYTES_KEY_NAME = "retention.bytes"; - - public static final Long DEFAULT_QUOTA = 3 * 1024 * 1024L; - - public static Properties createNewProperties(Long retentionTime) { - Properties properties = new Properties(); - properties.put(TOPIC_RETENTION_TIME_KEY_NAME, String.valueOf(retentionTime)); - return properties; - } - - public static final Long AUTO_EXEC_MAX_BYTES_IN_UNIT_B = 30 * 1024 * 1024L; - - /** - * Topic 前缀 - */ - public static final String TOPIC_NAME_PREFIX_US = "us01_"; - - public static final String TOPIC_NAME_PREFIX_RU = "ru01_"; - - public static final Integer TOPIC_NAME_MAX_LENGTH = 255; - - - /** - * 单次自动化审批, 默认允许的通过单子 - */ - public static final 
Integer DEFAULT_MAX_PASSED_ORDER_NUM_PER_TASK = 1; - - /** - * 单次自动化审批, 最多允许的通过单子 - */ - public static final Integer MAX_PASSED_ORDER_NUM_PER_TASK = 200; - - private TopicCreationConstant() { - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/TopicSampleConstant.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/TopicSampleConstant.java deleted file mode 100644 index d409e862..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/TopicSampleConstant.java +++ /dev/null @@ -1,22 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.constant; - -/** - * 采样相关配置 - * @author zengqiao - * @date 20/5/8 - */ -public class TopicSampleConstant { - /** - * TOPIC_SAMPLE_MAX_MSG_NUM: 最大采样条数 - * TOPIC_SAMPLE_MAX_TIMEOUT_MS:采样超时时间 - * TOPIC_SAMPLE_POLL_TIME_OUT_MS:采样单次poll超时时间 - * TOPIC_SAMPLE_MAX_DATA_LENGTH:截断情况下, 采样的数据最大长度 - */ - public static final Integer MAX_MSG_NUM = 100; - public static final Integer MAX_TIMEOUT_UNIT_MS = 10000; - public static final Integer POLL_TIME_OUT_UNIT_MS = 2000; - public static final Integer MAX_DATA_LENGTH_UNIT_BYTE = 2048; - - private TopicSampleConstant() { - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/TrickLoginConstant.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/TrickLoginConstant.java deleted file mode 100644 index 0bb92d2e..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/TrickLoginConstant.java +++ /dev/null @@ -1,24 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.constant; - -public class TrickLoginConstant { - /** - * HTTP Header key - */ - public static final String TRICK_LOGIN_SWITCH = "Trick-Login-Switch"; - - public static final String TRICK_LOGIN_USER = "Trick-Login-User"; - - /** - * 配置允许 trick 登录用户名单 - */ - public static final String TRICK_LOGIN_LEGAL_USER_CONFIG_KEY = "SECURITY.TRICK_USERS"; - - /** - * 开关状态值 - */ - public static final String TRICK_LOGIN_SWITCH_ON = "on"; - public static final String TRICK_LOGIN_SWITCH_OFF = "off"; - - private TrickLoginConstant() { - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ConsumerMetadata.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ConsumerMetadata.java deleted file mode 100644 index ae943c7c..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ConsumerMetadata.java +++ /dev/null @@ -1,37 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity; - -import kafka.admin.AdminClient; - -import java.util.*; - -/** - * @author zengqiao - * @date 19/5/14 - */ -public class ConsumerMetadata { - private Set consumerGroupSet = new HashSet<>(); - - private Map> topicNameConsumerGroupMap = new HashMap<>(); - - private Map consumerGroupSummaryMap = new HashMap<>(); - - public ConsumerMetadata(Set consumerGroupSet, - Map> topicNameConsumerGroupMap, - Map consumerGroupSummaryMap) { - this.consumerGroupSet = consumerGroupSet; - this.topicNameConsumerGroupMap = topicNameConsumerGroupMap; - this.consumerGroupSummaryMap = consumerGroupSummaryMap; - } - - public Set getConsumerGroupSet() { - return consumerGroupSet; - } - - public Map> getTopicNameConsumerGroupMap() { - return topicNameConsumerGroupMap; - } - - public Map getConsumerGroupSummaryMap() { - return 
consumerGroupSummaryMap; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/KafkaVersion.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/KafkaVersion.java deleted file mode 100644 index ba71d612..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/KafkaVersion.java +++ /dev/null @@ -1,82 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity; - -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; - -/** - * @author zengqiao - * @date 20/6/15 - */ -public class KafkaVersion { - private static final String DIDI_VERSION_EXTEND = "d"; - - public static final Long VERSION_0_10_3 = 10030000L; // 0.10.2+ - public static final Long VERSION_MAX = Long.MAX_VALUE; - - private volatile String version = null; - - private volatile long versionNum = Long.MAX_VALUE; - - public boolean initialized() { - if (ValidateUtils.isNull(version)) { - return false; - } - return true; - } - - public String getVersion() { - return version; - } - - public long getVersionNum() { - return versionNum; - } - - @Override - public String toString() { - return "KafkaVersion{" + - "version='" + version + '\'' + - ", versionNum=" + versionNum + - '}'; - } - - public long init(String version) { - version = version.toLowerCase(); - String[] splitElems = version.split("-"); - int splitElemLength = splitElems.length; - if (splitElemLength <= 0) { - versionNum = Long.MAX_VALUE; - return versionNum; - } - - try { - // kafka的version - String[] kafkaVersion = splitElems[0].split("\\."); - int kafkaVersionLength = kafkaVersion.length; - - versionNum = kafkaVersionLength > 0? Integer.valueOf(kafkaVersion[0]): 0; - versionNum = versionNum * 100 + (kafkaVersionLength > 1? Integer.valueOf(kafkaVersion[1]): 0); - versionNum = versionNum * 100 + (kafkaVersionLength > 2? 
Integer.valueOf(kafkaVersion[2]): 0); - } catch (Exception e) { - // Kafka版本信息获取不到时, 直接返回空 - this.versionNum = Long.MAX_VALUE; - return versionNum; - } - - // 成功获取版本信息 - versionNum = versionNum * 10000; - this.version = version; - - // 补充扩展信息 - try { - for (int idx = 0; idx < splitElemLength; ++idx) { - if (splitElems[idx].equals(DIDI_VERSION_EXTEND) && idx < splitElemLength - 1) { - versionNum = versionNum + (Integer.valueOf(splitElems[idx + 1])); - return versionNum; - } - } - } catch (Exception e) { - // 扩展版本信息获取不到时, 忽略 - } - return versionNum; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/Result.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/Result.java deleted file mode 100644 index 471a3d07..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/Result.java +++ /dev/null @@ -1,135 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity; - -import com.alibaba.fastjson.JSON; -import com.xiaojukeji.kafka.manager.common.constant.Constant; - -import java.io.Serializable; - -/** - * @author huangyiminghappy@163.com - * @date 2019-07-08 - */ -public class Result implements Serializable { - private static final long serialVersionUID = -2772975319944108658L; - - private T data; - private String message; - private String tips; - private int code; - - public Result(T data) { - this.data = data; - this.code = ResultStatus.SUCCESS.getCode(); - this.message = ResultStatus.SUCCESS.getMessage(); - } - - public Result() { - this(null); - } - - public Result(Integer code, String message) { - this.message = message; - this.code = code; - } - - public Result(Integer code, T data, String message) { - this.data = data; - this.message = message; - this.code = code; - } - - public T getData() - { - return (T)this.data; - } - - public void setData(T data) - { - this.data = data; - } - - public String getMessage() - { - return this.message; - } - - public void setMessage(String message) - { - this.message = message; - } - - public String getTips() { - return tips; - } - - public void setTips(String tips) { - this.tips = tips; - } - - public int getCode() - { - return this.code; - } - - public void setCode(int code) - { - this.code = code; - } - - @Override - public String toString() - { - return JSON.toJSONString(this); - } - - public static Result buildSuc() { - Result result = new Result<>(); - result.setCode(ResultStatus.SUCCESS.getCode()); - result.setMessage(ResultStatus.SUCCESS.getMessage()); - return result; - } - - public static Result buildSuc(T data) { - Result result = new Result<>(); - result.setCode(ResultStatus.SUCCESS.getCode()); - result.setMessage(ResultStatus.SUCCESS.getMessage()); - result.setData(data); - return result; - } - - public static Result buildGatewayFailure(String message) { - Result result = new Result<>(); - result.setCode(ResultStatus.GATEWAY_INVALID_REQUEST.getCode()); - result.setMessage(message); - result.setData(null); - return result; - } - - public static Result buildFailure(String message) { - Result result = new Result<>(); - result.setCode(ResultStatus.FAIL.getCode()); - result.setMessage(message); - result.setData(null); - return result; - } - - public static Result buildFrom(ResultStatus resultStatus) { - Result result = new Result<>(); - result.setCode(resultStatus.getCode()); - result.setMessage(resultStatus.getMessage()); - return result; - } - - public static Result buildFrom(ResultStatus resultStatus, T 
data) { - Result result = new Result<>(); - result.setCode(resultStatus.getCode()); - result.setMessage(resultStatus.getMessage()); - result.setData(data); - return result; - } - - public boolean failed() { - return !Constant.SUCCESS.equals(code); - } - -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ResultStatus.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ResultStatus.java deleted file mode 100644 index 0f8aebd6..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ResultStatus.java +++ /dev/null @@ -1,139 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity; - -import com.xiaojukeji.kafka.manager.common.constant.Constant; - -/** - * 返回状态 - * @author zengqiao - * @date 20/4/16 - */ -public enum ResultStatus { - GATEWAY_INVALID_REQUEST(-1, "invalid request"), - - SUCCESS(Constant.SUCCESS, "success"), - - FAIL(1, "操作失败"), - - /** - * 操作错误[1000, 2000) - * ------------------------------------------------------------------------------------------ - */ - OPERATION_FAILED(1401, "operation failed"), - OPERATION_FORBIDDEN(1402, "operation forbidden"), - API_CALL_EXCEED_LIMIT(1403, "api call exceed limit"), - USER_WITHOUT_AUTHORITY(1404, "user without authority"), - CHANGE_ZOOKEEPER_FORBIDDEN(1405, "change zookeeper forbidden"), - - - APP_OFFLINE_FORBIDDEN(1406, "先下线topic,才能下线应用~"), - - - TOPIC_OPERATION_PARAM_NULL_POINTER(1450, "参数错误"), - TOPIC_OPERATION_PARTITION_NUM_ILLEGAL(1451, "分区数错误"), - TOPIC_OPERATION_BROKER_NUM_NOT_ENOUGH(1452, "Broker数不足错误"), - TOPIC_OPERATION_TOPIC_NAME_ILLEGAL(1453, "Topic名称非法"), - TOPIC_OPERATION_TOPIC_EXISTED(1454, "Topic已存在"), - TOPIC_OPERATION_UNKNOWN_TOPIC_PARTITION(1455, "Topic未知"), - TOPIC_OPERATION_TOPIC_CONFIG_ILLEGAL(1456, "Topic配置错误"), - TOPIC_OPERATION_TOPIC_IN_DELETING(1457, "Topic正在删除"), - TOPIC_OPERATION_UNKNOWN_ERROR(1458, "未知错误"), - - /** - * 参数错误[2000, 3000) - * ------------------------------------------------------------------------------------------ - */ - PARAM_ILLEGAL(2000, "param illegal"), - CG_LOCATION_ILLEGAL(2001, "consumer group location illegal"), - ORDER_ALREADY_HANDLED(2002, "order already handled"), - APP_ID_OR_PASSWORD_ILLEGAL(2003, "app or password illegal"), - SYSTEM_CODE_ILLEGAL(2004, "system code illegal"), - CLUSTER_TASK_HOST_LIST_ILLEGAL(2005, "主机列表错误,请检查主机列表"), - JSON_PARSER_ERROR(2006, "json parser error"), - - BROKER_NUM_NOT_ENOUGH(2050, "broker not enough"), - CONTROLLER_NOT_ALIVE(2051, "controller not alive"), - CLUSTER_METADATA_ERROR(2052, "cluster metadata error"), - TOPIC_CONFIG_ERROR(2053, "topic config error"), - - /** - * 参数错误 - 资源检查错误 - * 因为外部系统的问题, 操作时引起的错误, [7000, 8000) - * ------------------------------------------------------------------------------------------ - */ - RESOURCE_NOT_EXIST(7100, "资源不存在"), - CLUSTER_NOT_EXIST(7101, "cluster not exist"), - BROKER_NOT_EXIST(7102, "broker not exist"), - TOPIC_NOT_EXIST(7103, "topic not exist"), - PARTITION_NOT_EXIST(7104, "partition not exist"), - ACCOUNT_NOT_EXIST(7105, "account not exist"), - APP_NOT_EXIST(7106, "app not exist"), - ORDER_NOT_EXIST(7107, "order not exist"), - CONFIG_NOT_EXIST(7108, "config not exist"), - IDC_NOT_EXIST(7109, "idc not exist"), - TASK_NOT_EXIST(7110, "task not exist"), - AUTHORITY_NOT_EXIST(7111, "authority not exist"), - MONITOR_NOT_EXIST(7112, "monitor not exist"), - QUOTA_NOT_EXIST(7113, "quota not exist, please check clusterId, topicName and appId"), - CONSUMER_GROUP_NOT_EXIST(7114, "consumerGroup not 
exist"), - TOPIC_BIZ_DATA_NOT_EXIST(7115, "topic biz data not exist, please sync topic to db"), - - // 资源已存在 - RESOURCE_ALREADY_EXISTED(7200, "资源已经存在"), - TOPIC_ALREADY_EXIST(7201, "topic already existed"), - - // 资源重名 - RESOURCE_NAME_DUPLICATED(7300, "资源名称重复"), - - // 资源已被使用 - RESOURCE_ALREADY_USED(7400, "资源早已被使用"), - - - /** - * 因为外部系统的问题, 操作时引起的错误, [8000, 9000) - * ------------------------------------------------------------------------------------------ - */ - MYSQL_ERROR(8010, "operate database failed"), - - ZOOKEEPER_CONNECT_FAILED(8020, "zookeeper connect failed"), - ZOOKEEPER_READ_FAILED(8021, "zookeeper read failed"), - ZOOKEEPER_WRITE_FAILED(8022, "zookeeper write failed"), - ZOOKEEPER_DELETE_FAILED(8023, "zookeeper delete failed"), - - // 调用集群任务里面的agent失败 - CALL_CLUSTER_TASK_AGENT_FAILED(8030, " call cluster task agent failed"), - - // 调用监控系统失败 - CALL_MONITOR_SYSTEM_ERROR(8040, " call monitor-system failed"), - - // 存储相关的调用失败 - STORAGE_UPLOAD_FILE_FAILED(8050, "upload file failed"), - STORAGE_FILE_TYPE_NOT_SUPPORT(8051, "File type not support"), - STORAGE_DOWNLOAD_FILE_FAILED(8052, "download file failed"), - LDAP_AUTHENTICATION_FAILED(8053, "ldap authentication failed"), - - ; - - private int code; - private String message; - - ResultStatus(int code, String message) { - this.code = code; - this.message = message; - } - - public int getCode() { - return code; - } - - public void setCode(int code) { - this.code = code; - } - - public String getMessage() { - return message; - } - - public void setMessage(String message) { - this.message = message; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/TopicOperationResult.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/TopicOperationResult.java deleted file mode 100644 index 3c979d8d..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/TopicOperationResult.java +++ /dev/null @@ -1,83 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity; - -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/2 - */ -public class TopicOperationResult { - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "状态码, 0:成功, 其他失败") - private Integer code; - - @ApiModelProperty(value = "信息") - private String message; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getCode() { - return code; - } - - public void setCode(Integer code) { - this.code = code; - } - - public String getMessage() { - return message; - } - - public void setMessage(String message) { - this.message = message; - } - - @Override - public String toString() { - return "TopicOperationResult{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", code=" + code + - ", message='" + message + '\'' + - '}'; - } - - public static TopicOperationResult buildFrom(Long clusterId, String topicName, Result rs) { - return buildFrom(clusterId, topicName, rs.getCode(), rs.getMessage()); - } - - public static TopicOperationResult buildFrom(Long clusterId, String topicName, ResultStatus rs) { - return buildFrom(clusterId, topicName, rs.getCode(), rs.getMessage()); - } - - 
private static TopicOperationResult buildFrom(Long clusterId, - String topicName, - Integer code, - String message) { - TopicOperationResult result = new TopicOperationResult(); - result.setClusterId(clusterId); - result.setTopicName(topicName); - result.setCode(code); - result.setMessage(message); - return result; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/AppTopicDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/AppTopicDTO.java deleted file mode 100644 index ac202605..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/AppTopicDTO.java +++ /dev/null @@ -1,91 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao; - -/** - * AppTopic信息 - * @author zengqiao - * @date 20/5/11 - */ -public class AppTopicDTO { - private Long logicalClusterId; - - private String logicalClusterName; - - private Long physicalClusterId; - - private String topicName; - - private Integer access; - - private String operator; - - private Long gmtCreate; - - public Long getLogicalClusterId() { - return logicalClusterId; - } - - public void setLogicalClusterId(Long logicalClusterId) { - this.logicalClusterId = logicalClusterId; - } - - public String getLogicalClusterName() { - return logicalClusterName; - } - - public void setLogicalClusterName(String logicalClusterName) { - this.logicalClusterName = logicalClusterName; - } - - public Long getPhysicalClusterId() { - return physicalClusterId; - } - - public void setPhysicalClusterId(Long physicalClusterId) { - this.physicalClusterId = physicalClusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getAccess() { - return access; - } - - public void setAccess(Integer access) { - this.access = access; - } - - public String getOperator() { - return operator; - } - - public void setOperator(String operator) { - this.operator = operator; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - @Override - public String toString() { - return "AppTopicDTO{" + - "logicalClusterId=" + logicalClusterId + - ", logicalClusterName='" + logicalClusterName + '\'' + - ", physicalClusterId=" + physicalClusterId + - ", topicName='" + topicName + '\'' + - ", access=" + access + - ", operator='" + operator + '\'' + - ", gmtCreate=" + gmtCreate + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/BrokerBasicDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/BrokerBasicDTO.java deleted file mode 100644 index eb8e01d2..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/BrokerBasicDTO.java +++ /dev/null @@ -1,91 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao; - -/** - * Broker基本信息 - * @author zengqiao_cn@163.com - * @date 19/4/8 - */ -public class BrokerBasicDTO { - private String host; - - private Integer port; - - private Integer jmxPort; - - private Integer topicNum; - - private Integer partitionCount; - - private Long startTime; - - private Integer leaderCount; - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public Integer getPort() { - return port; - } - - 
public void setPort(Integer port) { - this.port = port; - } - - public Integer getJmxPort() { - return jmxPort; - } - - public void setJmxPort(Integer jmxPort) { - this.jmxPort = jmxPort; - } - - public Integer getTopicNum() { - return topicNum; - } - - public void setTopicNum(Integer topicNum) { - this.topicNum = topicNum; - } - - public Integer getPartitionCount() { - return partitionCount; - } - - public void setPartitionCount(Integer partitionCount) { - this.partitionCount = partitionCount; - } - - public Long getStartTime() { - return startTime; - } - - public void setStartTime(Long startTime) { - this.startTime = startTime; - } - - public Integer getLeaderCount() { - return leaderCount; - } - - public void setLeaderCount(Integer leaderCount) { - this.leaderCount = leaderCount; - } - - @Override - public String toString() { - return "BrokerBasicInfoDTO{" + - "host='" + host + '\'' + - ", port=" + port + - ", jmxPort=" + jmxPort + - ", topicNum=" + topicNum + - ", partitionCount=" + partitionCount + - ", startTime=" + startTime + - ", leaderCount=" + leaderCount + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/BrokerOverviewDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/BrokerOverviewDTO.java deleted file mode 100644 index 7ce643b6..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/BrokerOverviewDTO.java +++ /dev/null @@ -1,189 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao; - -import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers.BrokerMetadata; - -/** - * @author zengqiao_cn@163.com - * @date 19/4/21 - */ -public class BrokerOverviewDTO { - private Integer brokerId; - - private String host; - - private Integer port; - - private Integer jmxPort; - - private Long startTime; - - private Object byteIn; - - private Object byteOut; - - private Integer partitionCount; - - private Integer underReplicatedPartitions; - - private Boolean underReplicated; - - private Integer status; - - private Integer peakFlowStatus; - - private String kafkaVersion; - - private Integer leaderCount; - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public Integer getPort() { - return port; - } - - public void setPort(Integer port) { - this.port = port; - } - - public Integer getJmxPort() { - return jmxPort; - } - - public void setJmxPort(Integer jmxPort) { - this.jmxPort = jmxPort; - } - - public Long getStartTime() { - return startTime; - } - - public void setStartTime(Long startTime) { - this.startTime = startTime; - } - - public Object getByteIn() { - return byteIn; - } - - public void setByteIn(Object byteIn) { - this.byteIn = byteIn; - } - - public Object getByteOut() { - return byteOut; - } - - public void setByteOut(Object byteOut) { - this.byteOut = byteOut; - } - - public Integer getPartitionCount() { - return partitionCount; - } - - public void setPartitionCount(Integer partitionCount) { - this.partitionCount = partitionCount; - } - - public Integer getUnderReplicatedPartitions() { - return underReplicatedPartitions; - } - - public void 
setUnderReplicatedPartitions(Integer underReplicatedPartitions) { - this.underReplicatedPartitions = underReplicatedPartitions; - } - - public Boolean getUnderReplicated() { - return underReplicated; - } - - public void setUnderReplicated(Boolean underReplicated) { - this.underReplicated = underReplicated; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Integer getPeakFlowStatus() { - return peakFlowStatus; - } - - public void setPeakFlowStatus(Integer peakFlowStatus) { - this.peakFlowStatus = peakFlowStatus; - } - - public String getKafkaVersion() { - return kafkaVersion; - } - - public void setKafkaVersion(String kafkaVersion) { - this.kafkaVersion = kafkaVersion; - } - - public Integer getLeaderCount() { - return leaderCount; - } - - public void setLeaderCount(Integer leaderCount) { - this.leaderCount = leaderCount; - } - - public static BrokerOverviewDTO newInstance(BrokerMetadata brokerMetadata, - BrokerMetrics brokerMetrics, - String kafkaVersion) { - BrokerOverviewDTO brokerOverviewDTO = new BrokerOverviewDTO(); - brokerOverviewDTO.setBrokerId(brokerMetadata.getBrokerId()); - brokerOverviewDTO.setHost(brokerMetadata.getHost()); - brokerOverviewDTO.setPort(brokerMetadata.getPort()); - brokerOverviewDTO.setJmxPort(brokerMetadata.getJmxPort()); - brokerOverviewDTO.setStartTime(brokerMetadata.getTimestamp()); - brokerOverviewDTO.setStatus(0); - if (brokerMetrics == null) { - return brokerOverviewDTO; - } - brokerOverviewDTO.setByteIn( - brokerMetrics.getSpecifiedMetrics("BytesInPerSecOneMinuteRate") - ); - brokerOverviewDTO.setByteOut( - brokerMetrics.getSpecifiedMetrics("BytesOutPerSecOneMinuteRate") - ); - brokerOverviewDTO.setPartitionCount( - brokerMetrics.getSpecifiedMetrics("PartitionCountValue", Integer.class) - ); - brokerOverviewDTO.setUnderReplicatedPartitions( - brokerMetrics.getSpecifiedMetrics("UnderReplicatedPartitionsValue", Integer.class) - ); - - if (!ValidateUtils.isNull(brokerOverviewDTO.getUnderReplicatedPartitions())) { - brokerOverviewDTO.setUnderReplicated(brokerOverviewDTO.getUnderReplicatedPartitions() > 0); - } - brokerOverviewDTO.setLeaderCount( - brokerMetrics.getSpecifiedMetrics("LeaderCountValue", Integer.class) - ); - brokerOverviewDTO.setKafkaVersion(kafkaVersion); - return brokerOverviewDTO; - } - - -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/ClusterDetailDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/ClusterDetailDTO.java deleted file mode 100644 index 2e903485..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/ClusterDetailDTO.java +++ /dev/null @@ -1,202 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/4/23 - */ -public class ClusterDetailDTO { - private Long clusterId; - - private String clusterName; - - private String zookeeper; - - private String bootstrapServers; - - private String kafkaVersion; - - private String idc; - - private Integer mode; - - private String securityProperties; - - private String jmxProperties; - - private Integer status; - - private Date gmtCreate; - - private Date gmtModify; - - private Integer brokerNum; - - private Integer topicNum; - - private Integer consumerGroupNum; - - private Integer controllerId; - - private Integer regionNum; - - public Long getClusterId() { - return 
clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getZookeeper() { - return zookeeper; - } - - public void setZookeeper(String zookeeper) { - this.zookeeper = zookeeper; - } - - public String getBootstrapServers() { - return bootstrapServers; - } - - public void setBootstrapServers(String bootstrapServers) { - this.bootstrapServers = bootstrapServers; - } - - public String getKafkaVersion() { - return kafkaVersion; - } - - public void setKafkaVersion(String kafkaVersion) { - this.kafkaVersion = kafkaVersion; - } - - public String getIdc() { - return idc; - } - - public void setIdc(String idc) { - this.idc = idc; - } - - public Integer getMode() { - return mode; - } - - public void setMode(Integer mode) { - this.mode = mode; - } - - public String getSecurityProperties() { - return securityProperties; - } - - public void setSecurityProperties(String securityProperties) { - this.securityProperties = securityProperties; - } - - public String getJmxProperties() { - return jmxProperties; - } - - public void setJmxProperties(String jmxProperties) { - this.jmxProperties = jmxProperties; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - public Integer getBrokerNum() { - return brokerNum; - } - - public void setBrokerNum(Integer brokerNum) { - this.brokerNum = brokerNum; - } - - public Integer getTopicNum() { - return topicNum; - } - - public void setTopicNum(Integer topicNum) { - this.topicNum = topicNum; - } - - public Integer getConsumerGroupNum() { - return consumerGroupNum; - } - - public void setConsumerGroupNum(Integer consumerGroupNum) { - this.consumerGroupNum = consumerGroupNum; - } - - public Integer getControllerId() { - return controllerId; - } - - public void setControllerId(Integer controllerId) { - this.controllerId = controllerId; - } - - public Integer getRegionNum() { - return regionNum; - } - - public void setRegionNum(Integer regionNum) { - this.regionNum = regionNum; - } - - @Override - public String toString() { - return "ClusterDetailDTO{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", zookeeper='" + zookeeper + '\'' + - ", bootstrapServers='" + bootstrapServers + '\'' + - ", kafkaVersion='" + kafkaVersion + '\'' + - ", idc='" + idc + '\'' + - ", mode=" + mode + - ", securityProperties='" + securityProperties + '\'' + - ", jmxProperties='" + jmxProperties + '\'' + - ", status=" + status + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - ", brokerNum=" + brokerNum + - ", topicNum=" + topicNum + - ", consumerGroupNum=" + consumerGroupNum + - ", controllerId=" + controllerId + - ", regionNum=" + regionNum + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/PartitionAttributeDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/PartitionAttributeDTO.java deleted file mode 100644 index e9e648ff..00000000 --- 
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/PartitionAttributeDTO.java +++ /dev/null @@ -1,24 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao; - -/** - * @author zhongyuankai - * @date 2020/5/26 - */ -public class PartitionAttributeDTO { - private Long logSize; - - public Long getLogSize() { - return logSize; - } - - public void setLogSize(Long logSize) { - this.logSize = logSize; - } - - @Override - public String toString() { - return "PartitionAttributeDTO{" + - "logSize=" + logSize + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/PartitionOffsetDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/PartitionOffsetDTO.java deleted file mode 100644 index bfe80a6b..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/PartitionOffsetDTO.java +++ /dev/null @@ -1,62 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao; - -/** - * Topic Offset - * @author zengqiao - * @date 19/6/2 - */ -public class PartitionOffsetDTO { - private Integer partitionId; - - private Long offset; - - private Long timestamp; - - public PartitionOffsetDTO() { - } - - public PartitionOffsetDTO(Integer partitionId, Long offset) { - this.partitionId = partitionId; - this.offset = offset; - } - - public PartitionOffsetDTO(Integer partitionId, Long offset, Long timestamp) { - this.partitionId = partitionId; - this.offset = offset; - this.timestamp = timestamp; - } - - public Integer getPartitionId() { - return partitionId; - } - - public void setPartitionId(Integer partitionId) { - this.partitionId = partitionId; - } - - public Long getOffset() { - return offset; - } - - public void setOffset(Long offset) { - this.offset = offset; - } - - public Long getTimestamp() { - return timestamp; - } - - public void setTimestamp(Long timestamp) { - this.timestamp = timestamp; - } - - @Override - public String toString() { - return "TopicOffsetDTO{" + - ", partitionId=" + partitionId + - ", offset=" + offset + - ", timestamp=" + timestamp + - '}'; - } -} - diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/RdTopicBasic.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/RdTopicBasic.java deleted file mode 100644 index bf57a800..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/RdTopicBasic.java +++ /dev/null @@ -1,115 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao; - -import java.util.List; -import java.util.Properties; - -/** - * @author zengqiao - * @date 20/6/10 - */ -public class RdTopicBasic { - private Long clusterId; - - private String clusterName; - - private String topicName; - - private Long retentionTime; - - private String appId; - - private String appName; - - private Properties properties; - - private String description; - - private List regionNameList; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Long getRetentionTime() { - return retentionTime; - } - - public void setRetentionTime(Long 
retentionTime) { - this.retentionTime = retentionTime; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public Properties getProperties() { - return properties; - } - - public void setProperties(Properties properties) { - this.properties = properties; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public List getRegionNameList() { - return regionNameList; - } - - public void setRegionNameList(List regionNameList) { - this.regionNameList = regionNameList; - } - - @Override - public String toString() { - return "RdTopicBasic{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", retentionTime=" + retentionTime + - ", appId='" + appId + '\'' + - ", appName='" + appName + '\'' + - ", properties=" + properties + - ", description='" + description + '\'' + - ", regionNameList='" + regionNameList + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/TopicDiskLocation.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/TopicDiskLocation.java deleted file mode 100644 index dde7deb8..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/TopicDiskLocation.java +++ /dev/null @@ -1,103 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/7/8 - */ -public class TopicDiskLocation { - private Long clusterId; - - private String topicName; - - private Integer brokerId; - - private String diskName; - - private List leaderPartitions; - - private List followerPartitions; - - private Boolean isUnderReplicated; - - private List underReplicatedPartitions; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public String getDiskName() { - return diskName; - } - - public void setDiskName(String diskName) { - this.diskName = diskName; - } - - public List getLeaderPartitions() { - return leaderPartitions; - } - - public void setLeaderPartitions(List leaderPartitions) { - this.leaderPartitions = leaderPartitions; - } - - public List getFollowerPartitions() { - return followerPartitions; - } - - public void setFollowerPartitions(List followerPartitions) { - this.followerPartitions = followerPartitions; - } - - public Boolean getUnderReplicated() { - return isUnderReplicated; - } - - public void setUnderReplicated(Boolean underReplicated) { - isUnderReplicated = underReplicated; - } - - public List getUnderReplicatedPartitions() { - return underReplicatedPartitions; - } - - public void setUnderReplicatedPartitions(List underReplicatedPartitions) { - this.underReplicatedPartitions = underReplicatedPartitions; - } - - @Override - public String toString() { - return "TopicDiskLocation{" + - "clusterId=" + clusterId + - ", topicName='" + 
topicName + '\'' + - ", brokerId=" + brokerId + - ", diskName='" + diskName + '\'' + - ", leaderPartitions=" + leaderPartitions + - ", followerPartitions=" + followerPartitions + - ", isUnderReplicated=" + isUnderReplicated + - ", underReplicatedPartitions=" + underReplicatedPartitions + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/account/Account.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/account/Account.java deleted file mode 100644 index b387e6d5..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/account/Account.java +++ /dev/null @@ -1,71 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.account; - -import com.xiaojukeji.kafka.manager.common.bizenum.AccountRoleEnum; - -/** - * 用户信息 - * @author zengqiao - * @date 20/6/10 - */ -public class Account { - private String username; - - private String chineseName; - - private String department; - - private AccountRoleEnum accountRoleEnum; - - public Account(String username, String chineseName, String department, AccountRoleEnum accountRoleEnum) { - this.username = username; - this.chineseName = chineseName; - this.department = department; - this.accountRoleEnum = accountRoleEnum; - } - - public Account() { - super(); - } - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = username; - } - - public String getChineseName() { - return chineseName; - } - - public void setChineseName(String chineseName) { - this.chineseName = chineseName; - } - - public String getDepartment() { - return department; - } - - public void setDepartment(String department) { - this.department = department; - } - - public AccountRoleEnum getAccountRoleEnum() { - return accountRoleEnum; - } - - public void setAccountRoleEnum(AccountRoleEnum accountRoleEnum) { - this.accountRoleEnum = accountRoleEnum; - } - - @Override - public String toString() { - return "Account{" + - "username='" + username + '\'' + - ", chineseName='" + chineseName + '\'' + - ", department='" + department + '\'' + - ", accountRoleEnum=" + accountRoleEnum + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/analysis/AnalysisBrokerDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/analysis/AnalysisBrokerDTO.java deleted file mode 100644 index a5441f86..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/analysis/AnalysisBrokerDTO.java +++ /dev/null @@ -1,114 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.analysis; - -import java.util.List; - -/** - * @author zengqiao - * @date 19/12/29 - */ -public class AnalysisBrokerDTO { - private Long clusterId; - - private Integer brokerId; - - private Long baseTime; - - private Double bytesIn; - - private Double bytesOut; - - private Double messagesIn; - - private Double totalFetchRequests; - - private Double totalProduceRequests; - - List topicAnalysisVOList; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public Long getBaseTime() { - return baseTime; - } - - public void setBaseTime(Long 
baseTime) { - this.baseTime = baseTime; - } - - public Double getBytesIn() { - return bytesIn; - } - - public void setBytesIn(Double bytesIn) { - this.bytesIn = bytesIn; - } - - public Double getBytesOut() { - return bytesOut; - } - - public void setBytesOut(Double bytesOut) { - this.bytesOut = bytesOut; - } - - public Double getMessagesIn() { - return messagesIn; - } - - public void setMessagesIn(Double messagesIn) { - this.messagesIn = messagesIn; - } - - public Double getTotalFetchRequests() { - return totalFetchRequests; - } - - public void setTotalFetchRequests(Double totalFetchRequests) { - this.totalFetchRequests = totalFetchRequests; - } - - public Double getTotalProduceRequests() { - return totalProduceRequests; - } - - public void setTotalProduceRequests(Double totalProduceRequests) { - this.totalProduceRequests = totalProduceRequests; - } - - public List getTopicAnalysisVOList() { - return topicAnalysisVOList; - } - - public void setTopicAnalysisVOList(List topicAnalysisVOList) { - this.topicAnalysisVOList = topicAnalysisVOList; - } - - @Override - public String toString() { - return "AnalysisBrokerDTO{" + - "clusterId=" + clusterId + - ", brokerId=" + brokerId + - ", baseTime=" + baseTime + - ", bytesIn=" + bytesIn + - ", bytesOut=" + bytesOut + - ", messagesIn=" + messagesIn + - ", totalFetchRequests=" + totalFetchRequests + - ", totalProduceRequests=" + totalProduceRequests + - ", topicAnalysisVOList=" + topicAnalysisVOList + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/analysis/AnalysisTopicDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/analysis/AnalysisTopicDTO.java deleted file mode 100644 index cf78e887..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/analysis/AnalysisTopicDTO.java +++ /dev/null @@ -1,134 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.analysis; - -/** - * @author zengqiao - * @date 19/12/29 - */ -public class AnalysisTopicDTO { - private String topicName; - - private Double bytesIn; - - private Double bytesInRate; - - private Double bytesOut; - - private Double bytesOutRate; - - private Double messagesIn; - - private Double messagesInRate; - - private Double totalFetchRequests; - - private Double totalFetchRequestsRate; - - private Double totalProduceRequests; - - private Double totalProduceRequestsRate; - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Double getBytesIn() { - return bytesIn; - } - - public void setBytesIn(Double bytesIn) { - this.bytesIn = bytesIn; - } - - public Double getBytesInRate() { - return bytesInRate; - } - - public void setBytesInRate(Double bytesInRate) { - this.bytesInRate = bytesInRate; - } - - public Double getBytesOut() { - return bytesOut; - } - - public void setBytesOut(Double bytesOut) { - this.bytesOut = bytesOut; - } - - public Double getBytesOutRate() { - return bytesOutRate; - } - - public void setBytesOutRate(Double bytesOutRate) { - this.bytesOutRate = bytesOutRate; - } - - public Double getMessagesIn() { - return messagesIn; - } - - public void setMessagesIn(Double messagesIn) { - this.messagesIn = messagesIn; - } - - public Double getMessagesInRate() { - return messagesInRate; - } - - public void setMessagesInRate(Double messagesInRate) { - this.messagesInRate = messagesInRate; - } - - public Double 
getTotalFetchRequests() { - return totalFetchRequests; - } - - public void setTotalFetchRequests(Double totalFetchRequests) { - this.totalFetchRequests = totalFetchRequests; - } - - public Double getTotalFetchRequestsRate() { - return totalFetchRequestsRate; - } - - public void setTotalFetchRequestsRate(Double totalFetchRequestsRate) { - this.totalFetchRequestsRate = totalFetchRequestsRate; - } - - public Double getTotalProduceRequests() { - return totalProduceRequests; - } - - public void setTotalProduceRequests(Double totalProduceRequests) { - this.totalProduceRequests = totalProduceRequests; - } - - public Double getTotalProduceRequestsRate() { - return totalProduceRequestsRate; - } - - public void setTotalProduceRequestsRate(Double totalProduceRequestsRate) { - this.totalProduceRequestsRate = totalProduceRequestsRate; - } - - @Override - public String toString() { - return "AnalysisTopicDTO{" + - "topicName='" + topicName + '\'' + - ", bytesIn=" + bytesIn + - ", bytesInRate=" + bytesInRate + - ", bytesOut=" + bytesOut + - ", bytesOutRate=" + bytesOutRate + - ", messagesIn=" + messagesIn + - ", messagesInRate=" + messagesInRate + - ", totalFetchRequests=" + totalFetchRequests + - ", totalFetchRequestsRate=" + totalFetchRequestsRate + - ", totalProduceRequests=" + totalProduceRequests + - ", totalProduceRequestsRate=" + totalProduceRequestsRate + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/api/ApiCount.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/api/ApiCount.java deleted file mode 100644 index aefe940a..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/api/ApiCount.java +++ /dev/null @@ -1,50 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.api; - -import java.util.concurrent.atomic.AtomicInteger; - -/** - * @author zengqiao - * @date 20/7/27 - */ -public class ApiCount { - private int apiLevel; - - private Integer maxNum; - - private AtomicInteger currentNum; - - public ApiCount(int apiLevel, Integer maxNum, AtomicInteger currentNum) { - this.apiLevel = apiLevel; - this.maxNum = maxNum; - this.currentNum = currentNum; - } - - public int getApiLevel() { - return apiLevel; - } - - public Integer getMaxNum() { - return maxNum; - } - - public AtomicInteger getCurrentNum() { - return currentNum; - } - - public Boolean incAndCheckIsOverFlow() { - return maxNum < currentNum.incrementAndGet(); - } - - public int decPresentNum() { - return currentNum.decrementAndGet(); - } - - @Override - public String toString() { - return "ApiCount{" + - "apiLevel=" + apiLevel + - ", maxNum=" + maxNum + - ", currentNum=" + currentNum + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/cluster/ClusterBrokerStatus.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/cluster/ClusterBrokerStatus.java deleted file mode 100644 index 33f1e74e..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/cluster/ClusterBrokerStatus.java +++ /dev/null @@ -1,37 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.cluster; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/7/14 - */ -public class ClusterBrokerStatus { - private List brokerReplicaStatusList; - - private List brokerBytesInStatusList; - - public List getBrokerReplicaStatusList() { - 
return brokerReplicaStatusList; - } - - public void setBrokerReplicaStatusList(List brokerReplicaStatusList) { - this.brokerReplicaStatusList = brokerReplicaStatusList; - } - - public List getBrokerBytesInStatusList() { - return brokerBytesInStatusList; - } - - public void setBrokerBytesInStatusList(List brokerBytesInStatusList) { - this.brokerBytesInStatusList = brokerBytesInStatusList; - } - - @Override - public String toString() { - return "ClusterBrokerStatus{" + - "brokerReplicaStatusList=" + brokerReplicaStatusList + - ", brokerBytesInStatusList=" + brokerBytesInStatusList + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/cluster/ControllerPreferredCandidate.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/cluster/ControllerPreferredCandidate.java deleted file mode 100644 index 2de2fe57..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/cluster/ControllerPreferredCandidate.java +++ /dev/null @@ -1,53 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.cluster; - -public class ControllerPreferredCandidate { - private Integer brokerId; - - private String host; - - private Long startTime; - - private Integer status; - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public Long getStartTime() { - return startTime; - } - - public void setStartTime(Long startTime) { - this.startTime = startTime; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - @Override - public String toString() { - return "ControllerPreferredBroker{" + - "brokerId=" + brokerId + - ", host='" + host + '\'' + - ", startTime=" + startTime + - ", status=" + status + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/cluster/LogicalCluster.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/cluster/LogicalCluster.java deleted file mode 100644 index a7525374..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/cluster/LogicalCluster.java +++ /dev/null @@ -1,134 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.cluster; - -/** - * @author zengqiao - * @date 20/4/1 - */ -public class LogicalCluster { - private Long logicalClusterId; - - private String logicalClusterName; - - private String logicalClusterIdentification; - - private Integer mode; - - private Integer topicNum; - - private String clusterVersion; - - private Long physicalClusterId; - - private String bootstrapServers; - - private String description; - - private Long gmtCreate; - - private Long gmtModify; - - public Long getLogicalClusterId() { - return logicalClusterId; - } - - public void setLogicalClusterId(Long logicalClusterId) { - this.logicalClusterId = logicalClusterId; - } - - public String getLogicalClusterName() { - return logicalClusterName; - } - - public void setLogicalClusterName(String logicalClusterName) { - this.logicalClusterName = logicalClusterName; - } - - public String getLogicalClusterIdentification() { - return logicalClusterIdentification; - } - - public void setLogicalClusterIdentification(String logicalClusterIdentification) { - 
this.logicalClusterIdentification = logicalClusterIdentification; - } - - public Integer getMode() { - return mode; - } - - public void setMode(Integer mode) { - this.mode = mode; - } - - public Integer getTopicNum() { - return topicNum; - } - - public void setTopicNum(Integer topicNum) { - this.topicNum = topicNum; - } - - public String getClusterVersion() { - return clusterVersion; - } - - public void setClusterVersion(String clusterVersion) { - this.clusterVersion = clusterVersion; - } - - public Long getPhysicalClusterId() { - return physicalClusterId; - } - - public void setPhysicalClusterId(Long physicalClusterId) { - this.physicalClusterId = physicalClusterId; - } - - public String getBootstrapServers() { - return bootstrapServers; - } - - public void setBootstrapServers(String bootstrapServers) { - this.bootstrapServers = bootstrapServers; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Long getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Long gmtModify) { - this.gmtModify = gmtModify; - } - - @Override - public String toString() { - return "LogicalCluster{" + - "logicalClusterId=" + logicalClusterId + - ", logicalClusterName='" + logicalClusterName + '\'' + - ", logicalClusterIdentification='" + logicalClusterIdentification + '\'' + - ", mode=" + mode + - ", topicNum=" + topicNum + - ", clusterVersion='" + clusterVersion + '\'' + - ", physicalClusterId=" + physicalClusterId + - ", bootstrapServers='" + bootstrapServers + '\'' + - ", description='" + description + '\'' + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/cluster/LogicalClusterMetrics.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/cluster/LogicalClusterMetrics.java deleted file mode 100644 index a3cb9c34..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/cluster/LogicalClusterMetrics.java +++ /dev/null @@ -1,80 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.cluster; - -/** - * @author zengqiao - * @date 20/6/29 - */ -public class LogicalClusterMetrics { - - private Double totalProduceRequestsPerSec = 0.0; - - private Double bytesInPerSec = 0.0; - - private Double bytesOutPerSec = 0.0; - - private Double bytesRejectedPerSec = 0.0; - - private Double messagesInPerSec = 0.0; - - private Long gmtCreate; - - public Double getBytesInPerSec() { - return bytesInPerSec; - } - - public void setBytesInPerSec(Double bytesInPerSec) { - this.bytesInPerSec = bytesInPerSec; - } - - public Double getBytesOutPerSec() { - return bytesOutPerSec; - } - - public void setBytesOutPerSec(Double bytesOutPerSec) { - this.bytesOutPerSec = bytesOutPerSec; - } - - public Double getBytesRejectedPerSec() { - return bytesRejectedPerSec; - } - - public void setBytesRejectedPerSec(Double bytesRejectedPerSec) { - this.bytesRejectedPerSec = bytesRejectedPerSec; - } - - public Double getMessagesInPerSec() { - return messagesInPerSec; - } - - public void setMessagesInPerSec(Double messagesInPerSec) { - this.messagesInPerSec = messagesInPerSec; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void 
setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Double getTotalProduceRequestsPerSec() { - return totalProduceRequestsPerSec; - } - - public void setTotalProduceRequestsPerSec(Double totalProduceRequestsPerSec) { - this.totalProduceRequestsPerSec = totalProduceRequestsPerSec; - } - - @Override - public String toString() { - return "LogicalClusterMetrics{" + - "totalProduceRequestsPerSec=" + totalProduceRequestsPerSec + - ", bytesInPerSec=" + bytesInPerSec + - ", bytesOutPerSec=" + bytesOutPerSec + - ", bytesRejectedPerSec=" + bytesRejectedPerSec + - ", messagesInPerSec=" + messagesInPerSec + - ", gmtCreate=" + gmtCreate + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/CreateTopicConfig.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/CreateTopicConfig.java deleted file mode 100644 index 897222a3..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/CreateTopicConfig.java +++ /dev/null @@ -1,49 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.config; - -import com.xiaojukeji.kafka.manager.common.constant.TopicCreationConstant; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/7/24 - */ -public class CreateTopicConfig { - /** - * 单次自动化审批, 允许的通过单子 - */ - private Integer maxPassedOrderNumPerTask; - - private List configList; - - public Integer getMaxPassedOrderNumPerTask() { - if (ValidateUtils.isNull(maxPassedOrderNumPerTask)) { - return TopicCreationConstant.DEFAULT_MAX_PASSED_ORDER_NUM_PER_TASK; - } - if (maxPassedOrderNumPerTask > TopicCreationConstant.MAX_PASSED_ORDER_NUM_PER_TASK) { - return TopicCreationConstant.MAX_PASSED_ORDER_NUM_PER_TASK; - } - return maxPassedOrderNumPerTask; - } - - public void setMaxPassedOrderNumPerTask(Integer maxPassedOrderNumPerTask) { - this.maxPassedOrderNumPerTask = maxPassedOrderNumPerTask; - } - - public List getConfigList() { - return configList; - } - - public void setConfigList(List configList) { - this.configList = configList; - } - - @Override - public String toString() { - return "CreateTopicConfig{" + - "maxPassedOrderNumPerTask=" + maxPassedOrderNumPerTask + - ", configList=" + configList + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/CreateTopicElemConfig.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/CreateTopicElemConfig.java deleted file mode 100644 index 0f74b1e4..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/CreateTopicElemConfig.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.config; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/7/24 - */ -public class CreateTopicElemConfig { - private Long clusterId; - - private List brokerIdList; - - private List regionIdList; - - private Integer partitionNum; - - private Integer replicaNum; - - private Integer retentionTimeUnitHour; - - private Long autoExecMaxPeakBytesInUnitB; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public List getBrokerIdList() { - return brokerIdList; - } - - public void setBrokerIdList(List brokerIdList) { - 
this.brokerIdList = brokerIdList; - } - - public List getRegionIdList() { - return regionIdList; - } - - public void setRegionIdList(List regionIdList) { - this.regionIdList = regionIdList; - } - - public Integer getReplicaNum() { - return replicaNum; - } - - public void setReplicaNum(Integer replicaNum) { - this.replicaNum = replicaNum; - } - - public Integer getRetentionTimeUnitHour() { - return retentionTimeUnitHour; - } - - public void setRetentionTimeUnitHour(Integer retentionTimeUnitHour) { - this.retentionTimeUnitHour = retentionTimeUnitHour; - } - - public Long getAutoExecMaxPeakBytesInUnitB() { - return autoExecMaxPeakBytesInUnitB; - } - - public void setAutoExecMaxPeakBytesInUnitB(Long autoExecMaxPeakBytesInUnitB) { - this.autoExecMaxPeakBytesInUnitB = autoExecMaxPeakBytesInUnitB; - } - - public Integer getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Integer partitionNum) { - this.partitionNum = partitionNum; - } - - @Override - public String toString() { - return "CreateTopicElemConfig{" + - "clusterId=" + clusterId + - ", brokerIdList=" + brokerIdList + - ", regionIdList=" + regionIdList + - ", partitionNum=" + partitionNum + - ", replicaNum=" + replicaNum + - ", retentionTimeUnitHour=" + retentionTimeUnitHour + - ", autoExecMaxPeakBytesInUnitB=" + autoExecMaxPeakBytesInUnitB + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/MaxAvgBytesInConfig.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/MaxAvgBytesInConfig.java deleted file mode 100644 index 6a26f955..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/MaxAvgBytesInConfig.java +++ /dev/null @@ -1,25 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.config; - -/** - * 峰值均值流入流量配置 - * @author zengqiao - * @date 20/6/9 - */ -public class MaxAvgBytesInConfig { - private Integer duration; - - public Integer getDuration() { - return duration; - } - - public void setDuration(Integer duration) { - this.duration = duration; - } - - @Override - public String toString() { - return "MaxAvgBytesInConfig{" + - "duration=" + duration + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/TopicAnomalyFlowConfig.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/TopicAnomalyFlowConfig.java deleted file mode 100644 index 1797eb6e..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/TopicAnomalyFlowConfig.java +++ /dev/null @@ -1,57 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.config; - -/** - * @author zengqiao - * @date 20/8/23 - */ -public class TopicAnomalyFlowConfig { - private Long minTopicBytesInUnitB; - - private Double bytesInIncUnitB; - - private Long minTopicProduceQps; - - private Double produceQpsInc; - - public Long getMinTopicBytesInUnitB() { - return minTopicBytesInUnitB; - } - - public void setMinTopicBytesInUnitB(Long minTopicBytesInUnitB) { - this.minTopicBytesInUnitB = minTopicBytesInUnitB; - } - - public Double getBytesInIncUnitB() { - return bytesInIncUnitB; - } - - public void setBytesInIncUnitB(Double bytesInIncUnitB) { - this.bytesInIncUnitB = bytesInIncUnitB; - } - - public Long getMinTopicProduceQps() { - return minTopicProduceQps; - } - - public void setMinTopicProduceQps(Long 
minTopicProduceQps) { - this.minTopicProduceQps = minTopicProduceQps; - } - - public Double getProduceQpsInc() { - return produceQpsInc; - } - - public void setProduceQpsInc(Double produceQpsInc) { - this.produceQpsInc = produceQpsInc; - } - - @Override - public String toString() { - return "TopicAnomalyFlowConfig{" + - "minTopicBytesInUnitB=" + minTopicBytesInUnitB + - ", bytesInIncUnitB=" + bytesInIncUnitB + - ", minTopicProduceQps=" + minTopicProduceQps + - ", produceQpsInc=" + produceQpsInc + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/TopicNameConfig.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/TopicNameConfig.java deleted file mode 100644 index 787bd238..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/TopicNameConfig.java +++ /dev/null @@ -1,44 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.config; - -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; - -/** - * @author zengqiao - * @date 20/8/31 - */ -public class TopicNameConfig { - private Long clusterId; - - private String topicName; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - @Override - public String toString() { - return "TopicNameConfig{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - '}'; - } - - public boolean legal() { - if (ValidateUtils.isNull(clusterId) || ValidateUtils.isBlank(topicName)) { - return false; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/expert/RegionTopicHotConfig.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/expert/RegionTopicHotConfig.java deleted file mode 100644 index 030a3621..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/expert/RegionTopicHotConfig.java +++ /dev/null @@ -1,58 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.config.expert; - -import java.util.ArrayList; -import java.util.List; - -/** - * @author zengqiao - * @date 20/8/23 - */ -public class RegionTopicHotConfig { - private Long minTopicBytesInUnitB; - - private Integer maxDisPartitionNum; - - private List ignoreClusterIdList; - - public Long getMinTopicBytesInUnitB() { - if (minTopicBytesInUnitB == null) { - return 3 * 1024 * 1024L; - } - return minTopicBytesInUnitB; - } - - public void setMinTopicBytesInUnitB(Long minTopicBytesInUnitB) { - this.minTopicBytesInUnitB = minTopicBytesInUnitB; - } - - public Integer getMaxDisPartitionNum() { - if (maxDisPartitionNum == null) { - return 3; - } - return maxDisPartitionNum; - } - - public void setMaxDisPartitionNum(Integer maxDisPartitionNum) { - this.maxDisPartitionNum = maxDisPartitionNum; - } - - public List getIgnoreClusterIdList() { - if (ignoreClusterIdList == null) { - return new ArrayList<>(); - } - return ignoreClusterIdList; - } - - public void setIgnoreClusterIdList(List ignoreClusterIdList) { - this.ignoreClusterIdList = ignoreClusterIdList; - } - - @Override - public String toString() { - return "RegionTopicHotConfig{" + - "minTopicBytesInUnitB=" + 
minTopicBytesInUnitB + - ", maxDisPartitionNum=" + maxDisPartitionNum + - ", ignoreClusterIdList=" + ignoreClusterIdList + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/expert/TopicExpiredConfig.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/expert/TopicExpiredConfig.java deleted file mode 100644 index be49cb18..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/expert/TopicExpiredConfig.java +++ /dev/null @@ -1,49 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.config.expert; - -import java.util.ArrayList; -import java.util.List; - -/** - * @author zengqiao - * @date 20/9/17 - */ -public class TopicExpiredConfig { - private Integer minExpiredDay = 30; - - private String filterRegex = ""; - - private List ignoreClusterIdList = new ArrayList<>(); - - public Integer getMinExpiredDay() { - return minExpiredDay; - } - - public void setMinExpiredDay(Integer minExpiredDay) { - this.minExpiredDay = minExpiredDay; - } - - public List getIgnoreClusterIdList() { - return ignoreClusterIdList; - } - - public void setIgnoreClusterIdList(List ignoreClusterIdList) { - this.ignoreClusterIdList = ignoreClusterIdList; - } - - public String getFilterRegex() { - return filterRegex; - } - - public void setFilterRegex(String filterRegex) { - this.filterRegex = filterRegex; - } - - @Override - public String toString() { - return "TopicExpiredConfig{" + - "minExpiredDay=" + minExpiredDay + - ", filterRegex='" + filterRegex + '\'' + - ", ignoreClusterIdList=" + ignoreClusterIdList + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/expert/TopicInsufficientPartitionConfig.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/expert/TopicInsufficientPartitionConfig.java deleted file mode 100644 index d954c6d3..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/config/expert/TopicInsufficientPartitionConfig.java +++ /dev/null @@ -1,50 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.config.expert; - -import java.util.ArrayList; -import java.util.List; - -/** - * 专家服务-Topic分区不足配置 - * @author zengqiao - * @date 20/8/23 - */ -public class TopicInsufficientPartitionConfig { - private Long maxBytesInPerPartitionUnitB = 3 * 1024 * 1024L; - - private Long minTopicBytesInUnitB = 3 * 1024 * 1024L; - - private List ignoreClusterIdList = new ArrayList<>(); - - public Long getMaxBytesInPerPartitionUnitB() { - return maxBytesInPerPartitionUnitB; - } - - public void setMaxBytesInPerPartitionUnitB(Long maxBytesInPerPartitionUnitB) { - this.maxBytesInPerPartitionUnitB = maxBytesInPerPartitionUnitB; - } - - public Long getMinTopicBytesInUnitB() { - return minTopicBytesInUnitB; - } - - public void setMinTopicBytesInUnitB(Long minTopicBytesInUnitB) { - this.minTopicBytesInUnitB = minTopicBytesInUnitB; - } - - public List getIgnoreClusterIdList() { - return ignoreClusterIdList; - } - - public void setIgnoreClusterIdList(List ignoreClusterIdList) { - this.ignoreClusterIdList = ignoreClusterIdList; - } - - @Override - public String toString() { - return "TopicInsufficientPartitionConfig{" + - "maxBytesInPerPartitionUnitB=" + maxBytesInPerPartitionUnitB + - ", minTopicBytesInUnitB=" + minTopicBytesInUnitB + - ", ignoreClusterIdList=" + 
ignoreClusterIdList + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/consumer/ConsumeDetailDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/consumer/ConsumeDetailDTO.java deleted file mode 100644 index a40484e4..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/consumer/ConsumeDetailDTO.java +++ /dev/null @@ -1,57 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.consumer; - -/** - * @author zengqiao - * @date 20/1/9 - */ -public class ConsumeDetailDTO { - private Integer partitionId; - - private Long offset; - - private Long consumeOffset; - - private String consumerId; - - public Integer getPartitionId() { - return partitionId; - } - - public void setPartitionId(Integer partitionId) { - this.partitionId = partitionId; - } - - public Long getOffset() { - return offset; - } - - public void setOffset(Long offset) { - this.offset = offset; - } - - public Long getConsumeOffset() { - return consumeOffset; - } - - public void setConsumeOffset(Long consumeOffset) { - this.consumeOffset = consumeOffset; - } - - public String getConsumerId() { - return consumerId; - } - - public void setConsumerId(String consumerId) { - this.consumerId = consumerId; - } - - @Override - public String toString() { - return "ConsumeDetailDTO{" + - "partitionId=" + partitionId + - ", offset=" + offset + - ", consumeOffset=" + consumeOffset + - ", consumerId='" + consumerId + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/consumer/ConsumerGroup.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/consumer/ConsumerGroup.java deleted file mode 100644 index 9f188086..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/consumer/ConsumerGroup.java +++ /dev/null @@ -1,71 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.consumer; - -import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum; - -import java.util.Objects; - -public class ConsumerGroup { - private Long clusterId; - - private String consumerGroup; - - private OffsetLocationEnum offsetStoreLocation; - - public ConsumerGroup(Long clusterId, String consumerGroup, OffsetLocationEnum offsetStoreLocation) { - this.clusterId = clusterId; - this.consumerGroup = consumerGroup; - this.offsetStoreLocation = offsetStoreLocation; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getConsumerGroup() { - return consumerGroup; - } - - public void setConsumerGroup(String consumerGroup) { - this.consumerGroup = consumerGroup; - } - - public OffsetLocationEnum getOffsetStoreLocation() { - return offsetStoreLocation; - } - - public void setOffsetStoreLocation(OffsetLocationEnum offsetStoreLocation) { - this.offsetStoreLocation = offsetStoreLocation; - } - - @Override - public String toString() { - return "ConsumerGroup{" + - "clusterId=" + clusterId + - ", consumerGroup='" + consumerGroup + '\'' + - ", offsetStoreLocation=" + offsetStoreLocation + - '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - ConsumerGroup that = (ConsumerGroup) o; - return 
clusterId.equals(that.clusterId) - && consumerGroup.equals(that.consumerGroup) - && offsetStoreLocation == that.offsetStoreLocation; - } - - @Override - public int hashCode() { - return Objects.hash(clusterId, consumerGroup, offsetStoreLocation); - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/consumer/ConsumerGroupSummary.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/consumer/ConsumerGroupSummary.java deleted file mode 100644 index ca89836e..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/consumer/ConsumerGroupSummary.java +++ /dev/null @@ -1,68 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.consumer; - -import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum; - -import java.util.List; - -public class ConsumerGroupSummary { - private Long clusterId; - - private String consumerGroup; - - private OffsetLocationEnum offsetStoreLocation; - - private List appIdList; - - private String state; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getConsumerGroup() { - return consumerGroup; - } - - public void setConsumerGroup(String consumerGroup) { - this.consumerGroup = consumerGroup; - } - - public OffsetLocationEnum getOffsetStoreLocation() { - return offsetStoreLocation; - } - - public void setOffsetStoreLocation(OffsetLocationEnum offsetStoreLocation) { - this.offsetStoreLocation = offsetStoreLocation; - } - - public List getAppIdList() { - return appIdList; - } - - public void setAppIdList(List appIdList) { - this.appIdList = appIdList; - } - - public String getState() { - return state; - } - - public void setState(String state) { - this.state = state; - } - - @Override - public String toString() { - return "ConsumerGroupSummary{" + - "clusterId=" + clusterId + - ", consumerGroup='" + consumerGroup + '\'' + - ", offsetStoreLocation=" + offsetStoreLocation + - ", appIdList=" + appIdList + - ", state='" + state + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/expert/TopicAnomalyFlow.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/expert/TopicAnomalyFlow.java deleted file mode 100644 index 558636d9..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/expert/TopicAnomalyFlow.java +++ /dev/null @@ -1,90 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.expert; - -/** - * @author zengqiao - * @date 20/3/30 - */ -public class TopicAnomalyFlow { - private Long clusterId; - - private String clusterName; - - private String topicName; - - private Double bytesIn; - - private Double bytesInIncr; - - private Double iops; - - private Double iopsIncr; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Double getBytesIn() { - return bytesIn; - } - - public void setBytesIn(Double bytesIn) { - this.bytesIn = bytesIn; - } - - public Double getBytesInIncr() { - return bytesInIncr; - } - - 
public void setBytesInIncr(Double bytesInIncr) { - this.bytesInIncr = bytesInIncr; - } - - public Double getIops() { - return iops; - } - - public void setIops(Double iops) { - this.iops = iops; - } - - public Double getIopsIncr() { - return iopsIncr; - } - - public void setIopsIncr(Double iopsIncr) { - this.iopsIncr = iopsIncr; - } - - @Override - public String toString() { - return "AnomalyFlowTopicDTO{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", bytesIn=" + bytesIn + - ", bytesInIncr=" + bytesInIncr + - ", iops=" + iops + - ", iopsIncr=" + iopsIncr + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/expert/TopicInsufficientPartition.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/expert/TopicInsufficientPartition.java deleted file mode 100644 index 1199d9c2..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/expert/TopicInsufficientPartition.java +++ /dev/null @@ -1,111 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.expert; - -import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/3/30 - */ -public class TopicInsufficientPartition { - private ClusterDO clusterDO; - - private String topicName; - - private Integer presentPartitionNum; - - private Integer suggestedPartitionNum; - - private List maxAvgBytesInList; - - private Double bytesInPerPartition; - - private List brokerIdList; - - public TopicInsufficientPartition( - ClusterDO clusterDO, - String topicName, - Integer presentPartitionNum, - Integer suggestedPartitionNum, - List maxAvgBytesInList, - Double bytesInPerPartition, - List brokerIdList) { - this.clusterDO = clusterDO; - this.topicName = topicName; - this.presentPartitionNum = presentPartitionNum; - this.suggestedPartitionNum = suggestedPartitionNum; - this.maxAvgBytesInList = maxAvgBytesInList; - this.bytesInPerPartition = bytesInPerPartition; - this.brokerIdList = brokerIdList; - } - - public ClusterDO getClusterDO() { - return clusterDO; - } - - public void setClusterDO(ClusterDO clusterDO) { - this.clusterDO = clusterDO; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getPresentPartitionNum() { - return presentPartitionNum; - } - - public void setPresentPartitionNum(Integer presentPartitionNum) { - this.presentPartitionNum = presentPartitionNum; - } - - public Integer getSuggestedPartitionNum() { - return suggestedPartitionNum; - } - - public void setSuggestedPartitionNum(Integer suggestedPartitionNum) { - this.suggestedPartitionNum = suggestedPartitionNum; - } - - public List getMaxAvgBytesInList() { - return maxAvgBytesInList; - } - - public void setMaxAvgBytesInList(List maxAvgBytesInList) { - this.maxAvgBytesInList = maxAvgBytesInList; - } - - public Double getBytesInPerPartition() { - return bytesInPerPartition; - } - - public void setBytesInPerPartition(Double bytesInPerPartition) { - this.bytesInPerPartition = bytesInPerPartition; - } - - public List getBrokerIdList() { - return brokerIdList; - } - - public void setBrokerIdList(List brokerIdList) { - this.brokerIdList = brokerIdList; - } - - @Override - public String toString() { - return "TopicInsufficientPartition{" + - "clusterDO=" + clusterDO + - ", topicName='" 
+ topicName + '\'' + - ", presentPartitionNum=" + presentPartitionNum + - ", suggestedPartitionNum=" + suggestedPartitionNum + - ", maxAvgBytesInList=" + maxAvgBytesInList + - ", bytesInPerPartition=" + bytesInPerPartition + - ", brokerIdList=" + brokerIdList + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/expert/TopicRegionHot.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/expert/TopicRegionHot.java deleted file mode 100644 index 17f2f392..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/expert/TopicRegionHot.java +++ /dev/null @@ -1,70 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.expert; - -import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO; - -import java.util.Map; - -/** - * Region内热点Topic - * @author zengqiao - * @date 20/3/27 - */ -public class TopicRegionHot { - private ClusterDO clusterDO; - - private String topicName; - - private Long retentionTime; - - private Map brokerIdPartitionNumMap; - - public TopicRegionHot(ClusterDO clusterDO, String topicName, Long retentionTime, Map - brokerIdPartitionNumMap) { - this.clusterDO = clusterDO; - this.topicName = topicName; - this.retentionTime = retentionTime; - this.brokerIdPartitionNumMap = brokerIdPartitionNumMap; - } - - public ClusterDO getClusterDO() { - return clusterDO; - } - - public void setClusterDO(ClusterDO clusterDO) { - this.clusterDO = clusterDO; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Long getRetentionTime() { - return retentionTime; - } - - public void setRetentionTime(Long retentionTime) { - this.retentionTime = retentionTime; - } - - public Map getBrokerIdPartitionNumMap() { - return brokerIdPartitionNumMap; - } - - public void setBrokerIdPartitionNumMap(Map brokerIdPartitionNumMap) { - this.brokerIdPartitionNumMap = brokerIdPartitionNumMap; - } - - @Override - public String toString() { - return "ExpertRegionTopicHot{" + - "clusterDO=" + clusterDO + - ", topicName='" + topicName + '\'' + - ", retentionTime=" + retentionTime + - ", brokerIdPartitionNumMap=" + brokerIdPartitionNumMap + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/AppRateConfig.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/AppRateConfig.java deleted file mode 100644 index 1ed045e4..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/AppRateConfig.java +++ /dev/null @@ -1,30 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.gateway; - -/** - * @author zengqiao - * @date 20/7/29 - */ -public class AppRateConfig extends BaseGatewayConfig { - private Long appRateLimit; - - public AppRateConfig(Long version, Long appRateLimit) { - this.version = version; - this.appRateLimit = appRateLimit; - } - - public Long getAppRateLimit() { - return appRateLimit; - } - - public void setAppRateLimit(Long appRateLimit) { - this.appRateLimit = appRateLimit; - } - - @Override - public String toString() { - return "AppRateConfig{" + - "appRateLimit=" + appRateLimit + - ", version=" + version + - '}'; - } -} \ No newline at end of file diff --git 
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/BaseGatewayConfig.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/BaseGatewayConfig.java deleted file mode 100644 index 528eb214..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/BaseGatewayConfig.java +++ /dev/null @@ -1,24 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.gateway; - -/** - * @author zengqiao - * @date 20/7/29 - */ -public class BaseGatewayConfig { - protected Long version; - - public Long getVersion() { - return version; - } - - public void setVersion(Long version) { - this.version = version; - } - - @Override - public String toString() { - return "GatewayConfig{" + - "version=" + version + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/IpRateConfig.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/IpRateConfig.java deleted file mode 100644 index 06c6cfbb..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/IpRateConfig.java +++ /dev/null @@ -1,30 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.gateway; - -/** - * @author zengqiao - * @date 20/7/29 - */ -public class IpRateConfig extends BaseGatewayConfig { - private Long ipRateLimit; - - public IpRateConfig(Long version, Long ipRateLimit) { - this.version = version; - this.ipRateLimit = ipRateLimit; - } - - public Long getIpRateLimit() { - return ipRateLimit; - } - - public void setIpRateLimit(Long ipRateLimit) { - this.ipRateLimit = ipRateLimit; - } - - @Override - public String toString() { - return "IpRateConfig{" + - "ipRateLimit=" + ipRateLimit + - ", version=" + version + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/KafkaBootstrapServerConfig.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/KafkaBootstrapServerConfig.java deleted file mode 100644 index e61181c5..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/KafkaBootstrapServerConfig.java +++ /dev/null @@ -1,33 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.gateway; - -import java.util.List; -import java.util.Map; - -/** - * @author zengqiao - * @date 20/7/29 - */ -public class KafkaBootstrapServerConfig extends BaseGatewayConfig { - private Map> clusterIdBootstrapServersMap; - - public KafkaBootstrapServerConfig(Long version, Map> clusterIdBootstrapServersMap) { - this.version = version; - this.clusterIdBootstrapServersMap = clusterIdBootstrapServersMap; - } - - public Map> getClusterIdBootstrapServersMap() { - return clusterIdBootstrapServersMap; - } - - public void setClusterIdBootstrapServersMap(Map> clusterIdBootstrapServersMap) { - this.clusterIdBootstrapServersMap = clusterIdBootstrapServersMap; - } - - @Override - public String toString() { - return "KafkaBootstrapServerConfig{" + - "clusterIdBootstrapServersMap=" + clusterIdBootstrapServersMap + - ", version=" + version + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/RequestQueueConfig.java 
b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/RequestQueueConfig.java deleted file mode 100644 index ff2f2184..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/RequestQueueConfig.java +++ /dev/null @@ -1,30 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.gateway; - -/** - * @author zengqiao - * @date 20/7/29 - */ -public class RequestQueueConfig extends BaseGatewayConfig { - private Long maxRequestQueueSize; - - public RequestQueueConfig(Long version, Long maxRequestQueueSize) { - this.version = version; - this.maxRequestQueueSize = maxRequestQueueSize; - } - - public Long getMaxRequestQueueSize() { - return maxRequestQueueSize; - } - - public void setMaxRequestQueueSize(Long maxRequestQueueSize) { - this.maxRequestQueueSize = maxRequestQueueSize; - } - - @Override - public String toString() { - return "RequestQueueConfig{" + - "maxRequestQueueSize=" + maxRequestQueueSize + - ", version=" + version + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/SpRateConfig.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/SpRateConfig.java deleted file mode 100644 index 15f21cf9..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/SpRateConfig.java +++ /dev/null @@ -1,32 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.gateway; - -import java.util.Map; - -/** - * @author zengqiao - * @date 20/7/29 - */ -public class SpRateConfig extends BaseGatewayConfig { - private Map spRateMap; - - public SpRateConfig(Long version, Map spRateMap) { - this.version = version; - this.spRateMap = spRateMap; - } - - public Map getSpRateMap() { - return spRateMap; - } - - public void setSpRateMap(Map spRateMap) { - this.spRateMap = spRateMap; - } - - @Override - public String toString() { - return "SpRateConfig{" + - "spRateMap=" + spRateMap + - ", version=" + version + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/TopicQuota.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/TopicQuota.java deleted file mode 100644 index 6b734348..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/gateway/TopicQuota.java +++ /dev/null @@ -1,81 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.gateway; - -import com.xiaojukeji.kafka.manager.common.entity.dto.gateway.TopicQuotaDTO; - -/** - * @author zhongyuankai - * @date 2020/4/27 - */ -public class TopicQuota { - private String appId; - - private Long clusterId; - - private String topicName; - - private Long produceQuota; - - private Long consumeQuota; - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Long getProduceQuota() { - return produceQuota; - } - - public void setProduceQuota(Long produceQuota) { - this.produceQuota = produceQuota; - } - - public Long getConsumeQuota() { - return consumeQuota; - } - - public void 
setConsumeQuota(Long consumeQuota) { - this.consumeQuota = consumeQuota; - } - - @Override - public String toString() { - return "TopicQuota{" + - "appId='" + appId + '\'' + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", produceQuota=" + produceQuota + - ", consumeQuota=" + consumeQuota + - '}'; - } - - public static TopicQuota buildFrom(TopicQuotaDTO dto) { - TopicQuota topicQuota = new TopicQuota(); - topicQuota.setAppId(dto.getAppId()); - topicQuota.setClusterId(dto.getClusterId()); - topicQuota.setTopicName(dto.getTopicName()); - topicQuota.setProduceQuota(dto.getProduceQuota()); - topicQuota.setConsumeQuota(dto.getConsumeQuota()); - return topicQuota; - } - -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/reassign/ReassignStatus.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/reassign/ReassignStatus.java deleted file mode 100644 index 3e9ec3cb..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/reassign/ReassignStatus.java +++ /dev/null @@ -1,130 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.reassign; - -import com.xiaojukeji.kafka.manager.common.bizenum.TaskStatusReassignEnum; -import com.xiaojukeji.kafka.manager.common.zookeeper.znode.ReassignmentElemData; -import kafka.common.TopicAndPartition; - -import java.util.List; -import java.util.Map; - -/** - * @author zengqiao - * @date 20/5/14 - */ -public class ReassignStatus { - private Long subTaskId; - - private Long clusterId; - - private String clusterName; - - private String topicName; - - private Integer status; - - private Long realThrottle; - - private Long maxThrottle; - - private Long minThrottle; - - private List reassignList; - - private Map reassignStatusMap; - - public Long getSubTaskId() { - return subTaskId; - } - - public void setSubTaskId(Long subTaskId) { - this.subTaskId = subTaskId; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Long getRealThrottle() { - return realThrottle; - } - - public void setRealThrottle(Long realThrottle) { - this.realThrottle = realThrottle; - } - - public Long getMaxThrottle() { - return maxThrottle; - } - - public void setMaxThrottle(Long maxThrottle) { - this.maxThrottle = maxThrottle; - } - - public Long getMinThrottle() { - return minThrottle; - } - - public void setMinThrottle(Long minThrottle) { - this.minThrottle = minThrottle; - } - - public List getReassignList() { - return reassignList; - } - - public void setReassignList(List reassignList) { - this.reassignList = reassignList; - } - - public Map getReassignStatusMap() { - return reassignStatusMap; - } - - public void setReassignStatusMap(Map reassignStatusMap) { - this.reassignStatusMap = reassignStatusMap; - } - - @Override - public String toString() { - return "ReassignStatus{" + - "subTaskId=" + subTaskId + - ", clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", status=" 
+ status + - ", realThrottle=" + realThrottle + - ", maxThrottle=" + maxThrottle + - ", minThrottle=" + minThrottle + - ", reassignList=" + reassignList + - ", reassignStatusMap=" + reassignStatusMap + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/remote/KafkaConsumerMetrics.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/remote/KafkaConsumerMetrics.java deleted file mode 100644 index 932cd2a1..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/remote/KafkaConsumerMetrics.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.remote; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/8/25 - */ -public class KafkaConsumerMetrics { - private Long clusterId; - - private String topicName; - - private String consumerGroup; - - private String location; - - private Integer partitionNum; - - private List consumeDetailList; - - private Long createTime; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getConsumerGroup() { - return consumerGroup; - } - - public void setConsumerGroup(String consumerGroup) { - this.consumerGroup = consumerGroup; - } - - public String getLocation() { - return location; - } - - public void setLocation(String location) { - this.location = location; - } - - public Integer getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Integer partitionNum) { - this.partitionNum = partitionNum; - } - - public List getConsumeDetailList() { - return consumeDetailList; - } - - public void setConsumeDetailList(List consumeDetailList) { - this.consumeDetailList = consumeDetailList; - } - - public Long getCreateTime() { - return createTime; - } - - public void setCreateTime(Long createTime) { - this.createTime = createTime; - } - - @Override - public String toString() { - return "KafkaConsumerMetrics{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", consumerGroup='" + consumerGroup + '\'' + - ", location='" + location + '\'' + - ", partitionNum=" + partitionNum + - ", consumeDetailList=" + consumeDetailList + - ", createTime=" + createTime + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/remote/KafkaConsumerMetricsElem.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/remote/KafkaConsumerMetricsElem.java deleted file mode 100644 index 678f995f..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/remote/KafkaConsumerMetricsElem.java +++ /dev/null @@ -1,46 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.remote; - -/** - * @author zengqiao - * @date 20/8/31 - */ -public class KafkaConsumerMetricsElem { - private Integer partitionId; - - private Long partitionOffset; - - private Long consumeOffset; - - public Integer getPartitionId() { - return partitionId; - } - - public void setPartitionId(Integer partitionId) { - this.partitionId = partitionId; - } - - public Long getPartitionOffset() { - return partitionOffset; - } - - public void setPartitionOffset(Long partitionOffset) { - 
this.partitionOffset = partitionOffset; - } - - public Long getConsumeOffset() { - return consumeOffset; - } - - public void setConsumeOffset(Long consumeOffset) { - this.consumeOffset = consumeOffset; - } - - @Override - public String toString() { - return "KafkaConsumerMetricsElem{" + - "partitionId=" + partitionId + - ", partitionOffset=" + partitionOffset + - ", consumeOffset=" + consumeOffset + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/remote/KafkaTopicMetrics.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/remote/KafkaTopicMetrics.java deleted file mode 100644 index 599384a4..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/remote/KafkaTopicMetrics.java +++ /dev/null @@ -1,79 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.remote; - -/** - * @author zengqiao - * @date 20/8/31 - */ -public class KafkaTopicMetrics { - private Long clusterId; - - private String topic; - - private Integer partitionNum; - - private Double messagesInPerSec; - - private Double bytesInPerSec; - - private Long timestamp; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopic() { - return topic; - } - - public void setTopic(String topic) { - this.topic = topic; - } - - public Integer getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Integer partitionNum) { - this.partitionNum = partitionNum; - } - - public Double getMessagesInPerSec() { - return messagesInPerSec; - } - - public void setMessagesInPerSec(Double messagesInPerSec) { - this.messagesInPerSec = messagesInPerSec; - } - - public Double getBytesInPerSec() { - return bytesInPerSec; - } - - public void setBytesInPerSec(Double bytesInPerSec) { - this.bytesInPerSec = bytesInPerSec; - } - - public Long getTimestamp() { - return timestamp; - } - - public void setTimestamp(Long timestamp) { - this.timestamp = timestamp; - } - - @Override - public String toString() { - return "KafkaTopicMetrics{" + - "clusterId=" + clusterId + - ", topic='" + topic + '\'' + - ", partitionNum=" + partitionNum + - ", messagesInPerSec=" + messagesInPerSec + - ", bytesInPerSec=" + bytesInPerSec + - ", timestamp=" + timestamp + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/MineTopicSummary.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/MineTopicSummary.java deleted file mode 100644 index 7f02e51b..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/MineTopicSummary.java +++ /dev/null @@ -1,133 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.topic; - -/** - * @author zengqiao - * @date 20/5/12 - */ -public class MineTopicSummary { - private Long logicalClusterId; - - private String logicalClusterName; - - private Long physicalClusterId; - - private String topicName; - - private Object bytesIn; - - private Object bytesOut; - - private String appId; - - private String appName; - - private String appPrincipals; - - private Integer access; - - private String description; - - public Long getLogicalClusterId() { - return logicalClusterId; - } - - public void setLogicalClusterId(Long logicalClusterId) { - this.logicalClusterId = logicalClusterId; - } - - 
public String getLogicalClusterName() { - return logicalClusterName; - } - - public void setLogicalClusterName(String logicalClusterName) { - this.logicalClusterName = logicalClusterName; - } - - public Long getPhysicalClusterId() { - return physicalClusterId; - } - - public void setPhysicalClusterId(Long physicalClusterId) { - this.physicalClusterId = physicalClusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Object getBytesIn() { - return bytesIn; - } - - public void setBytesIn(Object bytesIn) { - this.bytesIn = bytesIn; - } - - public Object getBytesOut() { - return bytesOut; - } - - public void setBytesOut(Object bytesOut) { - this.bytesOut = bytesOut; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public String getAppPrincipals() { - return appPrincipals; - } - - public void setAppPrincipals(String appPrincipals) { - this.appPrincipals = appPrincipals; - } - - public Integer getAccess() { - return access; - } - - public void setAccess(Integer access) { - this.access = access; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - @Override - public String toString() { - return "MineTopicSummary{" + - "logicalClusterId=" + logicalClusterId + - ", logicalClusterName='" + logicalClusterName + '\'' + - ", physicalClusterId=" + physicalClusterId + - ", topicName='" + topicName + '\'' + - ", bytesIn=" + bytesIn + - ", bytesOut=" + bytesOut + - ", appId='" + appId + '\'' + - ", appName='" + appName + '\'' + - ", appPrincipals='" + appPrincipals + '\'' + - ", access=" + access + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicAppData.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicAppData.java deleted file mode 100644 index a06e9736..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicAppData.java +++ /dev/null @@ -1,123 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.topic; - -/** - * @author zhongyuankai - * @date 2020/6/8 - */ -public class TopicAppData { - private Long clusterId; - - private String topicName; - - private String appId; - - private String appName; - - private String appPrincipals; - - private Long produceQuota; - - private Long consumerQuota; - - private Boolean produceThrottled; - - private Boolean fetchThrottled; - - private Integer access; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public String getAppPrincipals() { - return appPrincipals; - } - - public void setAppPrincipals(String appPrincipals) { - this.appPrincipals = appPrincipals; - } - - public 
Long getProduceQuota() { - return produceQuota; - } - - public void setProduceQuota(Long produceQuota) { - this.produceQuota = produceQuota; - } - - public Long getConsumerQuota() { - return consumerQuota; - } - - public void setConsumerQuota(Long consumerQuota) { - this.consumerQuota = consumerQuota; - } - - public Boolean getProduceThrottled() { - return produceThrottled; - } - - public void setProduceThrottled(Boolean produceThrottled) { - this.produceThrottled = produceThrottled; - } - - public Boolean getFetchThrottled() { - return fetchThrottled; - } - - public void setFetchThrottled(Boolean fetchThrottled) { - this.fetchThrottled = fetchThrottled; - } - - public Integer getAccess() { - return access; - } - - public void setAccess(Integer access) { - this.access = access; - } - - @Override - public String toString() { - return "TopicAppDTO{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", appId='" + appId + '\'' + - ", appName='" + appName + '\'' + - ", appPrincipals='" + appPrincipals + '\'' + - ", produceQuota=" + produceQuota + - ", consumerQuota=" + consumerQuota + - ", produceThrottled=" + produceThrottled + - ", fetchThrottled=" + fetchThrottled + - ", access=" + access + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicBasicDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicBasicDTO.java deleted file mode 100644 index 9150569b..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicBasicDTO.java +++ /dev/null @@ -1,191 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.topic; - -import java.util.List; - -/** - * @author arthur - * @date 2018/09/03 - */ -public class TopicBasicDTO { - private Long clusterId; - - private String appId; - - private String appName; - - private String principals; - - private String topicName; - - private String description; - - private List regionNameList; - - private Integer score; - - private String topicCodeC; - - private Integer partitionNum; - - private Integer replicaNum; - - private Integer brokerNum; - - private Long modifyTime; - - private Long createTime; - - private Long retentionTime; - - private Long retentionBytes; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public String getPrincipals() { - return principals; - } - - public void setPrincipals(String principals) { - this.principals = principals; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public List getRegionNameList() { - return regionNameList; - } - - public void setRegionNameList(List regionNameList) { - this.regionNameList = regionNameList; - } - - public Integer getScore() { - return score; - } - - public void setScore(Integer score) { - this.score = score; - } - - public String getTopicCodeC() { - return topicCodeC; - } - - public void setTopicCodeC(String topicCodeC) { - 
this.topicCodeC = topicCodeC; - } - - public Integer getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Integer partitionNum) { - this.partitionNum = partitionNum; - } - - public Integer getReplicaNum() { - return replicaNum; - } - - public void setReplicaNum(Integer replicaNum) { - this.replicaNum = replicaNum; - } - - public Integer getBrokerNum() { - return brokerNum; - } - - public void setBrokerNum(Integer brokerNum) { - this.brokerNum = brokerNum; - } - - public Long getModifyTime() { - return modifyTime; - } - - public void setModifyTime(Long modifyTime) { - this.modifyTime = modifyTime; - } - - public Long getCreateTime() { - return createTime; - } - - public void setCreateTime(Long createTime) { - this.createTime = createTime; - } - - public Long getRetentionTime() { - return retentionTime; - } - - public void setRetentionTime(Long retentionTime) { - this.retentionTime = retentionTime; - } - - public Long getRetentionBytes() { - return retentionBytes; - } - - public void setRetentionBytes(Long retentionBytes) { - this.retentionBytes = retentionBytes; - } - - @Override - public String toString() { - return "TopicBasicDTO{" + - "clusterId=" + clusterId + - ", appId='" + appId + '\'' + - ", appName='" + appName + '\'' + - ", principals='" + principals + '\'' + - ", topicName='" + topicName + '\'' + - ", description='" + description + '\'' + - ", regionNameList=" + regionNameList + - ", score=" + score + - ", topicCodeC='" + topicCodeC + '\'' + - ", partitionNum=" + partitionNum + - ", replicaNum=" + replicaNum + - ", brokerNum=" + brokerNum + - ", modifyTime=" + modifyTime + - ", createTime=" + createTime + - ", retentionTime=" + retentionTime + - ", retentionBytes=" + retentionBytes + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicBrokerDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicBrokerDTO.java deleted file mode 100644 index 75c4638d..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicBrokerDTO.java +++ /dev/null @@ -1,81 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.topic; - -import java.util.List; - -/** - * @author zhongyuankai - * @date 20/4/17 - */ -public class TopicBrokerDTO { - private Integer brokerId; - - private String host; - - private Integer partitionNum; - - private List partitionIdList; - - private List leaderPartitionIdList; - - private boolean alive; - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public Integer getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Integer partitionNum) { - this.partitionNum = partitionNum; - } - - public List getPartitionIdList() { - return partitionIdList; - } - - public void setPartitionIdList(List partitionIdList) { - this.partitionIdList = partitionIdList; - } - - public List getLeaderPartitionIdList() { - return leaderPartitionIdList; - } - - public void setLeaderPartitionIdList(List leaderPartitionIdList) { - this.leaderPartitionIdList = leaderPartitionIdList; - } - - public boolean isAlive() { - return alive; - } - - public void setAlive(boolean alive) { - this.alive = alive; - } - - @Override - public String toString() { - return "TopicBrokerDTO{" + - "brokerId=" + 
brokerId + - ", host='" + host + '\'' + - ", partitionNum=" + partitionNum + - ", partitionIdList=" + partitionIdList + - ", leaderPartitionIdList=" + leaderPartitionIdList + - ", alive=" + alive + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicBusinessInfo.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicBusinessInfo.java deleted file mode 100644 index 978550c8..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicBusinessInfo.java +++ /dev/null @@ -1,68 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.topic; - -/** - * @author zhongyuankai - * @date 20/09/08 - */ -public class TopicBusinessInfo { - private String appId; - - private String appName; - - private String principals; - - private Long clusterId; - - private String topicName; - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public String getPrincipals() { - return principals; - } - - public void setPrincipals(String principals) { - this.principals = principals; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - @Override - public String toString() { - return "TopicBusinessInfoVO{" + - "appId='" + appId + '\'' + - ", appName='" + appName + '\'' + - ", principals='" + principals + '\'' + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicConnection.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicConnection.java deleted file mode 100644 index abb40327..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicConnection.java +++ /dev/null @@ -1,90 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.topic; - -/** - * @author zengqiao - * @date 20/4/20 - */ -public class TopicConnection { - private Long clusterId; - - private String topicName; - - private String appId; - - private String ip; - - private String hostname; - - private String clientType; - - private String clientVersion; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getIp() { - return ip; - } - - public void setIp(String ip) { - this.ip = ip; - } - - public String getHostname() { - return hostname; - } - - public void setHostname(String hostname) { - this.hostname = hostname; - } - - public String getClientType() { - return clientType; - } - - public void setClientType(String clientType) { - this.clientType = clientType; - } - - public String getClientVersion() { - return clientVersion; - } - - public void setClientVersion(String clientVersion) { 
- this.clientVersion = clientVersion; - } - - @Override - public String toString() { - return "TopicConnectionDTO{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", appId='" + appId + '\'' + - ", ip='" + ip + '\'' + - ", hostname='" + hostname + '\'' + - ", clientType='" + clientType + '\'' + - ", clientVersion='" + clientVersion + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicDTO.java deleted file mode 100644 index f64fd49c..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicDTO.java +++ /dev/null @@ -1,101 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.topic; - -/** - * @author zengqiao - * @date 20/5/12 - */ -public class TopicDTO { - private Long logicalClusterId; - - private String logicalClusterName; - - private String topicName; - - private String description; - - private String appId; - - private String appName; - - private String appPrincipals; - - private Boolean needAuth; - - public Long getLogicalClusterId() { - return logicalClusterId; - } - - public void setLogicalClusterId(Long logicalClusterId) { - this.logicalClusterId = logicalClusterId; - } - - public String getLogicalClusterName() { - return logicalClusterName; - } - - public void setLogicalClusterName(String logicalClusterName) { - this.logicalClusterName = logicalClusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public String getAppPrincipals() { - return appPrincipals; - } - - public void setAppPrincipals(String appPrincipals) { - this.appPrincipals = appPrincipals; - } - - public Boolean getNeedAuth() { - return needAuth; - } - - public void setNeedAuth(Boolean needAuth) { - this.needAuth = needAuth; - } - - @Override - public String toString() { - return "TopicDTO{" + - "logicalClusterId=" + logicalClusterId + - ", logicalClusterName='" + logicalClusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", description='" + description + '\'' + - ", appId='" + appId + '\'' + - ", appName='" + appName + '\'' + - ", appPrincipals='" + appPrincipals + '\'' + - ", needAuth=" + needAuth + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicExpiredData.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicExpiredData.java deleted file mode 100644 index efa4eac6..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicExpiredData.java +++ /dev/null @@ -1,71 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.topic; - -import com.xiaojukeji.kafka.manager.common.entity.pojo.LogicalClusterDO; -import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.AppDO; - -/** - * @author zengqiao - * @date 20/9/2 - */ 
-public class TopicExpiredData { - private Long clusterId; - - private String topicName; - - private LogicalClusterDO logicalClusterDO; - - private AppDO appDO; - - private Integer fetchConnectionNum; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public LogicalClusterDO getLogicalClusterDO() { - return logicalClusterDO; - } - - public void setLogicalClusterDO(LogicalClusterDO logicalClusterDO) { - this.logicalClusterDO = logicalClusterDO; - } - - public AppDO getAppDO() { - return appDO; - } - - public void setAppDO(AppDO appDO) { - this.appDO = appDO; - } - - public Integer getFetchConnectionNum() { - return fetchConnectionNum; - } - - public void setFetchConnectionNum(Integer fetchConnectionNum) { - this.fetchConnectionNum = fetchConnectionNum; - } - - @Override - public String toString() { - return "TopicExpiredData{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", logicalClusterDO=" + logicalClusterDO + - ", appDO=" + appDO + - ", fetchConnectionNum=" + fetchConnectionNum + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicMetricsDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicMetricsDTO.java deleted file mode 100644 index c722b4a9..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicMetricsDTO.java +++ /dev/null @@ -1,147 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.topic; - -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/5/11 - */ -public class TopicMetricsDTO { - @ApiModelProperty(value = "每秒流入消息数") - private Object messagesInPerSec; - - @ApiModelProperty(value = "每秒流入字节数") - private Object bytesInPerSec; - - @ApiModelProperty(value = "每秒流出字节数") - private Object bytesOutPerSec; - - @ApiModelProperty(value = "每秒拒绝字节数") - private Object bytesRejectedPerSec; - - @ApiModelProperty(value = "每秒请求数") - private Object totalProduceRequestsPerSec; - - @ApiModelProperty(value = "appId维度每秒流入消息数") - private Object appIdMessagesInPerSec; - - @ApiModelProperty(value = "appId维度每秒流入字节数") - private Object appIdBytesInPerSec; - - @ApiModelProperty(value = "appId维度每秒流出字节数") - private Object appIdBytesOutPerSec; - - @ApiModelProperty(value = "produce限流") - private Boolean produceThrottled; - - @ApiModelProperty(value = "consume限流") - private Boolean consumeThrottled; - - @ApiModelProperty(value = "创建时间") - private Long gmtCreate; - - public Object getMessagesInPerSec() { - return messagesInPerSec; - } - - public void setMessagesInPerSec(Object messagesInPerSec) { - this.messagesInPerSec = messagesInPerSec; - } - - public Object getBytesInPerSec() { - return bytesInPerSec; - } - - public void setBytesInPerSec(Object bytesInPerSec) { - this.bytesInPerSec = bytesInPerSec; - } - - public Object getBytesOutPerSec() { - return bytesOutPerSec; - } - - public void setBytesOutPerSec(Object bytesOutPerSec) { - this.bytesOutPerSec = bytesOutPerSec; - } - - public Object getBytesRejectedPerSec() { - return bytesRejectedPerSec; - } - - public void setBytesRejectedPerSec(Object bytesRejectedPerSec) { - this.bytesRejectedPerSec = bytesRejectedPerSec; - } - - public Object 
getTotalProduceRequestsPerSec() { - return totalProduceRequestsPerSec; - } - - public void setTotalProduceRequestsPerSec(Object totalProduceRequestsPerSec) { - this.totalProduceRequestsPerSec = totalProduceRequestsPerSec; - } - - public Object getAppIdMessagesInPerSec() { - return appIdMessagesInPerSec; - } - - public void setAppIdMessagesInPerSec(Object appIdMessagesInPerSec) { - this.appIdMessagesInPerSec = appIdMessagesInPerSec; - } - - public Object getAppIdBytesInPerSec() { - return appIdBytesInPerSec; - } - - public void setAppIdBytesInPerSec(Object appIdBytesInPerSec) { - this.appIdBytesInPerSec = appIdBytesInPerSec; - } - - public Object getAppIdBytesOutPerSec() { - return appIdBytesOutPerSec; - } - - public void setAppIdBytesOutPerSec(Object appIdBytesOutPerSec) { - this.appIdBytesOutPerSec = appIdBytesOutPerSec; - } - - public Boolean getProduceThrottled() { - return produceThrottled; - } - - public void setProduceThrottled(Boolean produceThrottled) { - this.produceThrottled = produceThrottled; - } - - public Boolean getConsumeThrottled() { - return consumeThrottled; - } - - public void setConsumeThrottled(Boolean consumeThrottled) { - this.consumeThrottled = consumeThrottled; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - @Override - public String toString() { - return "TopicMetricsDTO{" + - "messagesInPerSec=" + messagesInPerSec + - ", bytesInPerSec=" + bytesInPerSec + - ", bytesOutPerSec=" + bytesOutPerSec + - ", bytesRejectedPerSec=" + bytesRejectedPerSec + - ", totalProduceRequestsPerSec=" + totalProduceRequestsPerSec + - ", appIdMessagesInPerSec=" + appIdMessagesInPerSec + - ", appIdBytesInPerSec=" + appIdBytesInPerSec + - ", appIdBytesOutPerSec=" + appIdBytesOutPerSec + - ", produceThrottled=" + produceThrottled + - ", consumeThrottled=" + consumeThrottled + - ", gmtCreate=" + gmtCreate + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicOverview.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicOverview.java deleted file mode 100644 index fe02fe94..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicOverview.java +++ /dev/null @@ -1,157 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.topic; - -/** - * Topic概览信息 - * @author zengqiao - * @date 20/5/14 - */ -public class TopicOverview { - private Long clusterId; - - private String topicName; - - private Integer replicaNum; - - private Integer partitionNum; - - private Long retentionTime; - - private Object byteIn; - - private Object byteOut; - - private Object produceRequest; - - private String appName; - - private String appId; - - private String description; - - private Long updateTime; - - private Long logicalClusterId; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getReplicaNum() { - return replicaNum; - } - - public void setReplicaNum(Integer replicaNum) { - this.replicaNum = replicaNum; - } - - public Integer getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Integer partitionNum) { - this.partitionNum = partitionNum; - } - - public 
Long getRetentionTime() { - return retentionTime; - } - - public void setRetentionTime(Long retentionTime) { - this.retentionTime = retentionTime; - } - - public Object getByteIn() { - return byteIn; - } - - public void setByteIn(Object byteIn) { - this.byteIn = byteIn; - } - - public Object getByteOut() { - return byteOut; - } - - public void setByteOut(Object byteOut) { - this.byteOut = byteOut; - } - - public Object getProduceRequest() { - return produceRequest; - } - - public void setProduceRequest(Object produceRequest) { - this.produceRequest = produceRequest; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Long getUpdateTime() { - return updateTime; - } - - public void setUpdateTime(Long updateTime) { - this.updateTime = updateTime; - } - - public Long getLogicalClusterId() { - return logicalClusterId; - } - - public void setLogicalClusterId(Long logicalClusterId) { - this.logicalClusterId = logicalClusterId; - } - - @Override - public String toString() { - return "TopicOverview{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", replicaNum=" + replicaNum + - ", partitionNum=" + partitionNum + - ", retentionTime=" + retentionTime + - ", byteIn=" + byteIn + - ", byteOut=" + byteOut + - ", produceRequest=" + produceRequest + - ", appName='" + appName + '\'' + - ", appId='" + appId + '\'' + - ", description='" + description + '\'' + - ", updateTime=" + updateTime + - ", logicalClusterId=" + logicalClusterId + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicPartitionDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicPartitionDTO.java deleted file mode 100644 index 96e96e50..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/ao/topic/TopicPartitionDTO.java +++ /dev/null @@ -1,147 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.ao.topic; - -import java.util.List; - -/** - * @author arthur - * @date 2017/6/6. 
- */ -public class TopicPartitionDTO { - private Integer partitionId; - - private Long beginningOffset; - - private Long endOffset; - - private Long msgNum; - - private Integer leaderBrokerId; - - private Integer preferredBrokerId; - - private Integer leaderEpoch; - - private List replicaBrokerIdList; - - private List isrBrokerIdList; - - private Boolean underReplicated; - - private Long logSize; - - private String location; - - public Integer getPartitionId() { - return partitionId; - } - - public void setPartitionId(Integer partitionId) { - this.partitionId = partitionId; - } - - public Long getBeginningOffset() { - return beginningOffset; - } - - public void setBeginningOffset(Long beginningOffset) { - this.beginningOffset = beginningOffset; - } - - public Long getEndOffset() { - return endOffset; - } - - public void setEndOffset(Long endOffset) { - this.endOffset = endOffset; - } - - public Long getMsgNum() { - return msgNum; - } - - public void setMsgNum(Long msgNum) { - this.msgNum = msgNum; - } - - public Integer getLeaderBrokerId() { - return leaderBrokerId; - } - - public void setLeaderBrokerId(Integer leaderBrokerId) { - this.leaderBrokerId = leaderBrokerId; - } - - public Integer getPreferredBrokerId() { - return preferredBrokerId; - } - - public void setPreferredBrokerId(Integer preferredBrokerId) { - this.preferredBrokerId = preferredBrokerId; - } - - public Integer getLeaderEpoch() { - return leaderEpoch; - } - - public void setLeaderEpoch(Integer leaderEpoch) { - this.leaderEpoch = leaderEpoch; - } - - public List getReplicaBrokerIdList() { - return replicaBrokerIdList; - } - - public void setReplicaBrokerIdList(List replicaBrokerIdList) { - this.replicaBrokerIdList = replicaBrokerIdList; - } - - public List getIsrBrokerIdList() { - return isrBrokerIdList; - } - - public void setIsrBrokerIdList(List isrBrokerIdList) { - this.isrBrokerIdList = isrBrokerIdList; - } - - public Boolean getUnderReplicated() { - return underReplicated; - } - - public void setUnderReplicated(Boolean underReplicated) { - this.underReplicated = underReplicated; - } - - public Long getLogSize() { - return logSize; - } - - public void setLogSize(Long logSize) { - this.logSize = logSize; - } - - public String getLocation() { - return location; - } - - public void setLocation(String location) { - this.location = location; - } - - @Override - public String toString() { - return "TopicPartitionDTO{" + - "partitionId=" + partitionId + - ", beginningOffset=" + beginningOffset + - ", endOffset=" + endOffset + - ", msgNum=" + msgNum + - ", leaderBrokerId=" + leaderBrokerId + - ", preferredBrokerId=" + preferredBrokerId + - ", leaderEpoch=" + leaderEpoch + - ", replicaBrokerIdList=" + replicaBrokerIdList + - ", isrBrokerIdList=" + isrBrokerIdList + - ", underReplicated=" + underReplicated + - ", logSize=" + logSize + - ", location='" + location + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/ClusterTopicDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/ClusterTopicDTO.java deleted file mode 100644 index 7a23191d..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/ClusterTopicDTO.java +++ /dev/null @@ -1,50 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto; - -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * 
@date 20/4/23 - */ -@ApiModel(description="Topic信息") -public class ClusterTopicDTO { - @ApiModelProperty(value = "集群ID") - protected Long clusterId; - - @ApiModelProperty(value = "Topic名称") - protected String topicName; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - @Override - public String toString() { - return "ClusterTopicDTO{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - '}'; - } - - public boolean paramLegal() { - if (ValidateUtils.isNull(clusterId) - || ValidateUtils.isNull(topicName)) { - return false; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/config/ConfigDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/config/ConfigDTO.java deleted file mode 100644 index 397a8b9a..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/config/ConfigDTO.java +++ /dev/null @@ -1,65 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.config; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/3/20 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(description = "配置") -public class ConfigDTO { - @ApiModelProperty(value = "配置key") - private String configKey; - - @ApiModelProperty(value = "配置value") - private String configValue; - - @ApiModelProperty(value = "备注") - private String configDescription; - - public String getConfigKey() { - return configKey; - } - - public void setConfigKey(String configKey) { - this.configKey = configKey; - } - - public String getConfigValue() { - return configValue; - } - - public void setConfigValue(String configValue) { - this.configValue = configValue; - } - - public String getConfigDescription() { - return configDescription; - } - - public void setConfigDescription(String configDescription) { - this.configDescription = configDescription; - } - - @Override - public String toString() { - return "ConfigDTO{" + - "configKey='" + configKey + '\'' + - ", configValue='" + configValue + '\'' + - ", configDescription='" + configDescription + '\'' + - '}'; - } - - public boolean paramLegal() { - if (ValidateUtils.isExistBlank(configKey) - || ValidateUtils.isBlank(configValue) - || ValidateUtils.isBlank(configDescription)) { - return false; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/gateway/KafkaAclSearchDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/gateway/KafkaAclSearchDTO.java deleted file mode 100644 index 8285cb46..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/gateway/KafkaAclSearchDTO.java +++ /dev/null @@ -1,61 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.gateway; - -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/7/7 - */ -public class KafkaAclSearchDTO { - 
@ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "开始时间(ms)") - private Long start; - - @ApiModelProperty(value = "结束时间(ms)") - private Long end; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public Long getStart() { - return start; - } - - public void setStart(Long start) { - this.start = start; - } - - public Long getEnd() { - return end; - } - - public void setEnd(Long end) { - this.end = end; - } - - @Override - public String toString() { - return "KafkaAclSearchDTO{" + - "clusterId=" + clusterId + - ", start=" + start + - ", end=" + end + - '}'; - } - - public boolean paramLegal() { - if (ValidateUtils.isNull(clusterId) - || ValidateUtils.isNull(start) - || ValidateUtils.isNull(end)) { - return false; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/gateway/KafkaUserSearchDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/gateway/KafkaUserSearchDTO.java deleted file mode 100644 index 40b9ebb7..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/gateway/KafkaUserSearchDTO.java +++ /dev/null @@ -1,51 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.gateway; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/7/7 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(description = "Kafka用户查询") -public class KafkaUserSearchDTO { - @ApiModelProperty(value = "开始时间(ms)") - private Long start; - - @ApiModelProperty(value = "结束时间(ms)") - private Long end; - - public Long getStart() { - return start; - } - - public void setStart(Long start) { - this.start = start; - } - - public Long getEnd() { - return end; - } - - public void setEnd(Long end) { - this.end = end; - } - - @Override - public String toString() { - return "KafkaUserSearchDTO{" + - "start=" + start + - ", end=" + end + - '}'; - } - - public boolean paramLegal() { - if (ValidateUtils.isNullOrLessThanZero(start) || ValidateUtils.isNullOrLessThanZero(end)) { - return false; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/gateway/TopicQuotaDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/gateway/TopicQuotaDTO.java deleted file mode 100644 index 5719cd28..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/gateway/TopicQuotaDTO.java +++ /dev/null @@ -1,47 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.gateway; - -import com.xiaojukeji.kafka.manager.common.entity.dto.ClusterTopicDTO; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -@ApiModel(description = "配额调整") -public class TopicQuotaDTO extends ClusterTopicDTO { - @ApiModelProperty(value = "appId") - private String appId; - - @ApiModelProperty(value = "发送数据速率B/s") - private Long produceQuota; - - @ApiModelProperty(value = "消费数据速率B/s") - private Long consumeQuota; - - public String getAppId() { - return appId; - } - - public void 
setAppId(String appId) { - this.appId = appId; - } - - public Long getProduceQuota() { - return produceQuota; - } - - public void setProduceQuota(Long produceQuota) { - this.produceQuota = produceQuota; - } - - public Long getConsumeQuota() { - return consumeQuota; - } - - public void setConsumeQuota(Long consumeQuota) { - this.consumeQuota = consumeQuota; - } - - @Override - public boolean paramLegal() { - return !ValidateUtils.isNullOrLessThanZero(clusterId) && !ValidateUtils.isBlank(topicName) && !ValidateUtils.isBlank(appId); - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/AppDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/AppDTO.java deleted file mode 100644 index b35e42d8..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/AppDTO.java +++ /dev/null @@ -1,76 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.normal; - -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/5/4 - */ -@ApiModel(description="App信息") -public class AppDTO { - @ApiModelProperty(value="AppId, 不可修改") - private String appId; - - @ApiModelProperty(value="App名称") - private String name; - - @ApiModelProperty(value="App描述") - private String description; - - @ApiModelProperty(value="App负责人") - private String principals; - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public String getPrincipals() { - return principals; - } - - public void setPrincipals(String principals) { - this.principals = principals; - } - - @Override - public String toString() { - return "AppDTO{" + - "appId='" + appId + '\'' + - ", name='" + name + '\'' + - ", description='" + description + '\'' + - ", principals='" + principals + '\'' + - '}'; - } - - public boolean legal() { - if (ValidateUtils.isBlank(appId) - || ValidateUtils.isBlank(name) - || ValidateUtils.isBlank(principals) - || ValidateUtils.isBlank(description)) { - return false; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/JmxSwitchDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/JmxSwitchDTO.java deleted file mode 100644 index ee6986cb..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/JmxSwitchDTO.java +++ /dev/null @@ -1,126 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.normal; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/8/21 - */ -@ApiModel(description = "JmxSwitch开关") -@JsonIgnoreProperties(ignoreUnknown = true) -public class JmxSwitchDTO { - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "是否是物理集群ID, True:是, False:否") - private Boolean 
isPhysicalClusterId; - - @ApiModelProperty(value = "Topic请求你JMX") - private String topicName; - - @ApiModelProperty(value = "Topic请求你JMX") - private Boolean openTopicRequestMetrics; - - @ApiModelProperty(value = "AppTopicJMX") - private Boolean openAppIdTopicMetrics; - - @ApiModelProperty(value = "客户端请求JMX") - private Boolean openClientRequestMetrics; - - @ApiModelProperty(value = "磁盘JMX") - private Boolean openDiskMetrics; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public Boolean getPhysicalClusterId() { - return isPhysicalClusterId; - } - - public void setPhysicalClusterId(Boolean physicalClusterId) { - isPhysicalClusterId = physicalClusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Boolean getOpenTopicRequestMetrics() { - return openTopicRequestMetrics; - } - - public void setOpenTopicRequestMetrics(Boolean openTopicRequestMetrics) { - this.openTopicRequestMetrics = openTopicRequestMetrics; - } - - public Boolean getOpenAppIdTopicMetrics() { - return openAppIdTopicMetrics; - } - - public void setOpenAppIdTopicMetrics(Boolean openAppIdTopicMetrics) { - this.openAppIdTopicMetrics = openAppIdTopicMetrics; - } - - public Boolean getOpenClientRequestMetrics() { - return openClientRequestMetrics; - } - - public void setOpenClientRequestMetrics(Boolean openClientRequestMetrics) { - this.openClientRequestMetrics = openClientRequestMetrics; - } - - public Boolean getOpenDiskMetrics() { - return openDiskMetrics; - } - - public void setOpenDiskMetrics(Boolean openDiskMetrics) { - this.openDiskMetrics = openDiskMetrics; - } - - @Override - public String toString() { - return "JmxSwitchDTO{" + - "clusterId=" + clusterId + - ", isPhysicalClusterId=" + isPhysicalClusterId + - ", topicName='" + topicName + '\'' + - ", openTopicRequestMetrics=" + openTopicRequestMetrics + - ", openAppIdTopicMetrics=" + openAppIdTopicMetrics + - ", openClientRequestMetrics=" + openClientRequestMetrics + - ", openDiskMetrics=" + openDiskMetrics + - '}'; - } - - public boolean paramLegal() { - if (ValidateUtils.isNull(clusterId) - || ValidateUtils.isNull(isPhysicalClusterId) - || ValidateUtils.isNull(topicName)) { - return false; - } - - if (ValidateUtils.isNull(openTopicRequestMetrics)) { - openTopicRequestMetrics = Boolean.FALSE; - } - if (ValidateUtils.isNull(openAppIdTopicMetrics)) { - openAppIdTopicMetrics = Boolean.FALSE; - } - if (ValidateUtils.isNull(openClientRequestMetrics)) { - openClientRequestMetrics = Boolean.FALSE; - } - if (ValidateUtils.isNull(openDiskMetrics)) { - openDiskMetrics = Boolean.FALSE; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/KafkaFileDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/KafkaFileDTO.java deleted file mode 100644 index b9337661..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/KafkaFileDTO.java +++ /dev/null @@ -1,143 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.normal; - -import com.xiaojukeji.kafka.manager.common.bizenum.KafkaFileEnum; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; -import 
org.springframework.web.multipart.MultipartFile; - -/** - * @author zengqiao - * @date 20/4/29 - */ -@ApiModel(description = "Kafka文件") -public class KafkaFileDTO { - @ApiModelProperty(value = "ID") - private Long id; - - @ApiModelProperty(value = "集群ID, 创建的时候需要, 修改不需要, 如果是包,则传-1") - private Long clusterId; - - @ApiModelProperty(value = "文件名, 创建时需要, 修改不需要") - private String fileName; - - @ApiModelProperty(value = "文件MD5") - private String fileMd5; - - @ApiModelProperty(value = "文件类型, 创建时需要, 修改不需要") - private Integer fileType; - - @ApiModelProperty(value = "备注") - private String description; - - @ApiModelProperty(value = "上传的文件") - private MultipartFile uploadFile; - - @ApiModelProperty(value = "是更新操作") - private Boolean modify; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getFileName() { - return fileName; - } - - public void setFileName(String fileName) { - this.fileName = fileName; - } - - public String getFileMd5() { - return fileMd5; - } - - public void setFileMd5(String fileMd5) { - this.fileMd5 = fileMd5; - } - - public Integer getFileType() { - return fileType; - } - - public void setFileType(Integer fileType) { - this.fileType = fileType; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public MultipartFile getUploadFile() { - return uploadFile; - } - - public void setUploadFile(MultipartFile uploadFile) { - this.uploadFile = uploadFile; - } - - public Boolean getModify() { - return modify; - } - - public void setModify(Boolean modify) { - this.modify = modify; - } - - @Override - public String toString() { - return "KafkaFileDTO{" + - "id=" + id + - ", clusterId=" + clusterId + - ", fileName='" + fileName + '\'' + - ", fileMd5='" + fileMd5 + '\'' + - ", fileType=" + fileType + - ", description='" + description + '\'' + - '}'; - } - - public boolean createParamLegal() { - if (ValidateUtils.isNull(clusterId) || - ValidateUtils.isBlank(fileName) || - ValidateUtils.isNull(fileType) || - ValidateUtils.isNull(fileMd5) || - ValidateUtils.isNull(uploadFile)) { - return false; - } - if (!(fileName.endsWith(KafkaFileEnum.PACKAGE.getSuffix()) - || fileName.endsWith(KafkaFileEnum.SERVER_CONFIG.getSuffix()))) { - // 后缀不对 - return false; - } - if (KafkaFileEnum.PACKAGE.getCode().equals(fileType) && clusterId != -1) { - // 包不属于任何集群 - return false; - } - return true; - } - - public boolean modifyParamLegal() { - if (ValidateUtils.isBlank(fileName) || - ValidateUtils.isNull(fileMd5) || - ValidateUtils.isNull(uploadFile)) { - return false; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/LoginDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/LoginDTO.java deleted file mode 100644 index 0fa5c77c..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/LoginDTO.java +++ /dev/null @@ -1,51 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.normal; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import io.swagger.annotations.ApiModel; - -/** - * @author zengqiao - * @date 19/5/3 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(description = 
"登陆") -public class LoginDTO { - private String username; - - private String password; - - private String code; - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = username; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public String getCode() { - return code; - } - - public void setCode(String code) { - this.code = code; - } - - @Override - public String toString() { - return "LoginDTO{" + - "username='" + username + '\'' + - ", password='" + password + '\'' + - ", code='" + code + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/TopicDataSampleDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/TopicDataSampleDTO.java deleted file mode 100644 index 82876522..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/TopicDataSampleDTO.java +++ /dev/null @@ -1,100 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.normal; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.constant.TopicSampleConstant; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModelProperty; - -/** - * Topic采样 - * @author zengqiao - * @date 19/4/8 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -public class TopicDataSampleDTO { - @ApiModelProperty(value = "分区Id") - private Integer partitionId; - - @ApiModelProperty(value = "最大采样条数[必须小于100]") - private Integer maxMsgNum; - - @ApiModelProperty(value = "采样超时时间[必须小于10000]") - private Integer timeout; - - @ApiModelProperty(value = "采样offset") - private Long offset; - - @ApiModelProperty(value = "截断") - private Boolean truncate; - - @ApiModelProperty(value = "是否是集群ID, 默认不是") - private Boolean isPhysicalClusterId; - - public Integer getPartitionId() { - return partitionId; - } - - public void setPartitionId(Integer partitionId) { - this.partitionId = partitionId; - } - - public Integer getMaxMsgNum() { - return maxMsgNum; - } - - public void setMaxMsgNum(Integer maxMsgNum) { - this.maxMsgNum = maxMsgNum; - } - - public Integer getTimeout() { - return timeout; - } - - public void setTimeout(Integer timeout) { - this.timeout = timeout; - } - - public Long getOffset() { - return offset; - } - - public void setOffset(Long offset) { - this.offset = offset; - } - - public Boolean getTruncate() { - return truncate; - } - - public void setTruncate(Boolean truncate) { - this.truncate = truncate; - } - - public Boolean getIsPhysicalClusterId() { - return isPhysicalClusterId; - } - - public void setIsPhysicalClusterId(Boolean isPhysicalClusterId) { - this.isPhysicalClusterId = isPhysicalClusterId; - } - - @Override - public String toString() { - return "TopicDataSampleDTO{" + - "partitionId=" + partitionId + - ", maxMsgNum=" + maxMsgNum + - ", timeout=" + timeout + - ", offset=" + offset + - ", truncate=" + truncate + - ", isPhysicalClusterId=" + isPhysicalClusterId + - '}'; - } - - public void adjustConfig() { - timeout = Math.min(timeout, TopicSampleConstant.MAX_TIMEOUT_UNIT_MS); - maxMsgNum = Math.min(maxMsgNum, TopicSampleConstant.MAX_MSG_NUM); - if (ValidateUtils.isNull(isPhysicalClusterId)) { - isPhysicalClusterId = false; - } - } -} \ No newline at end of file diff --git 
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/TopicModifyDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/TopicModifyDTO.java deleted file mode 100644 index d59b5471..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/TopicModifyDTO.java +++ /dev/null @@ -1,41 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.normal; - -import com.xiaojukeji.kafka.manager.common.entity.dto.ClusterTopicDTO; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/8 - */ -public class TopicModifyDTO extends ClusterTopicDTO { - @ApiModelProperty(value = "描述") - private String description; - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - @Override - public String toString() { - return "TopicModifyDTO{" + - "description='" + description + '\'' + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - '}'; - } - - @Override - public boolean paramLegal() { - if (ValidateUtils.isNull(clusterId) - || ValidateUtils.isExistBlank(topicName) - || ValidateUtils.isNull(description)) { - return false; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/TopicOffsetResetDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/TopicOffsetResetDTO.java deleted file mode 100644 index 0e2aadb0..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/TopicOffsetResetDTO.java +++ /dev/null @@ -1,139 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.normal; - -import com.xiaojukeji.kafka.manager.common.bizenum.OffsetLocationEnum; -import com.xiaojukeji.kafka.manager.common.entity.ao.PartitionOffsetDTO; -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * 重置offset - * @author zengqiao - * @date 19/4/8 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -public class TopicOffsetResetDTO { - @ApiModelProperty(value = "集群Id") - private Long clusterId; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "消费组") - private String consumerGroup; - - @ApiModelProperty(value = "存储位置") - private String location; - - @ApiModelProperty(value = "重置到指定offset") - private List offsetList; - - @ApiModelProperty(value = "重置到指定时间") - private Long timestamp; - - @ApiModelProperty(value = "是否是物理集群ID") - private Boolean isPhysicalClusterId; - - @ApiModelProperty(value = "指定offset的位置(0 不指定,1 最旧的offset,2 最新的offset)") - private Integer offsetPos; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getConsumerGroup() { - return consumerGroup; - } - - public void setConsumerGroup(String consumerGroup) { - this.consumerGroup = consumerGroup; - } - - public String getLocation() { - return location; - } - - public void 
setLocation(String location) { - this.location = location; - } - - public List getOffsetList() { - return offsetList; - } - - public void setOffsetList(List offsetList) { - this.offsetList = offsetList; - } - - public Long getTimestamp() { - return timestamp; - } - - public void setTimestamp(Long timestamp) { - this.timestamp = timestamp; - } - - public Boolean getIsPhysicalClusterId() { - return isPhysicalClusterId; - } - - public void setIsPhysicalClusterId(Boolean isPhysicalClusterId) { - this.isPhysicalClusterId = isPhysicalClusterId; - } - - public Integer getOffsetPos() { - return offsetPos; - } - - public void setOffsetPos(Integer offsetPos) { - this.offsetPos = offsetPos; - } - - @Override - public String toString() { - return "TopicOffsetResetDTO{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", consumerGroup='" + consumerGroup + '\'' + - ", location='" + location + '\'' + - ", offsetList=" + offsetList + - ", timestamp=" + timestamp + - ", isPhysicalClusterId=" + isPhysicalClusterId + - ", offsetPos=" + offsetPos + - '}'; - } - - public boolean legal() { - if (clusterId == null - || ValidateUtils.isExistBlank(topicName) - || ValidateUtils.isExistBlank(consumerGroup) - || OffsetLocationEnum.getOffsetStoreLocation(location) == null) { - return false; - } - - if (isPhysicalClusterId == null) { - isPhysicalClusterId = false; - } - if (timestamp == null && offsetList == null && ValidateUtils.isNullOrLessThanZero(offsetPos)) { - return false; - } - if (ValidateUtils.isNull(offsetPos)) { - offsetPos = 0; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/TopicRetainDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/TopicRetainDTO.java deleted file mode 100644 index 0472a7c0..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/normal/TopicRetainDTO.java +++ /dev/null @@ -1,44 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.normal; - -import com.xiaojukeji.kafka.manager.common.entity.dto.ClusterTopicDTO; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/8 - */ -@ApiModel(description = "Topic延长保留") -public class TopicRetainDTO extends ClusterTopicDTO{ - - @ApiModelProperty(value = "延期天数") - private Integer retainDays; - - public Integer getRetainDays() { - return retainDays; - } - - public void setRetainDays(Integer retainDays) { - this.retainDays = retainDays; - } - - @Override - public String toString() { - return "TopicRetainDTO{" + - "retainDays=" + retainDays + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - '}'; - } - - @Override - public boolean paramLegal() { - if (ValidateUtils.isNull(clusterId) - || ValidateUtils.isNull(topicName) - || ValidateUtils.isNull(retainDays)) { - return false; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/ControllerPreferredCandidateDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/ControllerPreferredCandidateDTO.java deleted file mode 100644 index 1b4c95b9..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/ControllerPreferredCandidateDTO.java +++ 
/dev/null @@ -1,45 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.op; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 21/01/24 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(description="优选为Controller的候选者") -public class ControllerPreferredCandidateDTO { - @ApiModelProperty(value="集群ID") - private Long clusterId; - - @ApiModelProperty(value="优选为controller的BrokerId") - private List brokerIdList; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public List getBrokerIdList() { - return brokerIdList; - } - - public void setBrokerIdList(List brokerIdList) { - this.brokerIdList = brokerIdList; - } - - @Override - public String toString() { - return "ControllerPreferredCandidateDTO{" + - "clusterId=" + clusterId + - ", brokerIdList=" + brokerIdList + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/KafkaPackageDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/KafkaPackageDTO.java deleted file mode 100644 index 86314d8d..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/KafkaPackageDTO.java +++ /dev/null @@ -1,25 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.op; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/26 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(description="集群任务") -public class KafkaPackageDTO { - @ApiModelProperty(value="名称") - private String name; - - @ApiModelProperty(value="文件类型") - private Integer fileType; - - @ApiModelProperty(value="md5") - private String md5; - - @ApiModelProperty(value="描述备注") - private String description; -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/RebalanceDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/RebalanceDTO.java deleted file mode 100644 index a27ca9a0..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/RebalanceDTO.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.op; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.bizenum.RebalanceDimensionEnum; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 19/7/8 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(description = "优先副本选举") -public class RebalanceDTO { - @ApiModelProperty(value = "clusterId") - private Long clusterId; - - @ApiModelProperty(value = "RegionId") - private Long regionId; - - @ApiModelProperty(value = "brokerId") - private Integer brokerId; - - @ApiModelProperty(value = "TopicName") - private String topicName; - - @ApiModelProperty(value = "分区ID") - private Integer partitionId; - - @ApiModelProperty(value = "维度[0: Cluster维度, 1: Region维度, 2:Broker维度, 3:Topic维度, 4:Partition纬度]") - private Integer dimension; - - public Long 
getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public Long getRegionId() { - return regionId; - } - - public void setRegionId(Long regionId) { - this.regionId = regionId; - } - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getPartitionId() { - return partitionId; - } - - public void setPartitionId(Integer partitionId) { - this.partitionId = partitionId; - } - - public Integer getDimension() { - return dimension; - } - - public void setDimension(Integer dimension) { - this.dimension = dimension; - } - - public boolean paramLegal() { - if (ValidateUtils.isNull(clusterId) - || (RebalanceDimensionEnum.REGION.getCode().equals(dimension) && ValidateUtils.isNull(regionId)) - || (RebalanceDimensionEnum.BROKER.getCode().equals(dimension) && ValidateUtils.isNull(brokerId)) - || (RebalanceDimensionEnum.TOPIC.getCode().equals(dimension) && ValidateUtils.isNull(topicName)) - || (RebalanceDimensionEnum.PARTITION.getCode().equals(dimension) && (ValidateUtils.isNull(topicName) || ValidateUtils.isNull(partitionId))) ) { - return false; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/reassign/ReassignExecDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/reassign/ReassignExecDTO.java deleted file mode 100644 index 74189544..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/reassign/ReassignExecDTO.java +++ /dev/null @@ -1,70 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.op.reassign; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.bizenum.TopicReassignActionEnum; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao_cn@163.com - * @date 19/4/17 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(description = "操作迁移任务") -public class ReassignExecDTO { - @ApiModelProperty(value = "任务ID") - private Long taskId; - - @ApiModelProperty(value = "动作[start|modify|cancel]") - private String action; - - @ApiModelProperty(value = "开始时间(开始之后不可修改)") - private Long beginTime; - - public Long getTaskId() { - return taskId; - } - - public void setTaskId(Long taskId) { - this.taskId = taskId; - } - - public String getAction() { - return action; - } - - public void setAction(String action) { - this.action = action; - } - - public Long getBeginTime() { - return beginTime; - } - - public void setBeginTime(Long beginTime) { - this.beginTime = beginTime; - } - - @Override - public String toString() { - return "ReassignExecDTO{" + - "taskId=" + taskId + - ", action='" + action + '\'' + - ", beginTime=" + beginTime + - '}'; - } - - public boolean paramLegal() { - if (ValidateUtils.isNull(taskId) - || ValidateUtils.isBlank(action)) { - return false; - } - - if (TopicReassignActionEnum.MODIFY.getAction().equals(action) - && ValidateUtils.isNullOrLessThanZero(beginTime)) { - return false; - } - return true; - } -} \ No newline at end of file diff --git 
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/reassign/ReassignExecSubDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/reassign/ReassignExecSubDTO.java deleted file mode 100644 index 52a1d8c9..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/reassign/ReassignExecSubDTO.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.op.reassign; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/6/11 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(description = "操作迁移子任务") -public class ReassignExecSubDTO { - @ApiModelProperty(value = "子任务ID") - private Long subTaskId; - - @ApiModelProperty(value = "动作[modify]") - private String action; - - @ApiModelProperty(value = "当前限流值(B/s), 完成之前可修改") - private Long throttle; - - @ApiModelProperty(value = "限流值上限(B/s), 完成之前可修改") - private Long maxThrottle; - - @ApiModelProperty(value = "限流值下限(B/s), 完成之前可修改") - private Long minThrottle; - - public Long getSubTaskId() { - return subTaskId; - } - - public void setSubTaskId(Long subTaskId) { - this.subTaskId = subTaskId; - } - - public String getAction() { - return action; - } - - public void setAction(String action) { - this.action = action; - } - - public Long getThrottle() { - return throttle; - } - - public void setThrottle(Long throttle) { - this.throttle = throttle; - } - - public Long getMaxThrottle() { - return maxThrottle; - } - - public void setMaxThrottle(Long maxThrottle) { - this.maxThrottle = maxThrottle; - } - - public Long getMinThrottle() { - return minThrottle; - } - - public void setMinThrottle(Long minThrottle) { - this.minThrottle = minThrottle; - } - - @Override - public String toString() { - return "ReassignExecSubDTO{" + - "subTaskId=" + subTaskId + - ", action='" + action + '\'' + - ", throttle=" + throttle + - ", maxThrottle=" + maxThrottle + - ", minThrottle=" + minThrottle + - '}'; - } - - public boolean paramLegal() { - if (ValidateUtils.isNull(subTaskId) - || ValidateUtils.isBlank(action) - || ValidateUtils.isNullOrLessThanZero(throttle) - || ValidateUtils.isNullOrLessThanZero(maxThrottle) - || ValidateUtils.isNullOrLessThanZero(minThrottle) - || maxThrottle < throttle || throttle < minThrottle) { - return false; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/reassign/ReassignTopicDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/reassign/ReassignTopicDTO.java deleted file mode 100644 index 59aa0ae6..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/reassign/ReassignTopicDTO.java +++ /dev/null @@ -1,172 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.op.reassign; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * 迁移(Topic迁移/Partition迁移) - * @author zengqiao_cn@163.com - * @date 19/4/9 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(description = "Topic迁移") -public class 
ReassignTopicDTO { - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "目标BrokerID列表") - private List brokerIdList; - - @ApiModelProperty(value = "目标RegionID") - private Long regionId; - - @ApiModelProperty(value = "分区ID") - private List partitionIdList; - - @ApiModelProperty(value = "限流值(B/s)") - private Long throttle; - - @ApiModelProperty(value = "限流上限(B/s)") - private Long maxThrottle; - - @ApiModelProperty(value = "限流下限(B/s)") - private Long minThrottle; - - @ApiModelProperty(value = "原本的保存时间(ms)") - private Long originalRetentionTime; - - @ApiModelProperty(value = "迁移时的保存时间(ms)") - private Long reassignRetentionTime; - - @ApiModelProperty(value = "开始时间(ms, 时间戳)") - private Long beginTime; - - @ApiModelProperty(value = "备注") - private String description; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public List getBrokerIdList() { - return brokerIdList; - } - - public void setBrokerIdList(List brokerIdList) { - this.brokerIdList = brokerIdList; - } - - public Long getRegionId() { - return regionId; - } - - public void setRegionId(Long regionId) { - this.regionId = regionId; - } - - public List getPartitionIdList() { - return partitionIdList; - } - - public void setPartitionIdList(List partitionIdList) { - this.partitionIdList = partitionIdList; - } - - public Long getThrottle() { - return throttle; - } - - public void setThrottle(Long throttle) { - this.throttle = throttle; - } - - public Long getMaxThrottle() { - return maxThrottle; - } - - public void setMaxThrottle(Long maxThrottle) { - this.maxThrottle = maxThrottle; - } - - public Long getMinThrottle() { - return minThrottle; - } - - public void setMinThrottle(Long minThrottle) { - this.minThrottle = minThrottle; - } - - public Long getOriginalRetentionTime() { - return originalRetentionTime; - } - - public void setOriginalRetentionTime(Long originalRetentionTime) { - this.originalRetentionTime = originalRetentionTime; - } - - public Long getReassignRetentionTime() { - return reassignRetentionTime; - } - - public void setReassignRetentionTime(Long reassignRetentionTime) { - this.reassignRetentionTime = reassignRetentionTime; - } - - public Long getBeginTime() { - return beginTime; - } - - public void setBeginTime(Long beginTime) { - this.beginTime = beginTime; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public boolean paramLegal() { - if (ValidateUtils.isNull(clusterId) - || ValidateUtils.isExistBlank(topicName) - || ValidateUtils.isNullOrLessThanZero(throttle) - || ValidateUtils.isNullOrLessThanZero(maxThrottle) - || ValidateUtils.isNullOrLessThanZero(minThrottle) - || maxThrottle < throttle || throttle < minThrottle - || ValidateUtils.isNullOrLessThanZero(originalRetentionTime) - || ValidateUtils.isNullOrLessThanZero(reassignRetentionTime) - || originalRetentionTime < reassignRetentionTime - || ValidateUtils.isNullOrLessThanZero(beginTime)) { - return false; - } - if (ValidateUtils.isNull(description)) { - description = ""; - } - - if (ValidateUtils.isEmptyList(brokerIdList) && ValidateUtils.isNull(regionId)) { - return false; - } - return true; - } -} \ No newline at end 
of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/topic/TopicCreationDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/topic/TopicCreationDTO.java deleted file mode 100644 index b92ef7c1..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/topic/TopicCreationDTO.java +++ /dev/null @@ -1,149 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.op.topic; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.entity.dto.ClusterTopicDTO; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; -import java.util.Properties; - -/** - * @author huangyiminghappy@163.com, zengqiao - * @date 2019-04-21 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(description = "Topic创建") -public class TopicCreationDTO extends ClusterTopicDTO { - @ApiModelProperty(value = "AppID") - private String appId; - - @ApiModelProperty(value = "分区数") - private Integer partitionNum; - - @ApiModelProperty(value = "副本数") - private Integer replicaNum; - - @ApiModelProperty(value = "消息保存时间(ms)") - private Long retentionTime; - - @ApiModelProperty(value = "brokerId列表") - private List brokerIdList; - - @ApiModelProperty(value = "RegionId") - private Long regionId; - - @ApiModelProperty(value = "备注") - private String description; - - @ApiModelProperty(value = "Topic属性列表") - private Properties properties; - - @ApiModelProperty(value = "最大写入字节数") - private Long peakBytesIn; - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public Integer getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Integer partitionNum) { - this.partitionNum = partitionNum; - } - - public Integer getReplicaNum() { - return replicaNum; - } - - public void setReplicaNum(Integer replicaNum) { - this.replicaNum = replicaNum; - } - - public Long getRetentionTime() { - return retentionTime; - } - - public void setRetentionTime(Long retentionTime) { - this.retentionTime = retentionTime; - } - - public List getBrokerIdList() { - return brokerIdList; - } - - public void setBrokerIdList(List brokerIdList) { - this.brokerIdList = brokerIdList; - } - - public Long getRegionId() { - return regionId; - } - - public void setRegionId(Long regionId) { - this.regionId = regionId; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Properties getProperties() { - return properties; - } - - public void setProperties(Properties properties) { - this.properties = properties; - } - - public Long getPeakBytesIn() { - return peakBytesIn; - } - - public void setPeakBytesIn(Long peakBytesIn) { - this.peakBytesIn = peakBytesIn; - } - - @Override - public String toString() { - return "TopicCreationDTO{" + - "appId='" + appId + '\'' + - ", partitionNum=" + partitionNum + - ", replicaNum=" + replicaNum + - ", retentionTime=" + retentionTime + - ", brokerIdList=" + brokerIdList + - ", regionId=" + regionId + - ", description='" + description + '\'' + - ", properties='" + properties + '\'' + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - '}'; - } - - @Override - public boolean paramLegal() { - if 
(ValidateUtils.isNull(appId) - || ValidateUtils.isNull(clusterId) - || ValidateUtils.isNull(topicName) - || ValidateUtils.isNull(partitionNum) || partitionNum <= 0 - || ValidateUtils.isNull(replicaNum) || replicaNum <= 0 - || ValidateUtils.isNull(retentionTime) || retentionTime <= 0) { - return false; - } - if ((ValidateUtils.isNull(brokerIdList) || brokerIdList.isEmpty()) && ValidateUtils.isNull(regionId)) { - return false; - } - return true; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/topic/TopicDeletionDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/topic/TopicDeletionDTO.java deleted file mode 100644 index 735ee857..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/topic/TopicDeletionDTO.java +++ /dev/null @@ -1,44 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.op.topic; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.entity.dto.ClusterTopicDTO; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/2 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(value = "Topic删除") -public class TopicDeletionDTO extends ClusterTopicDTO { - @ApiModelProperty(value = "不强制") - private Boolean unForce; - - public Boolean getUnForce() { - return unForce; - } - - public void setUnForce(Boolean unForce) { - this.unForce = unForce; - } - - @Override - public String toString() { - return "TopicDeletionDTO{" + - "unForce=" + unForce + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - '}'; - } - - public boolean paramLegal() { - if (ValidateUtils.isNull(clusterId) - || ValidateUtils.isNull(topicName) - || ValidateUtils.isNull(unForce)) { - return false; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/topic/TopicExpansionDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/topic/TopicExpansionDTO.java deleted file mode 100644 index bb8a5eb8..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/topic/TopicExpansionDTO.java +++ /dev/null @@ -1,71 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.op.topic; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.entity.dto.ClusterTopicDTO; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/1/2 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(value = "Topic扩分区") -public class TopicExpansionDTO extends ClusterTopicDTO { - @ApiModelProperty(value = "新增分区数") - private Integer partitionNum; - - @ApiModelProperty(value = "brokerId列表") - private List brokerIdList; - - @ApiModelProperty(value = "regionId") - private Long regionId; - - public Integer getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Integer partitionNum) { - this.partitionNum = partitionNum; - } - - public List getBrokerIdList() { - return brokerIdList; - } - - public void setBrokerIdList(List brokerIdList) { - this.brokerIdList = 
brokerIdList; - } - - public Long getRegionId() { - return regionId; - } - - public void setRegionId(Long regionId) { - this.regionId = regionId; - } - - @Override - public String toString() { - return "TopicExpandDTO{" + - ", partitionNum=" + partitionNum + - ", brokerIdList=" + brokerIdList + - ", regionId=" + regionId + - '}'; - } - - public boolean paramLegal() { - if (ValidateUtils.isNull(clusterId) - || ValidateUtils.isNull(topicName) - || ValidateUtils.isNull(partitionNum) || partitionNum <= 0) { - return false; - } - if (ValidateUtils.isEmptyList(brokerIdList) && ValidateUtils.isNull(regionId)) { - return false; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/topic/TopicModificationDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/topic/TopicModificationDTO.java deleted file mode 100644 index 5d03264e..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/op/topic/TopicModificationDTO.java +++ /dev/null @@ -1,85 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.op.topic; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.entity.dto.ClusterTopicDTO; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.Properties; - -/** - * @author zengqiao - * @date 20/4/24 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(value = "Topic修改") -public class TopicModificationDTO extends ClusterTopicDTO { - @ApiModelProperty(value = "AppID") - private String appId; - - @ApiModelProperty(value = "消息保存时间(ms)") - private Long retentionTime; - - @ApiModelProperty(value = "备注") - private String description; - - @ApiModelProperty(value = "Topic属性列表") - private Properties properties; - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public Long getRetentionTime() { - return retentionTime; - } - - public void setRetentionTime(Long retentionTime) { - this.retentionTime = retentionTime; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Properties getProperties() { - return properties; - } - - public void setProperties(Properties properties) { - this.properties = properties; - } - - @Override - public String toString() { - return "TopicModificationDTO{" + - "appId='" + appId + '\'' + - ", retentionTime=" + retentionTime + - ", description='" + description + '\'' + - ", properties='" + properties + '\'' + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - '}'; - } - - @Override - public boolean paramLegal() { - if (ValidateUtils.isNull(clusterId) - || ValidateUtils.isNull(topicName) - || ValidateUtils.isNull(appId) - || ValidateUtils.isNull(retentionTime) - || ValidateUtils.isNull(description)) { - return false; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/AccountDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/AccountDTO.java deleted file mode 100644 index ab1147a3..00000000 --- 
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/AccountDTO.java +++ /dev/null @@ -1,103 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.rd; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 19/5/3 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(description = "用户") -public class AccountDTO { - @ApiModelProperty(value = "用户名") - private String username; - - @ApiModelProperty(value = "密码") - private String password; - - @ApiModelProperty(value = "角色") - private Integer role; - - @ApiModelProperty(value = "用户姓名") - private String displayName; - - @ApiModelProperty(value = "部门") - private String department; - - @ApiModelProperty(value = "邮箱") - private String mail; - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = username; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public Integer getRole() { - return role; - } - - public void setRole(Integer role) { - this.role = role; - } - - public String getDisplayName() { - return displayName; - } - - public void setDisplayName(String displayName) { - this.displayName = displayName; - } - - public String getDepartment() { - return department; - } - - public void setDepartment(String department) { - this.department = department; - } - - public String getMail() { - return mail; - } - - public void setMail(String mail) { - this.mail = mail; - } - - @Override - public String toString() { - return "AccountDTO{" + - "username='" + username + '\'' + - ", password='" + password + '\'' + - ", role=" + role + - ", displayName='" + displayName + '\'' + - ", department='" + department + '\'' + - ", mail='" + mail + '\'' + - '}'; - } - - public boolean legal() { - if (ValidateUtils.isNull(username) - || !(role == 0 || role == 1 || role == 2)) { - return false; - } - if (ValidateUtils.isNull(password)) { - password = ""; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/ClusterDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/ClusterDTO.java deleted file mode 100644 index 7afc09c6..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/ClusterDTO.java +++ /dev/null @@ -1,114 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.rd; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/23 - */ -@ApiModel(description = "集群接入&修改") -@JsonIgnoreProperties(ignoreUnknown = true) -public class ClusterDTO { - @ApiModelProperty(value="集群Id, 修改时传") - private Long clusterId; - - @ApiModelProperty(value="集群名称") - private String clusterName; - - @ApiModelProperty(value="ZK地址, 不允许修改") - private String zookeeper; - - @ApiModelProperty(value="bootstrap地址") - private String bootstrapServers; - - @ApiModelProperty(value="数据中心") - private String idc; - - @ApiModelProperty(value="Kafka安全配置") - private String securityProperties; - - 
@ApiModelProperty(value="Jmx配置") - private String jmxProperties; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getZookeeper() { - return zookeeper; - } - - public void setZookeeper(String zookeeper) { - this.zookeeper = zookeeper; - } - - public String getBootstrapServers() { - return bootstrapServers; - } - - public void setBootstrapServers(String bootstrapServers) { - this.bootstrapServers = bootstrapServers; - } - - public String getIdc() { - return idc; - } - - public void setIdc(String idc) { - this.idc = idc; - } - - public String getSecurityProperties() { - return securityProperties; - } - - public void setSecurityProperties(String securityProperties) { - this.securityProperties = securityProperties; - } - - public String getJmxProperties() { - return jmxProperties; - } - - public void setJmxProperties(String jmxProperties) { - this.jmxProperties = jmxProperties; - } - - @Override - public String toString() { - return "ClusterDTO{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", zookeeper='" + zookeeper + '\'' + - ", bootstrapServers='" + bootstrapServers + '\'' + - ", idc='" + idc + '\'' + - ", securityProperties='" + securityProperties + '\'' + - ", jmxProperties='" + jmxProperties + '\'' + - '}'; - } - - public boolean legal() { - if (ValidateUtils.isNull(clusterName) - || ValidateUtils.isNull(zookeeper) - || ValidateUtils.isNull(idc) - || ValidateUtils.isNull(bootstrapServers)) { - return false; - } - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/CustomScheduledTaskDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/CustomScheduledTaskDTO.java deleted file mode 100644 index d7fa2d72..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/CustomScheduledTaskDTO.java +++ /dev/null @@ -1,35 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.rd; - -/** - * @author zengqiao - * @date 20/8/11 - */ -public class CustomScheduledTaskDTO { - private String name; - - private String cron; - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getCron() { - return cron; - } - - public void setCron(String cron) { - this.cron = cron; - } - - @Override - public String toString() { - return "CustomScheduledTaskDTO{" + - "name='" + name + '\'' + - ", cron='" + cron + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/LogicalClusterDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/LogicalClusterDTO.java deleted file mode 100644 index c3420f68..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/LogicalClusterDTO.java +++ /dev/null @@ -1,132 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.rd; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.bizenum.ClusterModeEnum; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import 
io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/6/29 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(description = "LogicalClusterDTO") -public class LogicalClusterDTO { - @ApiModelProperty(value = "ID, 更新时必须传") - private Long id; - - @ApiModelProperty(value = "名称") - private String name; - - @ApiModelProperty(value = "集群标识, 用于告警的上报") - private String identification; - - @ApiModelProperty(value = "集群模式") - private Integer mode; - - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "regionId列表") - private List regionIdList; - - @ApiModelProperty(value = "所属应用ID") - private String appId; - - @ApiModelProperty(value = "备注") - private String description; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getIdentification() { - return identification; - } - - public void setIdentification(String identification) { - this.identification = identification; - } - - public Integer getMode() { - return mode; - } - - public void setMode(Integer mode) { - this.mode = mode; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public List getRegionIdList() { - return regionIdList; - } - - public void setRegionIdList(List regionIdList) { - this.regionIdList = regionIdList; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - @Override - public String toString() { - return "LogicalClusterDTO{" + - "id=" + id + - ", name='" + name + '\'' + - ", identification='" + identification + '\'' + - ", mode=" + mode + - ", clusterId=" + clusterId + - ", regionIdList=" + regionIdList + - ", appId='" + appId + '\'' + - ", description='" + description + '\'' + - '}'; - } - - public boolean legal() { - if (ValidateUtils.isNull(clusterId) || ValidateUtils.isEmptyList(regionIdList) || ValidateUtils.isNull(mode)) { - return false; - } - if (!ClusterModeEnum.SHARED_MODE.getCode().equals(mode) && ValidateUtils.isNull(appId)) { - return false; - } - appId = ValidateUtils.isNull(appId)? "": appId; - description = ValidateUtils.isNull(description)? "": description; - identification = ValidateUtils.isNull(identification)? 
name: identification; - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/OperateRecordDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/OperateRecordDTO.java deleted file mode 100644 index 7f191017..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/OperateRecordDTO.java +++ /dev/null @@ -1,86 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.rd; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.bizenum.ModuleEnum; -import com.xiaojukeji.kafka.manager.common.bizenum.OperateEnum; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModelProperty; - -import java.util.Date; - -/** - * @author zhongyuankai_i - * @date 20/09/03 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -public class OperateRecordDTO { - @ApiModelProperty("模块ID") - private Integer moduleId; - - @ApiModelProperty("操作ID") - private Integer operateId; - - @ApiModelProperty("操作人") - private String operator; - - @ApiModelProperty("开始时间") - private Long startTime; - - @ApiModelProperty("结束时间") - private Long endTime; - - public Integer getModuleId() { - return moduleId; - } - - public void setModuleId(Integer moduleId) { - this.moduleId = moduleId; - } - - public Integer getOperateId() { - return operateId; - } - - public void setOperateId(Integer operateId) { - this.operateId = operateId; - } - - public String getOperator() { - return operator; - } - - public void setOperator(String operator) { - this.operator = operator; - } - - public Long getStartTime() { - return startTime; - } - - public void setStartTime(Long startTime) { - this.startTime = startTime; - } - - public Long getEndTime() { - return endTime; - } - - public void setEndTime(Long endTime) { - this.endTime = endTime; - } - - @Override - public String toString() { - return "OperateRecordDTO{" + - "moduleId=" + moduleId + - ", operateId=" + operateId + - ", operator='" + operator + '\'' + - ", startTime=" + startTime + - ", endTime=" + endTime + - '}'; - } - - public boolean legal() { - return !ValidateUtils.isNull(moduleId) && ModuleEnum.validate(moduleId) && OperateEnum.validate(operateId); - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/RegionDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/RegionDTO.java deleted file mode 100644 index 7c30d681..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/dto/rd/RegionDTO.java +++ /dev/null @@ -1,103 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.dto.rd; - -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * 创建Region - * @author zengqiao - * @date 19/4/3 - */ -@JsonIgnoreProperties(ignoreUnknown = true) -@ApiModel(description = "RegionDTO") -public class RegionDTO { - @ApiModelProperty(value = "ID, 更新时必须传") - private Long id; - - @ApiModelProperty(value = "名称") - private String name; - - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "brokerId列表") - private List brokerIdList; - - @ApiModelProperty(value = "备注") - 
private String description; - - @ApiModelProperty(value = "状态, 0:正常 1:已满") - private Integer status; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public List getBrokerIdList() { - return brokerIdList; - } - - public void setBrokerIdList(List brokerIdList) { - this.brokerIdList = brokerIdList; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - @Override - public String toString() { - return "RegionDTO{" + - "id=" + id + - ", name='" + name + '\'' + - ", clusterId=" + clusterId + - ", brokerIdList=" + brokerIdList + - ", description='" + description + '\'' + - ", status=" + status + - '}'; - } - - public boolean legal() { - if (ValidateUtils.isNull(clusterId) || ValidateUtils.isEmptyList(brokerIdList) || ValidateUtils.isNull(status)) { - return false; - } - description = ValidateUtils.isNull(description)? "": description; - return true; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/BaseMetrics.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/BaseMetrics.java deleted file mode 100644 index 7752e384..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/BaseMetrics.java +++ /dev/null @@ -1,159 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.metrics; - -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import com.xiaojukeji.kafka.manager.common.utils.jmx.JmxConstant; - -import java.util.HashMap; -import java.util.Map; - -/** - * @author zengqiao - * @date 20/6/16 - */ -public class BaseMetrics { - protected Map metricsMap = new HashMap<>(); - - public Map getMetricsMap() { - return metricsMap; - } - - public void setMetricsMap(Map metricsMap) { - this.metricsMap = metricsMap; - } - - @Override - public String toString() { - return "BaseMetrics{" + - "metricsMap=" + metricsMap + - '}'; - } - - public Object getSpecifiedMetrics(String metricsName) { - return metricsMap.get(metricsName); - } - - public T getSpecifiedMetrics(String metricsName, Class cls) { - Object data = metricsMap.get(metricsName); - if (ValidateUtils.isNull(data)) { - return null; - } - return cls.cast(data); - } - - public Double getTotalProduceRequestsPerSecOneMinuteRate(Double defaultValue) { - Object data = metricsMap.get("TotalProduceRequestsPerSecOneMinuteRate"); - if (data == null) { - return defaultValue; - } - return Double.valueOf(data.toString()); - } - - public Double getTotalFetchRequestsPerSecOneMinuteRate(Double defaultValue) { - Object data = metricsMap.get("TotalFetchRequestsPerSecOneMinuteRate"); - if (data == null) { - return defaultValue; - } - return Double.valueOf(data.toString()); - } - - public Double getBytesInPerSecOneMinuteRate(Double defaultValue) { - Object data = metricsMap.get("BytesInPerSecOneMinuteRate"); - if (data == null) { - return defaultValue; - } - return Double.valueOf(data.toString()); - } - - public Double getBytesOutPerSecOneMinuteRate(Double 
defaultValue) { - Object data = metricsMap.get("BytesOutPerSecOneMinuteRate"); - if (data == null) { - return defaultValue; - } - return Double.valueOf(data.toString()); - } - - public Double getBytesRejectedPerSecOneMinuteRate(Double defaultValue) { - Object data = metricsMap.get("BytesRejectedPerSecOneMinuteRate"); - if (data == null) { - return defaultValue; - } - return Double.valueOf(data.toString()); - } - - public Double getMessagesInPerSecOneMinuteRate(Double defaultValue) { - Object data = metricsMap.get("MessagesInPerSecOneMinuteRate"); - if (data == null) { - return defaultValue; - } - return Double.valueOf(data.toString()); - } - - public void updateCreateTime(Long timestamp) { - metricsMap.put(JmxConstant.CREATE_TIME, timestamp); - } - - public BaseMetrics mergeByAdd(BaseMetrics metrics) { - if (metrics == null) { - return this; - } - for (Map.Entry entry: metrics.metricsMap.entrySet()) { - mergeByAdd(entry.getKey(), entry.getValue()); - } - return this; - } - - public BaseMetrics mergeByAdd(String objectKey, Object objectValue) { - if (objectKey == null || objectValue == null) { - return this; - } - if (!this.metricsMap.containsKey(objectKey)) { - this.metricsMap.put(objectKey, objectValue); - return this; - } - Object object = this.metricsMap.get(objectKey); - if (object instanceof Integer) { - this.metricsMap.put(objectKey, (Integer) objectValue + (Integer) object); - } else if (object instanceof Long) { - this.metricsMap.put(objectKey, (Long) objectValue + (Long) object); - } else if (object instanceof Float) { - this.metricsMap.put(objectKey, (Float) objectValue + (Float) object); - } else if (object instanceof String) { - this.metricsMap.put(objectKey, (String) objectValue + "," + (String) object); - } else { - this.metricsMap.put(objectKey, (Double) objectValue + (Double) object); - } - return this; - } - - public BaseMetrics mergeByMax(BaseMetrics metrics) { - if (metrics == null) { - return this; - } - for (Map.Entry entry: metrics.metricsMap.entrySet()) { - mergeByMax(entry.getKey(), entry.getValue()); - } - return this; - } - - public BaseMetrics mergeByMax(String objectKey, Object objectValue) { - if (objectKey == null || objectValue == null) { - return this; - } - if (!this.metricsMap.containsKey(objectKey)) { - this.metricsMap.put(objectKey, objectValue); - return this; - } - - Object object = this.metricsMap.get(objectKey); - if (object instanceof Integer) { - this.metricsMap.put(objectKey, Math.max((Integer) objectValue, (Integer) object)); - } else if (object instanceof Long) { - this.metricsMap.put(objectKey, Math.max((Long) objectValue, (Long) object)); - } else if (object instanceof Float) { - this.metricsMap.put(objectKey, Math.max((Float) objectValue, (Float) object)); - } else { - this.metricsMap.put(objectKey, Math.max((Double) objectValue, (Double) object)); - } - return this; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/BrokerMetrics.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/BrokerMetrics.java deleted file mode 100644 index 18153544..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/BrokerMetrics.java +++ /dev/null @@ -1,42 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.metrics; - -/** - * @author zengqiao - * @date 20/6/17 - */ -public class BrokerMetrics extends BaseMetrics { - private Long clusterId; - - private Integer brokerId; - - public 
BrokerMetrics(Long clusterId, Integer brokerId) { - super(); - this.clusterId = clusterId; - this.brokerId = brokerId; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - @Override - public String toString() { - return "BrokerMetrics{" + - "clusterId=" + clusterId + - ", brokerId=" + brokerId + - ", metricsMap=" + metricsMap + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/ClusterMetrics.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/ClusterMetrics.java deleted file mode 100644 index 4a977dcd..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/ClusterMetrics.java +++ /dev/null @@ -1,29 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.metrics; - -/** - * @author zengqiao - * @date 20/6/18 - */ -public class ClusterMetrics extends BaseMetrics { - private Long clusterId; - - public ClusterMetrics(Long clusterId) { - this.clusterId = clusterId; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - @Override - public String toString() { - return "ClusterMetrics{" + - "clusterId=" + clusterId + - ", metricsMap=" + metricsMap + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/ConsumerMetrics.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/ConsumerMetrics.java deleted file mode 100644 index 60fcc54f..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/ConsumerMetrics.java +++ /dev/null @@ -1,106 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.metrics; - -import java.util.HashMap; -import java.util.Map; - -/** - * Consumer实体类 - * @author tukun - * @date 2015/11/12 - */ -public class ConsumerMetrics { - private Long clusterId; - - private String topicName; - - private String consumerGroup; - - private String location; - - private Map partitionOffsetMap = new HashMap<>(); - - private Map consumeOffsetMap = new HashMap<>(); - - private long timestampUnitMs; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getConsumerGroup() { - return consumerGroup; - } - - public void setConsumerGroup(String consumerGroup) { - this.consumerGroup = consumerGroup; - } - - public String getLocation() { - return location; - } - - public void setLocation(String location) { - this.location = location; - } - - public Map getPartitionOffsetMap() { - return partitionOffsetMap; - } - - public void setPartitionOffsetMap(Map partitionOffsetMap) { - this.partitionOffsetMap = partitionOffsetMap; - } - - public Map getConsumeOffsetMap() { - return consumeOffsetMap; - } - - public void setConsumeOffsetMap(Map consumeOffsetMap) { - this.consumeOffsetMap = consumeOffsetMap; - } - - public long getTimestampUnitMs() { - return timestampUnitMs; - } - - public void 
setTimestampUnitMs(long timestampUnitMs) { - this.timestampUnitMs = timestampUnitMs; - } - - public ConsumerMetrics newConsumerMetrics(String consumerGroup) { - ConsumerMetrics consumerMetrics = new ConsumerMetrics(); - consumerMetrics.setConsumerGroup(consumerGroup); - consumerMetrics.setClusterId(this.getClusterId()); - consumerMetrics.setLocation(this.getLocation()); - consumerMetrics.setTopicName(this.getTopicName()); - consumerMetrics.setConsumeOffsetMap(this.getConsumeOffsetMap()); - consumerMetrics.setPartitionOffsetMap(this.getPartitionOffsetMap()); - consumerMetrics.setTimestampUnitMs(this.getTimestampUnitMs()); - return consumerMetrics; - } - - @Override - public String toString() { - return "ConsumerMetrics{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", consumerGroup='" + consumerGroup + '\'' + - ", location='" + location + '\'' + - ", partitionOffsetMap=" + partitionOffsetMap + - ", consumeOffsetMap=" + consumeOffsetMap + - ", timestampUnitMs=" + timestampUnitMs + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/TopicMetrics.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/TopicMetrics.java deleted file mode 100644 index 33c8aaeb..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/TopicMetrics.java +++ /dev/null @@ -1,68 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.metrics; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/6/17 - */ -public class TopicMetrics extends BaseMetrics { - private String appId; - - private Long clusterId; - - private String topicName; - - private List brokerMetricsList; - - public TopicMetrics(Long clusterId, String topicName) { - super(); - this.clusterId = clusterId; - this.topicName = topicName; - } - - public TopicMetrics(String appId, Long clusterId, String topicName) { - super(); - this.appId = appId; - this.clusterId = clusterId; - this.topicName = topicName; - } - - public TopicMetrics(String appId, Long clusterId, String topicName, List brokerMetricsList) { - super(); - this.appId = appId; - this.clusterId = clusterId; - this.topicName = topicName; - this.brokerMetricsList = brokerMetricsList; - } - - public String getAppId() { - return appId; - } - - public Long getClusterId() { - return clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setBrokerMetricsList(List brokerMetricsList) { - this.brokerMetricsList = brokerMetricsList; - } - - public List getBrokerMetricsList() { - return brokerMetricsList; - } - - @Override - public String toString() { - return "TopicMetrics{" + - "appId='" + appId + '\'' + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", metricsMap=" + metricsMap + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/TopicThrottledMetrics.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/TopicThrottledMetrics.java deleted file mode 100644 index 0b2afa5e..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/metrics/TopicThrottledMetrics.java +++ /dev/null @@ -1,72 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.metrics; - -import com.xiaojukeji.kafka.manager.common.bizenum.KafkaClientEnum; - -import java.util.Set; - -/** - * @author zengqiao - * @date 20/5/13 - */ -public 
class TopicThrottledMetrics { - private String appId; - - private Long clusterId; - - private String topicName; - - private KafkaClientEnum clientType; - - private Set brokerIdSet; - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public KafkaClientEnum getClientType() { - return clientType; - } - - public void setClientType(KafkaClientEnum clientType) { - this.clientType = clientType; - } - - public Set getBrokerIdSet() { - return brokerIdSet; - } - - public void setBrokerIdSet(Set brokerIdSet) { - this.brokerIdSet = brokerIdSet; - } - - @Override - public String toString() { - return "TopicThrottledMetrics{" + - "appId='" + appId + '\'' + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", clientType=" + clientType + - ", brokerIdSet=" + brokerIdSet + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/AccountDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/AccountDO.java deleted file mode 100644 index 1fa25528..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/AccountDO.java +++ /dev/null @@ -1,93 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zengqiao - * @date 19/5/3 - */ -public class AccountDO { - protected Long id; - - protected Integer status; - - protected Date gmtCreate; - - protected Date gmtModify; - - private String username; - - private String password; - - private Integer role; - - private String displayName; - - private String department; - - private String mail; - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = username; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public Integer getRole() { - return role; - } - - public void setRole(Integer role) { - this.role = role; - } - - public String getDisplayName() { - return displayName; - } - - public void setDisplayName(String displayName) { - this.displayName = displayName; - } - - public String getDepartment() { - return department; - } - - public void setDepartment(String department) { - this.department = department; - } - - public String getMail() { - return mail; - } - - public void setMail(String mail) { - this.mail = mail; - } - - @Override - public String toString() { - return "AccountDO{" + - "id=" + id + - ", status=" + status + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - ", username='" + username + '\'' + - ", password='" + password + '\'' + - ", role=" + role + - ", displayName='" + displayName + '\'' + - ", department='" + department + '\'' + - ", mail='" + mail + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/BrokerDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/BrokerDO.java deleted file mode 100644 index 0eb0ff81..00000000 --- 
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/BrokerDO.java +++ /dev/null @@ -1,125 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zengqiao - * @date 19/4/3 - */ -public class BrokerDO { - private Long id; - - private Integer status; - - private Date gmtCreate; - - private Date gmtModify; - - private Long clusterId; - - private Integer brokerId; - - private String host; - - private Integer port; - - private Long timestamp; - - private Double maxAvgBytesIn; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public Integer getPort() { - return port; - } - - public void setPort(Integer port) { - this.port = port; - } - - public Long getTimestamp() { - return timestamp; - } - - public void setTimestamp(Long timestamp) { - this.timestamp = timestamp; - } - - public Double getMaxAvgBytesIn() { - return maxAvgBytesIn; - } - - public void setMaxAvgBytesIn(Double maxAvgBytesIn) { - this.maxAvgBytesIn = maxAvgBytesIn; - } - - @Override - public String toString() { - return "BrokerDO{" + - "id=" + id + - ", status=" + status + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - ", clusterId=" + clusterId + - ", brokerId=" + brokerId + - ", host='" + host + '\'' + - ", port=" + port + - ", timestamp=" + timestamp + - ", maxAvgBytesIn=" + maxAvgBytesIn + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/BrokerMetricsDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/BrokerMetricsDO.java deleted file mode 100644 index 9ebc4514..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/BrokerMetricsDO.java +++ /dev/null @@ -1,70 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/6/17 - */ -public class BrokerMetricsDO { - private Long id; - - private Long clusterId; - - private Integer brokerId; - - private String metrics; - - private Date gmtCreate; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public String getMetrics() { - return metrics; - } - - public void setMetrics(String metrics) { - this.metrics = metrics; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - 
this.gmtCreate = gmtCreate; - } - - @Override - public String toString() { - return "BrokerMetricsDO{" + - "id=" + id + - ", clusterId=" + clusterId + - ", brokerId=" + brokerId + - ", metrics='" + metrics + '\'' + - ", gmtCreate=" + gmtCreate + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ClusterDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ClusterDO.java deleted file mode 100644 index 5ebebc75..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ClusterDO.java +++ /dev/null @@ -1,138 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; -import java.util.Objects; - -/** - * @author zengqiao - * @date 20/4/23 - */ -public class ClusterDO implements Comparable { - private Long id; - - private String clusterName; - - private String zookeeper; - - private String bootstrapServers; - - private String securityProperties; - - private String jmxProperties; - - private Integer status; - - private Date gmtCreate; - - private Date gmtModify; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getZookeeper() { - return zookeeper; - } - - public void setZookeeper(String zookeeper) { - this.zookeeper = zookeeper; - } - - public String getBootstrapServers() { - return bootstrapServers; - } - - public void setBootstrapServers(String bootstrapServers) { - this.bootstrapServers = bootstrapServers; - } - - public String getSecurityProperties() { - return securityProperties; - } - - public void setSecurityProperties(String securityProperties) { - this.securityProperties = securityProperties; - } - - public String getJmxProperties() { - return jmxProperties; - } - - public void setJmxProperties(String jmxProperties) { - this.jmxProperties = jmxProperties; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - @Override - public String toString() { - return "ClusterDO{" + - "id=" + id + - ", clusterName='" + clusterName + '\'' + - ", zookeeper='" + zookeeper + '\'' + - ", bootstrapServers='" + bootstrapServers + '\'' + - ", securityProperties='" + securityProperties + '\'' + - ", jmxProperties='" + jmxProperties + '\'' + - ", status=" + status + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - '}'; - } - - @Override - public int compareTo(ClusterDO clusterDO) { - return this.id.compareTo(clusterDO.id); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - ClusterDO clusterDO = (ClusterDO) o; - return Objects.equals(id, clusterDO.id) - && Objects.equals(clusterName, clusterDO.clusterName) - && Objects.equals(zookeeper, clusterDO.zookeeper) - && Objects.equals(bootstrapServers, clusterDO.bootstrapServers) - && Objects.equals(securityProperties, clusterDO.securityProperties) - && Objects.equals(jmxProperties, 
clusterDO.jmxProperties); - } - - @Override - public int hashCode() { - return Objects.hash(id, clusterName, zookeeper, bootstrapServers, securityProperties, jmxProperties); - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ClusterMetricsDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ClusterMetricsDO.java deleted file mode 100644 index c5c6bcfa..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ClusterMetricsDO.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/4/29 - */ -public class ClusterMetricsDO { - private Long id; - - private Date gmtCreate; - - private Long clusterId; - - private String metrics; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getMetrics() { - return metrics; - } - - public void setMetrics(String metrics) { - this.metrics = metrics; - } - - @Override - public String toString() { - return "ClusterMetricsDO{" + - "id=" + id + - ", gmtCreate=" + gmtCreate + - ", clusterId=" + clusterId + - ", metrics='" + metrics + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ClusterTaskDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ClusterTaskDO.java deleted file mode 100644 index 5387dd29..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ClusterTaskDO.java +++ /dev/null @@ -1,213 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/4/21 - */ -public class ClusterTaskDO { - private Long id; - - private String uuid; - - private Long clusterId; - - private String taskType; - - private String kafkaPackage; - - private String kafkaPackageMd5; - - private String serverProperties; - - private String serverPropertiesMd5; - - private Long agentTaskId; - - private Long agentRollbackTaskId; - - private String hostList; - - private String pauseHostList; - - private String rollbackHostList; - - private String rollbackPauseHostList; - - private String operator; - - private Integer taskStatus; - - private Date createTime; - - private Date modifyTime; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getUuid() { - return uuid; - } - - public void setUuid(String uuid) { - this.uuid = uuid; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTaskType() { - return taskType; - } - - public void setTaskType(String taskType) { - this.taskType = taskType; - } - - public String getKafkaPackage() { - return kafkaPackage; - } - - public void setKafkaPackage(String kafkaPackage) { - this.kafkaPackage = kafkaPackage; - } - - public String getKafkaPackageMd5() { - return kafkaPackageMd5; - } - - public void setKafkaPackageMd5(String kafkaPackageMd5) { - this.kafkaPackageMd5 = 
kafkaPackageMd5; - } - - public String getServerProperties() { - return serverProperties; - } - - public void setServerProperties(String serverProperties) { - this.serverProperties = serverProperties; - } - - public String getServerPropertiesMd5() { - return serverPropertiesMd5; - } - - public void setServerPropertiesMd5(String serverPropertiesMd5) { - this.serverPropertiesMd5 = serverPropertiesMd5; - } - - public Long getAgentTaskId() { - return agentTaskId; - } - - public void setAgentTaskId(Long agentTaskId) { - this.agentTaskId = agentTaskId; - } - - public Long getAgentRollbackTaskId() { - return agentRollbackTaskId; - } - - public void setAgentRollbackTaskId(Long agentRollbackTaskId) { - this.agentRollbackTaskId = agentRollbackTaskId; - } - - public String getHostList() { - return hostList; - } - - public void setHostList(String hostList) { - this.hostList = hostList; - } - - public String getPauseHostList() { - return pauseHostList; - } - - public void setPauseHostList(String pauseHostList) { - this.pauseHostList = pauseHostList; - } - - public String getRollbackHostList() { - return rollbackHostList; - } - - public void setRollbackHostList(String rollbackHostList) { - this.rollbackHostList = rollbackHostList; - } - - public String getRollbackPauseHostList() { - return rollbackPauseHostList; - } - - public void setRollbackPauseHostList(String rollbackPauseHostList) { - this.rollbackPauseHostList = rollbackPauseHostList; - } - - public String getOperator() { - return operator; - } - - public void setOperator(String operator) { - this.operator = operator; - } - - public Integer getTaskStatus() { - return taskStatus; - } - - public void setTaskStatus(Integer taskStatus) { - this.taskStatus = taskStatus; - } - - public Date getCreateTime() { - return createTime; - } - - public void setCreateTime(Date createTime) { - this.createTime = createTime; - } - - public Date getModifyTime() { - return modifyTime; - } - - public void setModifyTime(Date modifyTime) { - this.modifyTime = modifyTime; - } - - @Override - public String toString() { - return "ClusterTaskDO{" + - "id=" + id + - ", uuid='" + uuid + '\'' + - ", clusterId=" + clusterId + - ", taskType='" + taskType + '\'' + - ", kafkaPackage='" + kafkaPackage + '\'' + - ", kafkaPackageMd5='" + kafkaPackageMd5 + '\'' + - ", serverProperties='" + serverProperties + '\'' + - ", serverPropertiesMd5='" + serverPropertiesMd5 + '\'' + - ", agentTaskId=" + agentTaskId + - ", agentRollbackTaskId=" + agentRollbackTaskId + - ", hostList='" + hostList + '\'' + - ", pauseHostList='" + pauseHostList + '\'' + - ", rollbackHostList='" + rollbackHostList + '\'' + - ", rollbackPauseHostList='" + rollbackPauseHostList + '\'' + - ", operator='" + operator + '\'' + - ", taskStatus=" + taskStatus + - ", createTime=" + createTime + - ", modifyTime=" + modifyTime + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ClusterTaskDetailDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ClusterTaskDetailDO.java deleted file mode 100644 index 561ba043..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ClusterTaskDetailDO.java +++ /dev/null @@ -1,103 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/8/28 - */ -public class ClusterTaskDetailDO { - private Long id; - - private Long taskId; - - private String hostname; 
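/*
 * Illustrative sketch, not part of the original sources: the ClusterTaskDO above
 * stores its host lists and package checksums as plain strings. Assuming
 * comma-separated hosts and hex-encoded MD5 digests (both assumptions, not
 * confirmed by this diff), a caller might populate a task roughly like this.
 */
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.MessageDigest;
import java.util.Arrays;

public class ClusterTaskDOUsageSketch {
    // Hex-encoded MD5 of a local file, e.g. for kafkaPackageMd5 / serverPropertiesMd5.
    static String md5Hex(String path) throws Exception {
        byte[] digest = MessageDigest.getInstance("MD5")
                .digest(Files.readAllBytes(Paths.get(path)));
        StringBuilder sb = new StringBuilder();
        for (byte b : digest) {
            sb.append(String.format("%02x", b));
        }
        return sb.toString();
    }

    public static void main(String[] args) throws Exception {
        ClusterTaskDO task = new ClusterTaskDO();
        task.setTaskType("upgrade");                                   // value is an assumption
        task.setKafkaPackage("kafka_2.12-2.5.1.tgz");                  // value is an assumption
        task.setKafkaPackageMd5(md5Hex("/tmp/kafka_2.12-2.5.1.tgz"));  // hypothetical local path
        task.setHostList(String.join(",", Arrays.asList("broker-1", "broker-2")));
        System.out.println(task);
    }
}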
- - private Integer groupNum; - - private Date execTime; - - private Date rollbackTime; - - private Date createTime; - - private Date modifyTime; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Long getTaskId() { - return taskId; - } - - public void setTaskId(Long taskId) { - this.taskId = taskId; - } - - public String getHostname() { - return hostname; - } - - public void setHostname(String hostname) { - this.hostname = hostname; - } - - public Integer getGroupNum() { - return groupNum; - } - - public void setGroupNum(Integer groupNum) { - this.groupNum = groupNum; - } - - public Date getExecTime() { - return execTime; - } - - public void setExecTime(Date execTime) { - this.execTime = execTime; - } - - public Date getRollbackTime() { - return rollbackTime; - } - - public void setRollbackTime(Date rollbackTime) { - this.rollbackTime = rollbackTime; - } - - public Date getCreateTime() { - return createTime; - } - - public void setCreateTime(Date createTime) { - this.createTime = createTime; - } - - public Date getModifyTime() { - return modifyTime; - } - - public void setModifyTime(Date modifyTime) { - this.modifyTime = modifyTime; - } - - @Override - public String toString() { - return "ClusterTaskDetailDO{" + - "id=" + id + - ", taskId=" + taskId + - ", hostname='" + hostname + '\'' + - ", groupNum=" + groupNum + - ", execTime=" + execTime + - ", rollbackTime=" + rollbackTime + - ", createTime=" + createTime + - ", modifyTime=" + modifyTime + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ConfigDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ConfigDO.java deleted file mode 100644 index 85da93da..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ConfigDO.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/3/19 - */ -public class ConfigDO { - private Long id; - - private Integer status; - - private Date gmtCreate; - - private Date gmtModify; - - private String configKey; - - private String configValue; - - private String configDescription; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - public String getConfigKey() { - return configKey; - } - - public void setConfigKey(String configKey) { - this.configKey = configKey; - } - - public String getConfigValue() { - return configValue; - } - - public void setConfigValue(String configValue) { - this.configValue = configValue; - } - - public String getConfigDescription() { - return configDescription; - } - - public void setConfigDescription(String configDescription) { - this.configDescription = configDescription; - } - - @Override - public String toString() { - return "ConfigDO{" + - "id=" + id + - ", status=" + status + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - ", configKey='" + configKey + '\'' + - ", configValue='" + configValue + 
'\'' + - ", configDescription='" + configDescription + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ControllerDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ControllerDO.java deleted file mode 100644 index 3b42d4f0..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ControllerDO.java +++ /dev/null @@ -1,106 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/2/28 - */ -public class ControllerDO { - private Long id; - - private Date gmtCreate; - - private Long clusterId; - - private Integer brokerId; - - private String host; - - private Long timestamp; - - private Integer version; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public Long getTimestamp() { - return timestamp; - } - - public void setTimestamp(Long timestamp) { - this.timestamp = timestamp; - } - - public Integer getVersion() { - return version; - } - - public void setVersion(Integer version) { - this.version = version; - } - - @Override - public String toString() { - return "ControllerDO{" + - "id=" + id + - ", gmtCreate=" + gmtCreate + - ", clusterId=" + clusterId + - ", brokerId=" + brokerId + - ", host='" + host + '\'' + - ", timestamp=" + timestamp + - ", version=" + version + - '}'; - } - - public static ControllerDO newInstance(Long clusterId, - Integer brokerId, - String host, - Long timestamp, - Integer version) { - ControllerDO controllerDO = new ControllerDO(); - controllerDO.setClusterId(clusterId); - controllerDO.setBrokerId(brokerId); - controllerDO.setHost(host == null? 
"": host); - controllerDO.setTimestamp(timestamp); - controllerDO.setVersion(version); - return controllerDO; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/HeartbeatDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/HeartbeatDO.java deleted file mode 100644 index f7968f73..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/HeartbeatDO.java +++ /dev/null @@ -1,76 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * 心跳 - * @author zengqiao - * @date 20/8/10 - */ -public class HeartbeatDO implements Comparable { - private Long id; - - private String ip; - - private String hostname; - - private Date createTime; - - private Date modifyTime; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getIp() { - return ip; - } - - public void setIp(String ip) { - this.ip = ip; - } - - public String getHostname() { - return hostname; - } - - public void setHostname(String hostname) { - this.hostname = hostname; - } - - public Date getCreateTime() { - return createTime; - } - - public void setCreateTime(Date createTime) { - this.createTime = createTime; - } - - public Date getModifyTime() { - return modifyTime; - } - - public void setModifyTime(Date modifyTime) { - this.modifyTime = modifyTime; - } - - @Override - public String toString() { - return "HeartbeatDO{" + - "id=" + id + - ", ip='" + ip + '\'' + - ", hostname='" + hostname + '\'' + - ", createTime=" + createTime + - ", modifyTime=" + modifyTime + - '}'; - } - - @Override - public int compareTo(HeartbeatDO heartbeatDO) { - return this.id.compareTo(heartbeatDO.id); - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/KafkaBillDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/KafkaBillDO.java deleted file mode 100644 index 109ecfee..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/KafkaBillDO.java +++ /dev/null @@ -1,103 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/5/12 - */ -public class KafkaBillDO { - private Long id; - - private Long clusterId; - - private String topicName; - - private String principal; - - private Double quota; - - private Double cost; - - private String gmtDay; - - private Date gmtCreate; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getPrincipal() { - return principal; - } - - public void setPrincipal(String principal) { - this.principal = principal; - } - - public Double getQuota() { - return quota; - } - - public void setQuota(Double quota) { - this.quota = quota; - } - - public Double getCost() { - return cost; - } - - public void setCost(Double cost) { - this.cost = cost; - } - - public String getGmtDay() { - return gmtDay; - } - - public void setGmtDay(String gmtDay) { - this.gmtDay = gmtDay; - } - - public Date getGmtCreate() { - return 
gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - @Override - public String toString() { - return "KafkaBillDO{" + - "id=" + id + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", principal='" + principal + '\'' + - ", quota=" + quota + - ", cost=" + cost + - ", gmtDay='" + gmtDay + '\'' + - ", gmtCreate=" + gmtCreate + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/KafkaFileDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/KafkaFileDO.java deleted file mode 100644 index 3a99da65..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/KafkaFileDO.java +++ /dev/null @@ -1,114 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zhongyuankai - * @date 2020/5/7 - */ -public class KafkaFileDO { - private Long id; - - private Date gmtCreate; - - private Date gmtModify; - - private Long clusterId; - - private String fileName; - - private String fileMd5; - - private Integer fileType; - - private String description; - - private String operator; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getFileName() { - return fileName; - } - - public void setFileName(String fileName) { - this.fileName = fileName; - } - - public String getFileMd5() { - return fileMd5; - } - - public void setFileMd5(String fileMd5) { - this.fileMd5 = fileMd5; - } - - public Integer getFileType() { - return fileType; - } - - public void setFileType(Integer fileType) { - this.fileType = fileType; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public String getOperator() { - return operator; - } - - public void setOperator(String operator) { - this.operator = operator; - } - - @Override - public String toString() { - return "KafkaFileDO{" + - "id=" + id + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - ", clusterId=" + clusterId + - ", fileName='" + fileName + '\'' + - ", fileMd5='" + fileMd5 + '\'' + - ", fileType=" + fileType + - ", description='" + description + '\'' + - ", operator='" + operator + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/LogicalClusterDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/LogicalClusterDO.java deleted file mode 100644 index db81c1c9..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/LogicalClusterDO.java +++ /dev/null @@ -1,125 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/6/29 - */ -public class LogicalClusterDO { - private Long id; - - private String name; - - private String identification; - - private Integer mode; - - private String appId; 
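/*
 * Illustrative sketch, not part of the original sources: LogicalClusterDTO
 * (earlier in this diff) carries a regionIdList collection, while the
 * LogicalClusterDO begun above persists a single regionList string (declared
 * just below). Assuming Long region ids joined with commas -- the storage
 * format and element type are assumptions, not confirmed by this diff -- a
 * converter could look roughly like this.
 */
import java.util.List;
import java.util.stream.Collectors;

public class LogicalClusterConvertSketch {
    public static LogicalClusterDO toDO(LogicalClusterDTO dto) {
        LogicalClusterDO logicalClusterDO = new LogicalClusterDO();
        logicalClusterDO.setName(dto.getName());
        logicalClusterDO.setIdentification(dto.getIdentification());
        logicalClusterDO.setMode(dto.getMode());
        logicalClusterDO.setAppId(dto.getAppId());
        logicalClusterDO.setClusterId(dto.getClusterId());
        logicalClusterDO.setDescription(dto.getDescription());

        List<Long> regionIdList = dto.getRegionIdList();     // element type assumed to be Long
        logicalClusterDO.setRegionList(regionIdList.stream()
                .map(String::valueOf)
                .collect(Collectors.joining(",")));           // comma-separated storage is an assumption
        return logicalClusterDO;
    }
}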
- - private Long clusterId; - - private String regionList; - - private String description; - - private Date gmtCreate; - - private Date gmtModify; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getIdentification() { - return identification; - } - - public void setIdentification(String identification) { - this.identification = identification; - } - - public Integer getMode() { - return mode; - } - - public void setMode(Integer mode) { - this.mode = mode; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getRegionList() { - return regionList; - } - - public void setRegionList(String regionList) { - this.regionList = regionList; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - @Override - public String toString() { - return "LogicalClusterDO{" + - "id=" + id + - ", name='" + name + '\'' + - ", identification='" + identification + '\'' + - ", mode=" + mode + - ", appId='" + appId + '\'' + - ", clusterId=" + clusterId + - ", regionList='" + regionList + '\'' + - ", description='" + description + '\'' + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/MonitorRuleDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/MonitorRuleDO.java deleted file mode 100644 index 4650dbd7..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/MonitorRuleDO.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/5/21 - */ -public class MonitorRuleDO { - private Long id; - - private String name; - - private Long strategyId; - - private String appId; - - private String operator; - - private Date createTime; - - private Date modifyTime; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public Long getStrategyId() { - return strategyId; - } - - public void setStrategyId(Long strategyId) { - this.strategyId = strategyId; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getOperator() { - return operator; - } - - public void setOperator(String operator) { - this.operator = operator; - } - - public Date getCreateTime() { - return createTime; - } - - public void setCreateTime(Date createTime) { - this.createTime = createTime; - } - - public Date getModifyTime() { - return modifyTime; - } - - public void setModifyTime(Date modifyTime) { - this.modifyTime = 
modifyTime; - } - - @Override - public String toString() { - return "MonitorRuleDO{" + - "id=" + id + - ", name='" + name + '\'' + - ", strategyId=" + strategyId + - ", appId='" + appId + '\'' + - ", operator='" + operator + '\'' + - ", createTime=" + createTime + - ", modifyTime=" + modifyTime + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/OperateRecordDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/OperateRecordDO.java deleted file mode 100644 index 50b412b8..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/OperateRecordDO.java +++ /dev/null @@ -1,103 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zhongyuankai - * @date 20/09/03 - */ -public class OperateRecordDO { - private Long id; - - private Integer moduleId; - - private Integer operateId; - - private String resource; - - private String content; - - private String operator; - - private Date createTime; - - private Date modifyTime; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Integer getModuleId() { - return moduleId; - } - - public void setModuleId(Integer moduleId) { - this.moduleId = moduleId; - } - - public Integer getOperateId() { - return operateId; - } - - public void setOperateId(Integer operateId) { - this.operateId = operateId; - } - - public String getResource() { - return resource; - } - - public void setResource(String resource) { - this.resource = resource; - } - - public String getContent() { - return content; - } - - public void setContent(String content) { - this.content = content; - } - - public String getOperator() { - return operator; - } - - public void setOperator(String operator) { - this.operator = operator; - } - - public Date getCreateTime() { - return createTime; - } - - public void setCreateTime(Date createTime) { - this.createTime = createTime; - } - - public Date getModifyTime() { - return modifyTime; - } - - public void setModifyTime(Date modifyTime) { - this.modifyTime = modifyTime; - } - - @Override - public String toString() { - return "OperateRecordDO{" + - "id=" + id + - ", moduleId=" + moduleId + - ", operateId=" + operateId + - ", resource='" + resource + '\'' + - ", content='" + content + '\'' + - ", operator='" + operator + '\'' + - ", createTime=" + createTime + - ", modifyTime=" + modifyTime + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/OrderDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/OrderDO.java deleted file mode 100644 index 1fa820fc..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/OrderDO.java +++ /dev/null @@ -1,147 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zhongyuankai - * @date 20/4/23 - */ -public class OrderDO { - private Long id; - - private Integer status; - - private Date gmtCreate; - - private Date gmtModify; - - private Integer type; - - private String title; - - private String applicant; - - private String description; - - private String approver; - - private Date gmtHandle; - - private String opinion; - - private String extensions; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public 
Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - public Integer getType() { - return type; - } - - public void setType(Integer type) { - this.type = type; - } - - public String getTitle() { - return title; - } - - public void setTitle(String title) { - this.title = title; - } - - public String getApplicant() { - return applicant; - } - - public void setApplicant(String applicant) { - this.applicant = applicant; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public String getApprover() { - return approver; - } - - public void setApprover(String approver) { - this.approver = approver; - } - - public Date getGmtHandle() { - return gmtHandle; - } - - public void setGmtHandle(Date gmtHandle) { - this.gmtHandle = gmtHandle; - } - - public String getOpinion() { - return opinion; - } - - public void setOpinion(String opinion) { - this.opinion = opinion; - } - - public String getExtensions() { - return extensions; - } - - public void setExtensions(String extensions) { - this.extensions = extensions; - } - - @Override - public String toString() { - return "OrderDO{" + - "id=" + id + - ", status=" + status + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - ", type=" + type + - ", title='" + title + '\'' + - ", applicant='" + applicant + '\'' + - ", description='" + description + '\'' + - ", approver='" + approver + '\'' + - ", gmtHandle=" + gmtHandle + - ", opinion='" + opinion + '\'' + - ", extensions='" + extensions + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ReassignTaskDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ReassignTaskDO.java deleted file mode 100644 index 0eb311a5..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/ReassignTaskDO.java +++ /dev/null @@ -1,225 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * migrate topic task do - * @author zengqiao - * @date 19/4/16 - */ -public class ReassignTaskDO { - private Long id; - - private Integer status; - - private Date gmtCreate; - - private Date gmtModify; - - private Long taskId; - - private Long clusterId; - - private String topicName; - - private String partitions; - - private String reassignmentJson; - - private Long realThrottle; - - private Long maxThrottle; - - private Long minThrottle; - - private Date beginTime; - - private Long originalRetentionTime; - - private Long reassignRetentionTime; - - private String srcBrokers; - - private String destBrokers; - - private String description; - - private String operator; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void 
setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - public Long getTaskId() { - return taskId; - } - - public void setTaskId(Long taskId) { - this.taskId = taskId; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getPartitions() { - return partitions; - } - - public void setPartitions(String partitions) { - this.partitions = partitions; - } - - public String getReassignmentJson() { - return reassignmentJson; - } - - public void setReassignmentJson(String reassignmentJson) { - this.reassignmentJson = reassignmentJson; - } - - public Long getRealThrottle() { - return realThrottle; - } - - public void setRealThrottle(Long realThrottle) { - this.realThrottle = realThrottle; - } - - public Long getMaxThrottle() { - return maxThrottle; - } - - public void setMaxThrottle(Long maxThrottle) { - this.maxThrottle = maxThrottle; - } - - public Long getMinThrottle() { - return minThrottle; - } - - public void setMinThrottle(Long minThrottle) { - this.minThrottle = minThrottle; - } - - public Date getBeginTime() { - return beginTime; - } - - public void setBeginTime(Date beginTime) { - this.beginTime = beginTime; - } - - public Long getOriginalRetentionTime() { - return originalRetentionTime; - } - - public void setOriginalRetentionTime(Long originalRetentionTime) { - this.originalRetentionTime = originalRetentionTime; - } - - public Long getReassignRetentionTime() { - return reassignRetentionTime; - } - - public void setReassignRetentionTime(Long reassignRetentionTime) { - this.reassignRetentionTime = reassignRetentionTime; - } - - public String getSrcBrokers() { - return srcBrokers; - } - - public void setSrcBrokers(String srcBrokers) { - this.srcBrokers = srcBrokers; - } - - public String getDestBrokers() { - return destBrokers; - } - - public void setDestBrokers(String destBrokers) { - this.destBrokers = destBrokers; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public String getOperator() { - return operator; - } - - public void setOperator(String operator) { - this.operator = operator; - } - - @Override - public String toString() { - return "ReassignTaskDO{" + - "id=" + id + - ", status=" + status + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - ", taskId=" + taskId + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", partitions='" + partitions + '\'' + - ", reassignmentJson='" + reassignmentJson + '\'' + - ", realThrottle=" + realThrottle + - ", maxThrottle=" + maxThrottle + - ", minThrottle=" + minThrottle + - ", beginTime=" + beginTime + - ", originalRetentionTime=" + originalRetentionTime + - ", reassignRetentionTime=" + reassignRetentionTime + - ", srcBrokers='" + srcBrokers + '\'' + - ", destBrokers='" + destBrokers + '\'' + - ", description='" + description + '\'' + - ", operator='" + operator + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/RegionDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/RegionDO.java deleted file mode 100644 index 1f948510..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/RegionDO.java 
+++ /dev/null @@ -1,137 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -public class RegionDO implements Comparable { - private Long id; - - private Integer status; - - private Date gmtCreate; - - private Date gmtModify; - - private String name; - - private Long clusterId; - - private String brokerList; - - private Long capacity; - - private Long realUsed; - - private Long estimateUsed; - - private String description; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getBrokerList() { - return brokerList; - } - - public void setBrokerList(String brokerList) { - this.brokerList = brokerList; - } - - public Long getCapacity() { - return capacity; - } - - public void setCapacity(Long capacity) { - this.capacity = capacity; - } - - public Long getRealUsed() { - return realUsed; - } - - public void setRealUsed(Long realUsed) { - this.realUsed = realUsed; - } - - public Long getEstimateUsed() { - return estimateUsed; - } - - public void setEstimateUsed(Long estimateUsed) { - this.estimateUsed = estimateUsed; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - @Override - public String toString() { - return "RegionDO{" + - "id=" + id + - ", status=" + status + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - ", name='" + name + '\'' + - ", clusterId=" + clusterId + - ", brokerList='" + brokerList + '\'' + - ", capacity=" + capacity + - ", realUsed=" + realUsed + - ", estimateUsed=" + estimateUsed + - ", description='" + description + '\'' + - '}'; - } - - @Override - public int compareTo(RegionDO regionDO) { - return this.id.compareTo(regionDO.id); - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/TopicDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/TopicDO.java deleted file mode 100644 index ecb97e47..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/TopicDO.java +++ /dev/null @@ -1,102 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import com.xiaojukeji.kafka.manager.common.entity.dto.op.topic.TopicCreationDTO; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/4/24 - */ -public class TopicDO { - private Long id; - - private Date gmtCreate; - - private Date gmtModify; - - private String appId; - - private Long clusterId; - - private String topicName; - - private String description; - - private Long peakBytesIn; - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public Long 
getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Long getPeakBytesIn() { - return peakBytesIn; - } - - public void setPeakBytesIn(Long peakBytesIn) { - this.peakBytesIn = peakBytesIn; - } - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - public static TopicDO buildFrom(TopicCreationDTO dto) { - TopicDO topicDO = new TopicDO(); - topicDO.setAppId(dto.getAppId()); - topicDO.setClusterId(dto.getClusterId()); - topicDO.setTopicName(dto.getTopicName()); - topicDO.setDescription(dto.getDescription()); - topicDO.setPeakBytesIn(ValidateUtils.isNull(dto.getPeakBytesIn()) ? -1L : dto.getPeakBytesIn()); - return topicDO; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/TopicExpiredDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/TopicExpiredDO.java deleted file mode 100644 index f90cadd0..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/TopicExpiredDO.java +++ /dev/null @@ -1,103 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/3/30 - */ -public class TopicExpiredDO { - private Long id; - - private Long clusterId; - - private String topicName; - - private Integer expiredDay; - - private Date gmtRetain; - - private Integer status; - - private Date gmtCreate; - - private Date gmtModify; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getExpiredDay() { - return expiredDay; - } - - public void setExpiredDay(Integer expiredDay) { - this.expiredDay = expiredDay; - } - - public Date getGmtRetain() { - return gmtRetain; - } - - public void setGmtRetain(Date gmtRetain) { - this.gmtRetain = gmtRetain; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - @Override - public String toString() { - return "TopicExpiredDO{" + - "id=" + id + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", expiredDay=" + expiredDay + - ", gmtRetain=" + gmtRetain + - ", status=" + status + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - '}'; - } -} \ No newline at end of file diff 
--git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/TopicMetricsDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/TopicMetricsDO.java deleted file mode 100644 index 2bc2a25b..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/TopicMetricsDO.java +++ /dev/null @@ -1,81 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/6/17 - */ -public class TopicMetricsDO { - private Long id; - - private String appId; - - private Long clusterId; - - private String topicName; - - private String metrics; - - private Date gmtCreate; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getMetrics() { - return metrics; - } - - public void setMetrics(String metrics) { - this.metrics = metrics; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - @Override - public String toString() { - return "TopicMetricsDO{" + - "id=" + id + - ", appId='" + appId + '\'' + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", metrics='" + metrics + '\'' + - ", gmtCreate=" + gmtCreate + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/TopicStatisticsDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/TopicStatisticsDO.java deleted file mode 100644 index b52c84f9..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/TopicStatisticsDO.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/3/29 - */ -public class TopicStatisticsDO { - private Long id; - - private Date gmtCreate; - - private Long clusterId; - - private String topicName; - - private Long offsetSum; - - private Double maxAvgBytesIn; - - private String gmtDay; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Long getOffsetSum() { - return offsetSum; - } - - public void setOffsetSum(Long offsetSum) { - this.offsetSum = offsetSum; - } - - public Double getMaxAvgBytesIn() { - return maxAvgBytesIn; - } - - public void setMaxAvgBytesIn(Double maxAvgBytesIn) { - this.maxAvgBytesIn = maxAvgBytesIn; - } - - public String getGmtDay() { - return gmtDay; - } - - public void setGmtDay(String gmtDay) { - this.gmtDay = gmtDay; - } - - @Override - public String toString() 
{ - return "TopicStatisticsDO{" + - "id=" + id + - ", gmtCreate=" + gmtCreate + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", offsetSum=" + offsetSum + - ", maxAvgBytesIn=" + maxAvgBytesIn + - ", gmtDay='" + gmtDay + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/TopicThrottledMetricsDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/TopicThrottledMetricsDO.java deleted file mode 100644 index 481ae9f8..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/TopicThrottledMetricsDO.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo; - -import java.util.Date; - -/** - * @author zhongyuankai - * @date 20/4/3 - */ -public class TopicThrottledMetricsDO { - private Long id; - - private Long clusterId; - - private String topicName; - - private String appId; - - private Integer produceThrottled; - - private Integer fetchThrottled; - - private Date gmtCreate; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public Integer getProduceThrottled() { - return produceThrottled; - } - - public void setProduceThrottled(Integer produceThrottled) { - this.produceThrottled = produceThrottled; - } - - public Integer getFetchThrottled() { - return fetchThrottled; - } - - public void setFetchThrottled(Integer fetchThrottled) { - this.fetchThrottled = fetchThrottled; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - @Override - public String toString() { - return "TopicThrottleDO{" + - "id=" + id + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", appId='" + appId + '\'' + - ", produceThrottled=" + produceThrottled + - ", fetchThrottled=" + fetchThrottled + - ", gmtCreate=" + gmtCreate + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/AppDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/AppDO.java deleted file mode 100644 index 600c0ac4..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/AppDO.java +++ /dev/null @@ -1,148 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo.gateway; - -import java.util.Date; -import java.util.Random; - -/** - * @author zengqiao - * @date 20/7/29 - */ -public class AppDO { - private static final String ALPHA_NUM = "1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_"; - - private Long id; - - private String appId; - - private String name; - - private String password; - - private Integer type; - - private String applicant; - - private String principals; - - private String description; - - private Date createTime; - - private Date modifyTime; - - public static String getAlphaNum() { - return ALPHA_NUM; - } - - public Long getId() { - return id; - } - - public void setId(Long id) 
{ - this.id = id; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public Integer getType() { - return type; - } - - public void setType(Integer type) { - this.type = type; - } - - public String getApplicant() { - return applicant; - } - - public void setApplicant(String applicant) { - this.applicant = applicant; - } - - public String getPrincipals() { - return principals; - } - - public void setPrincipals(String principals) { - this.principals = principals; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Date getCreateTime() { - return createTime; - } - - public void setCreateTime(Date createTime) { - this.createTime = createTime; - } - - public Date getModifyTime() { - return modifyTime; - } - - public void setModifyTime(Date modifyTime) { - this.modifyTime = modifyTime; - } - - @Override - public String toString() { - return "AppDO{" + - "id=" + id + - ", appId='" + appId + '\'' + - ", name='" + name + '\'' + - ", password='" + password + '\'' + - ", type=" + type + - ", applicant='" + applicant + '\'' + - ", principals='" + principals + '\'' + - ", description='" + description + '\'' + - ", createTime=" + createTime + - ", modifyTime=" + modifyTime + - '}'; - } - - public void generateAppIdAndPassword(Long orderId, String idc) { - this.appId = AppDO.generateAppId(orderId, idc); - - StringBuffer stringBuffer = new StringBuffer(15); - Random random = new Random(); - for(int i = 0; i < 12; i++) { - int index = random.nextInt(ALPHA_NUM.length()); - stringBuffer.append(ALPHA_NUM.charAt(index)); - } - this.password = stringBuffer.toString(); - } - - public static String generateAppId(Long orderId, String idc) { - return String.format("appId_%06d_%s", orderId, idc); - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/AuthorityDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/AuthorityDO.java deleted file mode 100644 index cd999509..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/AuthorityDO.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo.gateway; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/7/21 - */ -public class AuthorityDO { - private Long id; - - private String appId; - - private Long clusterId; - - private String topicName; - - private Integer access; - - private Date createTime; - - private Date modifyTime; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getAccess() { - return access; - } - - public void setAccess(Integer access) { - 
this.access = access; - } - - public Date getCreateTime() { - return createTime; - } - - public void setCreateTime(Date createTime) { - this.createTime = createTime; - } - - public Date getModifyTime() { - return modifyTime; - } - - public void setModifyTime(Date modifyTime) { - this.modifyTime = modifyTime; - } - - @Override - public String toString() { - return "AuthorityDO{" + - "id=" + id + - ", appId='" + appId + '\'' + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", access=" + access + - ", createTime=" + createTime + - ", modifyTime=" + modifyTime + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/GatewayConfigDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/GatewayConfigDO.java deleted file mode 100644 index fa29c7cf..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/GatewayConfigDO.java +++ /dev/null @@ -1,103 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo.gateway; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/7/28 - */ -public class GatewayConfigDO { - private Long id; - - private String type; - - private String name; - - private String value; - - private Long version; - - private String description; - - private Date createTime; - - private Date modifyTime; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getType() { - return type; - } - - public void setType(String type) { - this.type = type; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getValue() { - return value; - } - - public void setValue(String value) { - this.value = value; - } - - public Long getVersion() { - return version; - } - - public void setVersion(Long version) { - this.version = version; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Date getCreateTime() { - return createTime; - } - - public void setCreateTime(Date createTime) { - this.createTime = createTime; - } - - public Date getModifyTime() { - return modifyTime; - } - - public void setModifyTime(Date modifyTime) { - this.modifyTime = modifyTime; - } - - @Override - public String toString() { - return "GatewayConfigDO{" + - "id=" + id + - ", type='" + type + '\'' + - ", name='" + name + '\'' + - ", value='" + value + '\'' + - ", version=" + version + - ", description='" + description + '\'' + - ", createTime=" + createTime + - ", modifyTime=" + modifyTime + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/KafkaAclDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/KafkaAclDO.java deleted file mode 100644 index 79ca398e..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/KafkaAclDO.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo.gateway; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/7/21 - */ -public class KafkaAclDO { - private Long id; - - private String appId; - - private Long clusterId; - - private String topicName; - - private Integer access; - - private 
Integer operation; - - private Date createTime; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getAccess() { - return access; - } - - public void setAccess(Integer access) { - this.access = access; - } - - public Integer getOperation() { - return operation; - } - - public void setOperation(Integer operation) { - this.operation = operation; - } - - public Date getCreateTime() { - return createTime; - } - - public void setCreateTime(Date createTime) { - this.createTime = createTime; - } - - @Override - public String toString() { - return "KafkaAclDO{" + - "id=" + id + - ", appId='" + appId + '\'' + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", access=" + access + - ", operation=" + operation + - ", createTime=" + createTime + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/KafkaUserDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/KafkaUserDO.java deleted file mode 100644 index e858e142..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/KafkaUserDO.java +++ /dev/null @@ -1,81 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo.gateway; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/7/21 - */ -public class KafkaUserDO { - private Long id; - - private String appId; - - private String password; - - private Integer userType; - - private Integer operation; - - private Date createTime; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public Integer getUserType() { - return userType; - } - - public void setUserType(Integer userType) { - this.userType = userType; - } - - public Integer getOperation() { - return operation; - } - - public void setOperation(Integer operation) { - this.operation = operation; - } - - public Date getCreateTime() { - return createTime; - } - - public void setCreateTime(Date createTime) { - this.createTime = createTime; - } - - @Override - public String toString() { - return "KafkaUserDO{" + - "id=" + id + - ", appId='" + appId + '\'' + - ", password='" + password + '\'' + - ", userType=" + userType + - ", operation=" + operation + - ", createTime=" + createTime + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/TopicConnectionDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/TopicConnectionDO.java deleted file mode 100644 index 4707d271..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/TopicConnectionDO.java +++ /dev/null @@ -1,108 +0,0 @@ -package 
com.xiaojukeji.kafka.manager.common.entity.pojo.gateway; - -import java.util.Date; - -/** - * Topic连接信息 - * @author zengqiao - * @date 20/7/6 - */ -public class TopicConnectionDO { - private Long id; - - private String appId; - - private Long clusterId; - - private String topicName; - - private String type; - - private String ip; - - private String clientVersion; - - private Date createTime; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getType() { - return type; - } - - public void setType(String type) { - this.type = type; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getIp() { - return ip; - } - - public void setIp(String ip) { - this.ip = ip; - } - - public String getClientVersion() { - return clientVersion; - } - - public void setClientVersion(String clientVersion) { - this.clientVersion = clientVersion; - } - - public Date getCreateTime() { - return createTime; - } - - public void setCreateTime(Date createTime) { - this.createTime = createTime; - } - - @Override - public String toString() { - return "TopicConnectionDO{" + - "id=" + id + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", type='" + type + '\'' + - ", appId='" + appId + '\'' + - ", ip='" + ip + '\'' + - ", clientVersion='" + clientVersion + '\'' + - ", createTime=" + createTime + - '}'; - } - - public String uniqueKey() { - return appId + clusterId + topicName + type + ip; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/TopicReportDO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/TopicReportDO.java deleted file mode 100644 index e495cdc6..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/pojo/gateway/TopicReportDO.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.pojo.gateway; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/7/29 - */ -public class TopicReportDO { - private Long id; - - private Long clusterId; - - private String topicName; - - private Date startTime; - - private Date endTime; - - private Date createTime; - - private Date modifyTime; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Date getStartTime() { - return startTime; - } - - public void setStartTime(Date startTime) { - this.startTime = startTime; - } - - public Date getEndTime() { - return endTime; - } - - public void setEndTime(Date endTime) { - this.endTime = endTime; - } - - public Date getCreateTime() { - return createTime; - } - - public void setCreateTime(Date createTime) { - this.createTime = createTime; - } - - public Date getModifyTime() { - return modifyTime; - } - - public void setModifyTime(Date modifyTime) { - 
this.modifyTime = modifyTime; - } - - @Override - public String toString() { - return "TopicReportDO{" + - "id=" + id + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", startTime=" + startTime + - ", endTime=" + endTime + - ", createTime=" + createTime + - ", modifyTime=" + modifyTime + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/AccountRoleVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/AccountRoleVO.java deleted file mode 100644 index aca8087a..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/AccountRoleVO.java +++ /dev/null @@ -1,46 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.common; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/8/26 - */ -@ApiModel(description = "账号角色信息") -public class AccountRoleVO { - @ApiModelProperty(value = "账号") - private String username; - - @ApiModelProperty(value = "角色, 0:Normal, 1:RD, 2:OP") - private Integer role; - - public AccountRoleVO(String username, Integer role) { - this.username = username; - this.role = role; - } - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = username; - } - - public Integer getRole() { - return role; - } - - public void setRole(Integer role) { - this.role = role; - } - - @Override - public String toString() { - return "AccountRoleVO{" + - "username='" + username + '\'' + - ", role=" + role + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/AccountSummaryVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/AccountSummaryVO.java deleted file mode 100644 index 08ad1817..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/AccountSummaryVO.java +++ /dev/null @@ -1,53 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.common; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/8/26 - */ -@ApiModel(description = "账号概要信息") -public class AccountSummaryVO { - @ApiModelProperty(value = "账号") - private String username; - - @ApiModelProperty(value = "中文名") - private String chineseName; - - @ApiModelProperty(value = "部门") - private String department; - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = username; - } - - public String getChineseName() { - return chineseName; - } - - public void setChineseName(String chineseName) { - this.chineseName = chineseName; - } - - public String getDepartment() { - return department; - } - - public void setDepartment(String department) { - this.department = department; - } - - @Override - public String toString() { - return "AccountSummaryVO{" + - "username='" + username + '\'' + - ", chineseName='" + chineseName + '\'' + - ", department='" + department + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/AccountVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/AccountVO.java deleted file mode 100644 index 
33a2eba8..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/AccountVO.java +++ /dev/null @@ -1,65 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.common; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/21 - */ -@ApiModel(description = "账号信息") -public class AccountVO { - @ApiModelProperty(value = "账号") - private String username; - - @ApiModelProperty(value = "中文名") - private String chineseName; - - @ApiModelProperty(value = "部门") - private String department; - - @ApiModelProperty(value = "角色, 0:Normal, 1:RD, 2:OP") - private Integer role; - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = username; - } - - public String getChineseName() { - return chineseName; - } - - public void setChineseName(String chineseName) { - this.chineseName = chineseName; - } - - public String getDepartment() { - return department; - } - - public void setDepartment(String department) { - this.department = department; - } - - public Integer getRole() { - return role; - } - - public void setRole(Integer role) { - this.role = role; - } - - @Override - public String toString() { - return "AccountVO{" + - "username='" + username + '\'' + - ", chineseName='" + chineseName + '\'' + - ", department='" + department + '\'' + - ", role=" + role + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/BrokerOverviewVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/BrokerOverviewVO.java deleted file mode 100644 index b2cde3bf..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/BrokerOverviewVO.java +++ /dev/null @@ -1,197 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.common; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 19/4/3 - */ -@ApiModel(description = "Broker信息概览") -public class BrokerOverviewVO { - @ApiModelProperty(value = "brokerId") - private Integer brokerId; - - @ApiModelProperty(value = "主机名") - private String host; - - @ApiModelProperty(value = "端口") - private Integer port; - - @ApiModelProperty(value = "jmx端口") - private Integer jmxPort; - - @ApiModelProperty(value = "启动时间") - private Long startTime; - - @ApiModelProperty(value = "流入流量") - private Object byteIn; - - @ApiModelProperty(value = "流出流量") - private Object byteOut; - - @ApiModelProperty(value = "分区数") - private Integer partitionCount; - - @ApiModelProperty(value = "失效副本分区的个数") - private Integer underReplicatedPartitions; - - @ApiModelProperty(value = "未同步") - private Boolean underReplicated; - - @ApiModelProperty(value = "broker状态[0:在线, -1:不在线]") - private Integer status; - - @ApiModelProperty(value = "Region名称") - private String regionName; - - @ApiModelProperty(value = "峰值状态") - private Integer peakFlowStatus; - - @ApiModelProperty(value = "Kafka版本") - private String kafkaVersion; - - @ApiModelProperty(value = "Leader数") - private Integer leaderCount; - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public Integer getPort() { - return port; - } 
- - public void setPort(Integer port) { - this.port = port; - } - - public Integer getJmxPort() { - return jmxPort; - } - - public void setJmxPort(Integer jmxPort) { - this.jmxPort = jmxPort; - } - - public Long getStartTime() { - return startTime; - } - - public void setStartTime(Long startTime) { - this.startTime = startTime; - } - - public Object getByteIn() { - return byteIn; - } - - public void setByteIn(Object byteIn) { - this.byteIn = byteIn; - } - - public Object getByteOut() { - return byteOut; - } - - public void setByteOut(Object byteOut) { - this.byteOut = byteOut; - } - - public Integer getPartitionCount() { - return partitionCount; - } - - public void setPartitionCount(Integer partitionCount) { - this.partitionCount = partitionCount; - } - - public Integer getUnderReplicatedPartitions() { - return underReplicatedPartitions; - } - - public void setUnderReplicatedPartitions(Integer underReplicatedPartitions) { - this.underReplicatedPartitions = underReplicatedPartitions; - } - - public Boolean getUnderReplicated() { - return underReplicated; - } - - public void setUnderReplicated(Boolean underReplicated) { - this.underReplicated = underReplicated; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public String getRegionName() { - return regionName; - } - - public void setRegionName(String regionName) { - this.regionName = regionName; - } - - public Integer getPeakFlowStatus() { - return peakFlowStatus; - } - - public void setPeakFlowStatus(Integer peakFlowStatus) { - this.peakFlowStatus = peakFlowStatus; - } - - public String getKafkaVersion() { - return kafkaVersion; - } - - public void setKafkaVersion(String kafkaVersion) { - this.kafkaVersion = kafkaVersion; - } - - public Integer getLeaderCount() { - return leaderCount; - } - - public void setLeaderCount(Integer leaderCount) { - this.leaderCount = leaderCount; - } - - @Override - public String toString() { - return "BrokerOverviewVO{" + - "brokerId=" + brokerId + - ", host='" + host + '\'' + - ", port=" + port + - ", jmxPort=" + jmxPort + - ", startTime=" + startTime + - ", byteIn=" + byteIn + - ", byteOut=" + byteOut + - ", partitionCount=" + partitionCount + - ", underReplicatedPartitions=" + underReplicatedPartitions + - ", underReplicated=" + underReplicated + - ", status=" + status + - ", regionName='" + regionName + '\'' + - ", peakFlowStatus=" + peakFlowStatus + - ", kafkaVersion='" + kafkaVersion + '\'' + - ", leaderCount=" + leaderCount + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/OrderPartitionVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/OrderPartitionVO.java deleted file mode 100644 index 2e03f9a5..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/OrderPartitionVO.java +++ /dev/null @@ -1,223 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.common; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 19/6/23 - */ -@ApiModel(value = "OrderPartitionVO", description = "分区申请工单") -public class OrderPartitionVO { - @ApiModelProperty(value = "工单ID") - private Long orderId; - - @ApiModelProperty(value = "集群Id") - private Long clusterId; - - @ApiModelProperty(value = "集群名称") - private String clusterName; - - @ApiModelProperty(value = "Topic名称") - 
private String topicName; - - @ApiModelProperty(value = "申请人") - private String applicant; - - @ApiModelProperty(value = "预计峰值流量(MB/s)") - private Long predictBytesIn; - - @ApiModelProperty(value = "近24小时峰值流量(MB/s)") - private Long realBytesIn; - - @ApiModelProperty(value = "当前分区数") - private Integer partitionNum; - - @ApiModelProperty(value = "当前Topic所处的Region") - private List regionNameList; - - @ApiModelProperty(value = "Region的brokerId列表") - private List regionBrokerIdList; - - @ApiModelProperty(value = "Topic的brokerId列表") - private List brokerIdList; - - @ApiModelProperty(value = "备注信息") - private String description; - - @ApiModelProperty(value = "工单状态, 0:待处理, 1:通过, 2:拒绝, 3:撤销") - private Integer orderStatus; - - @ApiModelProperty(value = "审批人") - private String approver; - - @ApiModelProperty(value = "审批意见") - private String approvalOpinions; - - @ApiModelProperty(value = "创建时间") - private Long gmtCreate; - - @ApiModelProperty(value = "修改时间") - private Long gmtModify; - - public Long getOrderId() { - return orderId; - } - - public void setOrderId(Long orderId) { - this.orderId = orderId; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getApplicant() { - return applicant; - } - - public void setApplicant(String applicant) { - this.applicant = applicant; - } - - public Long getPredictBytesIn() { - return predictBytesIn; - } - - public void setPredictBytesIn(Long predictBytesIn) { - this.predictBytesIn = predictBytesIn; - } - - public Long getRealBytesIn() { - return realBytesIn; - } - - public void setRealBytesIn(Long realBytesIn) { - this.realBytesIn = realBytesIn; - } - - public Integer getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Integer partitionNum) { - this.partitionNum = partitionNum; - } - - public List getRegionNameList() { - return regionNameList; - } - - public void setRegionNameList(List regionNameList) { - this.regionNameList = regionNameList; - } - - public List getRegionBrokerIdList() { - return regionBrokerIdList; - } - - public void setRegionBrokerIdList(List regionBrokerIdList) { - this.regionBrokerIdList = regionBrokerIdList; - } - - public List getBrokerIdList() { - return brokerIdList; - } - - public void setBrokerIdList(List brokerIdList) { - this.brokerIdList = brokerIdList; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Integer getOrderStatus() { - return orderStatus; - } - - public void setOrderStatus(Integer orderStatus) { - this.orderStatus = orderStatus; - } - - public String getApprover() { - return approver; - } - - public void setApprover(String approver) { - this.approver = approver; - } - - public String getApprovalOpinions() { - return approvalOpinions; - } - - public void setApprovalOpinions(String approvalOpinions) { - this.approvalOpinions = approvalOpinions; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Long getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Long 
gmtModify) { - this.gmtModify = gmtModify; - } - - @Override - public String toString() { - return "OrderPartitionVO{" + - "orderId=" + orderId + - ", clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", applicant='" + applicant + '\'' + - ", predictBytesIn=" + predictBytesIn + - ", realBytesIn=" + realBytesIn + - ", partitionNum=" + partitionNum + - ", regionNameList=" + regionNameList + - ", regionBrokerIdList=" + regionBrokerIdList + - ", brokerIdList=" + brokerIdList + - ", description='" + description + '\'' + - ", orderStatus=" + orderStatus + - ", approver='" + approver + '\'' + - ", approvalOpinions='" + approvalOpinions + '\'' + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/OrderTopicVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/OrderTopicVO.java deleted file mode 100644 index b5f95492..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/OrderTopicVO.java +++ /dev/null @@ -1,229 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.common; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 19/6/18 - */ -@ApiModel(value = "Topic工单") -public class OrderTopicVO { - @ApiModelProperty(value = "工单ID") - private Long orderId; - - @ApiModelProperty(value = "集群Id") - private Long clusterId; - - @ApiModelProperty(value = "集群名称") - private String clusterName; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "流量上限(KB)") - private Long peakBytesIn; - - @ApiModelProperty(value = "保留时间") - private Long retentionTime; - - private Integer partitionNum; - - private Integer replicaNum; - - private String regions; - - private String brokers; - - @ApiModelProperty(value = "申请人") - private String applicant; - - @ApiModelProperty(value = "负责人") - private String principals; - - @ApiModelProperty(value = "备注信息") - private String description; - - @ApiModelProperty(value = "工单状态, 0:待处理, 1:通过, 2:拒绝, 3:撤销") - private Integer orderStatus; - - @ApiModelProperty(value = "审批人") - private String approver; - - @ApiModelProperty(value = "审批意见") - private String opinion; - - @ApiModelProperty(value = "创建时间") - private Long gmtCreate; - - @ApiModelProperty(value = "修改时间") - private Long gmtModify; - - public Long getOrderId() { - return orderId; - } - - public void setOrderId(Long orderId) { - this.orderId = orderId; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Long getPeakBytesIn() { - return peakBytesIn; - } - - public void setPeakBytesIn(Long peakBytesIn) { - this.peakBytesIn = peakBytesIn; - } - - public Long getRetentionTime() { - return retentionTime; - } - - public void setRetentionTime(Long retentionTime) { - this.retentionTime = retentionTime; - } - - public Integer getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Integer 
partitionNum) { - this.partitionNum = partitionNum; - } - - public Integer getReplicaNum() { - return replicaNum; - } - - public void setReplicaNum(Integer replicaNum) { - this.replicaNum = replicaNum; - } - - public String getRegions() { - return regions; - } - - public void setRegions(String regions) { - this.regions = regions; - } - - public String getBrokers() { - return brokers; - } - - public void setBrokers(String brokers) { - this.brokers = brokers; - } - - public String getApplicant() { - return applicant; - } - - public void setApplicant(String applicant) { - this.applicant = applicant; - } - - public String getPrincipals() { - return principals; - } - - public void setPrincipals(String principals) { - this.principals = principals; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Integer getOrderStatus() { - return orderStatus; - } - - public void setOrderStatus(Integer orderStatus) { - this.orderStatus = orderStatus; - } - - public String getApprover() { - return approver; - } - - public void setApprover(String approver) { - this.approver = approver; - } - - public String getOpinion() { - return opinion; - } - - public void setOpinion(String opinion) { - this.opinion = opinion; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Long getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Long gmtModify) { - this.gmtModify = gmtModify; - } - - @Override - public String toString() { - return "OrderTopicVO{" + - "orderId=" + orderId + - ", clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", peakBytesIn=" + peakBytesIn + - ", retentionTime=" + retentionTime + - ", partitionNum=" + partitionNum + - ", replicaNum=" + replicaNum + - ", regions='" + regions + '\'' + - ", brokers='" + brokers + '\'' + - ", applicant='" + applicant + '\'' + - ", principals='" + principals + '\'' + - ", description='" + description + '\'' + - ", orderStatus=" + orderStatus + - ", approver='" + approver + '\'' + - ", opinion='" + opinion + '\'' + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/RealTimeMetricsVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/RealTimeMetricsVO.java deleted file mode 100644 index 3dbd8272..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/RealTimeMetricsVO.java +++ /dev/null @@ -1,116 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.common; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * 实时流量信息 - * @author zengqiao - * @date 19/4/1 - */ -@ApiModel(value = "实时流量信息(Cluster/broker/Topic)") -public class RealTimeMetricsVO { - @ApiModelProperty(value = "每秒进入消息条") - private List messageIn; - - @ApiModelProperty(value = "每秒字节流入") - private List byteIn; - - @ApiModelProperty(value = "每秒字节流出") - private List byteOut; - - @ApiModelProperty(value = "每秒拒绝字节") - private List byteRejected; - - @ApiModelProperty(value = "失败fetch的请求") - private List failedFetchRequest; - - @ApiModelProperty(value = "失败produce的请求") - private List failedProduceRequest; - - 
@ApiModelProperty(value = "总的produce请求") - private List totalProduceRequest; - - @ApiModelProperty(value = "总的fetch请求") - private List totalFetchRequest; - - public List getMessageIn() { - return messageIn; - } - - public void setMessageIn(List messageIn) { - this.messageIn = messageIn; - } - - public List getByteIn() { - return byteIn; - } - - public void setByteIn(List byteIn) { - this.byteIn = byteIn; - } - - public List getByteOut() { - return byteOut; - } - - public void setByteOut(List byteOut) { - this.byteOut = byteOut; - } - - public List getByteRejected() { - return byteRejected; - } - - public void setByteRejected(List byteRejected) { - this.byteRejected = byteRejected; - } - - public List getFailedFetchRequest() { - return failedFetchRequest; - } - - public void setFailedFetchRequest(List failedFetchRequest) { - this.failedFetchRequest = failedFetchRequest; - } - - public List getFailedProduceRequest() { - return failedProduceRequest; - } - - public void setFailedProduceRequest(List failedProduceRequest) { - this.failedProduceRequest = failedProduceRequest; - } - - public List getTotalProduceRequest() { - return totalProduceRequest; - } - - public void setTotalProduceRequest(List totalProduceRequest) { - this.totalProduceRequest = totalProduceRequest; - } - - public List getTotalFetchRequest() { - return totalFetchRequest; - } - - public void setTotalFetchRequest(List totalFetchRequest) { - this.totalFetchRequest = totalFetchRequest; - } - - @Override - public String toString() { - return "RealTimeMetricsVO{" + - "messageIn=" + messageIn + - ", byteIn=" + byteIn + - ", byteOut=" + byteOut + - ", byteRejected=" + byteRejected + - ", failedFetchRequest=" + failedFetchRequest + - ", failedProduceRequest=" + failedProduceRequest + - ", totalProduceRequest=" + totalProduceRequest + - ", totalFetchRequest=" + totalFetchRequest + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/TopicOverviewVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/TopicOverviewVO.java deleted file mode 100644 index 724e31b2..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/TopicOverviewVO.java +++ /dev/null @@ -1,174 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.common; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * Topic信息 - * @author zengqiao - * @date 19/4/1 - */ -@ApiModel(description = "Topic信息概览") -public class TopicOverviewVO { - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "副本数") - private Integer replicaNum; - - @ApiModelProperty(value = "分区数") - private Integer partitionNum; - - @ApiModelProperty(value = "保存时间(ms)") - private Long retentionTime; - - @ApiModelProperty(value = "每秒流入流量(B)") - private Object byteIn; - - @ApiModelProperty(value = "每秒流出流量(B)") - private Object byteOut; - - @ApiModelProperty(value = "发送请求数(个/秒)") - private Object produceRequest; - - @ApiModelProperty(value = "应用名称") - private String appName; - - @ApiModelProperty(value = "应用ID") - private String appId; - - @ApiModelProperty(value = "说明") - private String description; - - @ApiModelProperty(value = "Topic更新时间") - private Long updateTime; - - @ApiModelProperty(value = "逻辑集群id") - private Long logicalClusterId; - - public Long getClusterId() { - return clusterId; - } - - public 
void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getReplicaNum() { - return replicaNum; - } - - public void setReplicaNum(Integer replicaNum) { - this.replicaNum = replicaNum; - } - - public Integer getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Integer partitionNum) { - this.partitionNum = partitionNum; - } - - public Long getRetentionTime() { - return retentionTime; - } - - public void setRetentionTime(Long retentionTime) { - this.retentionTime = retentionTime; - } - - public Object getByteIn() { - return byteIn; - } - - public void setByteIn(Object byteIn) { - this.byteIn = byteIn; - } - - public Object getByteOut() { - return byteOut; - } - - public void setByteOut(Object byteOut) { - this.byteOut = byteOut; - } - - public Object getProduceRequest() { - return produceRequest; - } - - public void setProduceRequest(Object produceRequest) { - this.produceRequest = produceRequest; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Long getUpdateTime() { - return updateTime; - } - - public void setUpdateTime(Long updateTime) { - this.updateTime = updateTime; - } - - public Long getLogicalClusterId() { - return logicalClusterId; - } - - public void setLogicalClusterId(Long logicalClusterId) { - this.logicalClusterId = logicalClusterId; - } - - @Override - public String toString() { - return "TopicOverviewVO{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", replicaNum=" + replicaNum + - ", partitionNum=" + partitionNum + - ", retentionTime=" + retentionTime + - ", byteIn=" + byteIn + - ", byteOut=" + byteOut + - ", produceRequest=" + produceRequest + - ", appName='" + appName + '\'' + - ", appId='" + appId + '\'' + - ", description='" + description + '\'' + - ", updateTime=" + updateTime + - ", logicalClusterId=" + logicalClusterId + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/TopicThrottleVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/TopicThrottleVO.java deleted file mode 100644 index ca1c2d7a..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/common/TopicThrottleVO.java +++ /dev/null @@ -1,67 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.common; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/4/28 - */ -@ApiModel(description="集群限流信息") -public class TopicThrottleVO { - @ApiModelProperty(value="Topic名称") - private String topicName; - - @ApiModelProperty(value="AppId") - private String appId; - - @ApiModelProperty(value="BrokerId列表") - private List brokerIdList; - - @ApiModelProperty(value = "客户端类型[Produce|Fetch]") - private String throttleClientType; - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public 
String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public List getBrokerIdList() { - return brokerIdList; - } - - public void setBrokerIdList(List brokerIdList) { - this.brokerIdList = brokerIdList; - } - - public String getThrottleClientType() { - return throttleClientType; - } - - public void setThrottleClientType(String throttleClientType) { - this.throttleClientType = throttleClientType; - } - - @Override - public String toString() { - return "TopicThrottleVO{" + - "topicName='" + topicName + '\'' + - ", appId='" + appId + '\'' + - ", brokerIdList=" + brokerIdList + - ", throttleClientType='" + throttleClientType + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/gateway/GatewayConfigVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/gateway/GatewayConfigVO.java deleted file mode 100644 index 95707080..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/gateway/GatewayConfigVO.java +++ /dev/null @@ -1,40 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.gateway; - -/** - * @author zengqiao - * @date 20/7/28 - */ -public class GatewayConfigVO { - private String version; - - private String data; - - public GatewayConfigVO(String version, String data) { - this.version = version; - this.data = data; - } - - public String getVersion() { - return version; - } - - public void setVersion(String version) { - this.version = version; - } - - public String getData() { - return data; - } - - public void setData(String data) { - this.data = data; - } - - @Override - public String toString() { - return "GatewayConfigVO{" + - "version='" + version + '\'' + - ", data='" + data + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/gateway/KafkaAclVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/gateway/KafkaAclVO.java deleted file mode 100644 index 5f0ac42c..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/gateway/KafkaAclVO.java +++ /dev/null @@ -1,68 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.gateway; - -/** - * @author zengqiao - * @date 20/7/27 - */ -public class KafkaAclVO { - private String topicName; - - private Long timestamp; - - private Integer access; - - private Integer operation; - - private String username; - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Long getTimestamp() { - return timestamp; - } - - public void setTimestamp(Long timestamp) { - this.timestamp = timestamp; - } - - public Integer getAccess() { - return access; - } - - public void setAccess(Integer access) { - this.access = access; - } - - public Integer getOperation() { - return operation; - } - - public void setOperation(Integer operation) { - this.operation = operation; - } - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = username; - } - - @Override - public String toString() { - return "KafkaAclVO{" + - "topicName='" + topicName + '\'' + - ", timestamp=" + timestamp + - ", access=" + access + - ", operation=" + operation + - ", username='" + username + '\'' + - '}'; - } -} \ No newline at end of file 
diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/gateway/KafkaSecurityVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/gateway/KafkaSecurityVO.java deleted file mode 100644 index 14192f35..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/gateway/KafkaSecurityVO.java +++ /dev/null @@ -1,26 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.gateway; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/8/3 - */ -public class KafkaSecurityVO { - List rows; - - public List getRows() { - return rows; - } - - public void setRows(List rows) { - this.rows = rows; - } - - @Override - public String toString() { - return "KafkaSecurityVO{" + - "rows=" + rows + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/gateway/KafkaUserVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/gateway/KafkaUserVO.java deleted file mode 100644 index bae66f85..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/gateway/KafkaUserVO.java +++ /dev/null @@ -1,68 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.gateway; - -/** - * @author zengqiao - * @date 20/7/27 - */ -public class KafkaUserVO { - private String username; - - private String password; - - private Integer operation; - - private Long timestamp; - - private Integer userType; - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = username; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public Integer getOperation() { - return operation; - } - - public void setOperation(Integer operation) { - this.operation = operation; - } - - public Long getTimestamp() { - return timestamp; - } - - public void setTimestamp(Long timestamp) { - this.timestamp = timestamp; - } - - public Integer getUserType() { - return userType; - } - - public void setUserType(Integer userType) { - this.userType = userType; - } - - @Override - public String toString() { - return "KafkaUserVO{" + - "username='" + username + '\'' + - ", password='" + password + '\'' + - ", operation=" + operation + - ", timestamp=" + timestamp + - ", userType=" + userType + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/BillStaffDetailVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/BillStaffDetailVO.java deleted file mode 100644 index 9746aa3a..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/BillStaffDetailVO.java +++ /dev/null @@ -1,55 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/4/26 - */ -@ApiModel(value = "用户月度账单详情") -public class BillStaffDetailVO { - @ApiModelProperty(value = "用户名") - private String username; - - @ApiModelProperty(value = "总金额") - private Double costSum; - - @ApiModelProperty(value = "账单详情") - private List billList; - - public String getUsername() { - return username; - } - - public void setUsername(String username) 
{ - this.username = username; - } - - public Double getCostSum() { - return costSum; - } - - public void setCostSum(Double costSum) { - this.costSum = costSum; - } - - public List getBillList() { - return billList; - } - - public void setBillList(List billList) { - this.billList = billList; - } - - @Override - public String toString() { - return "BillStaffDetailVO{" + - "username='" + username + '\'' + - ", costSum=" + costSum + - ", billList=" + billList + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/BillStaffSummaryVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/BillStaffSummaryVO.java deleted file mode 100644 index 9e7d74e1..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/BillStaffSummaryVO.java +++ /dev/null @@ -1,89 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/26 - */ -@ApiModel(value = "用户月度账单概览") -public class BillStaffSummaryVO { - @ApiModelProperty(value = "用户名") - private String username; - - @ApiModelProperty(value = "Topic数量") - private Integer topicNum; - - @ApiModelProperty(value = "配额") - private Double quota; - - @ApiModelProperty(value = "金额") - private Double cost; - - @ApiModelProperty(value = "月份") - private String gmtMonth; - - @ApiModelProperty(value = "时间戳") - private Long timestamp; - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = username; - } - - public Integer getTopicNum() { - return topicNum; - } - - public void setTopicNum(Integer topicNum) { - this.topicNum = topicNum; - } - - public Double getQuota() { - return quota; - } - - public void setQuota(Double quota) { - this.quota = quota; - } - - public Double getCost() { - return cost; - } - - public void setCost(Double cost) { - this.cost = cost; - } - - public String getGmtMonth() { - return gmtMonth; - } - - public void setGmtMonth(String gmtMonth) { - this.gmtMonth = gmtMonth; - } - - public Long getTimestamp() { - return timestamp; - } - - public void setTimestamp(Long timestamp) { - this.timestamp = timestamp; - } - - @Override - public String toString() { - return "BillStaffSummaryVO{" + - "username='" + username + '\'' + - ", topicNum=" + topicNum + - ", quota=" + quota + - ", cost=" + cost + - ", gmtMonth='" + gmtMonth + '\'' + - ", timestamp=" + timestamp + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/BillTopicVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/BillTopicVO.java deleted file mode 100644 index 33142c51..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/BillTopicVO.java +++ /dev/null @@ -1,77 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/26 - */ -@ApiModel(value = "Topic账单") -public class BillTopicVO { - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "集群名称") - private String clusterName; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - 
@ApiModelProperty(value = "配额") - private Double quota; - - @ApiModelProperty(value = "金额") - private Double cost; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Double getQuota() { - return quota; - } - - public void setQuota(Double quota) { - this.quota = quota; - } - - public Double getCost() { - return cost; - } - - public void setCost(Double cost) { - this.cost = cost; - } - - @Override - public String toString() { - return "BillTopicVO{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", quota=" + quota + - ", cost=" + cost + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/QuotaVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/QuotaVO.java deleted file mode 100644 index c741b677..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/QuotaVO.java +++ /dev/null @@ -1,68 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal; - -/** - * @author zengqiao - * @date 20/5/12 - */ -public class QuotaVO { - private Long clusterId; - - private String topicName; - - private String appId; - - private Long produceQuota; - - private Long consumeQuota; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public Long getProduceQuota() { - return produceQuota; - } - - public void setProduceQuota(Long produceQuota) { - this.produceQuota = produceQuota; - } - - public Long getConsumeQuota() { - return consumeQuota; - } - - public void setConsumeQuota(Long consumeQuota) { - this.consumeQuota = consumeQuota; - } - - @Override - public String toString() { - return "QuotaVO{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", appId='" + appId + '\'' + - ", produceQuota=" + produceQuota + - ", consumeQuota=" + consumeQuota + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/TopicBusinessInfoVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/TopicBusinessInfoVO.java deleted file mode 100644 index 61440a41..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/TopicBusinessInfoVO.java +++ /dev/null @@ -1,77 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zhongyuankai - * @date 20/09/08 - */ -@ApiModel(value = "Topic业务信息") -public class TopicBusinessInfoVO { - @ApiModelProperty(value = "应用id") - private String appId; - - @ApiModelProperty(value = "应用名称") - private 
String appName; - - @ApiModelProperty(value = "负责人") - private String principals; - - @ApiModelProperty(value = "集群Id") - private Long clusterId; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public String getPrincipals() { - return principals; - } - - public void setPrincipals(String principals) { - this.principals = principals; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - @Override - public String toString() { - return "TopicBusinessInfoVO{" + - "appId='" + appId + '\'' + - ", appName='" + appName + '\'' + - ", principals='" + principals + '\'' + - ", clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/app/AppSummaryVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/app/AppSummaryVO.java deleted file mode 100644 index 9cb486bf..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/app/AppSummaryVO.java +++ /dev/null @@ -1,53 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.app; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/5/22 - */ -@ApiModel(description="App概览信息") -public class AppSummaryVO { - @ApiModelProperty(value="AppId") - private String appId; - - @ApiModelProperty(value="App名称") - private String name; - - @ApiModelProperty(value="App负责人") - private String principals; - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getPrincipals() { - return principals; - } - - public void setPrincipals(String principals) { - this.principals = principals; - } - - @Override - public String toString() { - return "AppSummaryVO{" + - "appId='" + appId + '\'' + - ", name='" + name + '\'' + - ", principals='" + principals + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/app/AppTopicAuthorityVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/app/AppTopicAuthorityVO.java deleted file mode 100644 index d5a3fae4..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/app/AppTopicAuthorityVO.java +++ /dev/null @@ -1,48 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.app; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; -@ApiModel(value = "AppTopicAuthority") -public class AppTopicAuthorityVO { - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "appId") - private String appId; - - @ApiModelProperty(value = "权限: 0:无权限, 1:可消费 2:可发送 3:可发送消费 4:可管理") - private Integer access; - - 
public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public Integer getAccess() { - return access; - } - - public void setAccess(Integer access) { - this.access = access; - } - - @Override - public String toString() { - return "AppTopicAuthorityVO{" + - ", topicName='" + topicName + '\'' + - ", appId='" + appId + '\'' + - ", access=" + access + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/app/AppTopicVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/app/AppTopicVO.java deleted file mode 100644 index 228bcd85..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/app/AppTopicVO.java +++ /dev/null @@ -1,89 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.app; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/7 - */ -@ApiModel(description="AppTopic") -public class AppTopicVO { - @ApiModelProperty(value = "逻辑集群ID") - private Long clusterId; - - @ApiModelProperty(value = "逻辑集群名称") - private String clusterName; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "权限: 0:无权限, 1:可消费 2:可发送 3:可发送消费 4:可管理") - private Integer access; - - @ApiModelProperty(value = "操作人") - private String operator; - - @ApiModelProperty(value = "权限授予时间") - private Long gmtCreate; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getAccess() { - return access; - } - - public void setAccess(Integer access) { - this.access = access; - } - - public String getOperator() { - return operator; - } - - public void setOperator(String operator) { - this.operator = operator; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - @Override - public String toString() { - return "AppTopicVO{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", access=" + access + - ", operator='" + operator + '\'' + - ", gmtCreate=" + gmtCreate + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/app/AppVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/app/AppVO.java deleted file mode 100644 index dab97dd7..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/app/AppVO.java +++ /dev/null @@ -1,89 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.app; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/7 - */ -@ApiModel(description="App信息") -public class AppVO { - @ApiModelProperty(value="AppId") - 
private String appId; - - @ApiModelProperty(value="App名称") - private String name; - - @ApiModelProperty(value="App密码") - private String password; - - @ApiModelProperty(value="申请人") - private String applicant; - - @ApiModelProperty(value="App描述") - private String description; - - @ApiModelProperty(value="App负责人") - private String principals; - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public String getApplicant() { - return applicant; - } - - public void setApplicant(String applicant) { - this.applicant = applicant; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public String getPrincipals() { - return principals; - } - - public void setPrincipals(String principals) { - this.principals = principals; - } - - @Override - public String toString() { - return "AppVO{" + - "appId='" + appId + '\'' + - ", name='" + name + '\'' + - ", password='" + password + '\'' + - ", applicant='" + applicant + '\'' + - ", description='" + description + '\'' + - ", principals='" + principals + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/app/DeprecatedAppVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/app/DeprecatedAppVO.java deleted file mode 100644 index 034cbc4f..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/app/DeprecatedAppVO.java +++ /dev/null @@ -1,214 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.app; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/8/13 - */ -@Deprecated -public class DeprecatedAppVO { - private Long id; - - private Date gmtCreate; - - private Date gmtModify; - - private String appId; - - private String name; - - private String password; - - private String type = "离线应用"; - - private String applicant; - - private String principal; - - private String department = ""; - - private Long department_id = null; - - private String description; - - private String approveUser = ""; - - private String approveTime = ""; - - private String approveInfo = ""; - - private Integer status = 1; - - private String bpmInstanceId = ""; - - private Boolean lastUsed = false; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public String getType() { - return type; - } - - public void setType(String type) { - this.type = type; - } - - public String 
getApplicant() { - return applicant; - } - - public void setApplicant(String applicant) { - this.applicant = applicant; - } - - public String getPrincipal() { - return principal; - } - - public void setPrincipal(String principal) { - this.principal = principal; - } - - public String getDepartment() { - return department; - } - - public void setDepartment(String department) { - this.department = department; - } - - public Long getDepartment_id() { - return department_id; - } - - public void setDepartment_id(Long department_id) { - this.department_id = department_id; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public String getApproveUser() { - return approveUser; - } - - public void setApproveUser(String approveUser) { - this.approveUser = approveUser; - } - - public String getApproveTime() { - return approveTime; - } - - public void setApproveTime(String approveTime) { - this.approveTime = approveTime; - } - - public String getApproveInfo() { - return approveInfo; - } - - public void setApproveInfo(String approveInfo) { - this.approveInfo = approveInfo; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public String getBpmInstanceId() { - return bpmInstanceId; - } - - public void setBpmInstanceId(String bpmInstanceId) { - this.bpmInstanceId = bpmInstanceId; - } - - public Boolean getLastUsed() { - return lastUsed; - } - - public void setLastUsed(Boolean lastUsed) { - this.lastUsed = lastUsed; - } - - @Override - public String toString() { - return "DeprecatedAppVO{" + - "id=" + id + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - ", appId='" + appId + '\'' + - ", name='" + name + '\'' + - ", password='" + password + '\'' + - ", type='" + type + '\'' + - ", applicant='" + applicant + '\'' + - ", principal='" + principal + '\'' + - ", department='" + department + '\'' + - ", department_id=" + department_id + - ", description='" + description + '\'' + - ", approveUser='" + approveUser + '\'' + - ", approveTime='" + approveTime + '\'' + - ", approveInfo='" + approveInfo + '\'' + - ", status=" + status + - ", bpmInstanceId='" + bpmInstanceId + '\'' + - ", lastUsed=" + lastUsed + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/cluster/ClusterBasicVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/cluster/ClusterBasicVO.java deleted file mode 100644 index 53410e05..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/cluster/ClusterBasicVO.java +++ /dev/null @@ -1,114 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.cluster; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * cluster basic info - * @author zengqiao - * @date 19/3/18 - */ -@ApiModel(value="ClusterBasicVO", description="集群基本信息") -public class ClusterBasicVO { - @ApiModelProperty(value="集群Id") - private Long clusterId; - - @ApiModelProperty(value="集群名称") - private String clusterName; - - @ApiModelProperty(value="bootstrap地址") - private String bootstrapServers; - - @ApiModelProperty(value="kafka版本") - private String kafkaVersion; - - @ApiModelProperty(value="broker数量") - private Integer brokerNum; - - @ApiModelProperty(value="topic数量") - private Integer 
topicNum; - - @ApiModelProperty(value="集群创建时间") - private Long gmtCreate; - - @ApiModelProperty(value="集群修改时间") - private Long gmtModify; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getBootstrapServers() { - return bootstrapServers; - } - - public void setBootstrapServers(String bootstrapServers) { - this.bootstrapServers = bootstrapServers; - } - - public String getKafkaVersion() { - return kafkaVersion; - } - - public void setKafkaVersion(String kafkaVersion) { - this.kafkaVersion = kafkaVersion; - } - - public Integer getBrokerNum() { - return brokerNum; - } - - public void setBrokerNum(Integer brokerNum) { - this.brokerNum = brokerNum; - } - - public Integer getTopicNum() { - return topicNum; - } - - public void setTopicNum(Integer topicNum) { - this.topicNum = topicNum; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Long getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Long gmtModify) { - this.gmtModify = gmtModify; - } - - @Override - public String toString() { - return "ClusterBasicVO{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", bootstrapServers='" + bootstrapServers + '\'' + - ", kafkaVersion='" + kafkaVersion + '\'' + - ", brokerNum=" + brokerNum + - ", topicNum=" + topicNum + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/cluster/ClusterNameDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/cluster/ClusterNameDTO.java deleted file mode 100644 index e99005d5..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/cluster/ClusterNameDTO.java +++ /dev/null @@ -1,70 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.cluster; - -import java.util.List; - -/** - * @author zhongyuankai - * @date 2020/5/18 - */ -public class ClusterNameDTO { - private Long physicalClusterId; - - private String physicalClusterName; - - private Long logicalClusterId; - - private String logicalClusterName; - - private List regionIdList; - - public Long getPhysicalClusterId() { - return physicalClusterId; - } - - public void setPhysicalClusterId(Long physicalClusterId) { - this.physicalClusterId = physicalClusterId; - } - - public String getPhysicalClusterName() { - return physicalClusterName; - } - - public void setPhysicalClusterName(String physicalClusterName) { - this.physicalClusterName = physicalClusterName; - } - - public Long getLogicalClusterId() { - return logicalClusterId; - } - - public void setLogicalClusterId(Long logicalClusterId) { - this.logicalClusterId = logicalClusterId; - } - - public String getLogicalClusterName() { - return logicalClusterName; - } - - public void setLogicalClusterName(String logicalClusterName) { - this.logicalClusterName = logicalClusterName; - } - - public List getRegionIdList() { - return regionIdList; - } - - public void setRegionIdList(List regionIdList) { - this.regionIdList = regionIdList; - } - - @Override - public String toString() { - return "ClusterNameDTO{" + - 
"physicalClusterId=" + physicalClusterId + - ", physicalClusterName='" + physicalClusterName + '\'' + - ", logicalClusterId=" + logicalClusterId + - ", logicalClusterName='" + logicalClusterName + '\'' + - ", regionIdList=" + regionIdList + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/cluster/LogicClusterVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/cluster/LogicClusterVO.java deleted file mode 100644 index 8fa5db9d..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/cluster/LogicClusterVO.java +++ /dev/null @@ -1,137 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.cluster; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/3/31 - */ -@ApiModel(description="逻辑集群信息") -public class LogicClusterVO { - @ApiModelProperty(value="逻辑集群ID") - private Long clusterId; - - @ApiModelProperty(value="逻辑集群名称") - private String clusterName; - - @ApiModelProperty(value="逻辑标识") - private String clusterIdentification; - - @ApiModelProperty(value="逻辑集群类型, 0:共享集群, 1:独享集群, 2:独立集群") - private Integer mode; - - @ApiModelProperty(value="逻辑Topic数量") - private Integer topicNum; - - @ApiModelProperty(value="集群版本") - private String clusterVersion; - - @ApiModelProperty(value="集群服务地址") - private String bootstrapServers; - - @ApiModelProperty(value="描述") - private String description; - - @ApiModelProperty(value="接入时间") - private Long gmtCreate; - - @ApiModelProperty(value="修改时间") - private Long gmtModify; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getClusterIdentification() { - return clusterIdentification; - } - - public void setClusterIdentification(String clusterIdentification) { - this.clusterIdentification = clusterIdentification; - } - - public Integer getMode() { - return mode; - } - - public void setMode(Integer mode) { - this.mode = mode; - } - - public Integer getTopicNum() { - return topicNum; - } - - public void setTopicNum(Integer topicNum) { - this.topicNum = topicNum; - } - - public String getClusterVersion() { - return clusterVersion; - } - - public void setClusterVersion(String clusterVersion) { - this.clusterVersion = clusterVersion; - } - - public String getBootstrapServers() { - return bootstrapServers; - } - - public void setBootstrapServers(String bootstrapServers) { - this.bootstrapServers = bootstrapServers; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Long getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Long gmtModify) { - this.gmtModify = gmtModify; - } - - @Override - public String toString() { - return "LogicClusterVO{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", clusterIdentification='" + clusterIdentification + '\'' + - ", mode=" + mode + - ", topicNum=" + topicNum + - ", clusterVersion='" + clusterVersion + '\'' + - ", bootstrapServers='" + 
bootstrapServers + '\'' + - ", description='" + description + '\'' + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/cluster/NormalClusterMetricsVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/cluster/NormalClusterMetricsVO.java deleted file mode 100644 index 1d3c6601..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/cluster/NormalClusterMetricsVO.java +++ /dev/null @@ -1,88 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.cluster; - -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/5/13 - */ -public class NormalClusterMetricsVO { - - @ApiModelProperty(value="每秒总共发送的请求") - private Double totalProduceRequestsPerSec; - - @ApiModelProperty(value="每秒流入的字节数") - private Double bytesInPerSec; - - @ApiModelProperty(value="每秒流出的字节数") - private Double bytesOutPerSec; - - @ApiModelProperty(value="每秒拒绝的字节数") - private Double bytesRejectedPerSec; - - @ApiModelProperty(value="每秒流入的消息数") - private Double messagesInPerSec; - - @ApiModelProperty(value="创建时间") - private Long gmtCreate; - - public Double getBytesInPerSec() { - return bytesInPerSec; - } - - public void setBytesInPerSec(Double bytesInPerSec) { - this.bytesInPerSec = bytesInPerSec; - } - - public Double getBytesOutPerSec() { - return bytesOutPerSec; - } - - public void setBytesOutPerSec(Double bytesOutPerSec) { - this.bytesOutPerSec = bytesOutPerSec; - } - - public Double getBytesRejectedPerSec() { - return bytesRejectedPerSec; - } - - public void setBytesRejectedPerSec(Double bytesRejectedPerSec) { - this.bytesRejectedPerSec = bytesRejectedPerSec; - } - - public Double getMessagesInPerSec() { - return messagesInPerSec; - } - - public void setMessagesInPerSec(Double messagesInPerSec) { - this.messagesInPerSec = messagesInPerSec; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Double getTotalProduceRequestsPerSec() { - return totalProduceRequestsPerSec; - } - - public void setTotalProduceRequestsPerSec(Double totalProduceRequestsPerSec) { - this.totalProduceRequestsPerSec = totalProduceRequestsPerSec; - } - - @Override - public String toString() { - return "NormalClusterMetricsVO{" + - "totalProduceRequestsPerSec=" + totalProduceRequestsPerSec + - ", bytesInPerSec=" + bytesInPerSec + - ", bytesOutPerSec=" + bytesOutPerSec + - ", bytesRejectedPerSec=" + bytesRejectedPerSec + - ", messagesInPerSec=" + messagesInPerSec + - ", gmtCreate=" + gmtCreate + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/cluster/TopicMetadataVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/cluster/TopicMetadataVO.java deleted file mode 100644 index 26965ae3..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/cluster/TopicMetadataVO.java +++ /dev/null @@ -1,55 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.cluster; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/4/29 - */ -@ApiModel(description="Topic元信息") -public class 
TopicMetadataVO { - @ApiModelProperty(value="Topic名称") - private String topicName; - - @ApiModelProperty(value="Topic分区列表") - private List partitionIdList; - - @ApiModelProperty(value="Topic分区数") - private Integer partitionNum; - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public List getPartitionIdList() { - return partitionIdList; - } - - public void setPartitionIdList(List partitionIdList) { - this.partitionIdList = partitionIdList; - } - - public Integer getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Integer partitionNum) { - this.partitionNum = partitionNum; - } - - @Override - public String toString() { - return "TopicMetadataVO{" + - "topicName='" + topicName + '\'' + - ", partitionIdList=" + partitionIdList + - ", partitionNum=" + partitionNum + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/consumer/ConsumerGroupDetailVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/consumer/ConsumerGroupDetailVO.java deleted file mode 100644 index d70557e2..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/consumer/ConsumerGroupDetailVO.java +++ /dev/null @@ -1,113 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 19/4/3 - */ -@ApiModel(value = "消费组的消费详情") -public class ConsumerGroupDetailVO { - @ApiModelProperty(value = "topic名称") - private String topicName; - - @ApiModelProperty(value = "消费组名称") - private String consumerGroup; - - @ApiModelProperty(value = "location") - private String location; - - @ApiModelProperty(value = "分区Id") - private Integer partitionId; - - @ApiModelProperty(value = "clientId") - private String clientId; - - @ApiModelProperty(value = "消费偏移量") - private Long consumeOffset; - - @ApiModelProperty(value = "partitionOffset") - private Long partitionOffset; - - @ApiModelProperty(value = "lag") - private Long lag; - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getConsumerGroup() { - return consumerGroup; - } - - public void setConsumerGroup(String consumerGroup) { - this.consumerGroup = consumerGroup; - } - - public String getLocation() { - return location; - } - - public void setLocation(String location) { - this.location = location; - } - - public Integer getPartitionId() { - return partitionId; - } - - public void setPartitionId(Integer partitionId) { - this.partitionId = partitionId; - } - - public String getClientId() { - return clientId; - } - - public void setClientId(String clientId) { - this.clientId = clientId; - } - - public Long getConsumeOffset() { - return consumeOffset; - } - - public void setConsumeOffset(Long consumeOffset) { - this.consumeOffset = consumeOffset; - } - - public Long getPartitionOffset() { - return partitionOffset; - } - - public void setPartitionOffset(Long partitionOffset) { - this.partitionOffset = partitionOffset; - } - - public Long getLag() { - return lag; - } - - public void setLag(Long lag) { - this.lag = lag; - } - - @Override - public String toString() { - return "ConsumerGroupDetailVO{" + - "topicName='" + topicName + '\'' + - ", 
consumerGroup='" + consumerGroup + '\'' + - ", location='" + location + '\'' + - ", partitionId=" + partitionId + - ", clientId='" + clientId + '\'' + - ", consumeOffset=" + consumeOffset + - ", partitionOffset=" + partitionOffset + - ", lag=" + lag + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/consumer/ConsumerGroupSummaryVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/consumer/ConsumerGroupSummaryVO.java deleted file mode 100644 index 0049468d..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/consumer/ConsumerGroupSummaryVO.java +++ /dev/null @@ -1,67 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 21/01/14 - */ -@ApiModel(value = "Topic消费组概要信息") -public class ConsumerGroupSummaryVO { - @ApiModelProperty(value = "消费组名称") - private String consumerGroup; - - @ApiModelProperty(value = "使用的AppID") - private String appIds; - - @ApiModelProperty(value = "offset存储位置") - private String location; - - @ApiModelProperty(value = "消费组状态") - private String state; - - public String getConsumerGroup() { - return consumerGroup; - } - - public void setConsumerGroup(String consumerGroup) { - this.consumerGroup = consumerGroup; - } - - public String getAppIds() { - return appIds; - } - - public void setAppIds(String appIds) { - this.appIds = appIds; - } - - public String getLocation() { - return location; - } - - public void setLocation(String location) { - this.location = location; - } - - public String getState() { - return state; - } - - public void setState(String state) { - this.state = state; - } - - @Override - public String toString() { - return "ConsumerGroupSummaryVO{" + - "consumerGroup='" + consumerGroup + '\'' + - ", appIds=" + appIds + - ", location='" + location + '\'' + - ", state='" + state + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/consumer/ConsumerGroupVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/consumer/ConsumerGroupVO.java deleted file mode 100644 index 9c09eb6f..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/consumer/ConsumerGroupVO.java +++ /dev/null @@ -1,54 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.consumer; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - - -/** - * @author zhongyuankai - * @date 20/4/8 - */ -@ApiModel(value = "消费组消费Topic信息") -public class ConsumerGroupVO { - @ApiModelProperty(value = "消费组名称") - private String consumerGroup; - - @ApiModelProperty(value = "使用的AppID") - private String appIds; - - @ApiModelProperty(value = "offset存储位置") - private String location; - - public String getConsumerGroup() { - return consumerGroup; - } - - public void setConsumerGroup(String consumerGroup) { - this.consumerGroup = consumerGroup; - } - - public String getAppIds() { - return appIds; - } - - public void setAppIds(String appIds) { - this.appIds = appIds; - } - - public String getLocation() { - return location; - } - - public void setLocation(String location) { - this.location = location; - } - - @Override - public String toString() { - return "ConsumerGroupVO{" + 
- ", consumerGroup='" + consumerGroup + '\'' + - ", appIds='" + appIds + '\'' + - ", location='" + location + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/OrderResultVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/OrderResultVO.java deleted file mode 100644 index 67686acd..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/OrderResultVO.java +++ /dev/null @@ -1,38 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.order; - -import com.xiaojukeji.kafka.manager.common.entity.Result; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -@ApiModel(value = "工单审批结果") -public class OrderResultVO { - @ApiModelProperty(value = "工单ID") - private Long id; - - @ApiModelProperty(value = "审批结果") - private Result result; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Result getResult() { - return result; - } - - public void setResult(Result result) { - this.result = result; - } - - @Override - public String toString() { - return "OrderResultVO{" + - "id=" + id + - ", result=" + result + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/OrderTypeVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/OrderTypeVO.java deleted file mode 100644 index e75f8e8b..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/OrderTypeVO.java +++ /dev/null @@ -1,47 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.order; - -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zhongyuankai - * @date 20/4/24 - */ -public class OrderTypeVO { - @ApiModelProperty(value = "工单类型") - private Integer type; - - @ApiModelProperty(value = "描述信息") - private String message; - - public Integer getType() { - return type; - } - - public void setType(Integer type) { - this.type = type; - } - - public String getMessage() { - return message; - } - - public void setMessage(String message) { - this.message = message; - } - - public OrderTypeVO(Integer type, String message) { - this.type = type; - this.message = message; - } - - public OrderTypeVO() { - } - - @Override - public String toString() { - return "OrderTypeVO{" + - "type=" + type + - ", message='" + message + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/OrderVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/OrderVO.java deleted file mode 100644 index e8b05779..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/OrderVO.java +++ /dev/null @@ -1,112 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.order; - -import io.swagger.annotations.ApiModelProperty; - -import java.util.Date; - -/** - * @author zhongyuankai - * @date 20/4/21 - */ -public class OrderVO { - @ApiModelProperty(value = "工单ID") - private Long id; - - @ApiModelProperty(value = "工单类型, 0:topics, 1:apps, 2:quotas, 3:authorities, 4:clusters") - private Integer type; - - @ApiModelProperty(value = "工单标题") - private String title; - - @ApiModelProperty(value = "申请人") - private String applicant; - - 
@ApiModelProperty(value = "描述信息") - private String description; - - @ApiModelProperty(value = "工单状态, 0:待审批, 1:通过, 2:拒绝, 3:取消") - private Integer status; - - @ApiModelProperty(value = "申请时间") - private Date gmtCreate; - - @ApiModelProperty(value = "审核时间") - private Date gmtHandle; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Integer getType() { - return type; - } - - public void setType(Integer type) { - this.type = type; - } - - public String getTitle() { - return title; - } - - public void setTitle(String title) { - this.title = title; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtHandle() { - return gmtHandle; - } - - public void setGmtHandle(Date gmtHandle) { - this.gmtHandle = gmtHandle; - } - - public String getApplicant() { - return applicant; - } - - public void setApplicant(String applicant) { - this.applicant = applicant; - } - - @Override - public String toString() { - return "OrderVO{" + - "id=" + id + - ", type=" + type + - ", title='" + title + '\'' + - ", applicant='" + applicant + '\'' + - ", description='" + description + '\'' + - ", status=" + status + - ", gmtTime=" + gmtCreate + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/detail/OrderDetailBaseVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/detail/OrderDetailBaseVO.java deleted file mode 100644 index 9d5e5623..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/order/detail/OrderDetailBaseVO.java +++ /dev/null @@ -1,153 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.order.detail; - -import com.xiaojukeji.kafka.manager.common.entity.vo.common.AccountVO; -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.Date; -import java.util.List; - -/** - * @author zengqiao - * @date 20/4/21 - */ -@ApiModel(description = "工单详情类") -public class OrderDetailBaseVO { - @ApiModelProperty(value = "工单ID") - private Long id; - - @ApiModelProperty(value = "工单类型") - private Integer type; - - @ApiModelProperty(value = "工单标题") - private String title; - - @ApiModelProperty(value = "申请人") - private AccountVO applicant; - - @ApiModelProperty(value = "申请时间") - private Date gmtCreate; - - @ApiModelProperty(value = "审批人列表, 状态为未处理时返回的是审批人, 状态为处理完成时返回的是审批的人") - private List approverList; - - @ApiModelProperty(value = "审批时间") - private Date gmtHandle; - - @ApiModelProperty(value = "审批审批意见") - private String opinion; - - @ApiModelProperty(value = "工单状态, 0:待审批, 1:通过, 2:拒绝, 3:取消") - private Integer status; - - @ApiModelProperty(value = "备注") - private String description; - - @ApiModelProperty(value = "工单明细") - private T detail; - - public Integer getType() { - return type; - } - - public void setType(Integer type) { - this.type = type; - } - - public String getTitle() { - return title; - } - - public void setTitle(String title) { - this.title = title; - } - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - 
} - - public AccountVO getApplicant() { - return applicant; - } - - public void setApplicant(AccountVO applicant) { - this.applicant = applicant; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public List getApproverList() { - return approverList; - } - - public void setApproverList(List approverList) { - this.approverList = approverList; - } - - public Date getGmtHandle() { - return gmtHandle; - } - - public void setGmtHandle(Date gmtHandle) { - this.gmtHandle = gmtHandle; - } - - public String getOpinion() { - return opinion; - } - - public void setOpinion(String opinion) { - this.opinion = opinion; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public T getDetail() { - return detail; - } - - public void setDetail(T detail) { - this.detail = detail; - } - - @Override - public String toString() { - return "OrderDetailBaseVO{" + - "id=" + id + - ", type=" + type + - ", title='" + title + '\'' + - ", applicant=" + applicant + - ", gmtCreate=" + gmtCreate + - ", approverList=" + approverList + - ", gmtHandle=" + gmtHandle + - ", opinion='" + opinion + '\'' + - ", status=" + status + - ", description='" + description + '\'' + - ", detail=" + detail + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicAuthorizedAppVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicAuthorizedAppVO.java deleted file mode 100644 index 4979e338..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicAuthorizedAppVO.java +++ /dev/null @@ -1,125 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zhongyuankai - * @date 20/4/8 - */ -@ApiModel(value = "TopicApp信息") -public class TopicAuthorizedAppVO { - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "应用id") - private String appId; - - @ApiModelProperty(value = "应用名称") - private String appName; - - @ApiModelProperty(value = "负责人") - private String appPrincipals; - - @ApiModelProperty(value = "发送Quota(B/s)") - private Long produceQuota; - - @ApiModelProperty(value = "消费Quota(B/s)") - private Long consumerQuota; - - @ApiModelProperty(value = "生产被限流") - private Boolean produceThrottled; - - @ApiModelProperty(value = "消费被限流") - private Boolean fetchThrottled; - - @ApiModelProperty(value = "权限, 0:无权限, 1:可消费, 2:可发送, 3:可消费发送, 4:可管理") - private Integer access; - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public String getAppPrincipals() { - return appPrincipals; - } - - public void setAppPrincipals(String appPrincipals) { - this.appPrincipals = appPrincipals; - } - - public Long getProduceQuota() { - 
return produceQuota; - } - - public void setProduceQuota(Long produceQuota) { - this.produceQuota = produceQuota; - } - - public Long getConsumerQuota() { - return consumerQuota; - } - - public void setConsumerQuota(Long consumerQuota) { - this.consumerQuota = consumerQuota; - } - - public Boolean getProduceThrottled() { - return produceThrottled; - } - - public void setProduceThrottled(Boolean produceThrottled) { - this.produceThrottled = produceThrottled; - } - - public Boolean getFetchThrottled() { - return fetchThrottled; - } - - public void setFetchThrottled(Boolean fetchThrottled) { - this.fetchThrottled = fetchThrottled; - } - - public Integer getAccess() { - return access; - } - - public void setAccess(Integer access) { - this.access = access; - } - - @Override - public String toString() { - return "TopicAppVO{" + - "topicName='" + topicName + '\'' + - ", appId='" + appId + '\'' + - ", appName='" + appName + '\'' + - ", appPrincipals='" + appPrincipals + '\'' + - ", produceQuota=" + produceQuota + - ", consumerQuota=" + consumerQuota + - ", produceThrottled=" + produceThrottled + - ", fetchThrottled=" + fetchThrottled + - ", access=" + access + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicBasicVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicBasicVO.java deleted file mode 100644 index b200a150..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicBasicVO.java +++ /dev/null @@ -1,200 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * Topic的基本信息 - * @author zengqiao - * @date 19/4/1 - */ -@ApiModel(description = "Topic基本信息") -public class TopicBasicVO { - @ApiModelProperty(value = "集群id") - private Long clusterId; - - @ApiModelProperty(value = "应用id") - private String appId; - - @ApiModelProperty(value = "应用名称") - private String appName; - - @ApiModelProperty(value = "分区数") - private Integer partitionNum; - - @ApiModelProperty(value = "副本数") - private Integer replicaNum; - - @ApiModelProperty(value = "负责人") - private String principals; - - @ApiModelProperty(value = "存储时间(ms)") - private Long retentionTime; - - @ApiModelProperty(value = "单分区数据保存大小(Byte)") - private Long retentionBytes; - - @ApiModelProperty(value = "创建时间") - private Long createTime; - - @ApiModelProperty(value = "修改时间") - private Long modifyTime; - - @ApiModelProperty(value = "健康分") - private Integer score; - - @ApiModelProperty(value = "压缩格式") - private String topicCodeC; - - @ApiModelProperty(value = "备注") - private String description; - - @ApiModelProperty(value = "集群地址") - private String bootstrapServers; - - @ApiModelProperty(value = "所属region") - private List regionNameList; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public Integer getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Integer partitionNum) { - this.partitionNum = partitionNum; - } - - public Integer getReplicaNum() { - return 
replicaNum; - } - - public void setReplicaNum(Integer replicaNum) { - this.replicaNum = replicaNum; - } - - public String getPrincipals() { - return principals; - } - - public void setPrincipals(String principals) { - this.principals = principals; - } - - public Long getRetentionTime() { - return retentionTime; - } - - public void setRetentionTime(Long retentionTime) { - this.retentionTime = retentionTime; - } - - public Long getRetentionBytes() { - return retentionBytes; - } - - public void setRetentionBytes(Long retentionBytes) { - this.retentionBytes = retentionBytes; - } - - public Long getCreateTime() { - return createTime; - } - - public void setCreateTime(Long createTime) { - this.createTime = createTime; - } - - public Long getModifyTime() { - return modifyTime; - } - - public void setModifyTime(Long modifyTime) { - this.modifyTime = modifyTime; - } - - public Integer getScore() { - return score; - } - - public void setScore(Integer score) { - this.score = score; - } - - public String getTopicCodeC() { - return topicCodeC; - } - - public void setTopicCodeC(String topicCodeC) { - this.topicCodeC = topicCodeC; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public String getBootstrapServers() { - return bootstrapServers; - } - - public void setBootstrapServers(String bootstrapServers) { - this.bootstrapServers = bootstrapServers; - } - - public List getRegionNameList() { - return regionNameList; - } - - public void setRegionNameList(List regionNameList) { - this.regionNameList = regionNameList; - } - - @Override - public String toString() { - return "TopicBasicVO{" + - "clusterId=" + clusterId + - ", appId='" + appId + '\'' + - ", appName='" + appName + '\'' + - ", partitionNum=" + partitionNum + - ", replicaNum=" + replicaNum + - ", principals='" + principals + '\'' + - ", retentionTime=" + retentionTime + - ", retentionBytes=" + retentionBytes + - ", createTime=" + createTime + - ", modifyTime=" + modifyTime + - ", score=" + score + - ", topicCodeC='" + topicCodeC + '\'' + - ", description='" + description + '\'' + - ", bootstrapServers='" + bootstrapServers + '\'' + - ", regionNameList=" + regionNameList + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicBillVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicBillVO.java deleted file mode 100644 index 64e62adb..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicBillVO.java +++ /dev/null @@ -1,53 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/5/11 - */ -@ApiModel(value = "Topic账单") -public class TopicBillVO { - @ApiModelProperty(value = "配额") - private Long quota; - - @ApiModelProperty(value = "金额") - private Double cost; - - @ApiModelProperty(value = "月份") - private String gmtMonth; - - public Long getQuota() { - return quota; - } - - public void setQuota(Long quota) { - this.quota = quota; - } - - public Double getCost() { - return cost; - } - - public void setCost(Double cost) { - this.cost = cost; - } - - public String getGmtMonth() { - return gmtMonth; - } - - public void setGmtMonth(String gmtMonth) { - this.gmtMonth = gmtMonth; - } - - @Override - public 
String toString() { - return "TopicBillVO{" + - "quota=" + quota + - ", cost=" + cost + - ", gmtMonth='" + gmtMonth + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicBrokerRequestTimeVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicBrokerRequestTimeVO.java deleted file mode 100644 index 486dcc61..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicBrokerRequestTimeVO.java +++ /dev/null @@ -1,39 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -/** - * author: mrazkonglingxu - * Date: 2020/12/7 - * Time: 7:40 下午 - */ -public class TopicBrokerRequestTimeVO { - - private Long clusterId; - - private Integer brokerId; - - private TopicRequestTimeDetailVO brokerRequestTime; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public TopicRequestTimeDetailVO getBrokerRequestTime() { - return brokerRequestTime; - } - - public void setBrokerRequestTime(TopicRequestTimeDetailVO brokerRequestTime) { - this.brokerRequestTime = brokerRequestTime; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicConnectionVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicConnectionVO.java deleted file mode 100644 index e6d65b91..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicConnectionVO.java +++ /dev/null @@ -1,101 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zhongyuankai,zengqiao - * @date 20/4/8 - */ -@ApiModel(value = "Topic连接信息") -public class TopicConnectionVO { - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "topic名称") - private String topicName; - - @ApiModelProperty(value = "AppID") - private String appId; - - @ApiModelProperty(value = "ip") - private String ip; - - @ApiModelProperty(value = "主机名") - private String hostname; - - @ApiModelProperty(value = "客户端类型[consume|produce]") - private String clientType; - - @ApiModelProperty(value = "客户端版本") - private String clientVersion; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getIp() { - return ip; - } - - public void setIp(String ip) { - this.ip = ip; - } - - public String getHostname() { - return hostname; - } - - public void setHostname(String hostname) { - this.hostname = hostname; - } - - public String getClientType() { - return clientType; - } - - public void setClientType(String clientType) { - this.clientType = clientType; - } - - public String getClientVersion() { - return clientVersion; - } - - public void setClientVersion(String 
clientVersion) { - this.clientVersion = clientVersion; - } - - @Override - public String toString() { - return "TopicConnectionVO{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", appId='" + appId + '\'' + - ", ip='" + ip + '\'' + - ", hostname='" + hostname + '\'' + - ", clientType='" + clientType + '\'' + - ", clientVersion='" + clientVersion + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicDataSampleVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicDataSampleVO.java deleted file mode 100644 index d4d941f8..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicDataSampleVO.java +++ /dev/null @@ -1,29 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 19/4/3 - */ -@ApiModel(value = "Topic采样数据") -public class TopicDataSampleVO { - @ApiModelProperty(value = "Topic数据") - private String value; - - public String getValue() { - return value; - } - - public void setValue(String value) { - this.value = value; - } - - @Override - public String toString() { - return "TopicSampleVO{" + - "value='" + value + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicDeleteVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicDeleteVO.java deleted file mode 100644 index 5f7069ba..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicDeleteVO.java +++ /dev/null @@ -1,74 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.io.Serializable; - -/** - * @author zengqiao - * @date 19/7/8 - */ -@ApiModel(value = "Topic删除结果") -public class TopicDeleteVO implements Serializable { - @ApiModelProperty(value = "集群Id") - private Long clusterId; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "删除信息") - private String message; - - @ApiModelProperty(value = "删除code") - private Integer code; - - public TopicDeleteVO(Long clusterId, String topicName, String message, Integer code) { - this.clusterId = clusterId; - this.topicName = topicName; - this.message = message; - this.code = code; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getMessage() { - return message; - } - - public void setMessage(String message) { - this.message = message; - } - - public Integer getCode() { - return code; - } - - public void setCode(Integer code) { - this.code = code; - } - - @Override - public String toString() { - return "TopicDeleteInfoVO{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", message='" + message + '\'' + - ", code='" + code + '\'' + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicDetailVO.java 
b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicDetailVO.java deleted file mode 100644 index 3708eaf5..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicDetailVO.java +++ /dev/null @@ -1,120 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; - -import java.util.List; - -/** - * @author zengqiao - * @date 19/7/12 - */ -@ApiModel(value = "TopicDetailVO", description = "Topic详情") -public class TopicDetailVO { - private Long clusterId; - - private String topicName; - - private List principalList; - - private String description; - - private Long retentionTime; - - private String properties; - - private Integer replicaNum; - - private Integer partitionNum; - - private Long gmtCreate; - - private Long gmtModify; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public List getPrincipalList() { - return principalList; - } - - public void setPrincipalList(List principalList) { - this.principalList = principalList; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Long getRetentionTime() { - return retentionTime; - } - - public void setRetentionTime(Long retentionTime) { - this.retentionTime = retentionTime; - } - - public String getProperties() { - return properties; - } - - public void setProperties(String properties) { - this.properties = properties; - } - - public void setReplicaNum(Integer replicaNum) { - this.replicaNum = replicaNum; - } - - public void setPartitionNum(Integer partitionNum) { - this.partitionNum = partitionNum; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Long getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Long gmtModify) { - this.gmtModify = gmtModify; - } - - @Override - public String toString() { - return "TopicDetailVO{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", principalList=" + principalList + - ", description='" + description + '\'' + - ", retentionTime=" + retentionTime + - ", properties='" + properties + '\'' + - ", replicaNum=" + replicaNum + - ", partitionNum=" + partitionNum + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicExpiredVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicExpiredVO.java deleted file mode 100644 index 79fdcfd3..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicExpiredVO.java +++ /dev/null @@ -1,101 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/3/31 - */ -@ApiModel(value = "过期Topic") -public class TopicExpiredVO { - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = 
"集群名称") - private String clusterName; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "AppID") - private String appId; - - @ApiModelProperty(value = "App名称") - private String appName; - - @ApiModelProperty(value = "App负责人") - private String appPrincipals; - - @ApiModelProperty(value = "消费连接个数") - private Integer fetchConnectionNum; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public String getAppPrincipals() { - return appPrincipals; - } - - public void setAppPrincipals(String appPrincipals) { - this.appPrincipals = appPrincipals; - } - - public Integer getFetchConnectionNum() { - return fetchConnectionNum; - } - - public void setFetchConnectionNum(Integer fetchConnectionNum) { - this.fetchConnectionNum = fetchConnectionNum; - } - - @Override - public String toString() { - return "TopicExpiredVO{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", appId='" + appId + '\'' + - ", appName='" + appName + '\'' + - ", appPrincipals='" + appPrincipals + '\'' + - ", fetchConnectionNum=" + fetchConnectionNum + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicMetricVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicMetricVO.java deleted file mode 100644 index 1e5b6a46..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicMetricVO.java +++ /dev/null @@ -1,149 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author huangyiminghappy@163.com - * @date 20/4/2 - */ -@ApiModel(value = "Topic流量信息") -public class TopicMetricVO { - @ApiModelProperty(value = "每秒流入消息数") - private Object messagesInPerSec; - - @ApiModelProperty(value = "每秒流入字节数") - private Object bytesInPerSec; - - @ApiModelProperty(value = "每秒流出字节数") - private Object bytesOutPerSec; - - @ApiModelProperty(value = "每秒拒绝字节数") - private Object bytesRejectedPerSec; - - @ApiModelProperty(value = "每秒请求数") - private Object totalProduceRequestsPerSec; - - @ApiModelProperty(value = "appId维度每秒流入消息数") - private Object appIdMessagesInPerSec; - - @ApiModelProperty(value = "appId维度每秒流入字节数") - private Object appIdBytesInPerSec; - - @ApiModelProperty(value = "appId维度每秒流出字节数") - private Object appIdBytesOutPerSec; - - @ApiModelProperty(value = "produce限流") - private Boolean produceThrottled; - - @ApiModelProperty(value = "consume限流") - private Boolean consumeThrottled; - - @ApiModelProperty(value = "创建时间") - private Long gmtCreate; - - public Object getMessagesInPerSec() { - return messagesInPerSec; - } - - public void setMessagesInPerSec(Object messagesInPerSec) { - 
this.messagesInPerSec = messagesInPerSec; - } - - public Object getBytesInPerSec() { - return bytesInPerSec; - } - - public void setBytesInPerSec(Object bytesInPerSec) { - this.bytesInPerSec = bytesInPerSec; - } - - public Object getBytesOutPerSec() { - return bytesOutPerSec; - } - - public void setBytesOutPerSec(Object bytesOutPerSec) { - this.bytesOutPerSec = bytesOutPerSec; - } - - public Object getBytesRejectedPerSec() { - return bytesRejectedPerSec; - } - - public void setBytesRejectedPerSec(Object bytesRejectedPerSec) { - this.bytesRejectedPerSec = bytesRejectedPerSec; - } - - public Object getTotalProduceRequestsPerSec() { - return totalProduceRequestsPerSec; - } - - public void setTotalProduceRequestsPerSec(Object totalProduceRequestsPerSec) { - this.totalProduceRequestsPerSec = totalProduceRequestsPerSec; - } - - public Object getAppIdMessagesInPerSec() { - return appIdMessagesInPerSec; - } - - public void setAppIdMessagesInPerSec(Object appIdMessagesInPerSec) { - this.appIdMessagesInPerSec = appIdMessagesInPerSec; - } - - public Object getAppIdBytesInPerSec() { - return appIdBytesInPerSec; - } - - public void setAppIdBytesInPerSec(Object appIdBytesInPerSec) { - this.appIdBytesInPerSec = appIdBytesInPerSec; - } - - public Object getAppIdBytesOutPerSec() { - return appIdBytesOutPerSec; - } - - public void setAppIdBytesOutPerSec(Object appIdBytesOutPerSec) { - this.appIdBytesOutPerSec = appIdBytesOutPerSec; - } - - public Boolean getProduceThrottled() { - return produceThrottled; - } - - public void setProduceThrottled(Boolean produceThrottled) { - this.produceThrottled = produceThrottled; - } - - public Boolean getConsumeThrottled() { - return consumeThrottled; - } - - public void setConsumeThrottled(Boolean consumeThrottled) { - this.consumeThrottled = consumeThrottled; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - @Override - public String toString() { - return "TopicMetricVO{" + - "messagesInPerSec=" + messagesInPerSec + - ", bytesInPerSec=" + bytesInPerSec + - ", bytesOutPerSec=" + bytesOutPerSec + - ", bytesRejectedPerSec=" + bytesRejectedPerSec + - ", totalProduceRequestsPerSec=" + totalProduceRequestsPerSec + - ", appIdMessagesInPerSec=" + appIdMessagesInPerSec + - ", appIdBytesInPerSec=" + appIdBytesInPerSec + - ", appIdBytesOutPerSec=" + appIdBytesOutPerSec + - ", produceThrottled=" + produceThrottled + - ", consumeThrottled=" + consumeThrottled + - ", gmtCreate=" + gmtCreate + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicMineVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicMineVO.java deleted file mode 100644 index 2e4665a1..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicMineVO.java +++ /dev/null @@ -1,136 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/3/31 - */ -@ApiModel(description = "Topic信息") -public class TopicMineVO { - @ApiModelProperty(value = "逻辑集群ID") - private Long clusterId; - - @ApiModelProperty(value = "逻辑集群名称") - private String clusterName; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "流入流量(B/s)") - private Object bytesIn; - - 
@ApiModelProperty(value = "流出流量(B/s)") - private Object bytesOut; - - @ApiModelProperty(value = "AppID") - private String appId; - - @ApiModelProperty(value = "App名称") - private String appName; - - @ApiModelProperty(value = "App负责人") - private String appPrincipals; - - @ApiModelProperty(value = "状态, 0:无权限, 1:可消费 2:可发送 3:可消费发送 4:可管理") - private Integer access; - - @ApiModelProperty(value = "备注") - private String description; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Object getBytesIn() { - return bytesIn; - } - - public void setBytesIn(Object bytesIn) { - this.bytesIn = bytesIn; - } - - public Object getBytesOut() { - return bytesOut; - } - - public void setBytesOut(Object bytesOut) { - this.bytesOut = bytesOut; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public String getAppPrincipals() { - return appPrincipals; - } - - public void setAppPrincipals(String appPrincipals) { - this.appPrincipals = appPrincipals; - } - - public Integer getAccess() { - return access; - } - - public void setAccess(Integer access) { - this.access = access; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - @Override - public String toString() { - return "TopicMineVO{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", bytesIn=" + bytesIn + - ", bytesOut=" + bytesOut + - ", appId='" + appId + '\'' + - ", appName='" + appName + '\'' + - ", appPrincipals='" + appPrincipals + '\'' + - ", access=" + access + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicMyAppVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicMyAppVO.java deleted file mode 100644 index c89ba73d..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicMyAppVO.java +++ /dev/null @@ -1,89 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/9/16 - */ -@ApiModel(value = "我的应用对Topic的信息") -public class TopicMyAppVO { - @ApiModelProperty(value = "应用id") - private String appId; - - @ApiModelProperty(value = "应用名称") - private String appName; - - @ApiModelProperty(value = "负责人") - private String appPrincipals; - - @ApiModelProperty(value = "发送Quota(B/s)") - private Long produceQuota; - - @ApiModelProperty(value = "消费Quota(B/s)") - private Long consumerQuota; - - @ApiModelProperty(value = "权限, 0:无权限, 1:可消费, 2:可发送, 3:可消费发送") - private Integer access; - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getAppName() { - return 
appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public String getAppPrincipals() { - return appPrincipals; - } - - public void setAppPrincipals(String appPrincipals) { - this.appPrincipals = appPrincipals; - } - - public Long getProduceQuota() { - return produceQuota; - } - - public void setProduceQuota(Long produceQuota) { - this.produceQuota = produceQuota; - } - - public Long getConsumerQuota() { - return consumerQuota; - } - - public void setConsumerQuota(Long consumerQuota) { - this.consumerQuota = consumerQuota; - } - - public Integer getAccess() { - return access; - } - - public void setAccess(Integer access) { - this.access = access; - } - - @Override - public String toString() { - return "TopicMyAppVO{" + - "appId='" + appId + '\'' + - ", appName='" + appName + '\'' + - ", appPrincipals='" + appPrincipals + '\'' + - ", produceQuota=" + produceQuota + - ", consumerQuota=" + consumerQuota + - ", access=" + access + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicOffsetVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicOffsetVO.java deleted file mode 100644 index 9313ef30..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicOffsetVO.java +++ /dev/null @@ -1,85 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author huangyiminghappy@163.com - * @date 2019-03-26 - */ -@ApiModel(value = "TopicOffset信息") -public class TopicOffsetVO { - @ApiModelProperty(value = "集群id") - private Long clusterId; - - @ApiModelProperty(value = "topic名字") - private String topicName; - - @ApiModelProperty(value = "分区编号") - private Integer partitionId; - - @ApiModelProperty(value = "分区offset") - private Long offset; - - @ApiModelProperty(value = "该offset对应的时间") - private Long timestamp; - - public TopicOffsetVO(Long clusterId, String topicName, Integer partitionId, Long offset, Long timestamp) { - this.clusterId = clusterId; - this.topicName = topicName; - this.partitionId = partitionId; - this.offset = offset; - this.timestamp = timestamp; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getPartitionId() { - return partitionId; - } - - public void setPartitionId(Integer partitionId) { - this.partitionId = partitionId; - } - - public Long getOffset() { - return offset; - } - - public void setOffset(Long offset) { - this.offset = offset; - } - - public Long getTimestamp() { - return timestamp; - } - - public void setTimestamp(Long timestamp) { - this.timestamp = timestamp; - } - - @Override - public String toString() { - return "TopicOffsetVO{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", partitionId=" + partitionId + - ", offset=" + offset + - ", timestamp=" + timestamp + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicPartitionVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicPartitionVO.java deleted 
file mode 100644 index a66771e5..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicPartitionVO.java +++ /dev/null @@ -1,139 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author arthur - * @date 2017/6/6. - */ -@ApiModel(value = "分区信息") -public class TopicPartitionVO { - @ApiModelProperty(value = "分区ID") - private Integer partitionId; - - @ApiModelProperty(value = "起始偏移") - private Long beginningOffset; - - @ApiModelProperty(value = "结尾偏移") - private Long endOffset; - - @ApiModelProperty(value = "消息条数") - private Long msgNum; - - @ApiModelProperty(value = "Leader副本") - private Integer leaderBrokerId; - - @ApiModelProperty(value = "首选副本") - private Integer preferredBrokerId; - - @ApiModelProperty(value = "replicas") - private List replicaBrokerIdList; - - @ApiModelProperty(value = "ISR") - private List isrBrokerIdList; - - @ApiModelProperty(value = "True:未同步, False:已同步") - private Boolean underReplicated; - - @ApiModelProperty(value = "Leader副本的大小(B)") - private Long logSize; - - public Integer getPartitionId() { - return partitionId; - } - - public void setPartitionId(Integer partitionId) { - this.partitionId = partitionId; - } - - public Long getBeginningOffset() { - return beginningOffset; - } - - public void setBeginningOffset(Long beginningOffset) { - this.beginningOffset = beginningOffset; - } - - public Long getEndOffset() { - return endOffset; - } - - public void setEndOffset(Long endOffset) { - this.endOffset = endOffset; - } - - public Long getMsgNum() { - return msgNum; - } - - public void setMsgNum(Long msgNum) { - this.msgNum = msgNum; - } - - public Integer getLeaderBrokerId() { - return leaderBrokerId; - } - - public void setLeaderBrokerId(Integer leaderBrokerId) { - this.leaderBrokerId = leaderBrokerId; - } - - public Integer getPreferredBrokerId() { - return preferredBrokerId; - } - - public void setPreferredBrokerId(Integer preferredBrokerId) { - this.preferredBrokerId = preferredBrokerId; - } - - public List getReplicaBrokerIdList() { - return replicaBrokerIdList; - } - - public void setReplicaBrokerIdList(List replicaBrokerIdList) { - this.replicaBrokerIdList = replicaBrokerIdList; - } - - public List getIsrBrokerIdList() { - return isrBrokerIdList; - } - - public void setIsrBrokerIdList(List isrBrokerIdList) { - this.isrBrokerIdList = isrBrokerIdList; - } - - public Boolean getUnderReplicated() { - return underReplicated; - } - - public void setUnderReplicated(Boolean underReplicated) { - this.underReplicated = underReplicated; - } - - public Long getLogSize() { - return logSize; - } - - public void setLogSize(Long logSize) { - this.logSize = logSize; - } - - @Override - public String toString() { - return "TopicPartitionVO{" + - "partitionId=" + partitionId + - ", beginningOffset=" + beginningOffset + - ", endOffset=" + endOffset + - ", msgNum=" + msgNum + - ", leaderBrokerId=" + leaderBrokerId + - ", preferredBrokerId=" + preferredBrokerId + - ", replicaBrokerIdList=" + replicaBrokerIdList + - ", isrBrokerIdList=" + isrBrokerIdList + - ", underReplicated=" + underReplicated + - ", logSize=" + logSize + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicRequestTimeDetailVO.java 
b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicRequestTimeDetailVO.java deleted file mode 100644 index 346e2383..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicRequestTimeDetailVO.java +++ /dev/null @@ -1,125 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/4/8 - */ -@ApiModel(value = "Topic请求耗时详情") -public class TopicRequestTimeDetailVO { - @ApiModelProperty(value = "请求耗时类型") - private String requestTimeType; - - @ApiModelProperty(value = "responseQueueTimeMs") - private Object responseQueueTimeMs; - - @ApiModelProperty(value = "localTimeMs") - private Object localTimeMs; - - @ApiModelProperty(value = "requestQueueTimeMs") - private Object requestQueueTimeMs; - - @ApiModelProperty(value = "throttleTimeMs") - private Object throttleTimeMs; - - @ApiModelProperty(value = "responseSendTimeMs") - private Object responseSendTimeMs; - - @ApiModelProperty(value = "remoteTimeMs") - private Object remoteTimeMs; - - @ApiModelProperty(value = "totalTimeMs") - private Object totalTimeMs; - - private List brokerRequestTimeList; - - public String getRequestTimeType() { - return requestTimeType; - } - - public void setRequestTimeType(String requestTimeType) { - this.requestTimeType = requestTimeType; - } - - public Object getResponseQueueTimeMs() { - return responseQueueTimeMs; - } - - public void setResponseQueueTimeMs(Object responseQueueTimeMs) { - this.responseQueueTimeMs = responseQueueTimeMs; - } - - public Object getLocalTimeMs() { - return localTimeMs; - } - - public void setLocalTimeMs(Object localTimeMs) { - this.localTimeMs = localTimeMs; - } - - public Object getRequestQueueTimeMs() { - return requestQueueTimeMs; - } - - public void setRequestQueueTimeMs(Object requestQueueTimeMs) { - this.requestQueueTimeMs = requestQueueTimeMs; - } - - public Object getThrottleTimeMs() { - return throttleTimeMs; - } - - public void setThrottleTimeMs(Object throttleTimeMs) { - this.throttleTimeMs = throttleTimeMs; - } - - public Object getResponseSendTimeMs() { - return responseSendTimeMs; - } - - public void setResponseSendTimeMs(Object responseSendTimeMs) { - this.responseSendTimeMs = responseSendTimeMs; - } - - public Object getRemoteTimeMs() { - return remoteTimeMs; - } - - public void setRemoteTimeMs(Object remoteTimeMs) { - this.remoteTimeMs = remoteTimeMs; - } - - public Object getTotalTimeMs() { - return totalTimeMs; - } - - public void setTotalTimeMs(Object totalTimeMs) { - this.totalTimeMs = totalTimeMs; - } - - public List getBrokerRequestTimeList() { - return brokerRequestTimeList; - } - - public void setBrokerRequestTimeList(List brokerRequestTimeList) { - this.brokerRequestTimeList = brokerRequestTimeList; - } - - @Override - public String toString() { - return "TopicRequestTimeDetailVO{" + - "requestTimeType='" + requestTimeType + '\'' + - ", responseQueueTimeMs=" + responseQueueTimeMs + - ", localTimeMs=" + localTimeMs + - ", requestQueueTimeMs=" + requestQueueTimeMs + - ", throttleTimeMs=" + throttleTimeMs + - ", responseSendTimeMs=" + responseSendTimeMs + - ", remoteTimeMs=" + remoteTimeMs + - ", totalTimeMs=" + totalTimeMs + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicRequestTimeVO.java 
b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicRequestTimeVO.java deleted file mode 100644 index dfce225d..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicRequestTimeVO.java +++ /dev/null @@ -1,149 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/7 - */ -@ApiModel(value = "Topic请求耗时信息") -public class TopicRequestTimeVO { - @ApiModelProperty(value = "produce请求平均耗时") - private Object produceRequestTimeMean; - - @ApiModelProperty(value = "produce请求50分位耗时") - private Object produceRequestTime50thPercentile; - - @ApiModelProperty(value = "produce请求75分位耗时") - private Object produceRequestTime75thPercentile; - - @ApiModelProperty(value = "produce请求95分位耗时") - private Object produceRequestTime95thPercentile; - - @ApiModelProperty(value = "produce请求99分位耗时") - private Object produceRequestTime99thPercentile; - - @ApiModelProperty(value = "fetch请求平均耗时") - private Object fetchRequestTimeMean; - - @ApiModelProperty(value = "fetch请求50分位耗时") - private Object fetchRequestTime50thPercentile; - - @ApiModelProperty(value = "fetch请求75分位耗时") - private Object fetchRequestTime75thPercentile; - - @ApiModelProperty(value = "fetch请求95分位耗时") - private Object fetchRequestTime95thPercentile; - - @ApiModelProperty(value = "fetch请求99分位耗时") - private Object fetchRequestTime99thPercentile; - - @ApiModelProperty(value = "创建时间") - private Object gmtCreate; - - public Object getProduceRequestTimeMean() { - return produceRequestTimeMean; - } - - public void setProduceRequestTimeMean(Object produceRequestTimeMean) { - this.produceRequestTimeMean = produceRequestTimeMean; - } - - public Object getProduceRequestTime50thPercentile() { - return produceRequestTime50thPercentile; - } - - public void setProduceRequestTime50thPercentile(Object produceRequestTime50thPercentile) { - this.produceRequestTime50thPercentile = produceRequestTime50thPercentile; - } - - public Object getProduceRequestTime75thPercentile() { - return produceRequestTime75thPercentile; - } - - public void setProduceRequestTime75thPercentile(Object produceRequestTime75thPercentile) { - this.produceRequestTime75thPercentile = produceRequestTime75thPercentile; - } - - public Object getProduceRequestTime95thPercentile() { - return produceRequestTime95thPercentile; - } - - public void setProduceRequestTime95thPercentile(Object produceRequestTime95thPercentile) { - this.produceRequestTime95thPercentile = produceRequestTime95thPercentile; - } - - public Object getProduceRequestTime99thPercentile() { - return produceRequestTime99thPercentile; - } - - public void setProduceRequestTime99thPercentile(Object produceRequestTime99thPercentile) { - this.produceRequestTime99thPercentile = produceRequestTime99thPercentile; - } - - public Object getFetchRequestTimeMean() { - return fetchRequestTimeMean; - } - - public void setFetchRequestTimeMean(Object fetchRequestTimeMean) { - this.fetchRequestTimeMean = fetchRequestTimeMean; - } - - public Object getFetchRequestTime50thPercentile() { - return fetchRequestTime50thPercentile; - } - - public void setFetchRequestTime50thPercentile(Object fetchRequestTime50thPercentile) { - this.fetchRequestTime50thPercentile = fetchRequestTime50thPercentile; - } - - public Object getFetchRequestTime75thPercentile() { - return fetchRequestTime75thPercentile; - } - - public 
void setFetchRequestTime75thPercentile(Object fetchRequestTime75thPercentile) { - this.fetchRequestTime75thPercentile = fetchRequestTime75thPercentile; - } - - public Object getFetchRequestTime95thPercentile() { - return fetchRequestTime95thPercentile; - } - - public void setFetchRequestTime95thPercentile(Object fetchRequestTime95thPercentile) { - this.fetchRequestTime95thPercentile = fetchRequestTime95thPercentile; - } - - public Object getFetchRequestTime99thPercentile() { - return fetchRequestTime99thPercentile; - } - - public void setFetchRequestTime99thPercentile(Object fetchRequestTime99thPercentile) { - this.fetchRequestTime99thPercentile = fetchRequestTime99thPercentile; - } - - public Object getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Object gmtCreate) { - this.gmtCreate = gmtCreate; - } - - @Override - public String toString() { - return "TopicRequestTimeVO{" + - "produceRequestTimeMean=" + produceRequestTimeMean + - ", produceRequestTime50thPercentile=" + produceRequestTime50thPercentile + - ", produceRequestTime75thPercentile=" + produceRequestTime75thPercentile + - ", produceRequestTime95thPercentile=" + produceRequestTime95thPercentile + - ", produceRequestTime99thPercentile=" + produceRequestTime99thPercentile + - ", fetchRequestTimeMean=" + fetchRequestTimeMean + - ", fetchRequestTime50thPercentile=" + fetchRequestTime50thPercentile + - ", fetchRequestTime75thPercentile=" + fetchRequestTime75thPercentile + - ", fetchRequestTime95thPercentile=" + fetchRequestTime95thPercentile + - ", fetchRequestTime99thPercentile=" + fetchRequestTime99thPercentile + - ", gmtCreate=" + gmtCreate + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicStatisticMetricsVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicStatisticMetricsVO.java deleted file mode 100644 index c83c24d2..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicStatisticMetricsVO.java +++ /dev/null @@ -1,33 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/8/14 - */ -@ApiModel(description="Topic流量统计信息") -public class TopicStatisticMetricsVO { - @ApiModelProperty(value="峰值流入流量(B/s)") - private Double peakBytesIn; - - public TopicStatisticMetricsVO(Double peakBytesIn) { - this.peakBytesIn = peakBytesIn; - } - - public Double getPeakBytesIn() { - return peakBytesIn; - } - - public void setPeakBytesIn(Double peakBytesIn) { - this.peakBytesIn = peakBytesIn; - } - - @Override - public String toString() { - return "TopicStatisticMetricsVO{" + - "peakBytesIn=" + peakBytesIn + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicVO.java deleted file mode 100644 index afb99025..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/normal/topic/TopicVO.java +++ /dev/null @@ -1,113 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.normal.topic; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/8 - */ -@ApiModel(value = 
"Topic信息") -public class TopicVO { - @ApiModelProperty(value = "逻辑集群ID") - private Long clusterId; - - @ApiModelProperty(value = "逻辑集群名称") - private String clusterName; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "Topic描述") - private String description; - - @ApiModelProperty(value = "AppID") - private String appId; - - @ApiModelProperty(value = "App名称") - private String appName; - - @ApiModelProperty(value = "App负责人") - private String appPrincipals; - - @ApiModelProperty(value = "需要鉴权, true:是 false:否") - private Boolean needAuth; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public String getAppPrincipals() { - return appPrincipals; - } - - public void setAppPrincipals(String appPrincipals) { - this.appPrincipals = appPrincipals; - } - - public Boolean getNeedAuth() { - return needAuth; - } - - public void setNeedAuth(Boolean needAuth) { - this.needAuth = needAuth; - } - - @Override - public String toString() { - return "TopicVO{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", description='" + description + '\'' + - ", appId='" + appId + '\'' + - ", appName='" + appName + '\'' + - ", appPrincipals='" + appPrincipals + '\'' + - ", needAuth=" + needAuth + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/expert/AnomalyFlowTopicVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/expert/AnomalyFlowTopicVO.java deleted file mode 100644 index d057c913..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/expert/AnomalyFlowTopicVO.java +++ /dev/null @@ -1,101 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.op.expert; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/3/30 - */ -@ApiModel(description = "流量异常Topic") -public class AnomalyFlowTopicVO { - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "集群名称") - private String clusterName; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "bytesIn(B/s)") - private Double bytesIn; - - @ApiModelProperty(value = "bytesIn增加(B/s)") - private Double bytesInIncr; - - @ApiModelProperty(value = "iops(Q/s)") - private Double iops; - - @ApiModelProperty(value = "iops增加(Q/s)") - private Double iopsIncr; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - 
- public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Double getBytesIn() { - return bytesIn; - } - - public void setBytesIn(Double bytesIn) { - this.bytesIn = bytesIn; - } - - public Double getBytesInIncr() { - return bytesInIncr; - } - - public void setBytesInIncr(Double bytesInIncr) { - this.bytesInIncr = bytesInIncr; - } - - public Double getIops() { - return iops; - } - - public void setIops(Double iops) { - this.iops = iops; - } - - public Double getIopsIncr() { - return iopsIncr; - } - - public void setIopsIncr(Double iopsIncr) { - this.iopsIncr = iopsIncr; - } - - @Override - public String toString() { - return "AnomalyFlowTopic{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", bytesIn=" + bytesIn + - ", bytesInIncr=" + bytesInIncr + - ", iops=" + iops + - ", iopsIncr=" + iopsIncr + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/expert/BrokerIdPartitionNumVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/expert/BrokerIdPartitionNumVO.java deleted file mode 100644 index 413aa6a4..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/expert/BrokerIdPartitionNumVO.java +++ /dev/null @@ -1,41 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.op.expert; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/5/15 - */ -@ApiModel(description = "Region热点Topic") -public class BrokerIdPartitionNumVO { - @ApiModelProperty(value = "BrokerId") - private Integer brokeId; - - @ApiModelProperty(value = "分区数") - private Integer partitionNum; - - public Integer getBrokeId() { - return brokeId; - } - - public void setBrokeId(Integer brokeId) { - this.brokeId = brokeId; - } - - public Integer getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Integer partitionNum) { - this.partitionNum = partitionNum; - } - - @Override - public String toString() { - return "BrokerIdPartitionNumVO{" + - "brokeId=" + brokeId + - ", partitionNum=" + partitionNum + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/expert/ExpiredTopicVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/expert/ExpiredTopicVO.java deleted file mode 100644 index c4921259..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/expert/ExpiredTopicVO.java +++ /dev/null @@ -1,111 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.op.expert; - -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/3/30 - */ -public class ExpiredTopicVO { - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "集群名称") - private String clusterName; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "过期天数") - private Integer expiredDay; - - @ApiModelProperty(value = "App名称") - private String appName; - - @ApiModelProperty(value = "AppID") - private String appId; - - @ApiModelProperty(value = "负责人") - private String 
principals; - - @ApiModelProperty(value = "状态, -1:已通知可下线, 0:过期待通知, 1+:已通知待反馈") - private Integer status; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getExpiredDay() { - return expiredDay; - } - - public void setExpiredDay(Integer expiredDay) { - this.expiredDay = expiredDay; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getPrincipals() { - return principals; - } - - public void setPrincipals(String principals) { - this.principals = principals; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - @Override - public String toString() { - return "ExpiredTopicVO{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", expiredDay=" + expiredDay + - ", appName='" + appName + '\'' + - ", appId='" + appId + '\'' + - ", principals='" + principals + '\'' + - ", status=" + status + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/expert/PartitionInsufficientTopicVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/expert/PartitionInsufficientTopicVO.java deleted file mode 100644 index fd135052..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/expert/PartitionInsufficientTopicVO.java +++ /dev/null @@ -1,127 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.op.expert; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/3/30 - */ -@ApiModel(description = "分区不足Topic") -public class PartitionInsufficientTopicVO { - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "集群名称") - private String clusterName; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "Region名称") - private String regionName; - - @ApiModelProperty(value = "Topic所属BrokerId列表") - private List brokerIdList; - - @ApiModelProperty(value = "当前分区数") - private Integer presentPartitionNum; - - @ApiModelProperty(value = "建议分区数") - private Integer suggestedPartitionNum; - - @ApiModelProperty(value = "单分区流量(B/s)") - private Double bytesInPerPartition; - - @ApiModelProperty(value = "今天,昨天,前天的峰值均值流入流量(B/s)") - private List maxAvgBytesInList; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getRegionName() { - 
return regionName; - } - - public void setRegionName(String regionName) { - this.regionName = regionName; - } - - public List getBrokerIdList() { - return brokerIdList; - } - - public void setBrokerIdList(List brokerIdList) { - this.brokerIdList = brokerIdList; - } - - public Integer getPresentPartitionNum() { - return presentPartitionNum; - } - - public void setPresentPartitionNum(Integer presentPartitionNum) { - this.presentPartitionNum = presentPartitionNum; - } - - public Integer getSuggestedPartitionNum() { - return suggestedPartitionNum; - } - - public void setSuggestedPartitionNum(Integer suggestedPartitionNum) { - this.suggestedPartitionNum = suggestedPartitionNum; - } - - public Double getBytesInPerPartition() { - return bytesInPerPartition; - } - - public void setBytesInPerPartition(Double bytesInPerPartition) { - this.bytesInPerPartition = bytesInPerPartition; - } - - public List getMaxAvgBytesInList() { - return maxAvgBytesInList; - } - - public void setMaxAvgBytesInList(List maxAvgBytesInList) { - this.maxAvgBytesInList = maxAvgBytesInList; - } - - @Override - public String toString() { - return "PartitionInsufficientTopicVO{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", regionName='" + regionName + '\'' + - ", brokerIdList=" + brokerIdList + - ", presentPartitionNum=" + presentPartitionNum + - ", suggestedPartitionNum=" + suggestedPartitionNum + - ", bytesInPerPartition=" + bytesInPerPartition + - ", maxAvgBytesInList=" + maxAvgBytesInList + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/expert/RegionHotTopicVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/expert/RegionHotTopicVO.java deleted file mode 100644 index 70e44fff..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/expert/RegionHotTopicVO.java +++ /dev/null @@ -1,85 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.op.expert; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/3/20 - */ -@ApiModel(description = "Region热点Topic") -public class RegionHotTopicVO { - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "集群名称") - private String clusterName; - -// @ApiModelProperty(value = "RegionID") -// private Long regionId; -// -// @ApiModelProperty(value = "Region名称") -// private String regionName; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "失衡详情") - private List detailList; - - @ApiModelProperty(value = "Topic保存时间") - private Long retentionTime; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public List getDetailList() { - return detailList; - } - - public void setDetailList(List detailList) { - this.detailList = detailList; - } - - public Long getRetentionTime() { - return retentionTime; - } - - public void setRetentionTime(Long retentionTime) { - this.retentionTime = 
retentionTime; - } - - @Override - public String toString() { - return "RegionHotTopicVO{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", detailList=" + detailList + - ", retentionTime=" + retentionTime + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/reassign/ReassignPartitionStatusVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/reassign/ReassignPartitionStatusVO.java deleted file mode 100644 index 51e1bda6..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/reassign/ReassignPartitionStatusVO.java +++ /dev/null @@ -1,53 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.op.reassign; - -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/4/16 - */ -public class ReassignPartitionStatusVO { - @ApiModelProperty(value = "分区Id") - private Integer partitionId; - - @ApiModelProperty(value = "目标副本ID列表") - private List destReplicaIdList; - - @ApiModelProperty(value = "状态") - private Integer status; - - public Integer getPartitionId() { - return partitionId; - } - - public void setPartitionId(Integer partitionId) { - this.partitionId = partitionId; - } - - public List getDestReplicaIdList() { - return destReplicaIdList; - } - - public void setDestReplicaIdList(List destReplicaIdList) { - this.destReplicaIdList = destReplicaIdList; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - @Override - public String toString() { - return "ReassignPartitionStatusVO{" + - "partitionId=" + partitionId + - ", destReplicaIdList=" + destReplicaIdList + - ", status=" + status + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/reassign/ReassignTaskVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/reassign/ReassignTaskVO.java deleted file mode 100644 index 294ea1ef..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/reassign/ReassignTaskVO.java +++ /dev/null @@ -1,136 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.op.reassign; - -import io.swagger.annotations.ApiModelProperty; - -/** - * 迁移任务 - * @author zengqiao - * @date 19/7/13 - */ -public class ReassignTaskVO { - @ApiModelProperty(value = "任务ID") - private Long taskId; - - @ApiModelProperty(value = "任务名称") - private String taskName; - - @ApiModelProperty(value = "完成数") - private Integer completedTopicNum; - - @ApiModelProperty(value = "总数") - private Integer totalTopicNum; - - @ApiModelProperty(value = "状态") - private Integer status; - - @ApiModelProperty(value = "计划开始时间") - private Long beginTime; - - @ApiModelProperty(value = "实际结束时间") - private Long endTime; - - @ApiModelProperty(value = "任务创建时间") - private Long gmtCreate; - - @ApiModelProperty(value = "操作人") - private String operator; - - @ApiModelProperty(value = "任务说明") - private String description; - - public Long getTaskId() { - return taskId; - } - - public void setTaskId(Long taskId) { - this.taskId = taskId; - } - - public String getTaskName() { - return taskName; - } - - public void setTaskName(String taskName) { - this.taskName = taskName; - } - - public Integer getCompletedTopicNum() { - return 
completedTopicNum; - } - - public void setCompletedTopicNum(Integer completedTopicNum) { - this.completedTopicNum = completedTopicNum; - } - - public Integer getTotalTopicNum() { - return totalTopicNum; - } - - public void setTotalTopicNum(Integer totalTopicNum) { - this.totalTopicNum = totalTopicNum; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Long getBeginTime() { - return beginTime; - } - - public void setBeginTime(Long beginTime) { - this.beginTime = beginTime; - } - - public Long getEndTime() { - return endTime; - } - - public void setEndTime(Long endTime) { - this.endTime = endTime; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public String getOperator() { - return operator; - } - - public void setOperator(String operator) { - this.operator = operator; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - @Override - public String toString() { - return "ReassignTaskVO{" + - "taskId=" + taskId + - ", taskName='" + taskName + '\'' + - ", completedTopicNum=" + completedTopicNum + - ", totalTopicNum=" + totalTopicNum + - ", status=" + status + - ", beginTime=" + beginTime + - ", endTime=" + endTime + - ", gmtCreate=" + gmtCreate + - ", operator='" + operator + '\'' + - ", description='" + description + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/reassign/ReassignTopicStatusVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/reassign/ReassignTopicStatusVO.java deleted file mode 100644 index 836e4108..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/reassign/ReassignTopicStatusVO.java +++ /dev/null @@ -1,151 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.op.reassign; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/4/16 - */ -@ApiModel(value = "Topic迁移信息") -public class ReassignTopicStatusVO { - @ApiModelProperty(value = "子任务ID") - private Long subTaskId; - - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "集群名称") - private String clusterName; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "状态") - private Integer status; - - @ApiModelProperty(value = "实际限流(B/s)") - private Long realThrottle; - - @ApiModelProperty(value = "限流上限(B/s)") - private Long maxThrottle; - - @ApiModelProperty(value = "限流下限(B/s)") - private Long minThrottle; - - @ApiModelProperty(value = "完成迁移分区数") - private Integer completedPartitionNum; - - @ApiModelProperty(value = "总的分区数") - private Integer totalPartitionNum; - - @ApiModelProperty(value = "分区迁移列表") - private List reassignList; - - public Long getSubTaskId() { - return subTaskId; - } - - public void setSubTaskId(Long subTaskId) { - this.subTaskId = subTaskId; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - 
public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Long getRealThrottle() { - return realThrottle; - } - - public void setRealThrottle(Long realThrottle) { - this.realThrottle = realThrottle; - } - - public Long getMaxThrottle() { - return maxThrottle; - } - - public void setMaxThrottle(Long maxThrottle) { - this.maxThrottle = maxThrottle; - } - - public Long getMinThrottle() { - return minThrottle; - } - - public void setMinThrottle(Long minThrottle) { - this.minThrottle = minThrottle; - } - - public Integer getCompletedPartitionNum() { - return completedPartitionNum; - } - - public void setCompletedPartitionNum(Integer completedPartitionNum) { - this.completedPartitionNum = completedPartitionNum; - } - - public Integer getTotalPartitionNum() { - return totalPartitionNum; - } - - public void setTotalPartitionNum(Integer totalPartitionNum) { - this.totalPartitionNum = totalPartitionNum; - } - - public List getReassignList() { - return reassignList; - } - - public void setReassignList(List reassignList) { - this.reassignList = reassignList; - } - - @Override - public String toString() { - return "ReassignTopicStatusVO{" + - "subTaskId=" + subTaskId + - ", clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", status=" + status + - ", realThrottle=" + realThrottle + - ", maxThrottle=" + maxThrottle + - ", minThrottle=" + minThrottle + - ", completedPartitionNum=" + completedPartitionNum + - ", totalPartitionNum=" + totalPartitionNum + - ", reassignList=" + reassignList + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/task/ClusterTaskKafkaFilesVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/task/ClusterTaskKafkaFilesVO.java deleted file mode 100644 index 0fde414d..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/task/ClusterTaskKafkaFilesVO.java +++ /dev/null @@ -1,49 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.op.task; - -import io.swagger.annotations.ApiModel; - -/** - * @author zengqiao - * @date 20/5/11 - */ -@ApiModel(value="Kafka相关文件") -public class ClusterTaskKafkaFilesVO { - private String fileName; - - private String fileMd5; - - private Integer fileType; - - public String getFileName() { - return fileName; - } - - public void setFileName(String fileName) { - this.fileName = fileName; - } - - public String getFileMd5() { - return fileMd5; - } - - public void setFileMd5(String fileMd5) { - this.fileMd5 = fileMd5; - } - - public Integer getFileType() { - return fileType; - } - - public void setFileType(Integer fileType) { - this.fileType = fileType; - } - - @Override - public String toString() { - return "ClusterTaskKafkaFilesVO{" + - "fileName='" + fileName + '\'' + - ", fileMd5='" + fileMd5 + '\'' + - ", fileType=" + fileType + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/task/ClusterTaskMetadataVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/task/ClusterTaskMetadataVO.java deleted file mode 100644 index 2788f197..00000000 --- 
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/task/ClusterTaskMetadataVO.java +++ /dev/null @@ -1,187 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.op.task; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/4/27 - */ -@ApiModel(value="任务元信息") -public class ClusterTaskMetadataVO { - @ApiModelProperty(value="任务ID") - private Long taskId; - - @ApiModelProperty(value="集群ID") - private Long clusterId; - - @ApiModelProperty(value="集群名称") - private String clusterName; - - @ApiModelProperty(value="升级的主机列表") - private List hostList; - - @ApiModelProperty(value="升级的主机暂停点") - private List pauseHostList; - - @ApiModelProperty(value="回滚主机列表") - private List rollbackHostList; - - @ApiModelProperty(value="回滚主机暂停点") - private List rollbackPauseHostList; - - @ApiModelProperty(value="kafka包名") - private String kafkaPackageName; - - @ApiModelProperty(value="kafka包 MD5") - private String kafkaPackageMd5; - - @ApiModelProperty(value="server配置文件Id") - private Long serverPropertiesFileId; - - @ApiModelProperty(value="server配置名") - private String serverPropertiesName; - - @ApiModelProperty(value="server配置 MD5") - private String serverPropertiesMd5; - - @ApiModelProperty(value="操作人") - private String operator; - - @ApiModelProperty(value="创建时间") - private Long gmtCreate; - - public Long getTaskId() { - return taskId; - } - - public void setTaskId(Long taskId) { - this.taskId = taskId; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public List getHostList() { - return hostList; - } - - public void setHostList(List hostList) { - this.hostList = hostList; - } - - public List getPauseHostList() { - return pauseHostList; - } - - public void setPauseHostList(List pauseHostList) { - this.pauseHostList = pauseHostList; - } - - public List getRollbackHostList() { - return rollbackHostList; - } - - public void setRollbackHostList(List rollbackHostList) { - this.rollbackHostList = rollbackHostList; - } - - public List getRollbackPauseHostList() { - return rollbackPauseHostList; - } - - public void setRollbackPauseHostList(List rollbackPauseHostList) { - this.rollbackPauseHostList = rollbackPauseHostList; - } - - public String getKafkaPackageName() { - return kafkaPackageName; - } - - public void setKafkaPackageName(String kafkaPackageName) { - this.kafkaPackageName = kafkaPackageName; - } - - public String getKafkaPackageMd5() { - return kafkaPackageMd5; - } - - public void setKafkaPackageMd5(String kafkaPackageMd5) { - this.kafkaPackageMd5 = kafkaPackageMd5; - } - - public Long getServerPropertiesFileId() { - return serverPropertiesFileId; - } - - public void setServerPropertiesFileId(Long serverPropertiesFileId) { - this.serverPropertiesFileId = serverPropertiesFileId; - } - - public String getServerPropertiesName() { - return serverPropertiesName; - } - - public void setServerPropertiesName(String serverPropertiesName) { - this.serverPropertiesName = serverPropertiesName; - } - - public String getServerPropertiesMd5() { - return serverPropertiesMd5; - } - - public void setServerPropertiesMd5(String serverPropertiesMd5) { - this.serverPropertiesMd5 = serverPropertiesMd5; - } - - public String getOperator() { - 
return operator; - } - - public void setOperator(String operator) { - this.operator = operator; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - @Override - public String toString() { - return "ClusterTaskMetadataVO{" + - "taskId=" + taskId + - ", clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", hostList=" + hostList + - ", pauseHostList=" + pauseHostList + - ", rollbackHostList=" + rollbackHostList + - ", rollbackPauseHostList=" + rollbackPauseHostList + - ", kafkaPackageName='" + kafkaPackageName + '\'' + - ", kafkaPackageMd5='" + kafkaPackageMd5 + '\'' + - ", serverPropertiesFileId=" + serverPropertiesFileId + - ", serverPropertiesName='" + serverPropertiesName + '\'' + - ", serverPropertiesMd5='" + serverPropertiesMd5 + '\'' + - ", operator='" + operator + '\'' + - ", gmtCreate=" + gmtCreate + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/task/ClusterTaskStatusVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/task/ClusterTaskStatusVO.java deleted file mode 100644 index b780737b..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/task/ClusterTaskStatusVO.java +++ /dev/null @@ -1,127 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.op.task; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/4/27 - */ -@ApiModel(value="任务状态详情") -public class ClusterTaskStatusVO { - @ApiModelProperty(value="任务ID") - private Long taskId; - - @ApiModelProperty(value="任务状态: 30:运行中(展示暂停), 40:暂停(展示开始), 100:完成(都置灰)") - private Integer status; - - @ApiModelProperty(value="正处于回滚的状态") - private Boolean rollback; - - @ApiModelProperty(value="任务总数") - private Integer sumCount; - - @ApiModelProperty(value="成功总数") - private Integer successCount; - - @ApiModelProperty(value="失败总数") - private Integer failedCount; - - @ApiModelProperty(value="执行中总数") - private Integer runningCount; - - @ApiModelProperty(value="等待总数") - private Integer waitingCount; - - @ApiModelProperty(value="子任务状态") - private List subTaskStatusList; - - public Long getTaskId() { - return taskId; - } - - public void setTaskId(Long taskId) { - this.taskId = taskId; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Boolean getRollback() { - return rollback; - } - - public void setRollback(Boolean rollback) { - this.rollback = rollback; - } - - public Integer getSumCount() { - return sumCount; - } - - public void setSumCount(Integer sumCount) { - this.sumCount = sumCount; - } - - public Integer getSuccessCount() { - return successCount; - } - - public void setSuccessCount(Integer successCount) { - this.successCount = successCount; - } - - public Integer getFailedCount() { - return failedCount; - } - - public void setFailedCount(Integer failedCount) { - this.failedCount = failedCount; - } - - public Integer getRunningCount() { - return runningCount; - } - - public void setRunningCount(Integer runningCount) { - this.runningCount = runningCount; - } - - public Integer getWaitingCount() { - return waitingCount; - } - - public void setWaitingCount(Integer waitingCount) { - this.waitingCount = waitingCount; - } - - public List getSubTaskStatusList() { - 
return subTaskStatusList; - } - - public void setSubTaskStatusList(List subTaskStatusList) { - this.subTaskStatusList = subTaskStatusList; - } - - @Override - public String toString() { - return "ClusterTaskStatusVO{" + - "taskId=" + taskId + - ", status=" + status + - ", rollback=" + rollback + - ", sumCount=" + sumCount + - ", successCount=" + successCount + - ", failedCount=" + failedCount + - ", runningCount=" + runningCount + - ", waitingCount=" + waitingCount + - ", subTaskStatusList=" + subTaskStatusList + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/task/ClusterTaskSubStatusVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/task/ClusterTaskSubStatusVO.java deleted file mode 100644 index 3f6e17d3..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/task/ClusterTaskSubStatusVO.java +++ /dev/null @@ -1,65 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.op.task; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/27 - */ -@ApiModel(value="子任务状态") -public class ClusterTaskSubStatusVO { - @ApiModelProperty(value="主机名") - private String hostname; - - @ApiModelProperty(value="子任务状态") - private Integer status; - - @ApiModelProperty(value="角色") - private String kafkaRoles; - - @ApiModelProperty(value="分组ID") - private Integer groupId; - - public String getHostname() { - return hostname; - } - - public void setHostname(String hostname) { - this.hostname = hostname; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public String getKafkaRoles() { - return kafkaRoles; - } - - public void setKafkaRoles(String kafkaRoles) { - this.kafkaRoles = kafkaRoles; - } - - public Integer getGroupId() { - return groupId; - } - - public void setGroupId(Integer groupId) { - this.groupId = groupId; - } - - @Override - public String toString() { - return "ClusterTaskSubStatusVO{" + - "hostname='" + hostname + '\'' + - ", status=" + status + - ", kafkaRoles='" + kafkaRoles + '\'' + - ", groupId=" + groupId + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/task/ClusterTaskVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/task/ClusterTaskVO.java deleted file mode 100644 index 239de98e..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/op/task/ClusterTaskVO.java +++ /dev/null @@ -1,99 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.op.task; - -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/21 - */ -public class ClusterTaskVO { - @ApiModelProperty(value="任务Id") - private Long taskId; - - @ApiModelProperty(value="集群ID") - private Long clusterId; - - @ApiModelProperty(value="集群名称") - private String clusterName; - - @ApiModelProperty(value="任务类型") - private String taskType; - - @ApiModelProperty(value="状态") - private Integer status; - - @ApiModelProperty(value="操作人") - private String operator; - - @ApiModelProperty(value="创建时间") - private Long createTime; - - public Long getTaskId() { - return taskId; - } - - public void setTaskId(Long taskId) { - this.taskId = taskId; - } - - public Long getClusterId() { - return clusterId; 
- } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTaskType() { - return taskType; - } - - public void setTaskType(String taskType) { - this.taskType = taskType; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public String getOperator() { - return operator; - } - - public void setOperator(String operator) { - this.operator = operator; - } - - public Long getCreateTime() { - return createTime; - } - - public void setCreateTime(Long createTime) { - this.createTime = createTime; - } - - @Override - public String toString() { - return "ClusterTaskVO{" + - "taskId=" + taskId + - ", clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", taskType='" + taskType + '\'' + - ", status=" + status + - ", operator='" + operator + '\'' + - ", createTime=" + createTime + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/ConfigVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/ConfigVO.java deleted file mode 100644 index b4bef9a8..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/ConfigVO.java +++ /dev/null @@ -1,89 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/3/19 - */ -@ApiModel(value = "ConfigVO", description = "配置信息") -public class ConfigVO { - @ApiModelProperty(value="集群Id") - private Long id; - - @ApiModelProperty(value="配置键") - private String configKey; - - @ApiModelProperty(value="配置值") - private String configValue; - - @ApiModelProperty(value="描述信息") - private String configDescription; - - @ApiModelProperty(value="创建时间") - private Long gmtCreate; - - @ApiModelProperty(value="修改时间") - private Long gmtModify; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getConfigKey() { - return configKey; - } - - public void setConfigKey(String configKey) { - this.configKey = configKey; - } - - public String getConfigValue() { - return configValue; - } - - public void setConfigValue(String configValue) { - this.configValue = configValue; - } - - public String getConfigDescription() { - return configDescription; - } - - public void setConfigDescription(String configDescription) { - this.configDescription = configDescription; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Long getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Long gmtModify) { - this.gmtModify = gmtModify; - } - - @Override - public String toString() { - return "ConfigVO{" + - "id=" + id + - ", configKey='" + configKey + '\'' + - ", configValue='" + configValue + '\'' + - ", configDescription='" + configDescription + '\'' + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/CustomScheduledTaskVO.java 
b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/CustomScheduledTaskVO.java deleted file mode 100644 index 2132abc2..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/CustomScheduledTaskVO.java +++ /dev/null @@ -1,35 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd; - -/** - * @author zengqiao - * @date 20/8/11 - */ -public class CustomScheduledTaskVO { - private String name; - - private Object cron; - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public Object getCron() { - return cron; - } - - public void setCron(Object cron) { - this.cron = cron; - } - - @Override - public String toString() { - return "CustomScheduledTaskVO{" + - "name='" + name + '\'' + - ", cron=" + cron + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/GatewayConfigVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/GatewayConfigVO.java deleted file mode 100644 index 72314c31..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/GatewayConfigVO.java +++ /dev/null @@ -1,115 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/3/19 - */ -@ApiModel(value = "GatewayConfigVO", description = "Gateway配置信息") -public class GatewayConfigVO { - @ApiModelProperty(value="ID") - private Long id; - - @ApiModelProperty(value="配置类型") - private String type; - - @ApiModelProperty(value="配置名称") - private String name; - - @ApiModelProperty(value="配置值") - private String value; - - @ApiModelProperty(value="版本") - private Long version; - - @ApiModelProperty(value="描述说明") - private String description; - - @ApiModelProperty(value="创建时间") - private Date createTime; - - @ApiModelProperty(value="修改时间") - private Date modifyTime; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getType() { - return type; - } - - public void setType(String type) { - this.type = type; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public String getValue() { - return value; - } - - public void setValue(String value) { - this.value = value; - } - - public Long getVersion() { - return version; - } - - public void setVersion(Long version) { - this.version = version; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Date getCreateTime() { - return createTime; - } - - public void setCreateTime(Date createTime) { - this.createTime = createTime; - } - - public Date getModifyTime() { - return modifyTime; - } - - public void setModifyTime(Date modifyTime) { - this.modifyTime = modifyTime; - } - - @Override - public String toString() { - return "GatewayConfigVO{" + - "id=" + id + - ", type='" + type + '\'' + - ", name='" + name + '\'' + - ", value='" + value + '\'' + - ", version=" + version + - ", description='" + description + '\'' + - ", createTime=" + createTime + - ", modifyTime=" + modifyTime + - '}'; - } -} diff --git 
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/KafkaControllerVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/KafkaControllerVO.java deleted file mode 100644 index 1c205fcb..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/KafkaControllerVO.java +++ /dev/null @@ -1,65 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author huangyiminghappy@163.com - * @date 2019-03-27 - */ -@ApiModel(value = "KafkaController信息") -public class KafkaControllerVO { - @ApiModelProperty(value = "节点ID") - private Integer brokerId; - - @ApiModelProperty(value = "节点地址") - private String host; - - @ApiModelProperty(value = "ZK消息版本") - private Integer version; - - @ApiModelProperty(value = "变更时间") - private Long timestamp; - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public Integer getVersion() { - return version; - } - - public void setVersion(Integer version) { - this.version = version; - } - - public Long getTimestamp() { - return timestamp; - } - - public void setTimestamp(Long timestamp) { - this.timestamp = timestamp; - } - - @Override - public String toString() { - return "KafkaControllerVO{" + - "brokerId=" + brokerId + - ", host='" + host + '\'' + - ", version=" + version + - ", timestamp=" + timestamp + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/KafkaFileVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/KafkaFileVO.java deleted file mode 100644 index 1ca29b73..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/KafkaFileVO.java +++ /dev/null @@ -1,127 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/4/29 - */ -@ApiModel(description = "Kafka文件信息") -public class KafkaFileVO { - @ApiModelProperty(value = "集群名称") - private String clusterName; - - @ApiModelProperty(value = "ID") - private Long id; - - @ApiModelProperty(value = "文件名称") - private String fileName; - - @ApiModelProperty(value = "文件类型") - private Integer fileType; - - @ApiModelProperty(value = "存储位置") - private String storageName; - - @ApiModelProperty(value = "文件MD5") - private String fileMd5; - - @ApiModelProperty(value = "操作人") - private String operator; - - @ApiModelProperty(value = "备注") - private String description; - - @ApiModelProperty(value = "修改时间") - private Date gmtModify; - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public String getFileName() { - return fileName; - } - - public void setFileName(String fileName) { - this.fileName = fileName; - } - - public Integer getFileType() { - return fileType; - } - - public void setFileType(Integer fileType) { - this.fileType = fileType; - } - - public String getStorageName() { 
- return storageName; - } - - public void setStorageName(String storageName) { - this.storageName = storageName; - } - - public String getFileMd5() { - return fileMd5; - } - - public void setFileMd5(String fileMd5) { - this.fileMd5 = fileMd5; - } - - public String getOperator() { - return operator; - } - - public void setOperator(String operator) { - this.operator = operator; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - @Override - public String toString() { - return "KafkaFileVO{" + - "clusterName='" + clusterName + '\'' + - ", id=" + id + - ", fileName='" + fileName + '\'' + - ", fileType=" + fileType + - ", storageName='" + storageName + '\'' + - ", fileMd5='" + fileMd5 + '\'' + - ", operator='" + operator + '\'' + - ", description='" + description + '\'' + - ", gmtModify=" + gmtModify + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/OperateRecordVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/OperateRecordVO.java deleted file mode 100644 index 576c6f53..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/OperateRecordVO.java +++ /dev/null @@ -1,139 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/09/03 - */ -@ApiModel(description = "操作记录") -public class OperateRecordVO { - @ApiModelProperty("id") - private Long id; - - @ApiModelProperty("模块ID") - private Integer moduleId; - - @ApiModelProperty("模块") - private String module; - - @ApiModelProperty("操作ID") - private Integer operateId; - - @ApiModelProperty("操作") - private String operate; - - @ApiModelProperty("资源(app、topic)") - private String resource; - - @ApiModelProperty("操作内容") - private String content; - - @ApiModelProperty("操作人") - private String operator; - - @ApiModelProperty("创建时间") - private Long createTime; - - @ApiModelProperty("修改时间") - private Long modifyTime; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Integer getModuleId() { - return moduleId; - } - - public void setModuleId(Integer moduleId) { - this.moduleId = moduleId; - } - - public String getModule() { - return module; - } - - public void setModule(String module) { - this.module = module; - } - - public Integer getOperateId() { - return operateId; - } - - public void setOperateId(Integer operateId) { - this.operateId = operateId; - } - - public String getOperate() { - return operate; - } - - public void setOperate(String operate) { - this.operate = operate; - } - - public String getResource() { - return resource; - } - - public void setResource(String resource) { - this.resource = resource; - } - - public String getContent() { - return content; - } - - public void setContent(String content) { - this.content = content; - } - - public String getOperator() { - return operator; - } - - public void setOperator(String operator) { - this.operator = operator; - } - - public Long getCreateTime() { - return createTime; - } - - public void setCreateTime(Long createTime) { - this.createTime = createTime; - } - - 
public Long getModifyTime() { - return modifyTime; - } - - public void setModifyTime(Long modifyTime) { - this.modifyTime = modifyTime; - } - - @Override - public String toString() { - return "OperateRecordVO{" + - "id=" + id + - ", moduleId=" + moduleId + - ", module='" + module + '\'' + - ", operateId=" + operateId + - ", operate='" + operate + '\'' + - ", resource='" + resource + '\'' + - ", content='" + content + '\'' + - ", operator='" + operator + '\'' + - ", createTime=" + createTime + - ", modifyTime=" + modifyTime + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/RdTopicBasicVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/RdTopicBasicVO.java deleted file mode 100644 index 75d50f05..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/RdTopicBasicVO.java +++ /dev/null @@ -1,128 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; -import java.util.Properties; - -/** - * @author zengqiao - * @date 20/6/10 - */ -@ApiModel(description = "Topic基本信息(RD视角)") -public class RdTopicBasicVO { - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "集群名称") - private String clusterName; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "保留时间(ms)") - private Long retentionTime; - - @ApiModelProperty(value = "应用ID") - private String appId; - - @ApiModelProperty(value = "应用名称") - private String appName; - - @ApiModelProperty(value = "Topic属性") - private Properties properties; - - @ApiModelProperty(value = "备注") - private String description; - - @ApiModelProperty(value = "所属region") - private List regionNameList; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Long getRetentionTime() { - return retentionTime; - } - - public void setRetentionTime(Long retentionTime) { - this.retentionTime = retentionTime; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getAppName() { - return appName; - } - - public void setAppName(String appName) { - this.appName = appName; - } - - public Properties getProperties() { - return properties; - } - - public void setProperties(Properties properties) { - this.properties = properties; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public List getRegionNameList() { - return regionNameList; - } - - public void setRegionNameList(List regionNameList) { - this.regionNameList = regionNameList; - } - - @Override - public String toString() { - return "RdTopicBasicVO{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", topicName='" + topicName + '\'' + - ", retentionTime=" + retentionTime + - ", appId='" + appId + '\'' + - ", appName='" + appName + '\'' + - ", properties=" + properties + - ", 
description='" + description + '\'' + - ", regionNameList='" + regionNameList + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/RegionVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/RegionVO.java deleted file mode 100644 index e51b2738..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/RegionVO.java +++ /dev/null @@ -1,153 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.Date; -import java.util.List; - -/** - * Region信息 - * @author zengqiao - * @date 19/4/1 - */ -@ApiModel(description = "Region信息") -public class RegionVO { - @ApiModelProperty(value = "RegionID") - protected Long id; - - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "Region名称") - private String name; - - @ApiModelProperty(value = "brokerId列表") - private List brokerIdList; - - @ApiModelProperty(value = "描述信息") - private String description; - - @ApiModelProperty(value = "状态, 0:正常 1:容量已满") - private Integer status; - - @ApiModelProperty(value = "容量(B/s)") - private Long capacity; - - @ApiModelProperty(value = "实际流量(B/s)") - private Long realUsed; - - @ApiModelProperty(value = "预估流量(B/s)") - private Long estimateUsed; - - @ApiModelProperty(value = "创建时间") - private Date gmtCreate; - - @ApiModelProperty(value = "修改时间") - private Date gmtModify; - - public Long getId() { - return id; - } - - public void setId(Long id) { - this.id = id; - } - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public List getBrokerIdList() { - return brokerIdList; - } - - public void setBrokerIdList(List brokerIdList) { - this.brokerIdList = brokerIdList; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Long getCapacity() { - return capacity; - } - - public void setCapacity(Long capacity) { - this.capacity = capacity; - } - - public Long getRealUsed() { - return realUsed; - } - - public void setRealUsed(Long realUsed) { - this.realUsed = realUsed; - } - - public Long getEstimateUsed() { - return estimateUsed; - } - - public void setEstimateUsed(Long estimateUsed) { - this.estimateUsed = estimateUsed; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - @Override - public String toString() { - return "RegionVO{" + - "id=" + id + - ", clusterId=" + clusterId + - ", name='" + name + '\'' + - ", brokerIdList=" + brokerIdList + - ", description='" + description + '\'' + - ", status=" + status + - ", capacity=" + capacity + - ", realUsed=" + realUsed + - ", estimateUsed=" + estimateUsed + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - '}'; - } -} diff --git 
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/TopicBrokerVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/TopicBrokerVO.java deleted file mode 100644 index ae7ae9ec..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/TopicBrokerVO.java +++ /dev/null @@ -1,102 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd; - -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * Topic所在Broker的信息 - * @author zengqiao - * @date 19/4/3 - */ -public class TopicBrokerVO { - @ApiModelProperty(value = "物理集群ID") - private Long clusterId; - - @ApiModelProperty(value = "brokerId") - private Integer brokerId; - - @ApiModelProperty(value = "broker主机名") - private String host; - - @ApiModelProperty(value = "分区数") - private Integer partitionNum; - - @ApiModelProperty(value = "分区的Id") - private List partitionIdList; - - @ApiModelProperty(value = "leader分区的Id") - private List leaderPartitionIdList; - - @ApiModelProperty(value = "是否存活") - private boolean alive; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public Integer getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Integer partitionNum) { - this.partitionNum = partitionNum; - } - - public List getPartitionIdList() { - return partitionIdList; - } - - public void setPartitionIdList(List partitionIdList) { - this.partitionIdList = partitionIdList; - } - - public List getLeaderPartitionIdList() { - return leaderPartitionIdList; - } - - public void setLeaderPartitionIdList(List leaderPartitionIdList) { - this.leaderPartitionIdList = leaderPartitionIdList; - } - - public boolean isAlive() { - return alive; - } - - public void setAlive(boolean alive) { - this.alive = alive; - } - - @Override - public String toString() { - return "TopicBrokerVO{" + - "clusterId=" + clusterId + - ", brokerId=" + brokerId + - ", host='" + host + '\'' + - ", partitionNum=" + partitionNum + - ", partitionIdList=" + partitionIdList + - ", leaderPartitionIdList=" + leaderPartitionIdList + - ", alive=" + alive + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/AnalysisBrokerVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/AnalysisBrokerVO.java deleted file mode 100644 index accc60ba..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/AnalysisBrokerVO.java +++ /dev/null @@ -1,114 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd.broker; - -import java.util.List; - -/** - * @author zengqiao - * @date 19/12/29 - */ -public class AnalysisBrokerVO { - private Long clusterId; - - private Integer brokerId; - - private Long baseTime; - - private Double bytesIn; - - private Double bytesOut; - - private Double messagesIn; - - private Double totalFetchRequests; - - private Double totalProduceRequests; - - List topicAnalysisVOList; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - 
public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public Long getBaseTime() { - return baseTime; - } - - public void setBaseTime(Long baseTime) { - this.baseTime = baseTime; - } - - public Double getBytesIn() { - return bytesIn; - } - - public void setBytesIn(Double bytesIn) { - this.bytesIn = bytesIn; - } - - public Double getBytesOut() { - return bytesOut; - } - - public void setBytesOut(Double bytesOut) { - this.bytesOut = bytesOut; - } - - public Double getMessagesIn() { - return messagesIn; - } - - public void setMessagesIn(Double messagesIn) { - this.messagesIn = messagesIn; - } - - public Double getTotalFetchRequests() { - return totalFetchRequests; - } - - public void setTotalFetchRequests(Double totalFetchRequests) { - this.totalFetchRequests = totalFetchRequests; - } - - public Double getTotalProduceRequests() { - return totalProduceRequests; - } - - public void setTotalProduceRequests(Double totalProduceRequests) { - this.totalProduceRequests = totalProduceRequests; - } - - public List getTopicAnalysisVOList() { - return topicAnalysisVOList; - } - - public void setTopicAnalysisVOList(List topicAnalysisVOList) { - this.topicAnalysisVOList = topicAnalysisVOList; - } - - @Override - public String toString() { - return "AnalysisBrokerVO{" + - "clusterId=" + clusterId + - ", brokerId=" + brokerId + - ", baseTime=" + baseTime + - ", bytesIn=" + bytesIn + - ", bytesOut=" + bytesOut + - ", messagesIn=" + messagesIn + - ", totalFetchRequests=" + totalFetchRequests + - ", totalProduceRequests=" + totalProduceRequests + - ", topicAnalysisVOList=" + topicAnalysisVOList + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/AnalysisTopicVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/AnalysisTopicVO.java deleted file mode 100644 index d2a20213..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/AnalysisTopicVO.java +++ /dev/null @@ -1,134 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd.broker; - -/** - * @author zengqiao - * @date 20/3/11 - */ -public class AnalysisTopicVO { - private String topicName; - - private String bytesIn; - - private String bytesInRate; - - private String bytesOut; - - private String bytesOutRate; - - private String messagesIn; - - private String messagesInRate; - - private String totalFetchRequests; - - private String totalFetchRequestsRate; - - private String totalProduceRequests; - - private String totalProduceRequestsRate; - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public String getBytesIn() { - return bytesIn; - } - - public void setBytesIn(String bytesIn) { - this.bytesIn = bytesIn; - } - - public String getBytesInRate() { - return bytesInRate; - } - - public void setBytesInRate(String bytesInRate) { - this.bytesInRate = bytesInRate; - } - - public String getBytesOut() { - return bytesOut; - } - - public void setBytesOut(String bytesOut) { - this.bytesOut = bytesOut; - } - - public String getBytesOutRate() { - return bytesOutRate; - } - - public void setBytesOutRate(String bytesOutRate) { - this.bytesOutRate = bytesOutRate; - } - - public String getMessagesIn() { - return messagesIn; - } - - public void setMessagesIn(String messagesIn) { - this.messagesIn = 
messagesIn; - } - - public String getMessagesInRate() { - return messagesInRate; - } - - public void setMessagesInRate(String messagesInRate) { - this.messagesInRate = messagesInRate; - } - - public String getTotalFetchRequests() { - return totalFetchRequests; - } - - public void setTotalFetchRequests(String totalFetchRequests) { - this.totalFetchRequests = totalFetchRequests; - } - - public String getTotalFetchRequestsRate() { - return totalFetchRequestsRate; - } - - public void setTotalFetchRequestsRate(String totalFetchRequestsRate) { - this.totalFetchRequestsRate = totalFetchRequestsRate; - } - - public String getTotalProduceRequests() { - return totalProduceRequests; - } - - public void setTotalProduceRequests(String totalProduceRequests) { - this.totalProduceRequests = totalProduceRequests; - } - - public String getTotalProduceRequestsRate() { - return totalProduceRequestsRate; - } - - public void setTotalProduceRequestsRate(String totalProduceRequestsRate) { - this.totalProduceRequestsRate = totalProduceRequestsRate; - } - - @Override - public String toString() { - return "AnalysisTopicVO{" + - "topicName='" + topicName + '\'' + - ", bytesIn='" + bytesIn + '\'' + - ", bytesInRate='" + bytesInRate + '\'' + - ", bytesOut='" + bytesOut + '\'' + - ", bytesOutRate='" + bytesOutRate + '\'' + - ", messagesIn='" + messagesIn + '\'' + - ", messagesInRate='" + messagesInRate + '\'' + - ", totalFetchRequests='" + totalFetchRequests + '\'' + - ", totalFetchRequestsRate='" + totalFetchRequestsRate + '\'' + - ", totalProduceRequests='" + totalProduceRequests + '\'' + - ", totalProduceRequestsRate='" + totalProduceRequestsRate + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/BrokerBasicVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/BrokerBasicVO.java deleted file mode 100644 index 6754ece7..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/BrokerBasicVO.java +++ /dev/null @@ -1,101 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd.broker; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author huangjw - * @date 17/6/1. 
- */ -@ApiModel(description = "Broker基本信息") -public class BrokerBasicVO { - @ApiModelProperty(value = "主机名") - private String host; - - @ApiModelProperty(value = "服务端口") - private Integer port; - - @ApiModelProperty(value = "JMX端口") - private Integer jmxPort; - - @ApiModelProperty(value = "Topic数") - private Integer topicNum; - - @ApiModelProperty(value = "分区数") - private Integer partitionCount; - - @ApiModelProperty(value = "Leader数") - private Integer leaderCount; - - @ApiModelProperty(value = "启动时间") - private Long startTime; - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public Integer getPort() { - return port; - } - - public void setPort(Integer port) { - this.port = port; - } - - public Integer getJmxPort() { - return jmxPort; - } - - public void setJmxPort(Integer jmxPort) { - this.jmxPort = jmxPort; - } - - public Integer getTopicNum() { - return topicNum; - } - - public void setTopicNum(Integer topicNum) { - this.topicNum = topicNum; - } - - public Integer getPartitionCount() { - return partitionCount; - } - - public void setPartitionCount(Integer partitionCount) { - this.partitionCount = partitionCount; - } - - public Long getStartTime() { - return startTime; - } - - public void setStartTime(Long startTime) { - this.startTime = startTime; - } - - public Integer getLeaderCount() { - return leaderCount; - } - - public void setLeaderCount(Integer leaderCount) { - this.leaderCount = leaderCount; - } - - @Override - public String toString() { - return "BrokerBasicVO{" + - "host='" + host + '\'' + - ", port=" + port + - ", jmxPort=" + jmxPort + - ", topicNum=" + topicNum + - ", partitionCount=" + partitionCount + - ", leaderCount=" + leaderCount + - ", startTime=" + startTime + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/BrokerDiskTopicVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/BrokerDiskTopicVO.java deleted file mode 100644 index d75f9f20..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/BrokerDiskTopicVO.java +++ /dev/null @@ -1,115 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd.broker; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/4/30 - */ -@ApiModel(description = "Broker磁盘信息") -public class BrokerDiskTopicVO { - @ApiModelProperty(value = "集群ID") - private Long clusterId; - - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "BrokerID") - private Integer brokerId; - - @ApiModelProperty(value = "磁盘名") - private String diskName; - - @ApiModelProperty(value = "Leader分区") - private List leaderPartitions; - - @ApiModelProperty(value = "Follow分区") - private List followerPartitions; - - @ApiModelProperty(value = "处于同步状态") - private Boolean underReplicated; - - @ApiModelProperty(value = "未处于同步状态的分区") - private List notUnderReplicatedPartitions; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - 
public String getDiskName() { - return diskName; - } - - public void setDiskName(String diskName) { - this.diskName = diskName; - } - - public List getLeaderPartitions() { - return leaderPartitions; - } - - public void setLeaderPartitions(List leaderPartitions) { - this.leaderPartitions = leaderPartitions; - } - - public List getFollowerPartitions() { - return followerPartitions; - } - - public void setFollowerPartitions(List followerPartitions) { - this.followerPartitions = followerPartitions; - } - - public Boolean getUnderReplicated() { - return underReplicated; - } - - public void setUnderReplicated(Boolean underReplicated) { - this.underReplicated = underReplicated; - } - - public List getNotUnderReplicatedPartitions() { - return notUnderReplicatedPartitions; - } - - public void setNotUnderReplicatedPartitions(List notUnderReplicatedPartitions) { - this.notUnderReplicatedPartitions = notUnderReplicatedPartitions; - } - - @Override - public String toString() { - return "BrokerDiskTopicVO{" + - "clusterId=" + clusterId + - ", topicName='" + topicName + '\'' + - ", brokerId=" + brokerId + - ", diskName='" + diskName + '\'' + - ", leaderPartitions=" + leaderPartitions + - ", followerPartitions=" + followerPartitions + - ", underReplicated=" + underReplicated + - ", notUnderReplicatedPartitions=" + notUnderReplicatedPartitions + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/BrokerMetadataVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/BrokerMetadataVO.java deleted file mode 100644 index a9d3950b..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/BrokerMetadataVO.java +++ /dev/null @@ -1,44 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd.broker; - -/** - * @author zengqiao - * @date 19/7/12 - */ -public class BrokerMetadataVO { - private Integer brokerId; - - private String host; - - public BrokerMetadataVO(int brokerId, String host) { - this.brokerId = brokerId; - this.host = host; - } - - public int getBrokerId() { - return brokerId; - } - - public void setBrokerId(int brokerId) { - this.brokerId = brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - @Override - public String toString() { - return "BrokerMetadataVO{" + - "brokerId=" + brokerId + - ", host='" + host + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/BrokerMetricsVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/BrokerMetricsVO.java deleted file mode 100644 index a77992cf..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/BrokerMetricsVO.java +++ /dev/null @@ -1,221 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd.broker; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 2019-06-03 - */ -@ApiModel(description = "Broker指标") -public class BrokerMetricsVO { - @ApiModelProperty(value = "健康分[0-100]") - private Integer healthScore; - - @ApiModelProperty(value = "流入流量(B/s)") - private Object bytesInPerSec; - - @ApiModelProperty(value = "流出流量(B/s)") - 
private Object bytesOutPerSec; - - @ApiModelProperty(value = "被拒绝流量(B/s)") - private Object bytesRejectedPerSec; - - @ApiModelProperty(value = "消息数") - private Object messagesInPerSec; - - @ApiModelProperty(value = "发送请求数") - private Object produceRequestPerSec; - - @ApiModelProperty(value = "消费请求数") - private Object fetchConsumerRequestPerSec; - - @ApiModelProperty(value = "请求处理器空闲百分比") - private Object requestHandlerIdlPercent; - - @ApiModelProperty(value = "网络处理器空闲百分比") - private Object networkProcessorIdlPercent; - - @ApiModelProperty(value = "请求队列大小") - private Integer requestQueueSize; - - @ApiModelProperty(value = "响应队列大小") - private Integer responseQueueSize; - - @ApiModelProperty(value = "刷日志事件") - private Object logFlushTime; - - @ApiModelProperty(value = "每秒消费失败数") - private Object failFetchRequestPerSec; - - @ApiModelProperty(value = "每秒发送失败数") - private Object failProduceRequestPerSec; - - @ApiModelProperty(value = "发送耗时99分位") - private Object totalTimeProduce99Th; - - @ApiModelProperty(value = "消费耗时99分位") - private Object totalTimeFetchConsumer99Th; - - @ApiModelProperty(value = "创建时间") - private Long gmtCreate; - - public Integer getHealthScore() { - return healthScore; - } - - public void setHealthScore(Integer healthScore) { - this.healthScore = healthScore; - } - - public Object getBytesInPerSec() { - return bytesInPerSec; - } - - public void setBytesInPerSec(Object bytesInPerSec) { - this.bytesInPerSec = bytesInPerSec; - } - - public Object getBytesOutPerSec() { - return bytesOutPerSec; - } - - public void setBytesOutPerSec(Object bytesOutPerSec) { - this.bytesOutPerSec = bytesOutPerSec; - } - - public Object getBytesRejectedPerSec() { - return bytesRejectedPerSec; - } - - public void setBytesRejectedPerSec(Object bytesRejectedPerSec) { - this.bytesRejectedPerSec = bytesRejectedPerSec; - } - - public Object getMessagesInPerSec() { - return messagesInPerSec; - } - - public void setMessagesInPerSec(Object messagesInPerSec) { - this.messagesInPerSec = messagesInPerSec; - } - - public Object getProduceRequestPerSec() { - return produceRequestPerSec; - } - - public void setProduceRequestPerSec(Object produceRequestPerSec) { - this.produceRequestPerSec = produceRequestPerSec; - } - - public Object getFetchConsumerRequestPerSec() { - return fetchConsumerRequestPerSec; - } - - public void setFetchConsumerRequestPerSec(Object fetchConsumerRequestPerSec) { - this.fetchConsumerRequestPerSec = fetchConsumerRequestPerSec; - } - - public Object getRequestHandlerIdlPercent() { - return requestHandlerIdlPercent; - } - - public void setRequestHandlerIdlPercent(Object requestHandlerIdlPercent) { - this.requestHandlerIdlPercent = requestHandlerIdlPercent; - } - - public Object getNetworkProcessorIdlPercent() { - return networkProcessorIdlPercent; - } - - public void setNetworkProcessorIdlPercent(Object networkProcessorIdlPercent) { - this.networkProcessorIdlPercent = networkProcessorIdlPercent; - } - - public Integer getRequestQueueSize() { - return requestQueueSize; - } - - public void setRequestQueueSize(Integer requestQueueSize) { - this.requestQueueSize = requestQueueSize; - } - - public Integer getResponseQueueSize() { - return responseQueueSize; - } - - public void setResponseQueueSize(Integer responseQueueSize) { - this.responseQueueSize = responseQueueSize; - } - - public Object getLogFlushTime() { - return logFlushTime; - } - - public void setLogFlushTime(Object logFlushTime) { - this.logFlushTime = logFlushTime; - } - - public Object getFailFetchRequestPerSec() { - return 
failFetchRequestPerSec; - } - - public void setFailFetchRequestPerSec(Object failFetchRequestPerSec) { - this.failFetchRequestPerSec = failFetchRequestPerSec; - } - - public Object getFailProduceRequestPerSec() { - return failProduceRequestPerSec; - } - - public void setFailProduceRequestPerSec(Object failProduceRequestPerSec) { - this.failProduceRequestPerSec = failProduceRequestPerSec; - } - - public Object getTotalTimeProduce99Th() { - return totalTimeProduce99Th; - } - - public void setTotalTimeProduce99Th(Object totalTimeProduce99Th) { - this.totalTimeProduce99Th = totalTimeProduce99Th; - } - - public Object getTotalTimeFetchConsumer99Th() { - return totalTimeFetchConsumer99Th; - } - - public void setTotalTimeFetchConsumer99Th(Object totalTimeFetchConsumer99Th) { - this.totalTimeFetchConsumer99Th = totalTimeFetchConsumer99Th; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } - - @Override - public String toString() { - return "BrokerMetricsVO{" + - "healthScore=" + healthScore + - ", bytesInPerSec=" + bytesInPerSec + - ", bytesOutPerSec=" + bytesOutPerSec + - ", bytesRejectedPerSec=" + bytesRejectedPerSec + - ", messagesInPerSec=" + messagesInPerSec + - ", produceRequestPerSec=" + produceRequestPerSec + - ", fetchConsumerRequestPerSec=" + fetchConsumerRequestPerSec + - ", requestHandlerIdlPercent=" + requestHandlerIdlPercent + - ", networkProcessorIdlPercent=" + networkProcessorIdlPercent + - ", requestQueueSize=" + requestQueueSize + - ", responseQueueSize=" + responseQueueSize + - ", logFlushTime=" + logFlushTime + - ", failFetchRequestPerSec=" + failFetchRequestPerSec + - ", failProduceRequestPerSec=" + failProduceRequestPerSec + - ", totalTimeProduce99Th=" + totalTimeProduce99Th + - ", totalTimeFetchConsumer99Th=" + totalTimeFetchConsumer99Th + - ", gmtCreate=" + gmtCreate + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/BrokerPartitionVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/BrokerPartitionVO.java deleted file mode 100644 index 6a35161e..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/BrokerPartitionVO.java +++ /dev/null @@ -1,79 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd.broker; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 19/4/22 - */ -@ApiModel(description = "Broker分区信息") -public class BrokerPartitionVO { - @ApiModelProperty(value = "Topic名称") - private String topicName; - - @ApiModelProperty(value = "Leader分区") - private List leaderPartitionList; - - @ApiModelProperty(value = "Follower分区") - private List followerPartitionIdList; - - @ApiModelProperty(value = "是否未同步完成") - private Boolean underReplicated; - - @ApiModelProperty(value = "未同步分区列表") - private List notUnderReplicatedPartitionIdList; - - public String getTopicName() { - return topicName; - } - - public void setTopicName(String topicName) { - this.topicName = topicName; - } - - public List getLeaderPartitionList() { - return leaderPartitionList; - } - - public void setLeaderPartitionList(List leaderPartitionList) { - this.leaderPartitionList = leaderPartitionList; - } - - public List getFollowerPartitionIdList() { - return followerPartitionIdList; - } - - public void setFollowerPartitionIdList(List 
followerPartitionIdList) { - this.followerPartitionIdList = followerPartitionIdList; - } - - public Boolean getUnderReplicated() { - return underReplicated; - } - - public void setUnderReplicated(Boolean underReplicated) { - this.underReplicated = underReplicated; - } - - public List getNotUnderReplicatedPartitionIdList() { - return notUnderReplicatedPartitionIdList; - } - - public void setNotUnderReplicatedPartitionIdList(List notUnderReplicatedPartitionIdList) { - this.notUnderReplicatedPartitionIdList = notUnderReplicatedPartitionIdList; - } - - @Override - public String toString() { - return "BrokerPartitionVO{" + - "topicName='" + topicName + '\'' + - ", leaderPartitionList=" + leaderPartitionList + - ", followerPartitionIdList=" + followerPartitionIdList + - ", underReplicated=" + underReplicated + - ", notUnderReplicatedPartitionIdList=" + notUnderReplicatedPartitionIdList + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/RdBrokerBasicVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/RdBrokerBasicVO.java deleted file mode 100644 index d09a2785..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/broker/RdBrokerBasicVO.java +++ /dev/null @@ -1,53 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd.broker; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zhongyuankai - * @date 2020/6/11 - */ -@ApiModel(description = "Broker基本信息") -public class RdBrokerBasicVO { - @ApiModelProperty(value = "brokerId") - private Integer brokerId; - - @ApiModelProperty(value = "主机名") - private String host; - - @ApiModelProperty(value = "逻辑集群") - private Long logicClusterId; - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public Long getLogicClusterId() { - return logicClusterId; - } - - public void setLogicClusterId(Long logicClusterId) { - this.logicClusterId = logicClusterId; - } - - @Override - public String toString() { - return "RdBrokerBasicVO{" + - "brokerId=" + brokerId + - ", host='" + host + '\'' + - ", logicClusterId=" + logicClusterId + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/ClusterBaseVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/ClusterBaseVO.java deleted file mode 100644 index 111304f1..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/ClusterBaseVO.java +++ /dev/null @@ -1,163 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.Date; - -/** - * @author zengqiao - * @date 20/4/16 - */ -@ApiModel(description="集群信息") -public class ClusterBaseVO { - @ApiModelProperty(value="集群Id") - private Long clusterId; - - @ApiModelProperty(value="集群名称") - private String clusterName; - - @ApiModelProperty(value="ZK地址") - private String zookeeper; - - @ApiModelProperty(value="bootstrap地址") - private String bootstrapServers; - - @ApiModelProperty(value="kafka版本") - private String kafkaVersion; - - @ApiModelProperty(value="数据中心") - private 
String idc; - - @ApiModelProperty(value="集群类型") - private Integer mode; - - @ApiModelProperty(value="Kafka安全配置") - private String securityProperties; - - @ApiModelProperty(value="Jmx配置") - private String jmxProperties; - - @ApiModelProperty(value="1:监控中, 0:暂停监控") - private Integer status; - - @ApiModelProperty(value="接入时间") - private Date gmtCreate; - - @ApiModelProperty(value="修改时间") - private Date gmtModify; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public String getClusterName() { - return clusterName; - } - - public void setClusterName(String clusterName) { - this.clusterName = clusterName; - } - - public String getZookeeper() { - return zookeeper; - } - - public void setZookeeper(String zookeeper) { - this.zookeeper = zookeeper; - } - - public String getBootstrapServers() { - return bootstrapServers; - } - - public void setBootstrapServers(String bootstrapServers) { - this.bootstrapServers = bootstrapServers; - } - - public String getKafkaVersion() { - return kafkaVersion; - } - - public void setKafkaVersion(String kafkaVersion) { - this.kafkaVersion = kafkaVersion; - } - - public String getIdc() { - return idc; - } - - public void setIdc(String idc) { - this.idc = idc; - } - - public Integer getMode() { - return mode; - } - - public void setMode(Integer mode) { - this.mode = mode; - } - - public String getSecurityProperties() { - return securityProperties; - } - - public void setSecurityProperties(String securityProperties) { - this.securityProperties = securityProperties; - } - - public String getJmxProperties() { - return jmxProperties; - } - - public void setJmxProperties(String jmxProperties) { - this.jmxProperties = jmxProperties; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - @Override - public String toString() { - return "ClusterBaseVO{" + - "clusterId=" + clusterId + - ", clusterName='" + clusterName + '\'' + - ", zookeeper='" + zookeeper + '\'' + - ", bootstrapServers='" + bootstrapServers + '\'' + - ", kafkaVersion='" + kafkaVersion + '\'' + - ", idc='" + idc + '\'' + - ", mode=" + mode + - ", securityProperties='" + securityProperties + '\'' + - ", jmxProperties='" + jmxProperties + '\'' + - ", status=" + status + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/ClusterBrokerStatusVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/ClusterBrokerStatusVO.java deleted file mode 100644 index a98ab766..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/ClusterBrokerStatusVO.java +++ /dev/null @@ -1,43 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/5/11 - */ -@ApiModel(description="集群Broker状态") -public class ClusterBrokerStatusVO { - @ApiModelProperty(value = "Broker副本同步状态: 总数, 
已同步数, 未同步数") - private List brokerReplicaStatusList; - - @ApiModelProperty(value = "Broker峰值状态: 总数, 60-%, 60-80%, 80-100%, 100+%, 异常") - private List brokerBytesInStatusList; - - public List getBrokerReplicaStatusList() { - return brokerReplicaStatusList; - } - - public void setBrokerReplicaStatusList(List brokerReplicaStatusList) { - this.brokerReplicaStatusList = brokerReplicaStatusList; - } - - public List getBrokerBytesInStatusList() { - return brokerBytesInStatusList; - } - - public void setBrokerBytesInStatusList(List brokerBytesInStatusList) { - this.brokerBytesInStatusList = brokerBytesInStatusList; - } - - @Override - public String toString() { - return "ClusterBrokerStatusVO{" + - "brokerReplicaStatusList=" + brokerReplicaStatusList + - ", brokerBytesInStatusList=" + brokerBytesInStatusList + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/ClusterDetailVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/ClusterDetailVO.java deleted file mode 100644 index cdeb7da7..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/ClusterDetailVO.java +++ /dev/null @@ -1,77 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * @author zengqiao - * @date 20/4/23 - */ -@ApiModel(description="集群信息") -public class ClusterDetailVO extends ClusterBaseVO { - @ApiModelProperty(value="Broker数") - private Integer brokerNum; - - @ApiModelProperty(value="Topic数") - private Integer topicNum; - - @ApiModelProperty(value="ConsumerGroup数") - private Integer consumerGroupNum; - - @ApiModelProperty(value="ControllerID") - private Integer controllerId; - - @ApiModelProperty(value="Region数") - private Integer regionNum; - - public Integer getBrokerNum() { - return brokerNum; - } - - public void setBrokerNum(Integer brokerNum) { - this.brokerNum = brokerNum; - } - - public Integer getTopicNum() { - return topicNum; - } - - public void setTopicNum(Integer topicNum) { - this.topicNum = topicNum; - } - - public Integer getConsumerGroupNum() { - return consumerGroupNum; - } - - public void setConsumerGroupNum(Integer consumerGroupNum) { - this.consumerGroupNum = consumerGroupNum; - } - - public Integer getControllerId() { - return controllerId; - } - - public void setControllerId(Integer controllerId) { - this.controllerId = controllerId; - } - - public Integer getRegionNum() { - return regionNum; - } - - public void setRegionNum(Integer regionNum) { - this.regionNum = regionNum; - } - - @Override - public String toString() { - return "ClusterDetailVO{" + - "brokerNum=" + brokerNum + - ", topicNum=" + topicNum + - ", consumerGroupNum=" + consumerGroupNum + - ", controllerId=" + controllerId + - ", regionNum=" + regionNum + - "} " + super.toString(); - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/ControllerPreferredCandidateVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/ControllerPreferredCandidateVO.java deleted file mode 100644 index 399b35fb..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/ControllerPreferredCandidateVO.java +++ /dev/null @@ -1,61 +0,0 @@ -package 
com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -@ApiModel(description = "Broker基本信息") -public class ControllerPreferredCandidateVO { - @ApiModelProperty(value = "brokerId") - private Integer brokerId; - - @ApiModelProperty(value = "主机名") - private String host; - - @ApiModelProperty(value = "启动时间") - private Long startTime; - - @ApiModelProperty(value = "broker状态[0:在线, -1:不在线]") - private Integer status; - - public Integer getBrokerId() { - return brokerId; - } - - public void setBrokerId(Integer brokerId) { - this.brokerId = brokerId; - } - - public String getHost() { - return host; - } - - public void setHost(String host) { - this.host = host; - } - - public Long getStartTime() { - return startTime; - } - - public void setStartTime(Long startTime) { - this.startTime = startTime; - } - - public Integer getStatus() { - return status; - } - - public void setStatus(Integer status) { - this.status = status; - } - - @Override - public String toString() { - return "ControllerPreferredBrokerVO{" + - "brokerId=" + brokerId + - ", host='" + host + '\'' + - ", startTime=" + startTime + - ", status=" + status + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/LogicalClusterVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/LogicalClusterVO.java deleted file mode 100644 index 61f9b90c..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/LogicalClusterVO.java +++ /dev/null @@ -1,140 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -import java.util.Date; -import java.util.List; - -/** - * @author zengqiao - * @date 20/6/29 - */ -@ApiModel(description = "逻辑集群") -public class LogicalClusterVO { - @ApiModelProperty(value = "逻辑集群ID") - protected Long logicalClusterId; - - @ApiModelProperty(value = "逻辑集群名称") - private String logicalClusterName; - - @ApiModelProperty(value = "逻辑集群标识") - private String logicalClusterIdentification; - - @ApiModelProperty(value = "物理集群ID") - private Long physicalClusterId; - - @ApiModelProperty(value = "brokerId列表") - private List regionIdList; - - @ApiModelProperty(value = "逻辑集群类型") - private Integer mode; - - @ApiModelProperty(value = "所属应用") - private String appId; - - @ApiModelProperty(value = "描述信息") - private String description; - - @ApiModelProperty(value = "创建时间") - private Date gmtCreate; - - @ApiModelProperty(value = "修改时间") - private Date gmtModify; - - public Long getLogicalClusterId() { - return logicalClusterId; - } - - public void setLogicalClusterId(Long logicalClusterId) { - this.logicalClusterId = logicalClusterId; - } - - public String getLogicalClusterName() { - return logicalClusterName; - } - - public void setLogicalClusterName(String logicalClusterName) { - this.logicalClusterName = logicalClusterName; - } - - public String getLogicalClusterIdentification() { - return logicalClusterIdentification; - } - - public void setLogicalClusterIdentification(String logicalClusterIdentification) { - this.logicalClusterIdentification = logicalClusterIdentification; - } - - public Long getPhysicalClusterId() { - return physicalClusterId; - } - - public void setPhysicalClusterId(Long physicalClusterId) { - this.physicalClusterId = physicalClusterId; - } - - public List 
getRegionIdList() { - return regionIdList; - } - - public void setRegionIdList(List regionIdList) { - this.regionIdList = regionIdList; - } - - public Integer getMode() { - return mode; - } - - public void setMode(Integer mode) { - this.mode = mode; - } - - public String getAppId() { - return appId; - } - - public void setAppId(String appId) { - this.appId = appId; - } - - public String getDescription() { - return description; - } - - public void setDescription(String description) { - this.description = description; - } - - public Date getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Date gmtCreate) { - this.gmtCreate = gmtCreate; - } - - public Date getGmtModify() { - return gmtModify; - } - - public void setGmtModify(Date gmtModify) { - this.gmtModify = gmtModify; - } - - @Override - public String toString() { - return "LogicalClusterVO{" + - "logicalClusterId=" + logicalClusterId + - ", logicalClusterName='" + logicalClusterName + '\'' + - ", logicalClusterIdentification='" + logicalClusterIdentification + '\'' + - ", physicalClusterId=" + physicalClusterId + - ", regionIdList=" + regionIdList + - ", mode=" + mode + - ", appId='" + appId + '\'' + - ", description='" + description + '\'' + - ", gmtCreate=" + gmtCreate + - ", gmtModify=" + gmtModify + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/RdClusterMetricsVO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/RdClusterMetricsVO.java deleted file mode 100644 index df15b96b..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/entity/vo/rd/cluster/RdClusterMetricsVO.java +++ /dev/null @@ -1,111 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.entity.vo.rd.cluster; - -import io.swagger.annotations.ApiModel; -import io.swagger.annotations.ApiModelProperty; - -/** - * ClusterMetricsVO - * @author zengqiao - * @date 19/4/3 - */ -@ApiModel(description="集群流量信息") -public class RdClusterMetricsVO { - @ApiModelProperty(value="集群Id") - private Long clusterId; - - @ApiModelProperty(value="Topic数量") - private Object topicNum; - - @ApiModelProperty(value="Partition数量") - private Object partitionNum; - - @ApiModelProperty(value="Broker数量") - private Object brokerNum; - - @ApiModelProperty(value="每秒流入的字节数") - private Object bytesInPerSec; - - @ApiModelProperty(value="每秒流出的字节数") - private Object bytesOutPerSec; - - @ApiModelProperty(value="每秒拒绝的字节数") - private Object bytesRejectedPerSec; - - @ApiModelProperty(value="每秒流入的消息数") - private Object messagesInPerSec; - - @ApiModelProperty(value="创建时间") - private Long gmtCreate; - - public Long getClusterId() { - return clusterId; - } - - public void setClusterId(Long clusterId) { - this.clusterId = clusterId; - } - - public Object getTopicNum() { - return topicNum; - } - - public void setTopicNum(Object topicNum) { - this.topicNum = topicNum; - } - - public Object getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(Object partitionNum) { - this.partitionNum = partitionNum; - } - - public Object getBrokerNum() { - return brokerNum; - } - - public void setBrokerNum(Object brokerNum) { - this.brokerNum = brokerNum; - } - - public Object getBytesInPerSec() { - return bytesInPerSec; - } - - public void setBytesInPerSec(Object bytesInPerSec) { - this.bytesInPerSec = bytesInPerSec; - } - - public Object getBytesOutPerSec() { - return bytesOutPerSec; - } - - public void 
setBytesOutPerSec(Object bytesOutPerSec) { - this.bytesOutPerSec = bytesOutPerSec; - } - - public Object getBytesRejectedPerSec() { - return bytesRejectedPerSec; - } - - public void setBytesRejectedPerSec(Object bytesRejectedPerSec) { - this.bytesRejectedPerSec = bytesRejectedPerSec; - } - - public Object getMessagesInPerSec() { - return messagesInPerSec; - } - - public void setMessagesInPerSec(Object messagesInPerSec) { - this.messagesInPerSec = messagesInPerSec; - } - - public Long getGmtCreate() { - return gmtCreate; - } - - public void setGmtCreate(Long gmtCreate) { - this.gmtCreate = gmtCreate; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/ConsumerMetricsCollectedEvent.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/ConsumerMetricsCollectedEvent.java deleted file mode 100644 index 9aa45d8f..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/ConsumerMetricsCollectedEvent.java +++ /dev/null @@ -1,23 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.events; - -import com.xiaojukeji.kafka.manager.common.entity.metrics.ConsumerMetrics; -import org.springframework.context.ApplicationEvent; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/8/31 - */ -public class ConsumerMetricsCollectedEvent extends ApplicationEvent { - private List metricsList; - - public ConsumerMetricsCollectedEvent(Object source, List metricsList) { - super(source); - this.metricsList = metricsList; - } - - public List getMetricsList() { - return metricsList; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/OrderApplyEvent.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/OrderApplyEvent.java deleted file mode 100644 index a71fa88f..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/OrderApplyEvent.java +++ /dev/null @@ -1,13 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.events; - -import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO; - -/** - * @author zengqiao - * @date 20/09/03 - */ -public class OrderApplyEvent extends OrderEvent { - public OrderApplyEvent(Object source, OrderDO orderDO, String idc) { - super(source, orderDO, idc); - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/OrderEvent.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/OrderEvent.java deleted file mode 100644 index ef3311ee..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/OrderEvent.java +++ /dev/null @@ -1,28 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.events; - -import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO; -import org.springframework.context.ApplicationEvent; - -/** - * @author zengqiao - * @date 20/8/27 - */ -public abstract class OrderEvent extends ApplicationEvent { - private OrderDO orderDO; - - private String idc; - - public OrderEvent(Object source, OrderDO orderDO, String idc) { - super(source); - this.orderDO = orderDO; - this.idc = idc; - } - - public OrderDO getOrderDO() { - return orderDO; - } - - public String getIdc() { - return idc; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/OrderPassedEvent.java 
b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/OrderPassedEvent.java deleted file mode 100644 index 21af0821..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/OrderPassedEvent.java +++ /dev/null @@ -1,16 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.events; - -import com.xiaojukeji.kafka.manager.common.entity.ao.account.Account; -import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/09/03 - */ -public class OrderPassedEvent extends OrderEvent { - public OrderPassedEvent(Object source, OrderDO orderDO, String idc, List accountList) { - super(source, orderDO, idc); - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/OrderRefusedEvent.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/OrderRefusedEvent.java deleted file mode 100644 index 4066915f..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/OrderRefusedEvent.java +++ /dev/null @@ -1,16 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.events; - -import com.xiaojukeji.kafka.manager.common.entity.ao.account.Account; -import com.xiaojukeji.kafka.manager.common.entity.pojo.OrderDO; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/09/03 - */ -public class OrderRefusedEvent extends OrderEvent { - public OrderRefusedEvent(Object source, OrderDO orderDO, String idc, List accountList) { - super(source, orderDO, idc); - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/RegionCreatedEvent.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/RegionCreatedEvent.java deleted file mode 100644 index b8d72de9..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/RegionCreatedEvent.java +++ /dev/null @@ -1,20 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.events; - -import com.xiaojukeji.kafka.manager.common.entity.pojo.RegionDO; -import lombok.Getter; -import org.springframework.context.ApplicationEvent; - -/** - * Region创建事件 - * @author zengqiao - * @date 22/01/1 - */ -@Getter -public class RegionCreatedEvent extends ApplicationEvent { - private final RegionDO regionDO; - - public RegionCreatedEvent(Object source, RegionDO regionDO) { - super(source); - this.regionDO = regionDO; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/TopicMetricsCollectedEvent.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/TopicMetricsCollectedEvent.java deleted file mode 100644 index 4ed78ee5..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/TopicMetricsCollectedEvent.java +++ /dev/null @@ -1,30 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.events; - -import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics; -import org.springframework.context.ApplicationEvent; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/8/31 - */ -public class TopicMetricsCollectedEvent extends ApplicationEvent { - private Long clusterId; - - private List metricsList; - - public TopicMetricsCollectedEvent(Object source, Long clusterId, List metricsList) { - super(source); - this.clusterId = clusterId; - this.metricsList = metricsList; - } - - public List getMetricsList() { - return metricsList; - } - - public Long 
getClusterId() { - return clusterId; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/metrics/BaseMetricsCollectedEvent.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/metrics/BaseMetricsCollectedEvent.java deleted file mode 100644 index 730e14c9..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/metrics/BaseMetricsCollectedEvent.java +++ /dev/null @@ -1,33 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.events.metrics; - -import org.springframework.context.ApplicationEvent; - -/** - * @author zengqiao - * @date 22/01/17 - */ -public class BaseMetricsCollectedEvent extends ApplicationEvent { - /** - * 物理集群ID - */ - protected final Long physicalClusterId; - - /** - * 收集时间,依据业务需要来设置,可以设置任务开始时间,也可以设置任务结束时间 - */ - protected final Long collectTime; - - public BaseMetricsCollectedEvent(Object source, Long physicalClusterId, Long collectTime) { - super(source); - this.physicalClusterId = physicalClusterId; - this.collectTime = collectTime; - } - - public Long getPhysicalClusterId() { - return physicalClusterId; - } - - public Long getCollectTime() { - return collectTime; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/metrics/BatchBrokerMetricsCollectedEvent.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/metrics/BatchBrokerMetricsCollectedEvent.java deleted file mode 100644 index 629a44ea..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/events/metrics/BatchBrokerMetricsCollectedEvent.java +++ /dev/null @@ -1,22 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.events.metrics; - -import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/8/31 - */ -public class BatchBrokerMetricsCollectedEvent extends BaseMetricsCollectedEvent { - private final List metricsList; - - public BatchBrokerMetricsCollectedEvent(Object source, Long physicalClusterId, Long collectTime, List metricsList) { - super(source, physicalClusterId, collectTime); - this.metricsList = metricsList; - } - - public List getMetricsList() { - return metricsList; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/exception/CopyException.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/exception/CopyException.java deleted file mode 100644 index 14260879..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/exception/CopyException.java +++ /dev/null @@ -1,25 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.exception; - -/** - * @author huangyiminghappy@163.com - * @date 2019/3/15 - */ -public class CopyException extends RuntimeException { - private final static long serialVersionUID = 1L; - - public CopyException(String message) { - super(message); - } - - public CopyException(String message, Throwable cause) { - super(message, cause); - } - - public CopyException(Throwable cause) { - super(cause); - } - - public CopyException() { - super(); - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/BackoffUtils.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/BackoffUtils.java deleted file mode 100644 index afbf8fc4..00000000 --- 
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/BackoffUtils.java +++ /dev/null @@ -1,75 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -public class BackoffUtils { - private BackoffUtils() { - } - - /** - * 需要进行回退的事件信息 - * <回退事件名,回退结束时间> - */ - private static final Map NEED_BACK_OFF_EVENT_MAP = new ConcurrentHashMap<>(); - - public static void backoff(long timeUnitMs) { - if (timeUnitMs <= 0) { - return; - } - - try { - Thread.sleep(timeUnitMs); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } catch (Exception e) { - // ignore - } - } - - /** - * 记录回退设置 - * @param backoffEventKey 回退事件key - * @param backoffTimeUnitMs 回退时间(ms) - */ - public static void putNeedBackoffEvent(String backoffEventKey, Long backoffTimeUnitMs) { - if (backoffEventKey == null || backoffTimeUnitMs == null || backoffTimeUnitMs <= 0) { - return; - } - - NEED_BACK_OFF_EVENT_MAP.put(backoffEventKey, backoffTimeUnitMs + System.currentTimeMillis()); - } - - /** - * 移除回退设置 - * @param backoffEventKey 回退事件key - */ - public static void removeNeedBackoffEvent(String backoffEventKey) { - NEED_BACK_OFF_EVENT_MAP.remove(backoffEventKey); - } - - /** - * 检查是否需要回退 - * @param backoffEventKey 回退事件key - * @return - */ - public static boolean isNeedBackoff(String backoffEventKey) { - Long backoffEventEndTimeUnitMs = NEED_BACK_OFF_EVENT_MAP.get(backoffEventKey); - if (backoffEventEndTimeUnitMs == null) { - return false; - } - - if (backoffEventEndTimeUnitMs > System.currentTimeMillis()) { - return true; - } - - // 移除 - try { - NEED_BACK_OFF_EVENT_MAP.remove(backoffEventKey, backoffEventEndTimeUnitMs); - } catch (Exception e) { - // 如果key不存在,这里可能出现NPE,不过不管什么异常都可以忽略 - } - - return false; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/CopyUtils.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/CopyUtils.java deleted file mode 100644 index bef175e4..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/CopyUtils.java +++ /dev/null @@ -1,482 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - -import com.xiaojukeji.kafka.manager.common.exception.CopyException; -import org.apache.commons.beanutils.PropertyUtils; - -import java.beans.PropertyDescriptor; -import java.lang.reflect.Constructor; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; - -/** - * 对象复制新类型和同类型深度克隆工具类 - * @author huangyiminghappy@163.com - * @date 2019/3/15 - */ -public class CopyUtils { - - @SuppressWarnings({"unchecked", "rawtypes"}) - private static T deepCopy(T obj) { - if (obj == null) { - return null; - } else if (obj instanceof String) { - return (T)(String) obj; - } else if (obj instanceof Integer) { - return (T)(Integer) obj; - } else if (obj instanceof Double) { - return (T)(Double) obj; - } else if (obj instanceof Byte) { - return (T)(Byte) obj; - } else if (obj instanceof Short) { - return (T)(Short) obj; - } else if (obj instanceof Long) { - return (T)(Long) obj; - } else if (obj instanceof Float) { - return (T)(Float) obj; - } else if (obj instanceof Character) { - return (T)(Character) obj; - } else if (obj instanceof Boolean) { - return (T)(Boolean) obj; - } else if (obj instanceof ArrayList) { - return (T) arrayListHandler((ArrayList) obj); - } else if (obj instanceof HashMap) { - return (T) 
mapHandler((Map) obj); - } else if (obj instanceof ConcurrentHashMap) { - return (T) concurrentMapHandler((Map) obj); - } else if (obj instanceof TreeMap) { - return (T) treeMapHandler((Map) obj); - } else if (obj instanceof LinkedList) { - return (T) linkedListHandler((LinkedList) obj); - } else if (obj instanceof HashSet) { - return (T) hashSetHandler((HashSet) obj); - } else if (isPrimitiveArray(obj)) { - return getPrimitiveArray(obj); - } - - T finObj = null; - Class rezClass = obj.getClass(); - rezClass.cast(finObj); - try { - Constructor constructor = getCompleteConstructor(rezClass); - finObj = (T) constructor.newInstance(getParamsObjForConstructor(rezClass)); - copyFields(rezClass, obj, finObj); - } catch (Exception e) { - e.printStackTrace(); - } - return finObj; - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - private static T deepCopy(T obj, Object parrent) { - if (obj == null) { - return null; - } else if (obj instanceof String) { - return (T)String.valueOf((String) obj); - } else if (obj instanceof Integer) { - return (T)Integer.valueOf((Integer) obj); - } else if (obj instanceof Double) { - return (T)Double.valueOf((Double) obj); - } else if (obj instanceof Byte) { - return (T)Byte.valueOf((Byte) obj); - } else if (obj instanceof Short) { - return (T)Short.valueOf((Short) obj); - } else if (obj instanceof Long) { - return (T)Long.valueOf((Long) obj); - } else if (obj instanceof Float) { - return (T)Float.valueOf((Float) obj); - } else if (obj instanceof Character) { - return (T)Character.valueOf((Character) obj); - } else if (obj instanceof Boolean) { - return (T)Boolean.valueOf((Boolean) obj); - } else if (obj instanceof ArrayList) { - return (T) arrayListHandler((ArrayList) obj); - } else if (obj instanceof HashMap) { - return (T) mapHandler((Map) obj); - } else if (obj instanceof ConcurrentHashMap) { - return (T) concurrentMapHandler((Map) obj); - } else if (obj instanceof TreeMap) { - return (T) treeMapHandler((Map) obj); - } else if (obj instanceof LinkedList) { - return (T) linkedListHandler((LinkedList) obj); - } else if (obj instanceof HashSet) { - return (T) hashSetHandler((HashSet) obj); - } else if (isPrimitiveArray(obj)) { - return getPrimitiveArray(obj); - } - - T finObj = null; - Class rezClass = obj.getClass(); - rezClass.cast(finObj); - try { - Constructor constructor = getCompleteConstructor(rezClass); - finObj = (T) constructor.newInstance(getParamsObjForConstructor(rezClass)); - copyFields(rezClass, obj, finObj, parrent); - } catch (Exception e) { - e.printStackTrace(); - } - return finObj; - } - - - @SuppressWarnings({"rawtypes", "unchecked"}) - private static ArrayList arrayListHandler(ArrayList obj) { - ArrayList srcList = obj; - ArrayList finList = new ArrayList(); - for (int i = 0; i < srcList.size(); i++) { - finList.add(CopyUtils.deepCopy(srcList.get(i))); - } - return finList; - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - private static Map mapHandler(Map obj) { - Map src = obj; - Map fin = new HashMap(); - for (Map.Entry entry : src.entrySet()) { - K key = (K) CopyUtils.deepCopy(entry.getKey()); - V value = (V) CopyUtils.deepCopy(entry.getValue()); - fin.put(key, value); - } - return fin; - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - private static Map concurrentMapHandler(Map obj) { - Map src = obj; - Map fin = new ConcurrentHashMap(); - for (Map.Entry entry : src.entrySet()) { - K key = (K) CopyUtils.deepCopy(entry.getKey()); - V value = (V) CopyUtils.deepCopy(entry.getValue()); - fin.put(key, value); - } - return fin; - } 
- - @SuppressWarnings({"unchecked", "rawtypes"}) - private static Map treeMapHandler(Map obj) { - Map src = obj; - Map fin = new TreeMap(); - for (Map.Entry entry : src.entrySet()) { - K key = (K) CopyUtils.deepCopy(entry.getKey()); - V value = (V) CopyUtils.deepCopy(entry.getValue()); - fin.put(key, value); - } - return fin; - } - - @SuppressWarnings({"rawtypes", "unchecked"}) - private static LinkedList linkedListHandler(LinkedList obj) { - LinkedList srcList = obj; - LinkedList finList = new LinkedList<>(); - for (int i = 0; i < srcList.size(); i++) { - finList.add(CopyUtils.deepCopy(srcList.get(i))); - } - return finList; - } - - @SuppressWarnings({"rawtypes", "unchecked"}) - private static HashSet hashSetHandler(HashSet obj) { - HashSet srcList = obj; - HashSet finList = new HashSet<>(); - for (Object o : srcList) { - finList.add(CopyUtils.deepCopy(o)); - } - return finList; - } - - - private static boolean isPrimitiveArray(Object obj) { - if (obj instanceof byte[] || - obj instanceof short[] || - obj instanceof int[] || - obj instanceof long[] || - obj instanceof float[] || - obj instanceof double[] || - obj instanceof char[] || - obj instanceof boolean[]) { - return true; - } else { - return false; - } - } - - private static boolean isPrimitiveArray(String type) { - if ("byte[]".equals(type) || - "short[]".equals(type) || - "int[]".equals(type) || - "long[]".equals(type) || - "float[]".equals(type) || - "double[]".equals(type) || - "char[]".equals(type) || - "boolean[]".equals(type)) { - return true; - } else { - return false; - } - } - - @SuppressWarnings("unchecked") - private static T getPrimitiveArray(T obj) { - if (obj instanceof int[]) { - int[] arr = new int[((int[]) obj).length]; - for (int i = 0; i < ((int[]) obj).length; i++) { - arr[i] = ((int[]) obj)[i]; - } - return (T) arr; - } else if (obj instanceof byte[]) { - byte[] arr = new byte[((byte[]) obj).length]; - for (int i = 0; i < ((byte[]) obj).length; i++) { - arr[i] = ((byte[]) obj)[i]; - } - return (T) arr; - } else if (obj instanceof short[]) { - short[] arr = new short[((short[]) obj).length]; - for (int i = 0; i < ((short[]) obj).length; i++) { - arr[i] = ((short[]) obj)[i]; - } - return (T) arr; - } else if (obj instanceof long[]) { - long[] arr = new long[((long[]) obj).length]; - for (int i = 0; i < ((long[]) obj).length; i++) { - arr[i] = ((long[]) obj)[i]; - } - return (T) arr; - } else if (obj instanceof float[]) { - float[] arr = new float[((float[]) obj).length]; - for (int i = 0; i < ((float[]) obj).length; i++) { - arr[i] = ((float[]) obj)[i]; - } - return (T) arr; - } else if (obj instanceof double[]) { - double[] arr = new double[((double[]) obj).length]; - for (int i = 0; i < ((double[]) obj).length; i++) { - arr[i] = ((double[]) obj)[i]; - } - return (T) arr; - } else if (obj instanceof char[]) { - char[] arr = new char[((char[]) obj).length]; - for (int i = 0; i < ((char[]) obj).length; i++) { - arr[i] = ((char[]) obj)[i]; - } - return (T) arr; - } else if (obj instanceof boolean[]) { - boolean[] arr = new boolean[((boolean[]) obj).length]; - for (int i = 0; i < ((boolean[]) obj).length; i++) { - arr[i] = ((boolean[]) obj)[i]; - } - return (T) arr; - } - return null; - } - - @SuppressWarnings("unchecked") - private static T getPrimitiveArray(T obj, String type) { - if ("int[]".equals(type)) { - int[] arr = new int[1]; - arr[0] = 0; - return (T) arr; - } else if ("byte[]".equals(type)) { - byte[] arr = new byte[1]; - arr[0] = 0; - return (T) arr; - } else if ("short[]".equals(type)) { - short[] arr 
= new short[1]; - arr[0] = 0; - return (T) arr; - } else if ("long[]".equals(type)) { - long[] arr = new long[1]; - arr[0] = 0; - return (T) arr; - } else if ("float[]".equals(type)) { - float[] arr = new float[1]; - arr[0] = 0; - return (T) arr; - } else if ("double[]".equals(type)) { - double[] arr = new double[1]; - arr[0] = 0; - return (T) arr; - } else if ("char[]".equals(type)) { - char[] arr = new char[1]; - arr[0] = 0; - return (T) arr; - } else if ("boolean[]".equals(type)) { - boolean[] arr = new boolean[1]; - arr[0] = false; - return (T) arr; - } - return null; - } - - - @SuppressWarnings({"unchecked", "rawtypes"}) - private static Constructor getCompleteConstructor(Class ourClass) - throws NoSuchMethodException, SecurityException { - Constructor constructor = null; - Class[] params = new Class[ourClass.getDeclaredConstructors()[0].getParameterTypes().length]; - for (int i = 0; i < ourClass.getDeclaredConstructors()[0].getParameterTypes().length; i++) { - params[i] = ourClass.getDeclaredConstructors()[0].getParameterTypes()[i]; - } - constructor = ourClass.getConstructor(params); - constructor.setAccessible(true); - return constructor; - } - - @SuppressWarnings("rawtypes") - private static Object[] getParamsObjForConstructor(Class ourClass) - throws NoSuchMethodException, SecurityException { - Constructor constuctor = null; - constuctor = ourClass.getDeclaredConstructors()[0]; - constuctor.setAccessible(true); - Object[] objParams = new Object[constuctor.getParameterTypes().length]; - for (int i = 0; i < constuctor.getParameterTypes().length; i++) { - String fieldType = constuctor.getParameterTypes()[i].toString(); - if ("int".equalsIgnoreCase(fieldType) || - "double".toString().equalsIgnoreCase(fieldType) || - "float".equalsIgnoreCase(fieldType) || - "byte".toString().equalsIgnoreCase(fieldType) || - "char".equalsIgnoreCase(fieldType) || - "long".equalsIgnoreCase(fieldType)) { - objParams[i] = 0; - } else if ("boolean".equalsIgnoreCase(fieldType)) { - objParams[i] = false; - } else if (isPrimitiveArray(constuctor.getParameterTypes()[i].getCanonicalName())) { - objParams[i] = getPrimitiveArray(constuctor.getParameterTypes()[i], - constuctor.getParameterTypes()[i].getCanonicalName() - ); - } else { - objParams[i] = null; - } - } - return objParams; - } - - @SuppressWarnings("rawtypes") - private static void copyFields(Class ourClass, T srcObj, T finObj) - throws IllegalArgumentException, IllegalAccessException, NoSuchFieldException, SecurityException { - Field[] fields = ourClass.getDeclaredFields(); - for (int i = 0; i < fields.length; i++) { - fields[i].setAccessible(true); - Field modField = Field.class.getDeclaredField("modifiers"); - modField.setAccessible(true); - modField.setInt(fields[i], fields[i].getModifiers() & ~Modifier.FINAL); - String fieldType = fields[i].getType().toString(); - if ("int".equalsIgnoreCase(fieldType) || - "double".equalsIgnoreCase(fieldType) || - "float".equalsIgnoreCase(fieldType) || - "byte".equalsIgnoreCase(fieldType) || - "char".equalsIgnoreCase(fieldType) || - "boolean".equalsIgnoreCase(fieldType) || - "short".equalsIgnoreCase(fieldType) || - "long".equalsIgnoreCase(fieldType)) { - fields[i].set(finObj, fields[i].get(srcObj)); - } else { - fields[i].set(finObj, CopyUtils.deepCopy(fields[i].get(srcObj), finObj)); - } - } - } - - @SuppressWarnings("rawtypes") - private static void copyFields(Class ourClass, T srcObj, T finObj, Object parent) - throws IllegalArgumentException, IllegalAccessException, NoSuchFieldException, SecurityException { - 
Field[] fields = ourClass.getDeclaredFields(); - for (int i = 0; i < fields.length; i++) { - fields[i].setAccessible(true); - Field modField = Field.class.getDeclaredField("modifiers"); - modField.setAccessible(true); - modField.setInt(fields[i], fields[i].getModifiers() & ~Modifier.FINAL); - String fieldType = fields[i].getType().toString(); - if ("int".equalsIgnoreCase(fieldType) || - "double".equalsIgnoreCase(fieldType) || - "float".equalsIgnoreCase(fieldType) || - "byte".equalsIgnoreCase(fieldType) || - "char".equalsIgnoreCase(fieldType) || - "boolean".equalsIgnoreCase(fieldType) || - "short".equalsIgnoreCase(fieldType) || - "long".equalsIgnoreCase(fieldType)) { - fields[i].set(finObj, fields[i].get(srcObj)); - } else { - if (fields[i].get(srcObj).toString().equals(parent.toString())) { - fields[i].set(finObj, fields[i].get(srcObj)); - } else { - fields[i].set(finObj, CopyUtils.deepCopy(fields[i].get(srcObj), finObj)); - } - } - } - } - - static void setFinalStaticField(Field field, Object newValue) throws Exception { - field.setAccessible(true); - Field modifiersField = Field.class.getDeclaredField("modifiers"); - modifiersField.setAccessible(true); - modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); - field.set(null, newValue); - } - - public static Object copyProperties(Object target, Object orig) { - if (target == null || orig == null) { - return target; - } - - PropertyDescriptor[] destDesc = PropertyUtils.getPropertyDescriptors(target); - try { - for (int i = 0; i < destDesc.length; i++) { - Class destType = destDesc[i].getPropertyType(); - Class origType = PropertyUtils.getPropertyType(orig, destDesc[i].getName()); - if (destType != null && destType.equals(origType) && !destType.equals(Class.class)) { - if (!Collection.class.isAssignableFrom(origType)) { - try { - Object value = PropertyUtils.getProperty(orig, destDesc[i].getName()); - PropertyUtils.setProperty(target, destDesc[i].getName(), value); - } catch (Exception ex) { - } - } - } - } - - return target; - } catch (Exception ex) { - throw new CopyException(ex); - } - } - - public static Object copyProperties(Object dest, Object orig, String[] ignores) { - if (dest == null || orig == null) { - return dest; - } - - PropertyDescriptor[] destDesc = PropertyUtils.getPropertyDescriptors(dest); - try { - for (int i = 0; i < destDesc.length; i++) { - if (contains(ignores, destDesc[i].getName())) { - continue; - } - - Class destType = destDesc[i].getPropertyType(); - Class origType = PropertyUtils.getPropertyType(orig, destDesc[i].getName()); - if (destType != null && destType.equals(origType) && !destType.equals(Class.class)) { - if (!Collection.class.isAssignableFrom(origType)) { - Object value = PropertyUtils.getProperty(orig, destDesc[i].getName()); - PropertyUtils.setProperty(dest, destDesc[i].getName(), value); - } - } - } - - return dest; - } catch (Exception ex) { - throw new CopyException(ex); - } - } - - static boolean contains(String[] ignores, String name) { - boolean ignored = false; - for (int j = 0; ignores != null && j < ignores.length; j++) { - if (ignores[j].equals(name)) { - ignored = true; - break; - } - } - return ignored; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/DateUtils.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/DateUtils.java deleted file mode 100644 index d4fa72a4..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/DateUtils.java +++ /dev/null @@ 
-1,91 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - -import java.text.SimpleDateFormat; -import java.util.Calendar; -import java.util.Date; - -/** - * 日期工具 - * @author huangyiminghappy@163.com - * @date 2019-03-20 - */ -public class DateUtils { - public static Date long2Date(Long time){ - return new Date(time); - } - - /** - * 获取nDay的起始时间 - */ - public static Long getDayStarTime(int nDay) { - Calendar calendar = Calendar.getInstance(); - calendar.add(Calendar.DAY_OF_MONTH, nDay); - calendar.set(Calendar.HOUR_OF_DAY, 0); - calendar.set(Calendar.MINUTE, 0); - calendar.set(Calendar.SECOND, 0); - calendar.set(Calendar.MILLISECOND, 0); - return calendar.getTime().getTime(); - } - - /** - * 任意日期所在月的第一天的起始时间 - * @param date 任意日期 - * @author zengqiao - * @date 19/10/30 - * @return java.util.Date - */ - public static Date getMonthStartTime(Date date) { - Calendar calendar = Calendar.getInstance(); - calendar.setTime(date); - calendar.set( - calendar.get(Calendar.YEAR), - calendar.get(Calendar.MONTH), - calendar.get(Calendar.DAY_OF_MONTH), - 0, - 0, - 0 - ); - calendar.set(Calendar.DAY_OF_MONTH, calendar.getActualMinimum(Calendar.DAY_OF_MONTH)); - return calendar.getTime(); - } - - /** - * 任意日期所在月的最后一天的最后时间 - * @param date 任意日期 - * @author zengqiao - * @date 19/10/30 - * @return java.util.Date - */ - public static Date getMonthEndTime(Date date) { - Calendar calendar = Calendar.getInstance(); - calendar.setTime(date); - calendar.set( - calendar.get(Calendar.YEAR), - calendar.get(Calendar.MONTH) + 1, - calendar.get(Calendar.DAY_OF_MONTH), - 0, - 0, - 0 - ); - calendar.set(Calendar.MILLISECOND, calendar.getActualMinimum(Calendar.MILLISECOND)); - calendar.set(Calendar.DAY_OF_MONTH, calendar.getActualMinimum(Calendar.DAY_OF_MONTH)); - calendar.add(Calendar.MILLISECOND, -1); - return calendar.getTime(); - } - - public static String getFormattedDate(Date date) { - return new SimpleDateFormat("yyyy-MM-dd").format(date.getTime()); - } - - public static String getFormattedDate(Date date, String format) { - return new SimpleDateFormat(format).format(date.getTime()); - } - - public static String getFormattedDate(Long timestamp) { - return new SimpleDateFormat("yyyy-MM-dd").format(timestamp); - } - - public static Integer compare(Date t1, Date t2){ - return Long.compare(t1.getTime(), t2.getTime()); - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/EasyApiLimitUtils.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/EasyApiLimitUtils.java deleted file mode 100644 index 8589c95a..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/EasyApiLimitUtils.java +++ /dev/null @@ -1,69 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicLong; - -/** - * 宽松API请求限制工具 - * @author zengqiao - * @date 20/7/23 - */ -public class EasyApiLimitUtils { - private static final Long MAX_LIMIT_NUM = 10000L; - - private static final Long DEFAULT_DURATION_TIME = 24 * 60 * 60 * 1000L; - - private static final Map API_RECORD_MAP = new ConcurrentHashMap<>(); - - public static boolean incIfNotOverFlow(String key, long maxNum) { - AtomicLong atomicLong = API_RECORD_MAP.get(key); - if (atomicLong == null) { - API_RECORD_MAP.putIfAbsent(key, new AtomicLong(System.currentTimeMillis() * MAX_LIMIT_NUM)); - } - - while (true) { - atomicLong = API_RECORD_MAP.get(key); - - long value = 
atomicLong.longValue(); - long timestamp = value / MAX_LIMIT_NUM; - long presentNum = value % MAX_LIMIT_NUM; - if (System.currentTimeMillis() - timestamp < DEFAULT_DURATION_TIME && presentNum > maxNum) { - // 以及超过限制了 - return false; - } - - long newValue = System.currentTimeMillis() * MAX_LIMIT_NUM + 1; - if (System.currentTimeMillis() - timestamp < DEFAULT_DURATION_TIME && presentNum <= maxNum) { - newValue = timestamp * MAX_LIMIT_NUM + presentNum + 1; - } - - if (atomicLong.compareAndSet(value, newValue)) { - return true; - } - } - } - - public static void decIfNotOverFlow(String key) { - AtomicLong atomicLong = API_RECORD_MAP.get(key); - if (atomicLong == null) { - API_RECORD_MAP.putIfAbsent(key, new AtomicLong(System.currentTimeMillis() * MAX_LIMIT_NUM)); - } - - while (true) { - atomicLong = API_RECORD_MAP.get(key); - - long value = atomicLong.longValue(); - long timestamp = value / MAX_LIMIT_NUM; - long presentNum = value % MAX_LIMIT_NUM; - if (presentNum == 0) { - return; - } - - long newValue = timestamp * MAX_LIMIT_NUM + presentNum - 1; - if (atomicLong.compareAndSet(value, newValue)) { - return; - } - } - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/EncryptUtil.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/EncryptUtil.java deleted file mode 100644 index 1052eb12..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/EncryptUtil.java +++ /dev/null @@ -1,39 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - -import java.security.MessageDigest; - -/** - * @author zengqiao - * @date 20/3/17 - */ -public class EncryptUtil { - private static final char[] HEX_DIGITS = { - '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' - }; - - public static String md5(String key) { - return md5(key.getBytes()); - } - - public static String md5(byte[] btInput) { - try { - MessageDigest mdInst = MessageDigest.getInstance("MD5"); - - // 使用指定的字节更新摘要 - mdInst.update(btInput); - - // 获得密文 - byte[] md = mdInst.digest(); - - // 把密文转换成十六进制的字符串形式 - char[] str = new char[md.length * 2]; - for (int i = 0, k = 0; i < md.length; i++) { - str[k++] = HEX_DIGITS[md[i] >>> 4 & 0xf]; - str[k++] = HEX_DIGITS[md[i] & 0xf]; - } - return new String(str); - } catch (Exception e) { - return null; - } - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/FutureUtil.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/FutureUtil.java deleted file mode 100644 index b061ebed..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/FutureUtil.java +++ /dev/null @@ -1,150 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - -import com.xiaojukeji.kafka.manager.common.entity.ao.common.FutureTaskDelayQueueData; -import com.xiaojukeji.kafka.manager.common.utils.factory.DefaultThreadFactory; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.*; - -/** - * Future工具类 - */ -public class FutureUtil { - private static final Logger LOGGER = LoggerFactory.getLogger(FutureUtil.class); - - private ThreadPoolExecutor executor; - - private Map>> futuresMap; - - private FutureUtil() { - } - - public static FutureUtil init(String name, int corePoolSize, int maxPoolSize, int queueSize) { - FutureUtil 
futureUtil = new FutureUtil<>(); - - futureUtil.executor = new ThreadPoolExecutor( - corePoolSize, - maxPoolSize, - 3000, - TimeUnit.MILLISECONDS, - new LinkedBlockingDeque<>(queueSize), - new DefaultThreadFactory("KM-FutureUtil-" + name), - new ThreadPoolExecutor.DiscardOldestPolicy() //对拒绝任务不抛弃,而是抛弃队列里面等待最久的一个线程,然后把拒绝任务加到队列。 - ); - - futureUtil.futuresMap = new ConcurrentHashMap<>(); - return futureUtil; - } - - /** - * 必须配合 waitExecute使用 否则容易会撑爆内存 - */ - public FutureUtil runnableTask(String taskName, Integer timeoutUnisMs, Callable callable) { - Long currentThreadId = Thread.currentThread().getId(); - - futuresMap.putIfAbsent(currentThreadId, new DelayQueue<>()); - - DelayQueue> delayQueueData = futuresMap.get(currentThreadId); - - delayQueueData.put(new FutureTaskDelayQueueData<>(taskName, executor.submit(callable), timeoutUnisMs + System.currentTimeMillis())); - - return this; - } - - public FutureUtil runnableTask(String taskName, Integer timeoutUnisMs, Runnable runnable) { - Long currentThreadId = Thread.currentThread().getId(); - - futuresMap.putIfAbsent(currentThreadId, new DelayQueue<>()); - - DelayQueue> delayQueueData = futuresMap.get(currentThreadId); - - delayQueueData.put(new FutureTaskDelayQueueData(taskName, (Future) executor.submit(runnable), timeoutUnisMs + System.currentTimeMillis())); - - return this; - } - - public void waitExecute() { - this.waitResult(); - } - - public void waitExecute(Integer stepWaitTimeUnitMs) { - this.waitResult(stepWaitTimeUnitMs); - } - - public List waitResult() { - return waitResult(null); - } - - /** - * 等待结果 - * @param stepWaitTimeUnitMs 超时时间达到后,没有完成时,继续等待的时间 - */ - public List waitResult(Integer stepWaitTimeUnitMs) { - Long currentThreadId = Thread.currentThread().getId(); - - DelayQueue> delayQueueData = futuresMap.remove(currentThreadId); - if(delayQueueData == null || delayQueueData.isEmpty()) { - return new ArrayList<>(); - } - - List resultList = new ArrayList<>(); - while (!delayQueueData.isEmpty()) { - try { - // 不进行阻塞,直接获取第一个任务 - FutureTaskDelayQueueData queueData = delayQueueData.peek(); - if (queueData.getFutureTask().isDone()) { - // 如果第一个已经完成了,则移除掉第一个,然后获取其result - delayQueueData.remove(queueData); - resultList.add(queueData.getFutureTask().get()); - continue; - } - - // 如果第一个未完成,则阻塞10ms,判断是否达到超时时间了。 - // 这里的10ms不建议设置较大,因为任务可能在这段时间内完成了,此时如果设置的较大,会导致迟迟不能返回,从而影响接口调用的性能 - queueData = delayQueueData.poll(10, TimeUnit.MILLISECONDS); - if (queueData == null) { - continue; - } - - // 在到达超时时间后,任务没有完成,但是没有完成的原因可能是因为任务一直处于等待状态导致的。 - // 因此这里再给一段补充时间,看这段时间内是否可以完成任务。 - stepWaitResult(queueData, stepWaitTimeUnitMs); - - // 达到超时时间 - if (queueData.getFutureTask().isDone()) { - // 任务已经完成 - resultList.add(queueData.getFutureTask().get()); - continue; - } - - // 达到超时时间,但是任务未完成,则打印日志并强制取消 - LOGGER.error("class=FutureUtil||method=waitExecute||taskName={}||msg=cancel task", queueData.getTaskName()); - - queueData.getFutureTask().cancel(true); - } catch (Exception e) { - LOGGER.error("class=FutureUtil||method=waitExecute||msg=exception", e); - } - } - - return resultList; - } - - private T stepWaitResult(FutureTaskDelayQueueData queueData, Integer stepWaitTimeUnitMs) { - if (stepWaitTimeUnitMs == null) { - return null; - } - - try { - return queueData.getFutureTask().get(stepWaitTimeUnitMs, TimeUnit.MILLISECONDS); - } catch (Exception e) { - // 达到超时时间,但是任务未完成,则打印日志并强制取消 - LOGGER.error("class=FutureUtil||method=stepWaitResult||taskName={}||errMsg=exception", queueData.getTaskName(), e); - } - - return null; - } -} diff --git 
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/GitPropUtil.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/GitPropUtil.java deleted file mode 100644 index 297b8135..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/GitPropUtil.java +++ /dev/null @@ -1,67 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.BufferedReader; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.util.Properties; - -public class GitPropUtil { - private static final Logger log = LoggerFactory.getLogger(GitPropUtil.class); - - private static Properties props = null; - - public static final String VERSION_FIELD_NAME = "git.build.version"; - - public static final String COMMIT_ID_FIELD_NAME = "git.commit.id.abbrev"; - - public static String getProps(String fieldName) { - if (props == null) { - props = JsonUtils.stringToObj(readGitPropertiesInJarFile(), Properties.class); - } - - return props.getProperty(fieldName); - } - - public static Properties getProps() { - if (props == null) { - props = JsonUtils.stringToObj(readGitPropertiesInJarFile(), Properties.class); - } - - return props; - } - - private static String readGitPropertiesInJarFile() { - InputStream inputStream = null; - try { - inputStream = GitPropUtil.class.getClassLoader().getResourceAsStream("git.properties"); - - BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream)); - String line = null; - - StringBuilder sb = new StringBuilder(); - while ((line = bufferedReader.readLine()) != null) { - sb.append(line).append("\n"); - } - return sb.toString(); - } catch (Exception e) { - log.error("method=readGitPropertiesInJarFile||errMsg=exception.", e); - } finally { - try { - if (inputStream != null) { - inputStream.close(); - } - } catch (Exception e) { - log.error("method=readGitPropertiesInJarFile||msg=close failed||errMsg=exception.", e); - } - } - - return "{}"; - } - - private GitPropUtil() { - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/HttpUtils.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/HttpUtils.java deleted file mode 100644 index 03904058..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/HttpUtils.java +++ /dev/null @@ -1,255 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - -import org.apache.http.HttpEntity; -import org.apache.http.client.HttpClient; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.mime.HttpMultipartMode; -import org.apache.http.entity.mime.MultipartEntityBuilder; -import org.apache.http.impl.client.HttpClients; -import org.apache.http.util.EntityUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.web.multipart.MultipartFile; - -import java.io.*; -import java.net.HttpURLConnection; -import java.net.URL; -import java.nio.charset.Charset; -import java.util.Map; - - -/** - * @author zengqiao - * @date 20/5/24 - */ -public class HttpUtils { - private static final Logger LOGGER = LoggerFactory.getLogger(HttpUtils.class); - - // 连接超时时间, 单位: ms - private static int CONNECT_TIME_OUT = 15000; - - // 读取超时时间, 单位: ms - private static int READ_TIME_OUT = 3000; - - private static final String METHOD_GET = "GET"; - private static final 
String METHOD_POST = "POST"; - private static final String METHOD_PUT = "PUT"; - private static final String METHOD_DELETE = "DELETE"; - - private static final String CHARSET_UTF8 = "UTF-8"; - - private static final String FILE_PARAM = "filecontent"; - - private static final HttpClient HTTP_CLIENT = HttpClients.createDefault(); - - public static String get(String url, Map params) { - return sendRequest(url, METHOD_GET, params, null, null); - } - - public static String get(String url, Map params, Map headers) { - return sendRequest(url, METHOD_GET, params, headers, null); - } - - public static String postForString(String url, String content, Map headers) { - InputStream in = null; - try { - if (content != null && !content.isEmpty()) { - in = new ByteArrayInputStream(content.getBytes(CHARSET_UTF8)); - } - } catch (UnsupportedEncodingException e) { - throw new RuntimeException(e); - } - return sendRequest(url, METHOD_POST, null, headers, in); - } - - public static String uploadFile(String url, - MultipartFile multipartFile, - Map bodies, - Map headers) { - HttpPost post = new HttpPost(url); - String response = ""; - try { - - if (!ValidateUtils.isEmptyMap(headers)) { - for (Map.Entry e : headers.entrySet()) { - post.addHeader(e.getKey(), e.getValue()); - } - } - - MultipartEntityBuilder builder = MultipartEntityBuilder.create(); - builder.setCharset(Charset.forName(CHARSET_UTF8)); - - //加上此行代码解决返回中文乱码问题 - builder.setMode(HttpMultipartMode.BROWSER_COMPATIBLE); - - // 文件流 - builder.addBinaryBody( - FILE_PARAM, - multipartFile.getInputStream(), - ContentType.MULTIPART_FORM_DATA, - multipartFile.getOriginalFilename() - ); - - if (!ValidateUtils.isNull(bodies)) { - for (Map.Entry e : bodies.entrySet()) { - builder.addTextBody(e.getKey(), e.getValue()); - } - } - HttpEntity postEntity = builder.build(); - post.setEntity(postEntity); - HttpEntity entity = HTTP_CLIENT.execute(post).getEntity(); - response = EntityUtils.toString(entity, CHARSET_UTF8); - } catch (Exception e) { - throw new RuntimeException(e); - } finally { - post.releaseConnection(); - } - return response; - } - - public static String putForString(String url, String content, Map headers) { - InputStream in = null; - try { - if (content != null && !content.isEmpty()) { - in = new ByteArrayInputStream(content.getBytes(CHARSET_UTF8)); - } - } catch (UnsupportedEncodingException e) { - throw new RuntimeException(e); - } - return sendRequest(url, METHOD_PUT, null, headers, in); - } - - public static String deleteForString(String url, String content, Map headers) { - InputStream in = null; - try { - if (content != null && !content.isEmpty()) { - in = new ByteArrayInputStream(content.getBytes(CHARSET_UTF8)); - } - } catch (UnsupportedEncodingException e) { - throw new RuntimeException(e); - } - return sendRequest(url, METHOD_DELETE, null, headers, in); - } - - /** - * @param url 请求的链接, 只支持 http 和 https 链接 - * @param method GET or POST - * @param headers 请求头 (将覆盖默认请求), 可以为 null - * @param bodyStream 请求内容, 流将自动关闭, 可以为 null - * @return 返回响应内容的文本 - * @throws Exception http 响应 code 非 200, 或发生其他异常均抛出异常 - */ - private static String sendRequest(String url, - String method, - Map params, - Map headers, - InputStream bodyStream) { - HttpURLConnection conn = null; - try { - String paramUrl = setUrlParams(url, params); - - // 打开链接 - URL urlObj = new URL(paramUrl); - conn = (HttpURLConnection) urlObj.openConnection(); - - // 设置conn属性 - setConnProperties(conn, method, headers); - - // 设置请求内容 - if (bodyStream != null) { - conn.setDoOutput(true); - 
copyStreamAndClose(bodyStream, conn.getOutputStream()); - } - - return handleResponseBodyToString(conn.getInputStream()); - } catch (Exception e) { - throw new RuntimeException(e); - } finally { - closeConnection(conn); - } - } - - private static String setUrlParams(String url, Map params) { - if (url == null || params == null || params.isEmpty()) { - return url; - } - - StringBuilder sb = new StringBuilder(url).append('?'); - for (Map.Entry entry : params.entrySet()) { - sb.append(entry.getKey()).append('=').append(entry.getValue()).append('&'); - } - return sb.deleteCharAt(sb.length() - 1).toString(); - } - - private static void setConnProperties(HttpURLConnection conn, - String method, - Map headers) throws Exception { - // 设置连接超时时间 - conn.setConnectTimeout(CONNECT_TIME_OUT); - - // 设置读取超时时间 - conn.setReadTimeout(READ_TIME_OUT); - - // 设置请求方法 - if (method != null && !method.isEmpty()) { - conn.setRequestMethod(method); - } - - // 添加请求头 - conn.setRequestProperty("Content-Type", "application/json;charset=UTF-8"); - if (headers == null || headers.isEmpty()) { - return; - } - for (Map.Entry entry : headers.entrySet()) { - conn.setRequestProperty(entry.getKey(), entry.getValue()); - } - } - - private static String handleResponseBodyToString(InputStream in) throws Exception { - ByteArrayOutputStream bytesOut = null; - try { - bytesOut = new ByteArrayOutputStream(); - copyStreamAndClose(in, bytesOut); - return new String(bytesOut.toByteArray(), CHARSET_UTF8); - } finally { - closeStream(bytesOut); - } - } - - private static void copyStreamAndClose(InputStream in, OutputStream out) { - try { - byte[] buf = new byte[1024]; - int len = -1; - while ((len = in.read(buf)) != -1) { - out.write(buf, 0, len); - } - out.flush(); - } catch (Exception e) { - e.printStackTrace(); - } finally { - closeStream(in); - closeStream(out); - } - } - - private static void closeConnection(HttpURLConnection conn) { - if (conn != null) { - try { - conn.disconnect(); - } catch (Exception e) { - LOGGER.error("close connection failed", e); - } - } - } - - private static void closeStream(Closeable stream) { - if (stream != null) { - try { - stream.close(); - } catch (Exception e) { - LOGGER.error("close stream failed", e); - } - } - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/JsonUtils.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/JsonUtils.java deleted file mode 100644 index 283d59c5..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/JsonUtils.java +++ /dev/null @@ -1,98 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - -import com.alibaba.fastjson.JSON; -import com.alibaba.fastjson.JSONArray; -import com.alibaba.fastjson.JSONObject; -import com.alibaba.fastjson.serializer.SerializeConfig; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.xiaojukeji.kafka.manager.common.entity.pojo.gateway.TopicConnectionDO; - -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.Date; -import java.util.List; - -/** - * @author zengqiao - * @date 20/4/23 - */ -public class JsonUtils { - private static final String ENUM_METHOD_VALUES = "values"; - - private static final ObjectMapper MAPPER = new ObjectMapper(); - - public static Object toJson(Class clazz) { - try { - Method method = clazz.getMethod(ENUM_METHOD_VALUES); - Object invoke = method.invoke(null); - - int length = java.lang.reflect.Array.getLength(invoke); - List values = new ArrayList(); - for 
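The HttpUtils class deleted above drives everything through a bare HttpURLConnection: append query parameters, set connect/read timeouts and headers, then copy the response stream into a string. The sketch below condenses that flow into one self-contained GET helper; SimpleHttp and its defaults are illustrative only, not code from this repository, and like the original it performs no URL-encoding.

```java
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Map;

public final class SimpleHttp {
    public static String get(String url, Map<String, String> params, Map<String, String> headers) throws Exception {
        // Append query parameters (no URL-encoding, same as the removed helper).
        if (params != null && !params.isEmpty()) {
            StringBuilder sb = new StringBuilder(url).append('?');
            params.forEach((k, v) -> sb.append(k).append('=').append(v).append('&'));
            sb.deleteCharAt(sb.length() - 1);
            url = sb.toString();
        }

        HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
        try {
            conn.setConnectTimeout(15_000);   // connect timeout, ms
            conn.setReadTimeout(3_000);       // read timeout, ms
            conn.setRequestMethod("GET");
            conn.setRequestProperty("Content-Type", "application/json;charset=UTF-8");
            if (headers != null) {
                headers.forEach(conn::setRequestProperty);
            }

            // Copy the response body into a string.
            try (InputStream in = conn.getInputStream();
                 ByteArrayOutputStream out = new ByteArrayOutputStream()) {
                byte[] buf = new byte[1024];
                int len;
                while ((len = in.read(buf)) != -1) {
                    out.write(buf, 0, len);
                }
                return new String(out.toByteArray(), StandardCharsets.UTF_8);
            }
        } finally {
            conn.disconnect();
        }
    }
}
```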
(int i = 0; i < length; i++) { - values.add(java.lang.reflect.Array.get(invoke, i)); - } - SerializeConfig config = new SerializeConfig(); - config.configEnumAsJavaBean(clazz); - return JSON.parseArray(JSON.toJSONString(values, config)); - } catch (Exception e) { - } - return ""; - } - - public static Object toJson(Enum obj) { - try { - SerializeConfig config = new SerializeConfig(); - config.configEnumAsJavaBean(obj.getClass()); - return JSON.parseObject(JSON.toJSONString(obj, config)); - } catch (Exception e) { - } - return ""; - } - - public static String toJSONString(Object obj) { - return JSON.toJSONString(obj); - } - - public static T stringToObj(String src, Class clazz) { - if (ValidateUtils.isBlank(src)) { - return null; - } - return JSON.parseObject(src, clazz); - } - - public static List stringToArrObj(String src, Class clazz) { - if (ValidateUtils.isBlank(src)) { - return null; - } - return JSON.parseArray(src, clazz); - } - - public static List parseTopicConnections(Long clusterId, JSONObject jsonObject, long postTime) { - List connectionDOList = new ArrayList<>(); - for (String clientType: jsonObject.keySet()) { - JSONObject topicObject = jsonObject.getJSONObject(clientType); - - // 解析单个Topic的连接信息 - for (String topicName: topicObject.keySet()) { - JSONArray appIdArray = topicObject.getJSONArray(topicName); - for (Object appIdDetail : appIdArray.toArray()) { - TopicConnectionDO connectionDO = new TopicConnectionDO(); - - String[] appIdDetailArray = appIdDetail.toString().split("#"); - if (appIdDetailArray.length >= 3) { - connectionDO.setAppId(appIdDetailArray[0]); - connectionDO.setIp(appIdDetailArray[1]); - connectionDO.setClientVersion(appIdDetailArray[2]); - } - - connectionDO.setClusterId(clusterId); - connectionDO.setTopicName(topicName); - connectionDO.setType(clientType); - connectionDO.setCreateTime(new Date(postTime)); - connectionDOList.add(connectionDO); - } - } - } - return connectionDOList; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/ListUtils.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/ListUtils.java deleted file mode 100644 index f7ab020f..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/ListUtils.java +++ /dev/null @@ -1,101 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - -import org.springframework.util.StringUtils; - -import java.util.ArrayList; -import java.util.List; - -/** - * @author arthur - * @date 2017/7/30. 
- */ -public class ListUtils { - private static final String REGEX = ","; - - public static List string2IntList(String str) { - if (!StringUtils.hasText(str)) { - return new ArrayList<>(); - } - List intList = new ArrayList<>(); - for (String elem :str.split(REGEX)) { - if (!StringUtils.hasText(elem)) { - continue; - } - intList.add(Integer.valueOf(elem)); - } - return intList; - } - - public static List string2LongList(String str) { - if (!StringUtils.hasText(str)) { - return new ArrayList<>(); - } - List longList = new ArrayList<>(); - for (String elem :str.split(REGEX)) { - if (!StringUtils.hasText(elem)) { - continue; - } - longList.add(Long.valueOf(elem)); - } - return longList; - } - - public static List string2StrList(String str) { - if (!StringUtils.hasText(str)) { - return new ArrayList<>(); - } - List strList = new ArrayList<>(); - for (String elem: str.split(REGEX)) { - if (!StringUtils.hasText(elem)) { - continue; - } - strList.add(elem); - } - return strList; - } - - public static String longList2String(List longList) { - if (longList == null || longList.isEmpty()) { - return ""; - } - - StringBuilder sb = new StringBuilder(); - for (Long elem: longList) { - if (elem == null) { - continue; - } - sb.append(elem).append(REGEX); - } - return sb.length() > 0 ? sb.substring(0, sb.length() - 1) : sb.toString(); - } - - public static String intList2String(List intList) { - if (intList == null || intList.isEmpty()) { - return ""; - } - - StringBuilder sb = new StringBuilder(); - for (Integer elem: intList) { - if (elem == null) { - continue; - } - sb.append(elem).append(REGEX); - } - return sb.length() > 0 ? sb.substring(0, sb.length() - 1) : sb.toString(); - } - - public static String strList2String(List strList) { - if (strList == null || strList.isEmpty()) { - return ""; - } - - StringBuilder sb = new StringBuilder(); - for (String elem: strList) { - if (!StringUtils.hasText(elem)) { - continue; - } - sb.append(elem).append(REGEX); - } - return sb.length() > 0 ? 
sb.substring(0, sb.length() - 1) : sb.toString(); - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/NetUtils.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/NetUtils.java deleted file mode 100644 index 696c5cb8..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/NetUtils.java +++ /dev/null @@ -1,69 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - -import java.net.Inet4Address; -import java.net.InetAddress; -import java.net.NetworkInterface; -import java.util.Enumeration; - -/** - * @author zengqiao - * @date 20/6/8 - */ -public class NetUtils { - private static String ipCache = null; - private static String hostnameCache = null; - - public static String localIp() { - if (!ValidateUtils.isBlank(ipCache)) { - return ipCache; - } - - InetAddress inetAddress = getInetAddress(); - hostnameCache = inetAddress.getHostName(); - ipCache = inetAddress.getHostAddress(); - return ipCache; - } - - public static String localHostname() { - if (!ValidateUtils.isNull(hostnameCache)) { - return hostnameCache; - } - localIp(); - return hostnameCache; - } - - public static Boolean hostnameLegal(String hostname) { - if (ValidateUtils.isExistBlank(hostname)) { - return false; - } - - hostname = hostname.trim(); - try { - InetAddress.getByName(hostname); - } catch (Exception e) { - return false; - } - return true; - } - - private static InetAddress getInetAddress() { - try { - Enumeration allNetInterfaces = NetworkInterface.getNetworkInterfaces(); - while (allNetInterfaces.hasMoreElements()) { - NetworkInterface netInterface = allNetInterfaces.nextElement(); - Enumeration addresses = netInterface.getInetAddresses(); - while (addresses.hasMoreElements()) { - InetAddress ip = addresses.nextElement(); - if (ip != null - && ip instanceof Inet4Address - && !ip.isLoopbackAddress() //loopback地址即本机地址,IPv4的loopback范围是127.0.0.0 ~ 127.255.255.255 - && ip.getHostAddress().indexOf(":") == -1) { - return ip; - } - } - } - } catch (Exception e) { - } - return null; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/NumberUtils.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/NumberUtils.java deleted file mode 100644 index 56e71148..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/NumberUtils.java +++ /dev/null @@ -1,30 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - -/** - * @author zhongyuankai - * @date 2020/6/8 - */ -public class NumberUtils { - - public static Long string2Long(String s) { - if (ValidateUtils.isNull(s)) { - return null; - } - try { - return Long.parseLong(s); - } catch (Exception e) { - } - return null; - } - - public static Integer string2Integer(String s) { - if (ValidateUtils.isNull(s)) { - return null; - } - try { - return Integer.parseInt(s); - } catch (Exception e) { - } - return null; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/SplitUtils.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/SplitUtils.java deleted file mode 100644 index d3692d96..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/SplitUtils.java +++ /dev/null @@ -1,14 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - -/** - * @className: SplitUtils - * @description: Split string of type keyValue - * 
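The NetUtils helper deleted above resolves the local address by scanning network interfaces for the first non-loopback IPv4 address. A standalone sketch of that scan, with names invented for the example:

```java
import java.net.Inet4Address;
import java.net.InetAddress;
import java.net.NetworkInterface;
import java.util.Enumeration;

public final class LocalIpProbe {
    public static String firstNonLoopbackIpv4() {
        try {
            Enumeration<NetworkInterface> nics = NetworkInterface.getNetworkInterfaces();
            while (nics.hasMoreElements()) {
                Enumeration<InetAddress> addrs = nics.nextElement().getInetAddresses();
                while (addrs.hasMoreElements()) {
                    InetAddress addr = addrs.nextElement();
                    // Skip loopback (127.0.0.0/8) and anything that is not plain IPv4.
                    if (addr instanceof Inet4Address && !addr.isLoopbackAddress()) {
                        return addr.getHostAddress();
                    }
                }
            }
        } catch (Exception e) {
            // fall through and return null, as the removed helper does
        }
        return null;
    }

    public static void main(String[] args) {
        System.out.println("local ip: " + firstNonLoopbackIpv4());
    }
}
```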
@author: Hu.Yue - * @date: 2021/8/4 - **/ -public class SplitUtils { - - public static String keyValueSplit(String keyValue){ - return keyValue.split(":\\s+")[1]; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/SpringTool.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/SpringTool.java deleted file mode 100644 index d9cefe59..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/SpringTool.java +++ /dev/null @@ -1,112 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - -import com.xiaojukeji.kafka.manager.common.constant.Constant; -import com.xiaojukeji.kafka.manager.common.constant.LoginConstant; -import com.xiaojukeji.kafka.manager.common.constant.TrickLoginConstant; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.beans.BeansException; -import org.springframework.beans.factory.DisposableBean; -import org.springframework.context.ApplicationContext; -import org.springframework.context.ApplicationContextAware; -import org.springframework.context.ApplicationEvent; -import org.springframework.context.annotation.Lazy; -import org.springframework.core.annotation.Order; -import org.springframework.stereotype.Service; -import org.springframework.web.context.request.RequestAttributes; -import org.springframework.web.context.request.RequestContextHolder; -import org.springframework.web.context.request.ServletRequestAttributes; - -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpSession; -import java.util.Map; - -/** - * @author huangyiminghappy@163.com - * @date 2019-05-08 - */ -@Service -@Lazy(false) -@Order(value = 1) -public class SpringTool implements ApplicationContextAware, DisposableBean { - private static ApplicationContext applicationContext = null; - - private static Logger logger = LoggerFactory.getLogger(SpringTool.class); - - /** - * 去的存储在静态变量中的ApplicationContext - */ - private static ApplicationContext getApplicationContext() { - return applicationContext; - } - - /** - * 从静态变量applicationContext中去的Bean,自动转型为所复制对象的类型 - */ - public static T getBean(String name) { - return (T) applicationContext.getBean(name); - } - - public static T getBean(String name, Class clazz) { - return applicationContext.getBean(name, clazz); - } - - public static Map getBeansOfType(Class type) throws BeansException { - return getApplicationContext().getBeansOfType(type); - } - - /** - * 清除SpringContextHolder中的ApplicationContext为Null - */ - public static void clearHolder() { - if (logger.isDebugEnabled()) { - logger.debug("清除SpringContextHolder中的ApplicationContext:" + applicationContext); - } - applicationContext = null; - } - - /** - * 实现ApplicationContextAware接口,注入Context到静态变量 - */ - @Override - public void setApplicationContext(ApplicationContext context) throws BeansException { - SpringTool.applicationContext = context; - } - - /** - * 实现DisposableBean接口,在Context关闭时清理静态变量 - */ - @Override - public void destroy() throws Exception { - SpringTool.clearHolder(); - } - - public static String getUserName(){ - String username = null; - RequestAttributes requestAttributes = RequestContextHolder.getRequestAttributes(); - if (!ValidateUtils.isNull(requestAttributes)) { - HttpServletRequest request = ((ServletRequestAttributes) requestAttributes).getRequest(); - - if (TrickLoginConstant.TRICK_LOGIN_SWITCH_ON.equals(request.getHeader(TrickLoginConstant.TRICK_LOGIN_SWITCH))) { - // trick登录方式的获取用户 - username = 
request.getHeader(TrickLoginConstant.TRICK_LOGIN_USER); - } else { - // 走页面登录方式登录的获取用户 - HttpSession session = request.getSession(); - username = (String) session.getAttribute(LoginConstant.SESSION_USERNAME_KEY); - } - } - - if (ValidateUtils.isNull(username)) { - return Constant.DEFAULT_USER_NAME; - } - return username; - } - - /** - * 发布一个事件 - */ - public static void publish(ApplicationEvent event) { - getApplicationContext().publishEvent(event); - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/UUIDUtils.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/UUIDUtils.java deleted file mode 100644 index 5dd61199..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/UUIDUtils.java +++ /dev/null @@ -1,13 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - -import java.util.UUID; - -/** - * @author zengqiao - * @date 20/9/8 - */ -public class UUIDUtils { - public static String uuid() { - return UUID.randomUUID().toString().replaceAll("-", "_"); - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/factory/DefaultThreadFactory.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/factory/DefaultThreadFactory.java deleted file mode 100644 index bfcc824f..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/factory/DefaultThreadFactory.java +++ /dev/null @@ -1,57 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils.factory; - -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * @author limeng - * @date 2017/12/22 - */ -public class DefaultThreadFactory implements ThreadFactory { - private static final AtomicInteger POOL_ID = new AtomicInteger(); - private final AtomicInteger nextId; - private final String prefix; - private final boolean daemon; - private final int priority; - - public DefaultThreadFactory(String poolName) { - this((String) poolName, false, 5); - } - - public DefaultThreadFactory(String poolName, boolean daemon, int priority) { - this.nextId = new AtomicInteger(); - if (poolName == null) { - throw new NullPointerException("poolName"); - } else if (priority >= 1 && priority <= 10) { - this.prefix = poolName + '-' + POOL_ID.incrementAndGet() + '-'; - this.daemon = daemon; - this.priority = priority; - } else { - throw new IllegalArgumentException( - "priority: " + priority - + " (expected: Thread.MIN_PRIORITY <= priority <= Thread.MAX_PRIORITY)"); - } - } - - @Override - public Thread newThread(Runnable r) { - Thread t = new Thread(r, this.prefix + this.nextId.incrementAndGet()); - - try { - if (t.isDaemon()) { - if (!this.daemon) { - t.setDaemon(false); - } - } else if (this.daemon) { - t.setDaemon(true); - } - - if (t.getPriority() != this.priority) { - t.setPriority(this.priority); - } - } catch (Exception e) { - ; - } - return t; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/factory/KafkaConsumerFactory.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/factory/KafkaConsumerFactory.java deleted file mode 100644 index 5964d162..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/factory/KafkaConsumerFactory.java +++ /dev/null @@ -1,63 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils.factory; - -import 
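DefaultThreadFactory, also removed above, exists mainly to give pool threads readable names plus explicit daemon/priority settings. A compact JDK-only equivalent, with an invented pool name, could look like this:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;

public final class NamedThreadFactory implements ThreadFactory {
    private static final AtomicInteger POOL_ID = new AtomicInteger();
    private final AtomicInteger threadId = new AtomicInteger();
    private final String prefix;
    private final boolean daemon;

    public NamedThreadFactory(String poolName, boolean daemon) {
        this.prefix = poolName + '-' + POOL_ID.incrementAndGet() + '-';
        this.daemon = daemon;
    }

    @Override
    public Thread newThread(Runnable r) {
        Thread t = new Thread(r, prefix + threadId.incrementAndGet()); // e.g. "demo-pool-1-3"
        t.setDaemon(daemon);
        t.setPriority(Thread.NORM_PRIORITY);
        return t;
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newFixedThreadPool(2, new NamedThreadFactory("demo-pool", true));
        pool.submit(() -> System.out.println(Thread.currentThread().getName()));
        pool.shutdown();
    }
}
```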
com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO; -import com.xiaojukeji.kafka.manager.common.utils.JsonUtils; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import org.apache.commons.pool2.BasePooledObjectFactory; -import org.apache.commons.pool2.PooledObject; -import org.apache.commons.pool2.impl.DefaultPooledObject; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.KafkaConsumer; - -import java.util.Properties; - -/** - * KafkaConsumer工厂 - * @author zengqiao - * @date 20/8/24 - */ -public class KafkaConsumerFactory extends BasePooledObjectFactory> { - private ClusterDO clusterDO; - - public KafkaConsumerFactory(ClusterDO clusterDO) { - this.clusterDO = clusterDO; - } - - @Override - public KafkaConsumer create() { - return new KafkaConsumer(createKafkaConsumerProperties(clusterDO)); - } - - @Override - public PooledObject> wrap(KafkaConsumer obj) { - return new DefaultPooledObject<>(obj); - } - - @Override - public void destroyObject(final PooledObject> p) throws Exception { - KafkaConsumer kafkaConsumer = p.getObject(); - if (ValidateUtils.isNull(kafkaConsumer)) { - return; - } - kafkaConsumer.close(); - } - - private static Properties createKafkaConsumerProperties(ClusterDO clusterDO) { - Properties properties = new Properties(); - properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterDO.getBootstrapServers()); - properties.setProperty( - ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, - "org.apache.kafka.common.serialization.StringDeserializer"); - properties.setProperty( - ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, - "org.apache.kafka.common.serialization.StringDeserializer"); - properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10000); - properties.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 15000); - properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false); - if (ValidateUtils.isBlank(clusterDO.getSecurityProperties())) { - return properties; - } - properties.putAll(JsonUtils.stringToObj(clusterDO.getSecurityProperties(), Properties.class)); - return properties; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxAttributeEnum.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxAttributeEnum.java deleted file mode 100644 index 2c71462e..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxAttributeEnum.java +++ /dev/null @@ -1,52 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils.jmx; - -import java.util.Arrays; - -/** - * @author zengqiao - * @date 20/6/18 - */ -public enum JmxAttributeEnum { - RATE_ATTRIBUTE(new String[]{ - "MeanRate", - "OneMinuteRate", - "FiveMinuteRate", - "FifteenMinuteRate" - }), - PERCENTILE_ATTRIBUTE(new String[]{ - "Mean", - "50thPercentile", - "75thPercentile", - "95thPercentile", - "98thPercentile", - "99thPercentile", - "999thPercentile" - }), - VALUE_ATTRIBUTE(new String[]{ - "Value" - }), - VERSION_ATTRIBUTE(new String[]{ - "Version" - }), - ; - private String[] attribute; - - JmxAttributeEnum(String[] attribute) { - this.attribute = attribute; - } - - public String[] getAttribute() { - return attribute; - } - - public void setAttribute(String[] attribute) { - this.attribute = attribute; - } - - @Override - public String toString() { - return "JmxAttributeEnum{" + - "attribute=" + Arrays.toString(attribute) + - '}'; - } -} \ No newline at end of file diff --git 
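KafkaConsumerFactory above is a commons-pool2 BasePooledObjectFactory, so callers borrow and return KafkaConsumer instances instead of constructing one per request. The sketch below shows that wiring end to end under the same two libraries; the bootstrap address and the listTopics() smoke call are placeholders for the example, not values taken from this patch.

```java
import java.util.Properties;
import org.apache.commons.pool2.BasePooledObjectFactory;
import org.apache.commons.pool2.PooledObject;
import org.apache.commons.pool2.impl.DefaultPooledObject;
import org.apache.commons.pool2.impl.GenericObjectPool;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class ConsumerPoolDemo {
    static class ConsumerFactory extends BasePooledObjectFactory<KafkaConsumer<String, String>> {
        private final String bootstrapServers;

        ConsumerFactory(String bootstrapServers) {
            this.bootstrapServers = bootstrapServers;
        }

        @Override
        public KafkaConsumer<String, String> create() {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.StringDeserializer");
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                    "org.apache.kafka.common.serialization.StringDeserializer");
            props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
            return new KafkaConsumer<>(props);
        }

        @Override
        public PooledObject<KafkaConsumer<String, String>> wrap(KafkaConsumer<String, String> consumer) {
            return new DefaultPooledObject<>(consumer);
        }

        @Override
        public void destroyObject(PooledObject<KafkaConsumer<String, String>> p) {
            p.getObject().close(); // release sockets when the pool evicts the consumer
        }
    }

    public static void main(String[] args) throws Exception {
        GenericObjectPool<KafkaConsumer<String, String>> pool =
                new GenericObjectPool<>(new ConsumerFactory("localhost:9092")); // placeholder address
        KafkaConsumer<String, String> consumer = pool.borrowObject();
        try {
            System.out.println(consumer.listTopics().keySet()); // simple smoke call
        } finally {
            pool.returnObject(consumer); // always hand the consumer back
            pool.close();
        }
    }
}
```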
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConfig.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConfig.java deleted file mode 100644 index f5c380c2..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConfig.java +++ /dev/null @@ -1,33 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils.jmx; - -import lombok.Data; -import lombok.ToString; - -@Data -@ToString -public class JmxConfig { - /** - * 单台最大连接数 - */ - private Integer maxConn; - - /** - * 用户名 - */ - private String username; - - /** - * 密码 - */ - private String password; - - /** - * 开启SSL - */ - private Boolean openSSL; - - /** - * 连接重试回退事件 - */ - private Long retryConnectBackoffTimeUnitMs; -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConnectorWrap.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConnectorWrap.java deleted file mode 100644 index c66c7bc6..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConnectorWrap.java +++ /dev/null @@ -1,230 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils.jmx; - -import com.xiaojukeji.kafka.manager.common.utils.BackoffUtils; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.*; -import javax.management.remote.JMXConnector; -import javax.management.remote.JMXConnectorFactory; -import javax.management.remote.JMXServiceURL; -import javax.management.remote.rmi.RMIConnectorServer; -import javax.naming.Context; -import javax.rmi.ssl.SslRMIClientSocketFactory; -import java.io.IOException; -import java.net.MalformedURLException; -import java.util.HashMap; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReentrantLock; - -/** - * JMXConnector包装类 - * @author tukun - * @date 2015/11/9. 
- */ -public class JmxConnectorWrap { - private static final Logger LOGGER = LoggerFactory.getLogger(JmxConnectorWrap.class); - - private final Long physicalClusterId; - - private final Integer brokerId; - - private final String host; - - private final int port; - - private JMXConnector jmxConnector; - - private final AtomicInteger atomicInteger; - - private JmxConfig jmxConfig; - - private final ReentrantLock modifyJMXConnectorLock = new ReentrantLock(); - - public JmxConnectorWrap(Long physicalClusterId, Integer brokerId, String host, int port, JmxConfig jmxConfig) { - this.physicalClusterId = physicalClusterId; - this.brokerId = brokerId; - this.host = host; - this.port = port; - this.jmxConfig = jmxConfig; - if (ValidateUtils.isNull(this.jmxConfig)) { - this.jmxConfig = new JmxConfig(); - } - if (ValidateUtils.isNullOrLessThanZero(this.jmxConfig.getMaxConn())) { - // 默认设置20 - this.jmxConfig.setMaxConn(20); - } - if (ValidateUtils.isNullOrLessThanZero(this.jmxConfig.getRetryConnectBackoffTimeUnitMs())) { - // 默认回退10分钟 - this.jmxConfig.setRetryConnectBackoffTimeUnitMs(10 * 60 * 1000L); - } - this.atomicInteger = new AtomicInteger(this.jmxConfig.getMaxConn()); - } - - public boolean checkJmxConnectionAndInitIfNeed() { - if (jmxConnector != null) { - return true; - } - if (port == -1) { - return false; - } - return safeCreateJmxConnector(); - } - - public void close() { - this.closeJmxConnect(); - } - - public void closeJmxConnect() { - if (jmxConnector == null) { - return; - } - - try { - modifyJMXConnectorLock.lock(); - - // 移除设置的backoff事件 - BackoffUtils.removeNeedBackoffEvent(buildConnectJmxFailedBackoffEventKey(physicalClusterId, brokerId)); - - jmxConnector.close(); - } catch (Exception e) { - LOGGER.error("close JmxConnector exception, physicalClusterId:{} brokerId:{} host:{} port:{}.", physicalClusterId, brokerId, host, port, e); - } finally { - jmxConnector = null; - - modifyJMXConnectorLock.unlock(); - } - } - - private boolean safeCreateJmxConnector() { - try { - modifyJMXConnectorLock.lock(); - return createJmxConnector(); - } finally { - modifyJMXConnectorLock.unlock(); - } - } - - private synchronized boolean createJmxConnector() { - if (jmxConnector != null) { - return true; - } - - if (BackoffUtils.isNeedBackoff(buildConnectJmxFailedBackoffEventKey(physicalClusterId, brokerId))) { - // 被设置了需要进行回退,则本次不进行创建 - return false; - } - - String jmxUrl = String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", host, port); - try { - Map environment = new HashMap(); - if (!ValidateUtils.isBlank(this.jmxConfig.getUsername()) && !ValidateUtils.isBlank(this.jmxConfig.getPassword())) { - // fixed by riyuetianmu - environment.put(JMXConnector.CREDENTIALS, new String[]{this.jmxConfig.getUsername(), this.jmxConfig.getPassword()}); - } - - if (jmxConfig.getOpenSSL() != null && this.jmxConfig.getOpenSSL()) { - // 开启ssl - environment.put(Context.SECURITY_PROTOCOL, "ssl"); - SslRMIClientSocketFactory clientSocketFactory = new SslRMIClientSocketFactory(); - environment.put(RMIConnectorServer.RMI_CLIENT_SOCKET_FACTORY_ATTRIBUTE, clientSocketFactory); - environment.put("com.sun.jndi.rmi.factory.socket", clientSocketFactory); - } - - jmxConnector = JMXConnectorFactory.connect(new JMXServiceURL(jmxUrl), environment); - LOGGER.info("connect JMX success, physicalClusterId:{} brokerId:{} host:{} port:{}.", physicalClusterId, brokerId, host, port); - return true; - } catch (MalformedURLException e) { - LOGGER.error("connect JMX failed, JMX url exception, physicalClusterId:{} brokerId:{} host:{} port:{} 
jmxUrl:{}.", physicalClusterId, brokerId, host, port, jmxUrl, e); - } catch (Exception e) { - LOGGER.error("connect JMX failed, physicalClusterId:{} brokerId:{} host:{} port:{}.", physicalClusterId, brokerId, host, port, e); - } - - // 设置连接backoff - BackoffUtils.putNeedBackoffEvent(buildConnectJmxFailedBackoffEventKey(physicalClusterId, brokerId), this.jmxConfig.getRetryConnectBackoffTimeUnitMs()); - - return false; - } - - public Object getAttribute(ObjectName name, String attribute) throws - MBeanException, - AttributeNotFoundException, - InstanceNotFoundException, - ReflectionException, - IOException { - try { - acquire(); - MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection(); - return mBeanServerConnection.getAttribute(name, attribute); - } catch (IOException ioe) { - // io错误,则重置连接 - this.closeJmxConnect(); - - throw ioe; - } finally { - atomicInteger.incrementAndGet(); - } - } - - public AttributeList getAttributes(ObjectName name, String[] attributes) throws - MBeanException, - AttributeNotFoundException, - InstanceNotFoundException, - ReflectionException, - IOException { - try { - acquire(); - MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection(); - return mBeanServerConnection.getAttributes(name, attributes); - } catch (IOException ioe) { - // io错误,则重置连接 - this.closeJmxConnect(); - - throw ioe; - } finally { - atomicInteger.incrementAndGet(); - } - } - - public Set queryNames(ObjectName name, - QueryExp query) - throws IOException { - try { - acquire(); - MBeanServerConnection mBeanServerConnection = jmxConnector.getMBeanServerConnection(); - return mBeanServerConnection.queryNames(name, query); - } catch (IOException ioe) { - // io错误,则重置连接 - this.closeJmxConnect(); - - throw ioe; - } finally { - atomicInteger.incrementAndGet(); - } - } - - private void acquire() { - long now = System.currentTimeMillis(); - while (true) { - try { - int num = atomicInteger.get(); - if (num <= 0) { - BackoffUtils.backoff(2); - } - - if (atomicInteger.compareAndSet(num, num - 1) || System.currentTimeMillis() - now > 6000) { - break; - } - } catch (Exception e) { - // ignore - } - } - } - - private static String buildConnectJmxFailedBackoffEventKey(Long physicalClusterId, Integer brokerId) { - return "CONNECT_JMX_FAILED_BACK_OFF_EVENT_PHY_" + physicalClusterId + "_BROKER_" + brokerId; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConstant.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConstant.java deleted file mode 100644 index 081bfba4..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/JmxConstant.java +++ /dev/null @@ -1,29 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils.jmx; - -import java.util.HashMap; -import java.util.Map; - -/** - * @author zhongyuankai - * @date 20/4/13 - */ -public class JmxConstant { - /** - * 健康分 - */ - public static final String HEALTH_SCORE = "HealthScore"; - - public static final String CREATE_TIME = "CreateTime"; - - public static final String TOPIC_NUM = "TopicNum"; - - public static final String PARTITION_NUM = "PartitionNum"; - - public static final String BROKER_NUM = "BrokerNum"; - - public static final String TOPIC = "topic"; - - public static final String APP_ID = "appId"; - - public static final String TOPIC_PARTITION_LOG_SIZE = "TopicPartitionLogSize"; -} diff --git 
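JmxConnectorWrap boils down to the standard JMX-over-RMI handshake: format the service:jmx:rmi URL, optionally supply credentials (and SSL socket factories), then read attributes through the MBeanServerConnection. A self-contained sketch of that handshake follows; the host, port, credentials, and the sample BytesInPerSec MBean are example values only, not configuration from this patch.

```java
import java.util.HashMap;
import java.util.Map;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class JmxReadDemo {
    public static void main(String[] args) throws Exception {
        String host = "broker-1.example.org"; // placeholder
        int port = 9999;                      // placeholder JMX port

        String url = String.format("service:jmx:rmi:///jndi/rmi://%s:%d/jmxrmi", host, port);

        // Optional username/password, same shape as the environment map above.
        Map<String, Object> env = new HashMap<>();
        env.put(JMXConnector.CREDENTIALS, new String[]{"user", "password"});

        JMXConnector connector = JMXConnectorFactory.connect(new JMXServiceURL(url), env);
        try {
            MBeanServerConnection mbsc = connector.getMBeanServerConnection();
            ObjectName name = new ObjectName("kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec");
            Object oneMinuteRate = mbsc.getAttribute(name, "OneMinuteRate");
            System.out.println("BytesInPerSec.OneMinuteRate = " + oneMinuteRate);
        } finally {
            connector.close(); // mirrors closeJmxConnect above
        }
    }
}
```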
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/Mbean.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/Mbean.java deleted file mode 100644 index 0b03fe67..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/Mbean.java +++ /dev/null @@ -1,62 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils.jmx; - -/** - * Mbean的对象封装 - * @author tukun - * @date 2015/11/9. - */ -public class Mbean { - /** - * mbean的对象名称 - */ - private String objectName; - - /** - * mbean对象被监控的属性名称 - */ - private String property; - - /** - * mbean对象被监控的属性值对象类型 - */ - private Class propertyClass; - - public Mbean(String objectName, String property, Class propertyClass) { - this.objectName = objectName; - this.property = property; - this.propertyClass = propertyClass; - } - - public String getObjectName() { - return objectName; - } - - public void setObjectName(String objectName) { - this.objectName = objectName; - } - - public String getProperty() { - return property; - } - - public void setProperty(String property) { - this.property = property; - } - - public Class getPropertyClass() { - return propertyClass; - } - - public void setPropertyClass(Class propertyClass) { - this.propertyClass = propertyClass; - } - - @Override - public String toString() { - return "Mbean{" + - "objectName='" + objectName + '\'' + - ", property='" + property + '\'' + - ", propertyClass=" + propertyClass + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/MbeanNameUtil.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/MbeanNameUtil.java deleted file mode 100644 index b62b390e..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/MbeanNameUtil.java +++ /dev/null @@ -1,79 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils.jmx; - -import java.util.HashMap; -import java.util.Map; - -/** - * kafka集群的mbean的object name集合 - * @author tukun, zengqiao - * @date 2015/11/5. 
- */ -public class MbeanNameUtil { - /** - * 社区Metrics指标 - */ - - /** - * 滴滴Metrics指标 - */ - private static final String PRODUCE_BYTES_IN_PER_SEC = "kafka.server:type=DiskMetrics,name=ProducerBytesInPerSec"; - - /** - * 限流指标 - */ - private static final String PRODUCE_THROTTLE_TIME = "kafka.server:type=Produce,client-id=*"; - private static final String FETCH_THROTTLE_TIME = "kafka.server:type=Fetch,client-id=*"; - - - private static final String TOPIC_PARTITION_LOG_SIZE = "kafka.log:type=Log,name=Size,topic=%s,partition=%d"; - - - /** - * 存储监控的参数name到获取的object_name的映射关系图 - */ - private static Map MBEAN_NAME_MAP = new HashMap<>(); - static { - - MBEAN_NAME_MAP.put("ProduceThrottleTime", new Mbean(MbeanNameUtil.PRODUCE_THROTTLE_TIME, "throttle-time", Double.class)); - MBEAN_NAME_MAP.put("FetchThrottleTime", new Mbean(MbeanNameUtil.FETCH_THROTTLE_TIME, "throttle-time", Double.class)); - - - - /** - * 滴滴Metrics指标 - */ - MBEAN_NAME_MAP.put("ProducerBytesInPerSec", new Mbean(PRODUCE_BYTES_IN_PER_SEC,"OneMinuteRate", Double.class)); - - MBEAN_NAME_MAP.put("TopicPartitionLogSize", new Mbean(TOPIC_PARTITION_LOG_SIZE, "Value", Long.class)); - - } - - /** - * 根据属性名,kafka版本,topic获取相应的Mbean - */ - public static Mbean getMbean(String mbeanName) { - return MBEAN_NAME_MAP.get(mbeanName); - } - - public static Mbean getMbean(String mbeanName, String topicName) { - Mbean mbean = MBEAN_NAME_MAP.get(mbeanName); - if (mbean == null) { - return null; - } - if (topicName == null || topicName.isEmpty()) { - return mbean; - } - return new Mbean(mbean.getObjectName() + ",topic=" + topicName, mbean.getProperty(), mbean.getPropertyClass()); - } - - public static Mbean getMbean(String mbeanName, Integer brokerId) { - Mbean mbean = MBEAN_NAME_MAP.get(mbeanName); - if (mbean == null) { - return null; - } - if (brokerId == null) { - return mbean; - } - return new Mbean(mbean.getObjectName() + ",id=" + brokerId, mbean.getProperty(), mbean.getPropertyClass()); - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/MbeanNameUtilV2.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/MbeanNameUtilV2.java deleted file mode 100644 index eb43e989..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/MbeanNameUtilV2.java +++ /dev/null @@ -1,448 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils.jmx; - -import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections; -import com.xiaojukeji.kafka.manager.common.entity.KafkaVersion; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; - -import java.util.*; - -/** - * kafka集群的mbean的object name集合 - * @author zengqiao - * @date 20/6/15 - */ -public class MbeanNameUtilV2 { - - private static Map> FIELD_NAME_MBEAN_MAP = new HashMap<>(); - - private static void initMbean(MbeanV2 mbeanV2, List interfaceIdList) { - for (Integer interfaceId: interfaceIdList) { - List mbeanV2List = FIELD_NAME_MBEAN_MAP.getOrDefault(interfaceId, new ArrayList<>()); - mbeanV2List.add(mbeanV2); - FIELD_NAME_MBEAN_MAP.put(interfaceId, mbeanV2List); - } - } - - static { - //社区Kafka指标------------------------------------------------------------------------------------------- - - for (String fieldName: Arrays.asList("BytesInPerSec", "BytesOutPerSec")) { - initMbean( - new MbeanV2( - fieldName, - JmxAttributeEnum.RATE_ATTRIBUTE, - "kafka.server:type=BrokerTopicMetrics,name=" + fieldName - ), - Arrays.asList( - KafkaMetricsCollections.COMMON_DETAIL_METRICS, - 
KafkaMetricsCollections.BROKER_OVERVIEW_PAGE_METRICS, - KafkaMetricsCollections.TOPIC_METRICS_TO_DB, - KafkaMetricsCollections.BROKER_TO_DB_METRICS, - KafkaMetricsCollections.BROKER_ANALYSIS_METRICS, - KafkaMetricsCollections.BROKER_TOPIC_ANALYSIS_METRICS - ) - ); - } - - for (String fieldName: Arrays.asList( - "MessagesInPerSec", - "BytesRejectedPerSec", - "TotalProduceRequestsPerSec", - "TotalFetchRequestsPerSec" - )) { - initMbean( - new MbeanV2( - fieldName, - JmxAttributeEnum.RATE_ATTRIBUTE, - "kafka.server:type=BrokerTopicMetrics,name=" + fieldName - ), - Arrays.asList( - KafkaMetricsCollections.COMMON_DETAIL_METRICS, - KafkaMetricsCollections.TOPIC_METRICS_TO_DB, - KafkaMetricsCollections.BROKER_TO_DB_METRICS, - KafkaMetricsCollections.BROKER_ANALYSIS_METRICS, - KafkaMetricsCollections.BROKER_TOPIC_ANALYSIS_METRICS - ) - ); - } - - for (String fieldName: Arrays.asList( - "FailedFetchRequestsPerSec", - "FailedProduceRequestsPerSec" - )) { - initMbean( - new MbeanV2( - fieldName, - JmxAttributeEnum.RATE_ATTRIBUTE, - "kafka.server:type=BrokerTopicMetrics,name=" + fieldName - ), - Arrays.asList( - KafkaMetricsCollections.COMMON_DETAIL_METRICS, - KafkaMetricsCollections.TOPIC_METRICS_TO_DB, - KafkaMetricsCollections.BROKER_TO_DB_METRICS, - KafkaMetricsCollections.BROKER_ANALYSIS_METRICS, - KafkaMetricsCollections.BROKER_TOPIC_ANALYSIS_METRICS, - KafkaMetricsCollections.BROKER_HEALTH_SCORE_METRICS - - ) - ); - } - - for (String fieldName: Arrays.asList("Produce", "FetchConsumer")) { - initMbean( - new MbeanV2( - fieldName + "RequestsPerSec", - JmxAttributeEnum.RATE_ATTRIBUTE, - "kafka.network:type=RequestMetrics,name=RequestsPerSec,request=" + fieldName - ), - Arrays.asList( - KafkaMetricsCollections.BROKER_TO_DB_METRICS - ) - ); - } - - initMbean( - new MbeanV2( - "RequestHandlerAvgIdlePercent", - JmxAttributeEnum.RATE_ATTRIBUTE, - "kafka.server:type=KafkaRequestHandlerPool,name=RequestHandlerAvgIdlePercent" - ), - Arrays.asList( - KafkaMetricsCollections.BROKER_TO_DB_METRICS, - KafkaMetricsCollections.BROKER_HEALTH_SCORE_METRICS - ) - ); - - initMbean( - new MbeanV2( - "NetworkProcessorAvgIdlePercent", - JmxAttributeEnum.VALUE_ATTRIBUTE, - "kafka.network:type=SocketServer,name=NetworkProcessorAvgIdlePercent" - ), - Arrays.asList( - KafkaMetricsCollections.BROKER_TO_DB_METRICS, - KafkaMetricsCollections.BROKER_HEALTH_SCORE_METRICS - ) - ); - - for (String fieldName: Arrays.asList("RequestQueueSize", "ResponseQueueSize")) { - initMbean( - new MbeanV2( - fieldName, - JmxAttributeEnum.VALUE_ATTRIBUTE, - "kafka.network:type=RequestChannel,name=" + fieldName - ), - Arrays.asList( - KafkaMetricsCollections.BROKER_TO_DB_METRICS, KafkaMetricsCollections.BROKER_HEALTH_SCORE_METRICS - ) - ); - } - - for (String fieldName: Arrays.asList("Produce", "FetchConsumer")) { - initMbean( - new MbeanV2( - fieldName + "TotalTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - "kafka.network:type=RequestMetrics,name=TotalTimeMs,request=" + fieldName - ), - Arrays.asList( - KafkaMetricsCollections.BROKER_TO_DB_METRICS - ) - ); - } - - for (String fieldName: Arrays.asList("PartitionCount", "LeaderCount", "UnderReplicatedPartitions")) { - initMbean( - new MbeanV2( - fieldName, - JmxAttributeEnum.VALUE_ATTRIBUTE, - "kafka.server:type=ReplicaManager,name=" + fieldName - ), - Arrays.asList( - KafkaMetricsCollections.BROKER_BASIC_PAGE_METRICS, - KafkaMetricsCollections.BROKER_OVERVIEW_PAGE_METRICS, - KafkaMetricsCollections.BROKER_STATUS_PAGE_METRICS - ) - ); - } - - initMbean( - new MbeanV2( - "TopicCodeC", - 
JmxAttributeEnum.VALUE_ATTRIBUTE, - Arrays.asList( - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.server:type=ReplicaManager,name=TopicCodeC"), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=AppIdTopicMetrics,name=RecordCompression,appId=") - ) - ), - Arrays.asList( - KafkaMetricsCollections.TOPIC_BASIC_PAGE_METRICS - ) - ); - - - - - initMbean( - new MbeanV2( - "LogFlushRateAndTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - "kafka.log:type=LogFlushStats,name=LogFlushRateAndTimeMs" - ), - Arrays.asList( - KafkaMetricsCollections.BROKER_TO_DB_METRICS - ) - ); - - initMbean( - new MbeanV2( - "LogEndOffset", - JmxAttributeEnum.VALUE_ATTRIBUTE, - "kafka.log:type=Log,name=LogEndOffset" - ), - Arrays.asList( - ) - ); - - for (String fieldName: Arrays.asList("Produce")) { - initMbean(new MbeanV2( - fieldName + "TotalTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - Arrays.asList( - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.network:type=TopicRequestMetrics,name=TotalTimeMs,request=" + fieldName), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=TopicRequestMetrics,name=TotalTimeMs,request=" + fieldName) - ) - - ), - Arrays.asList( - KafkaMetricsCollections.TOPIC_REQUEST_TIME_METRICS_TO_DB, - KafkaMetricsCollections.TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS - ) - ); - - initMbean( - new MbeanV2( - fieldName + "RequestQueueTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - Arrays.asList( - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.network:type=TopicRequestMetrics,name=RequestQueueTimeMs,request=" + fieldName), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=TopicRequestMetrics,name=RequestQueueTimeMs,request=" + fieldName) - ) - ), - Arrays.asList( - KafkaMetricsCollections.TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS - ) - ); - - initMbean( - new MbeanV2( - fieldName + "LocalTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - Arrays.asList( - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.network:type=TopicRequestMetrics,name=LocalTimeMs,request=" + fieldName), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=TopicRequestMetrics,name=LocalTimeMs,request=" + fieldName) - ) - ), - Arrays.asList( - KafkaMetricsCollections.TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS - ) - ); - - initMbean( - new MbeanV2( - fieldName + "RemoteTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - Arrays.asList( - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.network:type=TopicRequestMetrics,name=RemoteTimeMs,request=" + fieldName), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=TopicRequestMetrics,name=RemoteTimeMs,request=" + fieldName) - ) - ), - Arrays.asList( - KafkaMetricsCollections.TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS - ) - ); - - initMbean( - new MbeanV2( - fieldName + "ThrottleTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - Arrays.asList( - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.network:type=TopicRequestMetrics,name=ThrottleTimeMs,request=" + fieldName), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=TopicRequestMetrics,name=ThrottleTimeMs,request=" + fieldName) - ) - ), - Arrays.asList( - KafkaMetricsCollections.TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS - ) - ); - - initMbean( - new MbeanV2( - fieldName + "ResponseQueueTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - Arrays.asList( - new 
AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.network:type=TopicRequestMetrics,name=ResponseQueueTimeMs,request=" + fieldName), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=TopicRequestMetrics,name=ResponseQueueTimeMs,request=" + fieldName) - ) - ), - Arrays.asList( - KafkaMetricsCollections.TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS - ) - ); - - initMbean( - new MbeanV2( - fieldName + "ResponseSendTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - Arrays.asList( - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.network:type=TopicRequestMetrics,name=ResponseSendTimeMs,request=" + fieldName), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=TopicRequestMetrics,name=ResponseSendTimeMs,request=" + fieldName) - ) - ), - Arrays.asList( - KafkaMetricsCollections.TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS - ) - ); - } - - for (String fieldName: Arrays.asList("Fetch")) { - initMbean(new MbeanV2( - fieldName + "ConsumerTotalTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - Arrays.asList( - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.network:type=TopicRequestMetrics,name=TotalTimeMs,request=" + fieldName + "Consumer"), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=TopicRequestMetrics,name=TotalTimeMs,request=" + fieldName) - ) - - ), - Arrays.asList( - KafkaMetricsCollections.TOPIC_REQUEST_TIME_METRICS_TO_DB, - KafkaMetricsCollections.TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS - ) - ); - - initMbean( - new MbeanV2( - fieldName + "ConsumerRequestQueueTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - Arrays.asList( - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.network:type=TopicRequestMetrics,name=RequestQueueTimeMs,request=" + fieldName + "Consumer"), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=TopicRequestMetrics,name=RequestQueueTimeMs,request=" + fieldName) - ) - ), - Arrays.asList( - KafkaMetricsCollections.TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS - ) - ); - - initMbean( - new MbeanV2( - fieldName + "ConsumerLocalTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - Arrays.asList( - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.network:type=TopicRequestMetrics,name=LocalTimeMs,request=" + fieldName + "Consumer"), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=TopicRequestMetrics,name=LocalTimeMs,request=" + fieldName) - ) - ), - Arrays.asList( - KafkaMetricsCollections.TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS - ) - ); - - initMbean( - new MbeanV2( - fieldName + "ConsumerRemoteTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - Arrays.asList( - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.network:type=TopicRequestMetrics,name=RemoteTimeMs,request=" + fieldName + "Consumer"), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=TopicRequestMetrics,name=RemoteTimeMs,request=" + fieldName) - ) - ), - Arrays.asList( - KafkaMetricsCollections.TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS - ) - ); - - initMbean( - new MbeanV2( - fieldName + "ConsumerThrottleTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - Arrays.asList( - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.network:type=TopicRequestMetrics,name=ThrottleTimeMs,request=" + fieldName + "Consumer"), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=TopicRequestMetrics,name=ThrottleTimeMs,request=" + fieldName) - ) - ), - 
Arrays.asList( - KafkaMetricsCollections.TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS - ) - ); - - initMbean( - new MbeanV2( - fieldName + "ConsumerResponseQueueTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - Arrays.asList( - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.network:type=TopicRequestMetrics,name=ResponseQueueTimeMs,request=" + fieldName + "Consumer"), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=TopicRequestMetrics,name=ResponseQueueTimeMs,request=" + fieldName) - ) - ), - Arrays.asList( - KafkaMetricsCollections.TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS - ) - ); - - initMbean( - new MbeanV2( - fieldName + "ConsumerResponseSendTimeMs", - JmxAttributeEnum.PERCENTILE_ATTRIBUTE, - Arrays.asList( - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.network:type=TopicRequestMetrics,name=ResponseSendTimeMs,request=" + fieldName + "Consumer"), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=TopicRequestMetrics,name=ResponseSendTimeMs,request=" + fieldName) - ) - ), - Arrays.asList( - KafkaMetricsCollections.TOPIC_REQUEST_TIME_DETAIL_PAGE_METRICS - ) - ); - } - - initMbean( - new MbeanV2( - "KafkaVersion", - JmxAttributeEnum.VERSION_ATTRIBUTE, - "kafka.server:type=app-info" - ), - Arrays.asList( - KafkaMetricsCollections.BROKER_VERSION - ) - ); - - - //滴滴Kafka指标------------------------------------------------------------------------------------------- - for (String fieldName: Arrays.asList("AppIdBytesInPerSec", "AppIdBytesOutPerSec", "AppIdMessagesInPerSec")) { - initMbean( - new MbeanV2( - "Topic" + fieldName, - JmxAttributeEnum.RATE_ATTRIBUTE, - Arrays.asList( - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_0_10_3, "kafka.server:type=AppIdTopicMetrics,name=" + fieldName + ",topic=*,appId=*"), - new AbstractMap.SimpleEntry<>(KafkaVersion.VERSION_MAX, "kafka.server:type=AppIdTopicMetrics,name=" + fieldName.replace("AppId", "") + ",topic=*,appId=*") - ) - ), - Arrays.asList( - KafkaMetricsCollections.APP_TOPIC_METRICS_TO_DB - ) - ); - } - }; - - /** - * 根据属性名,kafka版本,topic获取相应的Mbean - */ - public static List getMbeanList(Integer interfaceId) { - if (ValidateUtils.isNull(interfaceId)) { - return new ArrayList<>(); - } - return FIELD_NAME_MBEAN_MAP.getOrDefault(interfaceId, new ArrayList<>()); - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/MbeanV2.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/MbeanV2.java deleted file mode 100644 index e8baaa0b..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/utils/jmx/MbeanV2.java +++ /dev/null @@ -1,69 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils.jmx; - -import com.xiaojukeji.kafka.manager.common.entity.KafkaVersion; -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; - -import java.util.List; -import java.util.Map; -import java.util.TreeMap; - -/** - * @author zengqiao - * @date 20/6/15 - */ -public class MbeanV2 { - private String fieldName; - - /** - * mbean对象被监控的属性名称 - */ - private JmxAttributeEnum attributeEnum; - - /** - * mbean的对象名称 - */ - private Map versionObjectNameMap = new TreeMap<>(); - - public MbeanV2(String fieldName, JmxAttributeEnum attributeEnum, String objectName) { - this.fieldName = fieldName; - this.attributeEnum = attributeEnum; - this.versionObjectNameMap.put(KafkaVersion.VERSION_MAX, objectName); - } - - public MbeanV2(String fieldName, 
JmxAttributeEnum attributeEnum, List> versionObjectNames) { - this.fieldName = fieldName; - this.attributeEnum = attributeEnum; - for (Map.Entry entry: versionObjectNames) { - this.versionObjectNameMap.put(entry.getKey(), entry.getValue()); - } - } - - public String getFieldName() { - return fieldName; - } - - public JmxAttributeEnum getAttributeEnum() { - return attributeEnum; - } - - public String getObjectName(Long versionNum) { - if (ValidateUtils.isNull(versionNum)) { - return versionObjectNameMap.get(KafkaVersion.VERSION_MAX); - } - for (Map.Entry entry: versionObjectNameMap.entrySet()) { - if (entry.getKey() >= versionNum) { - return entry.getValue(); - } - } - return null; - } - - @Override - public String toString() { - return "MbeanV2{" + - "fieldName='" + fieldName + '\'' + - ", attributeEnum=" + attributeEnum + - ", versionObjectNameMap=" + versionObjectNameMap + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/ConfigClient.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/ConfigClient.java deleted file mode 100644 index ca155a79..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/ConfigClient.java +++ /dev/null @@ -1,172 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper; - -import com.xiaojukeji.kafka.manager.common.exception.ConfigException; -import org.apache.zookeeper.data.Stat; - -import java.util.List; - -/** - * Created by limeng on 2017/12/22 - */ -public interface ConfigClient { - - /** - * 添加连接状态监听器 - * - * @param listener - */ - void addStateChangeListener(StateChangeListener listener); - - /** - * 检查节点是否存在 - * - * @param path - * @return - * @throws ConfigException - */ - boolean checkPathExists(String path) throws ConfigException; - - /** - * 获取节点信息 - * - * @param path - * @return - * @throws ConfigException - */ - Stat getNodeStat(String path) throws ConfigException; - - /** - * 重置zk下面数据 - * - * @param path - * @param data - * @throws ConfigException - */ - Stat setNodeStat(String path, String data) throws ConfigException; - - Stat setOrCreatePersistentNodeStat(String path, String data) throws ConfigException; - - String createPersistentSequential(String path, String data) throws ConfigException; - - /** - * 创建一个节点并包含数据,在失去连接后不会删除. - *
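MbeanV2.getObjectName above walks a TreeMap keyed by the highest Kafka version an object name applies to and returns the first entry whose key is greater than or equal to the broker version, which is exactly what TreeMap.ceilingEntry does. A tiny sketch with illustrative version keys (the object names are taken from the mappings shown earlier in this patch):

```java
import java.util.TreeMap;

public class VersionedObjectNameDemo {
    public static void main(String[] args) {
        // key = highest broker version this object name applies to (illustrative values)
        TreeMap<Long, String> byVersion = new TreeMap<>();
        byVersion.put(2L, "kafka.network:type=TopicRequestMetrics,name=TotalTimeMs,request=Produce");
        byVersion.put(Long.MAX_VALUE, "kafka.server:type=TopicRequestMetrics,name=TotalTimeMs,request=Produce");

        long brokerVersion = 1L;
        // ceilingEntry(k) returns the smallest key >= k, the same choice the loop
        // over versionObjectNameMap makes.
        String objectName = byVersion.ceilingEntry(brokerVersion).getValue();
        System.out.println(objectName); // -> the kafka.network:... name
    }
}
```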
- * save是持久化存储,如果是临时数据,请使用register - * - * @param path - * @param data - * @param - * @throws ConfigException - */ - // void save(String path, T data) throws ConfigException; - - /** - * 创建一个节点并包含数据,在失去连接后不会删除. - *
- * save是持久化存储,如果是临时数据,请使用register - * - * @param path - * @param data - * @param - * @throws ConfigException - */ - // void saveIfNotExisted(String path, T data) throws ConfigException; - - // /** - // * 注册一个数据,在连接断开时需要重新删除,重连后重新注册 - // * - // * @param path - // * @param data - // * @param - // * @throws ConfigException - // */ - // void register(String path, T data) throws ConfigException; - - /** - * 获取数据 - * - * @param path - * @param clazz - * @param - * @return - * @throws ConfigException - */ - T get(String path, Class clazz) throws ConfigException; - - /** - * 删除数据,如果有子节点也会删除 - * - * @param path - * @throws ConfigException - */ - void delete(String path) throws ConfigException; - - /** - * 获取zkString字符 - * @param path - * @return - * @throws ConfigException - */ - String get(String path) throws ConfigException; - - /** - * 监听数据变化 - * - * @param path - * @param listener - */ - void watch(String path, StateChangeListener listener) throws ConfigException; - - /** - * 获取路径下的子节点 - * - * @param path - * @return - * @throws ConfigException - */ - List getChildren(String path) throws ConfigException; - - /** - * 监听子节点的变化并通知出来 - * - * @param path - * @param listener - * @return - * @throws ConfigException - */ - void watchChildren(String path, StateChangeListener listener) throws ConfigException; - - /** - * 取消监听子节点的变化 - * - * @param path - * @return - */ - void cancelWatchChildren(String path); - - /** - * 锁住某个节点 - * - * @param path - * @param timeoutMS - * @param data - * @param - * @return - * @throws ConfigException - */ - void lock(String path, long timeoutMS, T data) throws ConfigException; - - /** - * 释放节点锁 - * - * @param path - */ - void unLock(String path); - - /** - * 资源释放 - */ - void close(); - - // void setConfigClientTracer(ConfigClientTracer configClientTracer); -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/StateChangeListener.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/StateChangeListener.java deleted file mode 100644 index f4dea218..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/StateChangeListener.java +++ /dev/null @@ -1,22 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper; - -/** - * @author limeng - * @date 2017/12/22 - */ -public interface StateChangeListener { - enum State { - CONNECTION_RECONNECT, // - CONNECTION_DISCONNECT, - NODE_DATA_CHANGED, - CHILD_UPDATED, - CHILD_ADDED, - CHILD_DELETED, - // - ; - } - - void init(); - - void onChange(State state, String path); -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/ZkConfigImpl.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/ZkConfigImpl.java deleted file mode 100644 index d41bc7d5..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/ZkConfigImpl.java +++ /dev/null @@ -1,531 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper; - -import com.alibaba.fastjson.JSON; -import com.xiaojukeji.kafka.manager.common.exception.ConfigException; -import com.google.common.base.Preconditions; -import org.apache.curator.framework.CuratorFramework; -import org.apache.curator.framework.CuratorFrameworkFactory; -import org.apache.curator.framework.recipes.cache.*; -import org.apache.curator.framework.recipes.locks.InterProcessSemaphoreMutex; -import org.apache.curator.framework.state.ConnectionState; -import 
org.apache.curator.framework.state.ConnectionStateListener; -import org.apache.curator.retry.ExponentialBackoffRetry; -import org.apache.curator.utils.ThreadUtils; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.data.Stat; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.ReentrantLock; - -/** - * @author limeng - * @date 2017/12/22 - */ -public class ZkConfigImpl implements ConfigClient, ConnectionStateListener { - private static final int DEFAULT_SESSION_TIMEOUT_MS = 12000; - private static final int DEFAULT_CONNECTION_TIMEOUT_MS = 3000; - private static final int DEFAULT_THREAD_POOL_SIZE = Math.max(Runtime.getRuntime().availableProcessors(), 16); - - private final static Logger logger = LoggerFactory.getLogger(ZkConfigImpl.class); - - final byte[] EMPTY = new byte[0]; - - /** - * 监听连接状态 - */ - private final Map registerLocks = new ConcurrentHashMap<>(); - private Map connectionListenerMap = new ConcurrentHashMap<>(); - private Set connectionStateListeners = new HashSet<>(); - - /** - * 监听节点数据变化的缓存 - */ - private final Map dataPathLocks = new ConcurrentHashMap<>(); - private final Map dataWatchers = new ConcurrentHashMap<>(); - private final Map> dataListeners = new ConcurrentHashMap<>(); - - /** - * 监听子节点变化的缓存 - */ - private final Map childrenPathLocks = new ConcurrentHashMap<>(); - private final Map childrenWatcher = new ConcurrentHashMap<>(); - private final Map> childrenListeners = new ConcurrentHashMap<>(); - - /** - * 所有持有的锁 - */ - private final Map lockMap = new ConcurrentHashMap<>(); - - private final CuratorFramework curator; - private final ExecutorService executor; - - public ZkConfigImpl(String zkAddress) { - this(zkAddress, DEFAULT_SESSION_TIMEOUT_MS, DEFAULT_CONNECTION_TIMEOUT_MS); - } - - public ZkConfigImpl(String zkAddress, int sessionTimeoutMs, int connectionTimeoutMs) { - this(zkAddress, sessionTimeoutMs, connectionTimeoutMs, DEFAULT_THREAD_POOL_SIZE); - } - - public ZkConfigImpl(String zkAddress, int sessionTimeoutMs, int connectionTimeoutMs, int threadPoolSize) { - ExponentialBackoffRetry retryPolicy = new ExponentialBackoffRetry(1000, 3); - CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder().connectString(zkAddress); - builder.retryPolicy(retryPolicy); - builder.sessionTimeoutMs(sessionTimeoutMs).connectionTimeoutMs(connectionTimeoutMs); - curator = builder.build(); - curator.getConnectionStateListenable().addListener(this); - curator.start(); - executor = Executors.newFixedThreadPool(threadPoolSize, ThreadUtils.newThreadFactory("PathChildrenCache")); - } - - private synchronized java.util.concurrent.locks.Lock getRegisterLock(String registerPath) { - registerLocks.putIfAbsent(registerPath, new ReentrantLock()); - return registerLocks.get(registerPath); - } - - private synchronized java.util.concurrent.locks.Lock getDataPathLock(String dataPath) { - dataPathLocks.putIfAbsent(dataPath, new ReentrantLock()); - return dataPathLocks.get(dataPath); - } - - private synchronized java.util.concurrent.locks.Lock getChildrenPathLock(String childrenPath) { - childrenPathLocks.putIfAbsent(childrenPath, new ReentrantLock()); - return childrenPathLocks.get(childrenPath); - } - - @Override - public void 
stateChanged(CuratorFramework client, ConnectionState newState) { - - StateChangeListener.State state; - switch (newState) { - case LOST: - logger.error("[zk] current connection status is {}", newState); - releaseLocks(); - state = StateChangeListener.State.CONNECTION_DISCONNECT; - break; - case CONNECTED: - case RECONNECTED: - logger.warn("[zk] current connection status is {}", newState); - state = StateChangeListener.State.CONNECTION_RECONNECT; - break; - default: - logger.info("[zk] current connection status is {}", newState); - return; - } - for (StateChangeListener listener : connectionListenerMap.values()) { - listener.onChange(state, null); - } - - for (StateChangeListener listener : connectionStateListeners) { - listener.onChange(state, null); - } - } - - @Override - public void addStateChangeListener(StateChangeListener listener) { - connectionStateListeners.add(listener); - } - - @Override - public boolean checkPathExists(String path) throws ConfigException { - try { - return curator.checkExists().forPath(path) != null; - } catch (Exception e) { - String info = String.format("[zk] Failed to check EXIST for path [%s]", path); - logger.warn(info); - throw new ConfigException(e); - } - } - - @Override - public Stat getNodeStat(String path) throws ConfigException { - try { - return curator.checkExists().forPath(path); - } catch (Exception e) { - String info = String.format("[zk] Failed to get node stat for path [%s]", path); - logger.warn(info); - throw new ConfigException(e); - } - } - - @Override - public Stat setNodeStat(String path, String data) throws ConfigException { - try { - return curator.setData().forPath(path, data.getBytes()); - } catch (Exception e) { - throw new ConfigException(e); - } - } - - @Override - public Stat setOrCreatePersistentNodeStat(String path, String data) throws ConfigException { - try { - return curator.setData().forPath(path, data.getBytes()); - } catch (KeeperException.NoNodeException e) { - try { - curator.create().withMode(CreateMode.PERSISTENT).forPath(path); - return setNodeStat(path, data); - } catch (KeeperException.NodeExistsException nee) { - return setNodeStat(path, data); - } catch (Exception e2) { - throw new ConfigException(e2); - } - } catch (Exception e) { - throw new ConfigException(e); - } - } - - @Override - public String createPersistentSequential(String path, String data) throws ConfigException { - try { - return curator.create().withMode(CreateMode.PERSISTENT_SEQUENTIAL).forPath(path, data.getBytes()); - } catch (Exception e) { - throw new ConfigException(e); - } - } -// -// @Override -// public void save(String path, T data) throws ConfigException { -// try { -// byte[] bytes = EMPTY; -// if (data != null) { -// bytes = JSON.toJSONBytes(data); -// } -// Stat stat = curator.checkExists().forPath(path); -// if (stat == null) { -// curator.create().creatingParentsIfNeeded().forPath(path, bytes); -// } else { -// curator.setData().forPath(path, bytes); -// } -// } catch (Exception e) { -// logger.warn("create {} failed", path); -// throw new ConfigException(e); -// } -// } -// -// @Override -// public void saveIfNotExisted(String path, T data) throws ConfigException { -// try { -// byte[] bytes = EMPTY; -// if (data != null) { -// bytes = JSON.toJSONBytes(data); -// } -// Stat stat = curator.checkExists().forPath(path); -// if (stat == null) { -// curator.create().creatingParentsIfNeeded().forPath(path, bytes); -// } -// } catch (Exception e) { -// logger.warn("create {} failed", path, e); -// throw new ConfigException(e); -// } -// 
} - -// @Override -// public void register(final String path, final T data) throws ConfigException { -// java.util.concurrent.locks.Lock registerLock = getRegisterLock(path); -// registerLock.lock(); -// try { -// byte[] bytes = EMPTY; -// if (data != null) { -// bytes = JSON.toJSONBytes(data); -// } -// if (!connectionListenerMap.containsKey(path)) { -// connectionListenerMap.put(path, new StateChangeListener() { -// @Override -// public void onChange(State state, String stateChangePath) { -// logger.warn("on state change " + state); -// switch (state) { -// case CONNECTION_RECONNECT: -// try { -// register(path, data); -// } catch (ConfigException e) { -// logger.warn("register {} failed", path); -// } -// break; -// default: -// break; -// } -// } -// }); -// } -// try { -// deletePath(path); -// logger.warn("register reconnect delete {} succeed", path); -// } catch (ConfigException e) { -// logger.warn("register reconnect delete {} failed", path); -// } -// curator.create().creatingParentsIfNeeded().withMode(CreateMode.EPHEMERAL).forPath(path, bytes); -// logger.info("register reconnect create {} succeed", path); -// } catch (Exception e) { -// logger.warn("register reconnect create {} failed", path); -// throw new ConfigException(e); -// } finally { -// registerLock.unlock(); -// } -// } - - @Override - public T get(String path, Class clazz) throws ConfigException { - try { - byte[] bytes = curator.getData().forPath(path); - return JSON.parseObject(bytes, clazz); - } catch (Exception e) { - throw new ConfigException(e); - } - } - - @Override - public String get(String path) throws ConfigException { - try { - byte[] bytes = curator.getData().forPath(path); - return new String(bytes); - } catch (Exception e) { - throw new ConfigException(e); - } - } - - @Override - public void delete(String path) throws ConfigException { - try { - connectionListenerMap.remove(path); - if (curator.checkExists().forPath(path) != null) { - curator.delete().deletingChildrenIfNeeded().forPath(path); - } - } catch (Exception e) { - throw new ConfigException(e); - } - } - -// private void deletePath(String path) throws ConfigException { -// try { -// if (curator.checkExists().forPath(path) != null) { -// curator.delete().deletingChildrenIfNeeded().forPath(path); -// } -// } catch (Exception e) { -// throw new ConfigException(e); -// } -// } - - @SuppressWarnings("all") - @Override - public void watch(final String path, final StateChangeListener listener) throws ConfigException { - java.util.concurrent.locks.Lock dataLock = getDataPathLock(path); - dataLock.lock(); - try { - NodeCache nodeCache = dataWatchers.get(path); - if (nodeCache == null) { - nodeCache = new NodeCache(curator, path); - nodeCache.start(); - dataWatchers.put(path, nodeCache); - nodeCache.getListenable().addListener(new NodeCacheListener() { - @Override - public void nodeChanged() throws Exception { - listener.onChange(StateChangeListener.State.NODE_DATA_CHANGED, path); - } - }); - List listeners = new ArrayList<>(); - listeners.add(listener); - dataListeners.put(path, listeners); - } else { - List listeners = dataListeners.get(path); - Preconditions.checkState(listeners != null); - if (!listeners.contains(listener)) { - listeners.add(listener); - nodeCache.getListenable().addListener(new NodeCacheListener() { - @Override - public void nodeChanged() throws Exception { - listener.onChange(StateChangeListener.State.NODE_DATA_CHANGED, path); - } - }); - } - } - } catch (Exception e) { - throw new ConfigException(e); - } finally { - 
dataLock.unlock(); - } - } - - @Override - public List getChildren(String path) throws ConfigException{ - try { - return curator.getChildren().forPath(path); - } catch (Exception e) { - throw new ConfigException(e); - } - } - - @Override - public void watchChildren(final String path, final StateChangeListener listener) throws ConfigException { - java.util.concurrent.locks.Lock childrenLock = getChildrenPathLock(path); - childrenLock.lock(); - try { - PathChildrenCache pathChildrenCache = childrenWatcher.get(path); - if (pathChildrenCache == null) { - pathChildrenCache = new PathChildrenCache(curator, path, false, false, executor); - pathChildrenCache.start(PathChildrenCache.StartMode.POST_INITIALIZED_EVENT); - childrenWatcher.put(path, pathChildrenCache); - - pathChildrenCache.getListenable().addListener(new PathChildrenCacheListenerImpl(listener)); - List listeners = new ArrayList<>(); - listeners.add(listener); - childrenListeners.put(path, listeners); - } else { - List listeners = childrenListeners.get(path); - Preconditions.checkState(listeners != null); - if (!listeners.contains(listener)) { - listeners.add(listener); - pathChildrenCache.getListenable().addListener(new PathChildrenCacheListenerImpl(listener)); - } - } - } catch (Exception e) { - throw new ConfigException(e); - } finally { - childrenLock.unlock(); - } - } - - @Override - public void cancelWatchChildren(String path) { - java.util.concurrent.locks.Lock childrenLock = getChildrenPathLock(path); - childrenLock.lock(); - try { - PathChildrenCache pathChildrenCache = childrenWatcher.get(path); - if (pathChildrenCache != null) { - try { - pathChildrenCache.close(); - } catch (IOException e) { - logger.warn("close node cache for path {} error", path, e); - } - } - childrenWatcher.remove(path); - childrenListeners.remove(path); - } finally { - childrenLock.unlock(); - } - } - - private static class PathChildrenCacheListenerImpl implements PathChildrenCacheListener { - StateChangeListener listener; - - public PathChildrenCacheListenerImpl(StateChangeListener listener) { - this.listener = listener; - } - - @Override - public void childEvent(CuratorFramework client, PathChildrenCacheEvent event) throws Exception { - String path = event.getData() == null ? 
null : event.getData().getPath(); - switch (event.getType()) { - case CHILD_ADDED: - listener.onChange(StateChangeListener.State.CHILD_ADDED, path); - break; - case CHILD_UPDATED: - listener.onChange(StateChangeListener.State.CHILD_UPDATED, path); - break; - case CHILD_REMOVED: - listener.onChange(StateChangeListener.State.CHILD_DELETED, path); - break; - default: - break; - } - } - } - - @Override - public void lock(String path, long timeoutMS, T t) throws ConfigException { - try { - Lock lock = lockMap.get(path); - if (lock != null) { - if (lock.isAcquiredInThisProcess()) { - return; - } - lock.release(); - lockMap.remove(path); - } - InterProcessSemaphoreMutex mutex = new InterProcessSemaphoreMutex(curator, path); - boolean locked = mutex.acquire(timeoutMS, TimeUnit.MILLISECONDS); - if (!locked) { - throw new ConfigException("lock " + path + " failed " + timeoutMS); - } - if (t != null) { - curator.setData().forPath(path, JSON.toJSONBytes(t)); - } - lock = new Lock(mutex, path); - lockMap.put(path, lock); - } catch (Exception e) { - logger.warn("lock {} failed", path, e); - throw new ConfigException(e); - } - } - - @Override - public void unLock(String path) { - Lock lock = lockMap.remove(path); - if (lock != null) { - lock.release(); - } - } - - public class Lock { - InterProcessSemaphoreMutex mutex; - String path; - - public Lock(InterProcessSemaphoreMutex mutex, String path) { - this.mutex = mutex; - this.path = path; - } - - public void release() { - lockMap.remove(path); - try { - mutex.release(); - } catch (Exception e) { - logger.warn("release path {} lock error {}", path, e.getMessage()); - } - } - - public boolean isAcquiredInThisProcess() { - return mutex.isAcquiredInThisProcess(); - } - } - - @Override - public void close() { - connectionListenerMap.clear(); - connectionStateListeners.clear(); - for (NodeCache nodeCache : dataWatchers.values()) { - try { - nodeCache.close(); - } catch (Exception e) { - logger.warn("close node cache error", e); - } - } - dataWatchers.clear(); - for (PathChildrenCache pathChildrenCache : childrenWatcher.values()) { - try { - pathChildrenCache.close(); - } catch (IOException e) { - logger.warn("close children cache error", e); - } - } - childrenWatcher.clear(); - releaseLocks(); - curator.close(); - executor.shutdown(); - } - - private void releaseLocks() { - for (Lock lock : lockMap.values()) { - lock.release(); - } - lockMap.clear(); - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/ZkPathUtil.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/ZkPathUtil.java deleted file mode 100644 index 0410a553..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/ZkPathUtil.java +++ /dev/null @@ -1,125 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper; - -/** - * @author tukun - * @date 15/11/05 - * @version 1.0.0 - */ -public class ZkPathUtil { - private static final String ZOOKEEPER_SEPARATOR = "/"; - - public static final String BROKER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "brokers"; - - public static final String CONTROLLER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "controller"; - - public static final String BROKER_IDS_ROOT = BROKER_ROOT_NODE + ZOOKEEPER_SEPARATOR + "ids"; - - public static final String BROKER_TOPICS_ROOT = BROKER_ROOT_NODE + ZOOKEEPER_SEPARATOR + "topics"; - - public static final String CONSUMER_ROOT_NODE = ZOOKEEPER_SEPARATOR + "consumers"; - - public static final String REASSIGN_PARTITIONS_ROOT_NODE = 
"/admin/reassign_partitions"; - - /** - * config - */ - public static final String CONFIG_ROOT_NODE = ZOOKEEPER_SEPARATOR + "config"; - - public static final String CONFIG_TOPICS_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "topics"; - - public static final String CONFIG_CLIENTS_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "clients"; - - public static final String CONFIG_ENTITY_CHANGES_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "changes/config_change_"; - - private static final String D_METRICS_CONFIG_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "KafkaExMetrics"; - - public static final String D_CONFIG_EXTENSION_ROOT_NODE = CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + "extension"; - - public static final String D_CONTROLLER_CANDIDATES = D_CONFIG_EXTENSION_ROOT_NODE + ZOOKEEPER_SEPARATOR + "candidates"; - - public static String getBrokerIdNodePath(Integer brokerId) { - return BROKER_IDS_ROOT + ZOOKEEPER_SEPARATOR + String.valueOf(brokerId); - } - - public static String getBrokerTopicRoot(String topicName) { - return BROKER_TOPICS_ROOT + ZOOKEEPER_SEPARATOR + topicName; - } - - public static String getBrokerTopicPartitionStatePath(String topicName, Integer partitionId) { - return BROKER_TOPICS_ROOT + ZOOKEEPER_SEPARATOR + topicName + ZOOKEEPER_SEPARATOR + "partitions" - + ZOOKEEPER_SEPARATOR + String.valueOf(partitionId) + ZOOKEEPER_SEPARATOR + "state"; - } - - //for consumer - public static String getConsumerTopicPartitionOffsetNodePath(String consumerGroup, - String topic, int partitionId) { - return String.format(CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + "%s" + ZOOKEEPER_SEPARATOR - + "offset" + "%s" + "%d", consumerGroup, topic, partitionId); - } - - public static String getConsumerGroupRoot(String consumerGroup) { - return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup; - } - - public static String getConsumerGroupIdsRoot(String consumerGroup) { - return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup + ZOOKEEPER_SEPARATOR - + "ids"; - } - - public static String getConsumerGroupOffsetRoot(String consumerGroup) { - return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup + ZOOKEEPER_SEPARATOR - + "offsets"; - } - - public static String getConsumerGroupOwnersRoot(String consumerGroup) { - return CONSUMER_ROOT_NODE + ZOOKEEPER_SEPARATOR + consumerGroup + ZOOKEEPER_SEPARATOR - + "owners"; - } - - public static String getConsumerGroupConsumerIdsNodePath(String consumerGroup, String consumerId) { - return getConsumerGroupIdsRoot(consumerGroup) + ZOOKEEPER_SEPARATOR + consumerId; - } - - public static String getConsumerGroupOffsetTopicNode(String consumerGroup, String topic) { - return getConsumerGroupOffsetRoot(consumerGroup) + ZOOKEEPER_SEPARATOR + topic; - } - - public static String getConsumerGroupOffsetTopicPartitionNode(String consumerGroup, - String topic, int partitionId) { - return getConsumerGroupOffsetTopicNode(consumerGroup, topic) + ZOOKEEPER_SEPARATOR - + partitionId; - } - - public static String getConsumerGroupOwnersTopicNode(String consumerGroup, String topic) { - return getConsumerGroupOwnersRoot(consumerGroup) + ZOOKEEPER_SEPARATOR + topic; - } - - public static String getConsumerGroupOwnersTopicPartitionNode(String consumerGroup, - String topic, int partitionId) { - return getConsumerGroupOwnersTopicNode(consumerGroup, topic) + ZOOKEEPER_SEPARATOR - + partitionId; - } - - public static String getConfigTopicNode(String topicName) { - return CONFIG_TOPICS_ROOT_NODE + ZOOKEEPER_SEPARATOR + topicName; - } - - public 
static String getConfigClientNodePath(String appId, String topicName) { - return CONFIG_CLIENTS_ROOT_NODE + ZOOKEEPER_SEPARATOR + appId + "." + topicName; - } - - public static String parseLastPartFromZkPath(String zkPath) { - return zkPath.substring(zkPath.lastIndexOf(ZOOKEEPER_SEPARATOR) + 1); - } - - public static String getKafkaExtraMetricsPath(Integer brokerId) { - return D_METRICS_CONFIG_ROOT_NODE + ZOOKEEPER_SEPARATOR + brokerId; - } - - public static String getControllerCandidatePath(Integer brokerId) { - return D_CONTROLLER_CANDIDATES + ZOOKEEPER_SEPARATOR + brokerId; - } - - private ZkPathUtil() { - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/ControllerData.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/ControllerData.java deleted file mode 100644 index f5ea25ce..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/ControllerData.java +++ /dev/null @@ -1,46 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper.znode; - -/** - * @author zengqiao - * @date 19/4/22 - */ -public class ControllerData { - private Integer brokerid; - - private Integer version; - - private Long timestamp; - - public Integer getBrokerid() { - return brokerid; - } - - public void setBrokerid(Integer brokerid) { - this.brokerid = brokerid; - } - - public Integer getVersion() { - return version; - } - - public void setVersion(Integer version) { - this.version = version; - } - - public Long getTimestamp() { - return timestamp; - } - - public void setTimestamp(Long timestamp) { - this.timestamp = timestamp; - } - - @Override - public String toString() { - return "ControllerData{" + - "brokerid=" + brokerid + - ", version=" + version + - ", timestamp=" + timestamp + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/ReassignmentDTO.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/ReassignmentDTO.java deleted file mode 100644 index 0cfc29d4..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/ReassignmentDTO.java +++ /dev/null @@ -1,48 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper.znode; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * @author zengqiao - * @date 20/1/15 - */ -public class ReassignmentDTO { - private Integer version; - - private List> topics; - - public ReassignmentDTO(Integer version, String topicName) { - this.version = version; - Map topic = new HashMap<>(); - topic.put("topic", topicName); - topics = new ArrayList<>(); - topics.add(topic); - } - - public Integer getVersion() { - return version; - } - - public void setVersion(Integer version) { - this.version = version; - } - - public List> getTopics() { - return topics; - } - - public void setTopics(List> topics) { - this.topics = topics; - } - - @Override - public String toString() { - return "ReassignmentDTO{" + - "version=" + version + - ", topics=" + topics + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/ReassignmentElemData.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/ReassignmentElemData.java deleted file mode 100644 index 0480e88a..00000000 --- 
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/ReassignmentElemData.java +++ /dev/null @@ -1,48 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper.znode; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/1/15 - */ -public class ReassignmentElemData { - private String topic; - - private Integer partition; - - private List replicas; - - public String getTopic() { - return topic; - } - - public void setTopic(String topic) { - this.topic = topic; - } - - public Integer getPartition() { - return partition; - } - - public void setPartition(Integer partition) { - this.partition = partition; - } - - public List getReplicas() { - return replicas; - } - - public void setReplicas(List replicas) { - this.replicas = replicas; - } - - @Override - public String toString() { - return "ReassignmentElemDTO{" + - "topic='" + topic + '\'' + - ", partition=" + partition + - ", replicas=" + replicas + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/ReassignmentJsonData.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/ReassignmentJsonData.java deleted file mode 100644 index 45c5b9e6..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/ReassignmentJsonData.java +++ /dev/null @@ -1,37 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper.znode; - -import java.util.List; - -/** - * @author zengqiao - * @date 20/1/15 - */ -public class ReassignmentJsonData { - private Integer version; - - private List partitions; - - public Integer getVersion() { - return version; - } - - public void setVersion(Integer version) { - this.version = version; - } - - public List getPartitions() { - return partitions; - } - - public void setPartitions(List partitions) { - this.partitions = partitions; - } - - @Override - public String toString() { - return "ReassignmentJsonDTO{" + - "version=" + version + - ", partitions=" + partitions + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/BrokerMetadata.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/BrokerMetadata.java deleted file mode 100644 index 598784ca..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/BrokerMetadata.java +++ /dev/null @@ -1,131 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers; - -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.xiaojukeji.kafka.manager.common.constant.KafkaConstant; -import com.xiaojukeji.kafka.manager.common.entity.ao.common.IpPortData; -import com.xiaojukeji.kafka.manager.common.utils.NumberUtils; -import lombok.Data; - -import java.io.Serializable; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * @author zengqiao - * @date 19/4/3 - * - * 存储Broker的元信息, 元信息对应的ZK节点是/brokers/ids/{brokerId} - * 节点结构: - * { - * "listener_security_protocol_map":{"SASL_PLAINTEXT":"SASL_PLAINTEXT"}, - * "endpoints":["SASL_PLAINTEXT://127.0.0.1:9093"], - * "jmx_port":9999, - * "host":null, - * "timestamp":"1546632983233", - * "port":-1, - * "version":4, - * "rack": "CY" - * } - * - * { - * 
"listener_security_protocol_map":{"SASL_PLAINTEXT":"SASL_PLAINTEXT","PLAINTEXT":"PLAINTEXT"}, - * "endpoints":["SASL_PLAINTEXT://127.0.0.1:9093","PLAINTEXT://127.0.0.1:9092"], - * "jmx_port":8099, - * "host":"127.0.0.1", - * "timestamp":"1628833925822", - * "port":9092, - * "version":4 - * } - * - * { - * "listener_security_protocol_map":{"EXTERNAL":"SASL_PLAINTEXT","INTERNAL":"SASL_PLAINTEXT"}, - * "endpoints":["EXTERNAL://127.0.0.1:7092","INTERNAL://127.0.0.1:7093"], - * "jmx_port":8099, - * "host":null, - * "timestamp":"1627289710439", - * "port":-1, - * "version":4 - * } - * - */ -@Data -@JsonIgnoreProperties(ignoreUnknown = true) -public class BrokerMetadata implements Serializable { - private static final long serialVersionUID = 3918113492423375809L; - - private long clusterId; - - private int brokerId; - - private List endpoints; - - // > - private Map endpointMap; - - private String host; - - private int port; - - @JsonProperty("jmx_port") - private int jmxPort; - - private String version; - - private long timestamp; - - private String rack; - - @JsonIgnore - public String getExternalHost() { - if (!endpointMap.containsKey(KafkaConstant.EXTERNAL_KEY)) { - return null; - } - return endpointMap.get(KafkaConstant.EXTERNAL_KEY).getIp(); - } - - @JsonIgnore - public String getInternalHost() { - if (!endpointMap.containsKey(KafkaConstant.INTERNAL_KEY)) { - return null; - } - return endpointMap.get(KafkaConstant.INTERNAL_KEY).getIp(); - } - - public static void parseAndUpdateBrokerMetadata(BrokerMetadata brokerMetadata) { - brokerMetadata.setEndpointMap(new HashMap<>()); - - if (brokerMetadata.getEndpoints().isEmpty()) { - return; - } - - // example EXTERNAL://10.179.162.202:7092 - for (String endpoint: brokerMetadata.getEndpoints()) { - int idx1 = endpoint.indexOf("://"); - int idx2 = endpoint.lastIndexOf(":"); - if (idx1 == -1 || idx2 == -1 || idx1 == idx2) { - continue; - } - - String brokerHost = endpoint.substring(idx1 + "://".length(), idx2); - String brokerPort = endpoint.substring(idx2 + 1); - - brokerMetadata.getEndpointMap().put(endpoint.substring(0, idx1), new IpPortData(brokerHost, brokerPort)); - - if (KafkaConstant.EXTERNAL_KEY.equals(endpoint.substring(0, idx1))) { - // 优先使用external的地址进行展示 - brokerMetadata.setHost(brokerHost); - brokerMetadata.setPort(NumberUtils.string2Integer(brokerPort)); - } - - if (null == brokerMetadata.getHost()) { - brokerMetadata.setHost(brokerHost); - brokerMetadata.setPort(NumberUtils.string2Integer(brokerPort)); - } - } - } -} - - diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/PartitionMap.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/PartitionMap.java deleted file mode 100644 index 48bf2129..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/PartitionMap.java +++ /dev/null @@ -1,47 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers; - -import java.io.Serializable; -import java.util.List; -import java.util.Map; - -/** - * 根据/brokers/topics/topic的节点内容定义 - * @author tukun - * @date 2015/11/10. 
- */ -public class PartitionMap implements Serializable { - - /** - * 版本号 - */ - private int version; - - /** - * Map - */ - private Map> partitions; - - public int getVersion() { - return version; - } - - public void setVersion(int version) { - this.version = version; - } - - public Map> getPartitions() { - return partitions; - } - - public void setPartitions(Map> partitions) { - this.partitions = partitions; - } - - @Override - public String toString() { - return "PartitionMap{" + - "version=" + version + - ", partitions=" + partitions + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/PartitionState.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/PartitionState.java deleted file mode 100644 index ade9680b..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/PartitionState.java +++ /dev/null @@ -1,177 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers; - -import java.util.ArrayList; -import java.util.List; - -/** - * PartitionState实例 - * 对应zookeeper下的state节点信息以及partition的其它信息 - * @author tukun - * @date 2015/11/10. - */ -public class PartitionState implements Cloneable { - /** - * partition id - */ - private int partitionId; - - /** - * kafka集群中的中央控制器选举次数 - */ - private int controller_epoch; - - /** - * Partition所属的leader broker编号 - */ - private int leader; - - /** - * partition的版本号 - */ - private int version; - - /** - * 该partition leader选举次数 - */ - private int leader_epoch; - - /** - * 同步副本组brokerId列表 - */ - private List isr; - - /** - * 是否处于复制同步状态, true表示未同步, false表示已经同步 - */ - private boolean isUnderReplicated; - - /** - * Partition的offset - */ - private long offset; - - /** - * 被消费的offset - */ - private long consumeOffset; - - /** - * 消费者对应的消费group - */ - private String consumerGroup; - - public int getPartitionId() { - return partitionId; - } - - public void setPartitionId(int partitionId) { - this.partitionId = partitionId; - } - - public int getControllerEpoch() { - return controller_epoch; - } - - public void setControllerEpoch(int controllerEpoch) { - this.controller_epoch = controllerEpoch; - } - - public int getLeader() { - return leader; - } - - public void setLeader(int leader) { - this.leader = leader; - } - - public int getVersion() { - return version; - } - - public void setVersion(int version) { - this.version = version; - } - - public int getLeaderEpoch() { - return leader_epoch; - } - - public void setLeaderEpoch(int leaderEpoch) { - this.leader_epoch = leaderEpoch; - } - - public List getIsr() { - return isr; - } - - public void setIsr(List isr) { - this.isr = isr; - } - - public boolean isUnderReplicated() { - return isUnderReplicated; - } - - public void setUnderReplicated(boolean underReplicated) { - isUnderReplicated = underReplicated; - } - - public long getOffset() { - return offset; - } - - public void setOffset(long offset) { - this.offset = offset; - } - - public long getConsumeOffset() { - return consumeOffset; - } - - public void setConsumeOffset(long consumeOffset) { - this.consumeOffset = consumeOffset; - } - - public String getConsumerGroup() { - return consumerGroup; - } - - public void setConsumerGroup(String consumerGroup) { - this.consumerGroup = consumerGroup; - } - - @Override - public String toString() { - return "PartitionState{" + - "partitionId=" + partitionId + - ", controller_epoch=" + controller_epoch + - ", leader=" + leader + - ", 
version=" + version + - ", leader_epoch=" + leader_epoch + - ", isr=" + isr + - ", isUnderReplicated=" + isUnderReplicated + - ", offset=" + offset + - ", consumeOffset=" + consumeOffset + - ", consumerGroup='" + consumerGroup + '\'' + - '}'; - } - - @Override - public PartitionState clone() { - try { - PartitionState partitionState = (PartitionState) super.clone(); - partitionState.setPartitionId(this.partitionId); - partitionState.setControllerEpoch(this.controller_epoch); - partitionState.setLeader(this.leader); - partitionState.setVersion(this.version); - partitionState.setLeaderEpoch(this.leader_epoch); - partitionState.setIsr(new ArrayList<>(this.isr)); - partitionState.setOffset(this.offset); - partitionState.setConsumeOffset(this.consumeOffset); - partitionState.setConsumerGroup(this.consumerGroup); - return partitionState; - } catch (CloneNotSupportedException e) { - } - return null; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/TopicMetadata.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/TopicMetadata.java deleted file mode 100644 index 928276cf..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/brokers/TopicMetadata.java +++ /dev/null @@ -1,93 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper.znode.brokers; - -import java.util.Set; - -/** - * 存储Topic的元信息, 元信息对应的ZK节点是/brokers/topics/${topicName} - * @author zengqiao - * @date 19/4/3 - */ -public class TopicMetadata implements Cloneable { - private String topic; //topic名称 - - private PartitionMap partitionMap; //partition所在的Broker - - private Set brokerIdSet; //topic所在的broker, 由partitionMap获取得到 - - private int replicaNum; //副本数 - - private int partitionNum; //分区数 - - private long modifyTime; //修改节点的时间 - - private long createTime; //创建节点的时间 - - public String getTopic() { - return topic; - } - - public void setTopic(String topic) { - this.topic = topic; - } - - public int getReplicaNum() { - return replicaNum; - } - - public void setReplicaNum(int replicaNum) { - this.replicaNum = replicaNum; - } - - public PartitionMap getPartitionMap() { - return partitionMap; - } - - public void setPartitionMap(PartitionMap partitionMap) { - this.partitionMap = partitionMap; - } - - public Set getBrokerIdSet() { - return brokerIdSet; - } - - public void setBrokerIdSet(Set brokerIdSet) { - this.brokerIdSet = brokerIdSet; - } - - public int getPartitionNum() { - return partitionNum; - } - - public void setPartitionNum(int partitionNum) { - this.partitionNum = partitionNum; - } - - public long getModifyTime() { - return modifyTime; - } - - public void setModifyTime(long modifyTime) { - this.modifyTime = modifyTime; - } - - public long getCreateTime() { - return createTime; - } - - public void setCreateTime(long createTime) { - this.createTime = createTime; - } - - @Override - public String toString() { - return "TopicMetadata{" + - "topic='" + topic + '\'' + - ", partitionMap=" + partitionMap + - ", brokerIdSet=" + brokerIdSet + - ", replicaNum=" + replicaNum + - ", partitionNum=" + partitionNum + - ", modifyTime=" + modifyTime + - ", createTime=" + createTime + - '}'; - } -} diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/config/ChangeData.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/config/ChangeData.java deleted file mode 100644 index 20ed2f04..00000000 --- 
a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/config/ChangeData.java +++ /dev/null @@ -1,44 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper.znode.config; - -/** - * @author zengqiao - * @date 20/5/14 - */ -public class ChangeData { - private static final Integer CHANGE_DATA_VERSION = 2; - - private String entity_path; - - private Integer version; - - public String getEntity_path() { - return entity_path; - } - - public void setEntity_path(String entity_path) { - this.entity_path = entity_path; - } - - public Integer getVersion() { - return version; - } - - public void setVersion(Integer version) { - this.version = version; - } - - public static ChangeData getChangeData(String entity_path) { - ChangeData changeData = new ChangeData(); - changeData.setEntity_path(entity_path); - changeData.setVersion(CHANGE_DATA_VERSION); - return changeData; - } - - @Override - public String toString() { - return "ConfigChangesData{" + - "entity_path='" + entity_path + '\'' + - ", version=" + version + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/config/ConfigNodeData.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/config/ConfigNodeData.java deleted file mode 100644 index 97bf3fa7..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/config/ConfigNodeData.java +++ /dev/null @@ -1,37 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper.znode.config; - -/** - * @author zengqiao - * @date 20/5/12 - */ -public class ConfigNodeData { - public static final Integer CONFIGDATA_VERSION = 1; - - private T config; - - private Integer version; - - public T getConfig() { - return config; - } - - public void setConfig(T config) { - this.config = config; - } - - public Integer getVersion() { - return version; - } - - public void setVersion(Integer version) { - this.version = version; - } - - @Override - public String toString() { - return "CommonDataDTO{" + - "config=" + config + - ", version=" + version + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/config/TopicQuotaData.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/config/TopicQuotaData.java deleted file mode 100644 index 88531518..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/config/TopicQuotaData.java +++ /dev/null @@ -1,48 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper.znode.config; - -import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; - -/** - * @author zengqiao - * @date 20/5/12 - */ -public class TopicQuotaData { - private String consumer_byte_rate; - - private String producer_byte_rate; - - public String getConsumer_byte_rate() { - return consumer_byte_rate; - } - - public void setConsumer_byte_rate(String consumer_byte_rate) { - this.consumer_byte_rate = consumer_byte_rate; - } - - public String getProducer_byte_rate() { - return producer_byte_rate; - } - - public void setProducer_byte_rate(String producer_byte_rate) { - this.producer_byte_rate = producer_byte_rate; - } - - public static TopicQuotaData getClientData(Long producerByteRate, Long consumerByteRate) { - TopicQuotaData clientData = new TopicQuotaData(); - if (!ValidateUtils.isNull(consumerByteRate) && consumerByteRate != 
-1) { - clientData.setConsumer_byte_rate(consumerByteRate.toString()); - } - if (!ValidateUtils.isNull(producerByteRate) && producerByteRate != -1) { - clientData.setProducer_byte_rate(producerByteRate.toString()); - } - return clientData; - } - - @Override - public String toString() { - return "ClientQuotaData{" + - "consumer_byte_rate='" + consumer_byte_rate + '\'' + - ", producer_byte_rate='" + producer_byte_rate + '\'' + - '}'; - } -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/didi/JmxSwitchDataConstant.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/didi/JmxSwitchDataConstant.java deleted file mode 100644 index 134979e2..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/didi/JmxSwitchDataConstant.java +++ /dev/null @@ -1,15 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper.znode.didi; - -/** - * @author zengqiao - * @date 20/8/21 - */ -public class JmxSwitchDataConstant { - public static final String TOPIC_REQUEST_METRICS = "TopicRequestMetrics."; - - public static final String APP_ID_TOPIC_METRICS = "AppIdTopicMetrics."; - - public static final String CLIENT_REQUEST_METRICS = "ClientRequestMetrics."; - - public static final String DISK_METRICS = "DiskMetrics."; -} \ No newline at end of file diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/didi/TopicJmxSwitch.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/didi/TopicJmxSwitch.java deleted file mode 100644 index b43904a9..00000000 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/zookeeper/znode/didi/TopicJmxSwitch.java +++ /dev/null @@ -1,54 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.zookeeper.znode.didi; - -/** - * @author zengqiao - * @date 20/8/21 - */ -public class TopicJmxSwitch { - private Boolean openTopicRequestMetrics = Boolean.FALSE; - - private Boolean openAppIdTopicMetrics = Boolean.FALSE; - - private Boolean openClientRequestMetrics = Boolean.FALSE; - - public TopicJmxSwitch(Boolean openTopicRequestMetrics, - Boolean openAppIdTopicMetrics, - Boolean openClientRequestMetrics) { - this.openTopicRequestMetrics = openTopicRequestMetrics; - this.openAppIdTopicMetrics = openAppIdTopicMetrics; - this.openClientRequestMetrics = openClientRequestMetrics; - } - - public Boolean getOpenTopicRequestMetrics() { - return openTopicRequestMetrics; - } - - public void setOpenTopicRequestMetrics(Boolean openTopicRequestMetrics) { - this.openTopicRequestMetrics = openTopicRequestMetrics; - } - - public Boolean getOpenAppIdTopicMetrics() { - return openAppIdTopicMetrics; - } - - public void setOpenAppIdTopicMetrics(Boolean openAppIdTopicMetrics) { - this.openAppIdTopicMetrics = openAppIdTopicMetrics; - } - - public Boolean getOpenClientRequestMetrics() { - return openClientRequestMetrics; - } - - public void setOpenClientRequestMetrics(Boolean openClientRequestMetrics) { - this.openClientRequestMetrics = openClientRequestMetrics; - } - - @Override - public String toString() { - return "TopicJmxSwitch{" + - "openTopicRequestMetrics=" + openTopicRequestMetrics + - ", openAppIdTopicMetrics=" + openAppIdTopicMetrics + - ", openClientRequestMetrics=" + openClientRequestMetrics + - '}'; - } -} \ No newline at end of file diff --git 
a/kafka-manager-common/src/test/java/com/xiaojukeji/kafka/manager/common/utils/JsonUtilsTest.java b/kafka-manager-common/src/test/java/com/xiaojukeji/kafka/manager/common/utils/JsonUtilsTest.java deleted file mode 100644 index 1d338015..00000000 --- a/kafka-manager-common/src/test/java/com/xiaojukeji/kafka/manager/common/utils/JsonUtilsTest.java +++ /dev/null @@ -1,18 +0,0 @@ -package com.xiaojukeji.kafka.manager.common.utils; - -import org.junit.Assert; -import org.junit.Test; - -import java.util.HashMap; -import java.util.Map; - -public class JsonUtilsTest { - @Test - public void testMapToJsonString() { - Map map = new HashMap<>(); - map.put("key", "value"); - map.put("int", 1); - String expectRes = "{\"key\":\"value\",\"int\":1}"; - Assert.assertEquals(expectRes, JsonUtils.toJSONString(map)); - } -} diff --git a/kafka-manager-console/Dockerfile b/kafka-manager-console/Dockerfile deleted file mode 100644 index 7657fcc9..00000000 --- a/kafka-manager-console/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -ARG NODE_VERSION=12.20.0 -ARG NGINX_VERSION=1.21.5-alpine -FROM node:${NODE_VERSION} AS builder -ARG OUTPUT_PATH=dist - -ENV TZ Asia/Shanghai -WORKDIR /opt -COPY . . -RUN npm config set registry https://registry.npm.taobao.org \ - && npm install \ - # Change the output directory to dist - && sed -i "s#../kafka-manager-web/src/main/resources/templates#$OUTPUT_PATH#g" webpack.config.js \ - && npm run prod-build - -FROM nginx:${NGINX_VERSION} - -ENV TZ=Asia/Shanghai - -COPY --from=builder /opt/dist /opt/dist -COPY --from=builder /opt/web.conf /etc/nginx/conf.d/default.conf diff --git a/kafka-manager-console/package.json b/kafka-manager-console/package.json deleted file mode 100644 index 5d33a320..00000000 --- a/kafka-manager-console/package.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "name": "logi-kafka", - "version": "2.6.1", - "description": "", - "scripts": { - "prestart": "npm install --save-dev webpack-dev-server", - "start": "webpack serve", - "daily-build": "cross-env NODE_ENV=production webpack", - "pre-build": "cross-env NODE_ENV=production webpack", - "prod-build": "cross-env NODE_ENV=production webpack", - "fix-memory": "cross-env LIMIT=4096 increase-memory-limit" - }, - "author": "", - "license": "ISC", - "devDependencies": { - "@hot-loader/react-dom": "^16.8.6", - "@types/events": "^3.0.0", - "@types/lodash.debounce": "^4.0.6", - "@types/react": "^16.8.8", - "@types/react-dom": "^16.8.2", - "@types/react-router-dom": "^4.3.1", - "@types/spark-md5": "^3.0.2", - "@webpack-cli/serve": "^1.6.0", - "antd": "^3.26.15", - "clean-webpack-plugin": "^3.0.0", - "clipboard": "^2.0.8", - "cross-env": "^7.0.2", - "css-loader": "^2.1.0", - "echarts": "^5.2.1", - "file-loader": "^5.0.2", - "html-webpack-plugin": "^3.2.0", - "increase-memory-limit": "^1.0.7", - "less": "^3.9.0", - "less-loader": "^4.1.0", - "mini-css-extract-plugin": "^0.6.0", - "mobx": "^5.9.4", - "mobx-react": "^5.4.3", - "moment": "^2.24.0", - "monaco-editor": "^0.20.0", - "monaco-editor-webpack-plugin": "^1.9.0", - "optimize-css-assets-webpack-plugin": "^5.0.1", - "react": "^16.8.4", - "react-hot-loader": "^4.8.4", - "react-router-dom": "^5.0.0", - "spark-md5": "^3.0.1", - "style-loader": "^0.23.1", - "terser-webpack-plugin": "^1.2.3", - "ts-loader": "^5.3.3", - "tsconfig-paths-webpack-plugin": "^3.2.0", - "tslint": "^5.13.1", - "tslint-react": "^3.6.0", - "typescript": "^3.3.3333", - "url-loader": "^4.1.1", - "webpack": "^4.29.6", - "webpack-cli": "^4.9.1", - "xlsx": "^0.16.1" - }, - "dependencies": { - "format-to-json": "^1.0.4" - 
} -} diff --git a/kafka-manager-console/pom.xml b/kafka-manager-console/pom.xml deleted file mode 100644 index 02ee7e1c..00000000 --- a/kafka-manager-console/pom.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - 4.0.0 - kafka-manager-console - - - kafka-manager - com.xiaojukeji.kafka - ${kafka-manager.revision} - - - - - - com.github.eirslett - frontend-maven-plugin - 1.6 - - target - ./ - - - - install node and npm - - install-node-and-npm - - - v12.20.0 - 6.14.8 - http://npm.taobao.org/mirrors/node/ - https://registry.npm.taobao.org/npm/-/ - - - - npm install - - npm - - - install - - - - npm run prod-build - - npm - - - run prod-build - - - - - - - diff --git a/kafka-manager-console/src/assets/image/admin.png b/kafka-manager-console/src/assets/image/admin.png deleted file mode 100644 index 7e293b1a..00000000 Binary files a/kafka-manager-console/src/assets/image/admin.png and /dev/null differ diff --git a/kafka-manager-console/src/assets/image/devops.png b/kafka-manager-console/src/assets/image/devops.png deleted file mode 100644 index 9e4c7e73..00000000 Binary files a/kafka-manager-console/src/assets/image/devops.png and /dev/null differ diff --git a/kafka-manager-console/src/assets/image/images.d.ts b/kafka-manager-console/src/assets/image/images.d.ts deleted file mode 100644 index ee5149e2..00000000 --- a/kafka-manager-console/src/assets/image/images.d.ts +++ /dev/null @@ -1,5 +0,0 @@ -declare module '*.svg'; -declare module '*.png'; -declare module '*.jpg'; -declare module '*.jpeg'; -declare module '*.gif'; diff --git a/kafka-manager-console/src/assets/image/kafka-logo.png b/kafka-manager-console/src/assets/image/kafka-logo.png deleted file mode 100644 index a04fee05..00000000 Binary files a/kafka-manager-console/src/assets/image/kafka-logo.png and /dev/null differ diff --git a/kafka-manager-console/src/assets/image/kafka-manager.png b/kafka-manager-console/src/assets/image/kafka-manager.png deleted file mode 100644 index 6f740a68..00000000 Binary files a/kafka-manager-console/src/assets/image/kafka-manager.png and /dev/null differ diff --git a/kafka-manager-console/src/assets/image/login-bg.png b/kafka-manager-console/src/assets/image/login-bg.png deleted file mode 100644 index 9c4566b7..00000000 Binary files a/kafka-manager-console/src/assets/image/login-bg.png and /dev/null differ diff --git a/kafka-manager-console/src/assets/image/logo.ico b/kafka-manager-console/src/assets/image/logo.ico deleted file mode 100644 index fcecf158..00000000 Binary files a/kafka-manager-console/src/assets/image/logo.ico and /dev/null differ diff --git a/kafka-manager-console/src/assets/image/normal.png b/kafka-manager-console/src/assets/image/normal.png deleted file mode 100644 index 70ced930..00000000 Binary files a/kafka-manager-console/src/assets/image/normal.png and /dev/null differ diff --git a/kafka-manager-console/src/assets/image/weChat.png b/kafka-manager-console/src/assets/image/weChat.png deleted file mode 100644 index 126f5816..00000000 Binary files a/kafka-manager-console/src/assets/image/weChat.png and /dev/null differ diff --git a/kafka-manager-console/src/assets/image/wechats.jpeg b/kafka-manager-console/src/assets/image/wechats.jpeg deleted file mode 100644 index 6333ad89..00000000 Binary files a/kafka-manager-console/src/assets/image/wechats.jpeg and /dev/null differ diff --git a/kafka-manager-console/src/component/antd/index.tsx b/kafka-manager-console/src/component/antd/index.tsx deleted file mode 100644 index d0958daf..00000000 --- a/kafka-manager-console/src/component/antd/index.tsx +++ 
/dev/null @@ -1,150 +0,0 @@ -import message from 'antd/es/message'; -import 'antd/es/message/style'; - -import Input from 'antd/es/input'; -import 'antd/es/input/style'; - -import InputNumber from 'antd/es/input-number'; -import 'antd/es/input-number/style'; - -import Table from 'antd/es/table'; -import 'antd/es/table/style'; - -import Tabs from 'antd/es/tabs'; -import 'antd/es/tabs/style'; - -import Select from 'antd/es/select'; -import 'antd/es/select/style'; - -import DatePicker from 'antd/es/date-picker'; -import 'antd/es/date-picker/style'; - -import Button from 'antd/es/button'; -import 'antd/es/button/style'; - -import Modal from 'antd/es/modal'; -import 'antd/es/modal/style'; - -import Form from 'antd/es/form'; -import 'antd/es/form/style'; - -import Row from 'antd/es/row'; -import 'antd/es/row/style'; - -import Col from 'antd/es/col'; -import 'antd/es/col/style'; - -import Switch from 'antd/es/switch'; -import 'antd/es/switch/style'; - -import Alert from 'antd/es/alert'; -import 'antd/es/alert/style'; - -import { PaginationConfig, ColumnProps } from 'antd/es/table/interface'; - -import notification from 'antd/es/notification'; -import 'antd/es/notification/style'; - -import Tooltip from 'antd/es/tooltip'; -import 'antd/es/tooltip/style'; - -import Popover from 'antd/es/popover'; -import 'antd/es/popover/style'; - -import Radio from 'antd/es/radio'; -import 'antd/es/radio'; -import { RadioChangeEvent } from 'antd/es/radio'; - -import Collapse from 'antd/es/collapse'; -import 'antd/es/collapse/style'; - -import Icon from 'antd/es/icon'; -import 'antd/es/icon/style'; - -import Dropdown from 'antd/es/dropdown'; -import 'antd/es/dropdown/style'; - -import Spin from 'antd/es/spin'; -import 'antd/es/spin/style'; - -import Drawer from 'antd/es/drawer'; -import 'antd/es/drawer/style'; - -import Checkbox from 'antd/es/checkbox'; -import 'antd/es/checkbox/style'; - -import Affix from 'antd/es/affix'; -import 'antd/es/affix/style'; - -import Popconfirm from 'antd/es/popconfirm'; -import 'antd/es/popconfirm/style'; - -import PageHeader from 'antd/es/page-header'; -import 'antd/es/page-header/style'; - -import Descriptions from 'antd/es/descriptions'; -import 'antd/es/descriptions/style'; - -import Steps from 'antd/es/steps'; -import 'antd/es/steps/style'; - -import Divider from 'antd/es/divider'; -import 'antd/es/divider/style'; - -import Upload from 'antd/es/upload'; -import 'antd/es/upload/style'; - -import Transfer from 'antd/es/transfer'; -import 'antd/es/transfer/style'; - -import TimePicker from 'antd/es/time-picker'; -import 'antd/es/time-picker/style'; - -import Badge from 'antd/es/badge'; -import 'antd/es/badge/style'; - -import Progress from 'antd/es/progress'; -import 'antd/es/progress/style'; - -import { RangePickerValue } from 'antd/es/date-picker/interface'; - -export { - PaginationConfig, - notification, - ColumnProps, - DatePicker, - message, - Tooltip, - Button, - Select, - Switch, - Modal, - Input, - Table, - Radio, - Alert, - Tabs, - Form, - Row, - Col, - RadioChangeEvent, - InputNumber, - Collapse, - Icon, - Dropdown, - Spin, - Drawer, - Checkbox, - Affix, - Popconfirm, - PageHeader, - Descriptions, - Steps, - Divider, - Upload, - TimePicker, - RangePickerValue, - Badge, - Popover, - Transfer -}; diff --git a/kafka-manager-console/src/component/chart/bar-chart.tsx b/kafka-manager-console/src/component/chart/bar-chart.tsx deleted file mode 100644 index d31fcfd7..00000000 --- a/kafka-manager-console/src/component/chart/bar-chart.tsx +++ /dev/null @@ -1,105 +0,0 @@ -import * 
as React from 'react'; -import { Spin, notification } from 'component/antd'; -import * as echarts from 'echarts/core'; - -// 引入柱状图 -import { BarChart } from 'echarts/charts'; - -// 引入提示框和标题组件 -import { - TitleComponent, - TooltipComponent, - LegendComponent, - GridComponent, -} from 'echarts/components'; -import { CanvasRenderer } from 'echarts/renderers'; -import { EChartsOption } from 'echarts'; - -// 注册必须的组件 -echarts.use([ - TitleComponent, - LegendComponent, - TooltipComponent, - BarChart, - GridComponent, - CanvasRenderer, -]); - -interface IChartProps { - getChartData: any; - customerNode?: React.ReactNode; -} - -export class BarChartComponet extends React.Component { - public id: HTMLDivElement = null; - public chart: echarts.ECharts; - - public state = { - loading: false, - noData: false, - }; - - public componentDidMount() { - this.chart = echarts.init(this.id); - this.getChartData(); - window.addEventListener('resize', this.resize); - } - - public componentWillUnmount() { - window.removeEventListener('resize', this.resize); - } - - public resize = () => { - this.chart.resize(); - } - - public isHasData = (data: any) => { - const noData = !(data.series && data.series.length); - this.setState({ noData }); - return !noData; - } - - public getChartData = () => { - const { getChartData } = this.props; - if (!getChartData) { - return notification.error({ message: '图表信息有误' }); - } - - this.setState({ loading: true }); - const chartOptions = getChartData(); - - if ((typeof chartOptions.then) === 'function') { - return chartOptions.then((data: EChartsOption) => { - this.setState({ loading: false }); - - if (this.isHasData(data)) { - this.changeChartOptions(data); - } - }); - } - - if (this.isHasData(chartOptions)) { - this.changeChartOptions(chartOptions); - this.setState({ loading: false }); - } - } - - public changeChartOptions(options: any) { - this.chart.setOption(options, true); - } - - public handleRefreshChart() { - this.getChartData(); - } - - public render() { - return ( - <> - - {this.state.noData ?

<div className="nothing-style">暂无数据</div> : null}
- <div className="chart" ref={(id) =>
this.id = id} /> - - - ); - } -} diff --git a/kafka-manager-console/src/component/chart/date-picker-chart.tsx b/kafka-manager-console/src/component/chart/date-picker-chart.tsx deleted file mode 100644 index 39878805..00000000 --- a/kafka-manager-console/src/component/chart/date-picker-chart.tsx +++ /dev/null @@ -1,145 +0,0 @@ -import * as React from 'react'; -import { DatePicker, notification, Spin } from 'component/antd'; -import moment, { Moment } from 'moment'; -import { timeStampStr } from 'constants/strategy'; -import { disabledDate } from 'lib/utils'; -import * as echarts from 'echarts/core'; - -// 引入柱状图 -import { BarChart, LineChart } from 'echarts/charts'; - -// 引入提示框和标题组件 -import { - TitleComponent, - TooltipComponent, - LegendComponent, - GridComponent, - MarkLineComponent, - DatasetComponent, -} from 'echarts/components'; -import { CanvasRenderer } from 'echarts/renderers'; - -// 注册必须的组件 -echarts.use([ - TitleComponent, - LegendComponent, - TooltipComponent, - GridComponent, - BarChart, - LineChart, - CanvasRenderer, - DatasetComponent, - MarkLineComponent, -]); -import './index.less'; - -const { RangePicker } = DatePicker; - -interface IChartProps { - getChartData: (startTime: moment.Moment, endTime: moment.Moment) => any; - customerNode?: React.ReactNode; -} - -export class ChartWithDatePicker extends React.Component { - public state = { - startTime: moment().subtract(1, 'hour'), - endTime: moment(), - loading: false, - noData: false, - }; - - public id: HTMLDivElement = null; - public chart: echarts.ECharts; - - public getData = () => { - const { startTime, endTime } = this.state; - const { getChartData } = this.props; - this.setState({ loading: true }); - getChartData(startTime, endTime).then((data: any) => { - this.setState({ loading: false }); - this.changeChartOptions(data); - }); - } - - public componentDidMount() { - this.chart = echarts.init(this.id); - this.getData(); - window.addEventListener('resize', this.resize); - } - - public componentWillUnmount() { - window.removeEventListener('resize', this.resize); - } - - public resize = () => { - this.chart.resize(); - } - - public changeChartOptions(options: any) { - const noData = options.series.length ? false : true; - this.setState({ noData }); - options.tooltip.formatter = (params: any) => { - let res = - '

' + - params[0].data.time + - '

'; - // tslint:disable-next-line:prefer-for-of - for (let i = 0; i < params.length; i++) { - res += `
- - ${params[i].seriesName} - ${params[i].data[params[i].seriesName]} -
`; - } - return res; - }; - this.chart.setOption(options, true); - } - - public handleTimeChange = (dates: Moment[]) => { - this.setState({ - startTime: dates[0], - endTime: dates[1], - }); - const { getChartData } = this.props; - this.setState({ loading: true }); - getChartData(dates[0], dates[1]).then((data: any) => { - this.setState({ loading: false }); - this.changeChartOptions(data); - }); - } - - public render() { - const { customerNode } = this.props; - return ( -
<div className="status-box">
- <div className="status-graph">
- <ul className="k-toolbar">
- {customerNode}
- <li>
- <RangePicker
- disabledDate={disabledDate}
- format={timeStampStr}
- value={[this.state.startTime, this.state.endTime]}
- onChange={this.handleTimeChange}
- />
- </li>
- </ul>
- </div>
- <Spin spinning={this.state.loading}>
- {this.state.noData ? <div className="nothing-style">暂无数据</div> : null}
- <div className="chart" ref={(id) => this.id = id} />
- </Spin>
- </div>
- ); - } -} diff --git a/kafka-manager-console/src/component/chart/doughnut-chart.tsx b/kafka-manager-console/src/component/chart/doughnut-chart.tsx deleted file mode 100644 index d4069b48..00000000 --- a/kafka-manager-console/src/component/chart/doughnut-chart.tsx +++ /dev/null @@ -1,74 +0,0 @@ -import * as React from 'react'; -import { Spin } from 'component/antd'; -import * as echarts from 'echarts/core'; -// 引入饼图 -import { PieChart } from 'echarts/charts'; - -// 引入提示框和标题组件 -import { - TitleComponent, - TooltipComponent, - LegendComponent, - GridComponent, -} from 'echarts/components'; -import { CanvasRenderer } from 'echarts/renderers'; - -// 注册必须的组件 -echarts.use([ - PieChart, - TitleComponent, - LegendComponent, - TooltipComponent, - GridComponent, - CanvasRenderer, -]); -interface IPieProps { - getChartData: any; -} - -export class DoughnutChart extends React.Component { - public id: HTMLDivElement = null; - public chart: echarts.ECharts; - - public state = { - loading: true, - isNoData: false, - }; - - public getChartData = () => { - const { getChartData } = this.props; - - this.setState({ loading: true }); - const options = getChartData(); - if (!options || !options.series || !options.series.length) { - this.setState({ - isNoData: true, - loading: false, - }); - return; - } - - this.changeChartOptions(options); - } - - public changeChartOptions(options: any) { - this.chart.setOption(options, true); - this.setState({ loading: false }); - } - - public componentDidMount() { - this.chart = echarts.init(this.id); - this.getChartData(); - } - - public render() { - return ( - <> - - {this.state.isNoData ?
<div className="nothing-style">暂无数据</div> : null}
- <div className="doughnut-chart" ref={(id) =>
this.id = id} /> - - - ); - } -} diff --git a/kafka-manager-console/src/component/chart/index.less b/kafka-manager-console/src/component/chart/index.less deleted file mode 100644 index 4c0cb8d9..00000000 --- a/kafka-manager-console/src/component/chart/index.less +++ /dev/null @@ -1,80 +0,0 @@ -.status-box{ - float: left; - margin: 0 5px; - width: 100%; - .status-graph { - position: relative; - height: 48px; - width: 100%; - background: rgba(255, 255, 255, 255); - display: flex; - justify-content: space-between; - line-height: 48px; - font-family: PingFangSC-Regular; - color: rgba(0, 0, 0, 0.85); - padding: 0 5px; - margin: -15px 0; - .k-toolbar { - &>span.label { - padding: 10px; - font-size: 12px; - } - - li { - float: left; - vertical-align: middle; - line-height: 48px; - margin-right: 20px; - - &>span.label { - padding-right: 10px; - } - } - - .title-toolbar { - float: right; - right: 30px; - - span:first-child { - margin-right: 10px; - } - } - } - } - .graph-none{ - display: none; - } -} -.nothing-style { - height: 300px; - line-height: 300px; - text-align: center; -} - -.chart { - height: 400px; - padding: 10px 20px; -} -.doughnut-chart { - width: 500px; - height: 350px; -} - -.chart-no-data { - height: 0px; - display: none; -} - -.ant-spin-nested-loading { - margin: 0 auto; -} - -.no-footer { - .ant-modal-confirm-btns { - display: none; - } -} - -.no-data-info { - text-align: center; -} \ No newline at end of file diff --git a/kafka-manager-console/src/component/chart/index.tsx b/kafka-manager-console/src/component/chart/index.tsx deleted file mode 100644 index befb951e..00000000 --- a/kafka-manager-console/src/component/chart/index.tsx +++ /dev/null @@ -1,4 +0,0 @@ -export * from './bar-chart'; -export * from './date-picker-chart'; -export * from './doughnut-chart'; -export * from './line-chart'; diff --git a/kafka-manager-console/src/component/chart/line-chart.tsx b/kafka-manager-console/src/component/chart/line-chart.tsx deleted file mode 100644 index 230a503d..00000000 --- a/kafka-manager-console/src/component/chart/line-chart.tsx +++ /dev/null @@ -1,75 +0,0 @@ -import React from 'react'; -import * as echarts from 'echarts/core'; -import './index.less'; - -// 引入柱状图 -import { PieChart, LineChart } from 'echarts/charts'; - -// 引入提示框和标题组件 -import { - TitleComponent, - TooltipComponent, - LegendComponent, - GridComponent, - ToolboxComponent, - DatasetComponent, -} from 'echarts/components'; -import { CanvasRenderer } from 'echarts/renderers'; - -// 注册必须的组件 -echarts.use([ - PieChart, - LineChart, - ToolboxComponent, - TitleComponent, - LegendComponent, - TooltipComponent, - GridComponent, - DatasetComponent, - CanvasRenderer, -]); -export interface IEchartsProps { - width?: number; - height?: number; - options?: any; -} - -export const hasData = (options: any) => { - if (options && options.series && options.series.length) return true; - return false; -}; - -export default class LineCharts extends React.Component { - public id = null as HTMLDivElement; - - public myChart = null as echarts.ECharts; - - public componentDidMount() { - const { options } = this.props; - this.myChart = echarts.init(this.id); - this.myChart.setOption(options, true); - window.addEventListener('resize', this.resize); - } - - public componentWillUnmount() { - window.removeEventListener('resize', this.resize); - } - - public componentDidUpdate() { - this.refresh(); - } - - public refresh = () => { - const { options } = this.props; - this.myChart.setOption(options, true); - } - - public resize = () => { - 
this.myChart.resize(); - } - - public render() { - const { height, width } = this.props; - return
this.id = id} style={{ width: `${width}px`, height: `${height}px` }} />; - } -} diff --git a/kafka-manager-console/src/component/clipboard/index.tsx b/kafka-manager-console/src/component/clipboard/index.tsx deleted file mode 100755 index e9fa015a..00000000 --- a/kafka-manager-console/src/component/clipboard/index.tsx +++ /dev/null @@ -1,55 +0,0 @@ -import * as React from 'react'; -import ClipboardJS from 'clipboard'; -import { - message, -} from 'component/antd'; - -const triggerEvent = (eventName: string, element: Element) => { - let event; - const ele = element || document; - - event = document.createEvent('HTMLEvents'); - event.initEvent(eventName, true, true); - ele.dispatchEvent(event); -}; - -export class Clipboard extends React.Component { - public state = { - text: '', - }; - - private clipboard: any = null; - private dom: Element = null; - - public componentDidMount() { - const clipboard = this.clipboard = new ClipboardJS('.___clipboard', { - text(trigger: Element) { - return trigger.getAttribute('data-text'); - }, - }); - - clipboard.on('success', (e: any) => { - message.success('复制成功!'); - e.clearSelection(); - }); - - clipboard.on('error', (e: any) => { - message.error('复制失败!' + e); - }); - } - - public componentWillUnmount() { - this.clipboard.destroy(); - } - - public copy(text: string) { - this.setState({ text }); - setTimeout(() => triggerEvent('click', this.dom), 0); - } - - public render() { - return ( -
this.dom = dom} /> - ); - } -} diff --git a/kafka-manager-console/src/component/editor/editor.tsx b/kafka-manager-console/src/component/editor/editor.tsx deleted file mode 100644 index cf86f2c7..00000000 --- a/kafka-manager-console/src/component/editor/editor.tsx +++ /dev/null @@ -1,68 +0,0 @@ -// import * as React from 'react'; -// import CodeMirror from 'codemirror/lib/codemirror'; -// import 'codemirror/lib/codemirror.css'; -// import 'codemirror/mode/sql/sql'; -// import 'codemirror/mode/javascript/javascript'; -// import 'codemirror/addon/hint/show-hint.js'; -// import 'codemirror/addon/hint/sql-hint.js'; -// import 'codemirror/addon/hint/show-hint.css'; -// import './index.less'; -// import { indexStore } from 'store/my-index'; - -// interface IProps { -// value?: string; -// placeholder?: string; -// readOnly?: boolean; -// } -// export class CodeMirrorEditor extends React.Component { - -// public editor = null as any; - -// public handleCodeFocus = () => { -// // tslint:disable-next-line:no-unused-expression -// this.editor && this.editor.focus(); -// } - -// public componentDidMount() { -// const { value, placeholder, readOnly } = this.props; -// const code = document.querySelector('.codemirror'); -// code.innerHTML = ''; -// const editor = CodeMirror(document.querySelector('.codemirror'), { -// mode: 'application/json', -// indentWithTabs: true, -// smartIndent: true, -// lineNumbers: true, -// matchBrackets: true, -// autoCloseBrackets: true, -// styleSelectedText: true, -// foldGutter: true, -// readOnly, -// extraKeys: readOnly ? {} : { -// 'Ctrl-Enter': 'autocomplete', -// 'Tab': (cm) => { -// const spaces = Array(cm.getOption('indentUnit') + 1).join(' '); -// cm.replaceSelection(spaces); -// }, -// }, -// placeholder, -// }); -// editor.setValue(value || ''); -// indexStore.setCodeEditorValue(value || ''); -// editor.on('changes', (a: any) => { -// const data = a.getValue(); -// indexStore.setCodeEditorValue(data); -// }); -// this.editor = editor; -// } - -// public render() { -// return ( -//
-//
-//
-// ); -// } -// } diff --git a/kafka-manager-console/src/component/editor/index.less b/kafka-manager-console/src/component/editor/index.less deleted file mode 100644 index 36c52cde..00000000 --- a/kafka-manager-console/src/component/editor/index.less +++ /dev/null @@ -1,31 +0,0 @@ -.editor { - height: 100%; -} - -.CodeMirror-placeholder { - color:#999; - font-size: 12px; - line-height: 14px; - font-family: -apple-system,BlinkMacSystemFont,Neue Haas Grotesk Text Pro,Arial Nova,Segoe UI,Helvetica Neue,\.PingFang SC,PingFang SC,Microsoft YaHei,Microsoft JhengHei,Source Han Sans SC,Noto Sans CJK SC,Source Han Sans CN,Noto Sans SC,Source Han Sans TC,Noto Sans CJK TC,Hiragino Sans GB,sans-serif; -} -.editor-wrap { - max-height: 100%; -} - -.CodeMirror { - height: 100vh; -} - -.monacoEditor{ - height: 150px; - position: relative; - overflow: hidden; - border: 1px solid #cccccc; - border-radius: 4px; - .editor{ - height: 100%; - position: absolute; - left: -12%; - width: 120%; - } -} diff --git a/kafka-manager-console/src/component/editor/index.tsx b/kafka-manager-console/src/component/editor/index.tsx deleted file mode 100644 index 6f605b9c..00000000 --- a/kafka-manager-console/src/component/editor/index.tsx +++ /dev/null @@ -1,50 +0,0 @@ -import * as React from 'react'; -import * as monaco from 'monaco-editor'; - -import './index.less'; - -export interface IEditorProps { - style?: React.CSSProperties; - options: monaco.editor.IStandaloneEditorConstructionOptions; - uri?: monaco.Uri; - autoUnmount?: boolean; - customMount?: (editor: monaco.editor.IStandaloneCodeEditor, monaco: any) => any; - placeholder?: string; -} - -export class EditorCom extends React.Component { - public ref: HTMLElement = null; - public editor: monaco.editor.IStandaloneCodeEditor; - public state = { - placeholder: this.props.placeholder ?? '', - }; - - public componentWillUnmount() { - if (this.props.autoUnmount === false) return; - const model = this.editor.getModel(); - model.dispose(); - this.editor.dispose(); - } - - public componentDidMount() { - const { customMount, options, uri } = this.props; - const { value, language } = options; - if (uri) { - options.model = monaco.editor.createModel(value, language, uri); - } - - this.editor = monaco.editor.create(this.ref, - options, - ); - if (customMount) customMount(this.editor, monaco); - } - - public render() { - const { style } = this.props; - return ( - <> -
{ this.ref = id; }} /> - - ); - } -} diff --git a/kafka-manager-console/src/component/editor/monacoEditor.tsx b/kafka-manager-console/src/component/editor/monacoEditor.tsx deleted file mode 100644 index ac0a297a..00000000 --- a/kafka-manager-console/src/component/editor/monacoEditor.tsx +++ /dev/null @@ -1,65 +0,0 @@ -import * as React from 'react'; -import * as monaco from 'monaco-editor'; -import format2json from 'format-to-json'; -import { Input } from 'component/antd'; -import './index.less'; - -export interface IEditorProps { - style?: React.CSSProperties; - options: monaco.editor.IStandaloneEditorConstructionOptions; - uri?: monaco.Uri; - autoUnmount?: boolean; - customMount?: (editor: monaco.editor.IStandaloneCodeEditor, monaco: any) => any; - placeholder?: string; - value: ''; - onChange?: any; -} - -class Monacoeditor extends React.Component { - public ref: HTMLElement = null; - public editor: monaco.editor.IStandaloneCodeEditor; - public state = { - placeholder: '', - }; - - public async componentDidMount() { - const { value, onChange } = this.props; - const format: any = await format2json(value); - this.editor = monaco.editor.create(this.ref, { - value: format.result || value, - language: 'json', - lineNumbers: 'off', - scrollBeyondLastLine: false, - // selectOnLineNumbers: true, - // roundedSelection: false, - // readOnly: true, - minimap: { - enabled: false, - }, - automaticLayout: true, // 自动布局 - glyphMargin: true, // 字形边缘 {},[] - // useTabStops: false, - // formatOnPaste: true, - // mode: 'application/json', - // indentWithTabs: true, - // smartIndent: true, - // matchBrackets: 'always', - // autoCloseBrackets: true, - // styleSelectedText: true, - // foldGutter: true, - }); - this.editor.onDidChangeModelContent((e) => { - const newValue = this.editor.getValue(); - onChange(newValue); - }); - } - public render() { - return ( -
<div className="monacoEditor">
- <div className="editor" ref={(id) => { this.ref = id; }} />
- </div>
- ); - } -} -export default Monacoeditor; diff --git a/kafka-manager-console/src/component/expand-card/index.less b/kafka-manager-console/src/component/expand-card/index.less deleted file mode 100644 index bd0bbbb8..00000000 --- a/kafka-manager-console/src/component/expand-card/index.less +++ /dev/null @@ -1,39 +0,0 @@ -.card-wrapper { - margin: 24px 0 32px; -} -.card-title { - font-family: PingFangSC-Medium; - font-size: 14px; - color: #333333; - height: 22px; - line-height: 22px; - margin: 15px 0; - display: flex; - align-items: center; - cursor: pointer; - i { - font-size: 15px; - margin-right: 8px; - } -} -.card-content { - background-color: #FAFAFA; - padding: 16px; - overflow: auto; - .chart-row { - overflow: hidden; - width: 100%; - } - .chart-row:not(:first-child) { - margin-top: 16px; - } - .chart-wrapper { - background-color: #FFFFFF; - width: calc(50% - 8px); - float: left; - padding: 16px; - } - .chart-wrapper:nth-child(2n) { - margin-left: 16px; - } -} \ No newline at end of file diff --git a/kafka-manager-console/src/component/expand-card/index.tsx b/kafka-manager-console/src/component/expand-card/index.tsx deleted file mode 100644 index b583ada6..00000000 --- a/kafka-manager-console/src/component/expand-card/index.tsx +++ /dev/null @@ -1,48 +0,0 @@ -import React from 'react'; -import './index.less'; -import { Icon } from 'component/antd'; - -interface ICardProps { - title: string; - expand?: boolean; - charts?: JSX.Element[]; -} - -export class ExpandCard extends React.Component { - public state = { - innerExpand: true, - }; - - public handleClick = () => { - this.setState({ innerExpand: !this.state.innerExpand }); - } - - public render() { - let { expand } = this.props; - if (expand === undefined) expand = this.state.innerExpand; - const { charts } = this.props; - return ( -
<div className="card-wrapper">
- {/* <div className="card-title" onClick={this.handleClick}>
- <Icon type={expand ? 'caret-down' : 'caret-right'} />
- {this.props.title}
- </div> */}
- {expand ?
- <div className="card-content">
- {(charts || []).map((c, index) => {
- if (index % 2 !== 0) return null;
- return (
- <div className="chart-row">
- <div className="chart-wrapper">{c}</div>
- {(index + 1 < charts.length) ? <div className="chart-wrapper">{charts[index + 1]}</div> : null}
- </div>
- );
- })}
- </div> : null}
- </div>
- ); - } -} diff --git a/kafka-manager-console/src/component/flow-table/index.tsx b/kafka-manager-console/src/component/flow-table/index.tsx deleted file mode 100644 index c62f7089..00000000 --- a/kafka-manager-console/src/component/flow-table/index.tsx +++ /dev/null @@ -1,91 +0,0 @@ - -import * as React from 'react'; -import { Table } from 'component/antd'; - -interface IFlow { - key: string; - avr: number; - pre1: number; - pre5: number; - pre15: number; -} - -const flowColumns = [{ - title: '名称', - dataIndex: 'key', - key: 'name', - sorter: (a: IFlow, b: IFlow) => a.key.charCodeAt(0) - b.key.charCodeAt(0), - render(t: string) { - return t === 'byteRejected' ? 'byteRejected(B/s)' : (t === 'byteIn' || t === 'byteOut' ? `${t}(KB/s)` : t); - }, -}, -{ - title: '平均数', - dataIndex: 'avr', - key: 'partition_num', - sorter: (a: IFlow, b: IFlow) => b.avr - a.avr, -}, -{ - title: '前1分钟', - dataIndex: 'pre1', - key: 'byte_input', - sorter: (a: IFlow, b: IFlow) => b.pre1 - a.pre1, -}, -{ - title: '前5分钟', - dataIndex: 'pre5', - key: 'byte_output', - sorter: (a: IFlow, b: IFlow) => b.pre5 - a.pre5, -}, -{ - title: '前15分钟', - dataIndex: 'pre15', - key: 'message', - sorter: (a: IFlow, b: IFlow) => b.pre15 - a.pre15, -}]; - -export interface IFlowInfo { - byteIn: number[]; - byteOut: number[]; - byteRejected: number[]; - failedFetchRequest: number[]; - failedProduceRequest: number[]; - messageIn: number[]; - totalFetchRequest: number[]; - totalProduceRequest: number[]; - [key: string]: number[]; -} - -export class StatusGraghCom extends React.Component { - public getData(): T { - return null; - } - - public getLoading(): boolean { - return null; - } - - public render() { - const statusData = this.getData(); - const loading = this.getLoading(); - const data: any[] = []; - if (!statusData) return ; - Object.keys(statusData).map((key) => { - if (statusData[key]) { - const v = key === 'byteIn' || key === 'byteOut' ? statusData[key].map(i => i && (i / 1024).toFixed(2)) : - statusData[key].map(i => i && i.toFixed(2)); - const obj = { - key, - avr: v[0], - pre1: v[1], - pre5: v[2], - pre15: v[3], - }; - data.push(obj); - } - }); - return ( -
- ); - } -} diff --git a/kafka-manager-console/src/component/virtual-scroll-select.tsx b/kafka-manager-console/src/component/virtual-scroll-select.tsx deleted file mode 100644 index 7222856c..00000000 --- a/kafka-manager-console/src/component/virtual-scroll-select.tsx +++ /dev/null @@ -1,136 +0,0 @@ -import * as React from 'react'; -import debounce from 'lodash.debounce'; -import { Select, Tooltip } from 'component/antd'; -import { ILabelValue } from 'types/base-type'; -import { searchProps } from 'constants/table'; - -interface IAttars { - mode?: 'multiple' | 'tags' | 'default' | 'combobox'; - placeholder?: string; -} - -interface ISelectProps { - onChange: (result: string[] | string) => any; - value?: string[] | string; - isDisabled?: boolean; - attrs?: IAttars; - getData: () => any; - refetchData?: boolean; // 有些页面通过store拿数据需要二次更新 -} -export class VirtualScrollSelect extends React.Component { - public static getDerivedStateFromProps(nextProps: any, prevState: any) { - if (nextProps.refetchData) { - return { - ...prevState, - refetchData: true, - }; - } - return null; - } - public state = { - optionsData: [] as ILabelValue[], - scrollPage: 0, - keyword: '', - total: 0, - refetchData: false, - }; - - public componentDidMount() { - this.getData(); - } - - public getData = async () => { - const { getData } = this.props; - if (!getData) return; - const pageSize = this.state.scrollPage; - let originData = await getData(); - - if (originData) { - originData = this.state.keyword ? - originData.filter((item: any) => item.label.includes(this.state.keyword)) : originData; - let data = [].concat(originData); - // tslint:disable-next-line:no-bitwise - const total = data.length ? data.length / 30 | 1 : 0; - data = data.splice(pageSize * 30, 30); // 每页展示30条数据 - - return this.setState({ - optionsData: data, - total, - refetchData: false, - }); - } - } - - public componentDidUpdate(prevProps: any) { - if (this.state.refetchData && !this.state.optionsData.length) { - // this.getData(); - } - } - - public handleSearch = (e: string) => { - debounce(() => { - this.setState({ - keyword: e.trim(), - scrollPage: 0, - }, () => { - this.getData(); - }); - }, 300)(); - } - - public handleSelectScroll = (e: any) => { - e.persist(); - const { target } = e; - const { scrollPage } = this.state; - debounce(() => { - if (target.scrollTop + target.offsetHeight === target.scrollHeight) { - const nextScrollPage = scrollPage + 1; - if (this.state.total <= nextScrollPage) { // 已全部拉取 - return; - } - this.setState({ - scrollPage: nextScrollPage, - }, () => { - this.getData(); - }); - } - if (target.scrollTop === 0 && scrollPage !== 0) { // 往上滚且不是第一页 - const nextScrollPage = scrollPage - 1; - this.setState({ - scrollPage: nextScrollPage, - }, () => { - this.getData(); - }); - } - }, 200)(); - } - - public render() { - // tslint:disable-next-line:prefer-const - let { value, isDisabled, attrs } = this.props; - if (attrs && (attrs.mode === 'multiple' || attrs.mode === 'tags')) { - value = value || []; - } - return ( - <> - - - ); - } -} diff --git a/kafka-manager-console/src/component/x-form-wrapper/index.tsx b/kafka-manager-console/src/component/x-form-wrapper/index.tsx deleted file mode 100755 index e39f3ef4..00000000 --- a/kafka-manager-console/src/component/x-form-wrapper/index.tsx +++ /dev/null @@ -1,160 +0,0 @@ -import * as React from 'react'; -import { Drawer, Modal, Button, message } from 'component/antd'; -import { XFormComponent } from 'component/x-form'; -import { IXFormWrapper } from 'types/base-type'; -import { 
wrapper } from 'store'; - -export class XFormWrapper extends React.Component { - public state = { - confirmLoading: false, - formMap: this.props.formMap || [] as any, - formData: this.props.formData || {} - }; - - private $formRef: any; - - public updateFormMap$(formMap?: any, formData?: any, isResetForm?: boolean, resetFields?: string[]) { - if (isResetForm) { - resetFields ? this.resetForm(resetFields) : this.resetForm(); - } - this.setState({ - formMap, - formData, - }); - } - - public render() { - const { type } = this.props; - switch (type) { - case 'drawer': - return this.renderDrawer(); - default: - return this.renderModal(); - } - } - - public renderDrawer() { - const { - visible, - title, - width, - formData, - formMap, - formLayout, - cancelText, - okText, - customRenderElement, - noform, - nofooter, - } = this.props; - - return ( - - <> - {customRenderElement} - - {!noform && ( - this.$formRef = form} - formData={formData} - formMap={formMap} - formLayout={formLayout} - />)} - {!nofooter && (
- <div className="footer-btn">
- <Button onClick={this.handleCancel}>{cancelText}</Button>
- <Button type="primary" loading={this.state.confirmLoading} onClick={this.handleSubmit}>{okText}</Button>
- </div>
)} - <> - -
- ); - } - - public renderModal() { - const { visible, title, width, formLayout, cancelText, okText, customRenderElement } = this.props; - const { formMap, formData } = this.state; - return ( - - this.$formRef = form} - formData={formData} - formMap={formMap} - formLayout={formLayout} - /> - <>{customRenderElement} - - ); - } - - public handleSubmit = () => { - this.$formRef.validateFields((error: Error, result: any) => { - if (error) { - return; - } - const { onSubmit, isWaitting, onSubmitFaild } = this.props; - - if (typeof onSubmit === 'function') { - if (isWaitting) { - this.setState({ - confirmLoading: true, - }); - onSubmit(result).then(() => { - message.success('操作成功'); - this.resetForm(); - this.closeModalWrapper(); - }).catch((err: any) => { - const { formMap, formData } = wrapper.xFormWrapper; - onSubmitFaild(err, this.$formRef, formData, formMap); - }).finally(() => { - this.setState({ - confirmLoading: false, - }); - }); - return; - } - - // tslint:disable-next-line:no-unused-expression - onSubmit && onSubmit(result); - - this.resetForm(); - this.closeModalWrapper(); - } - }); - } - - public handleCancel = () => { - const { onCancel } = this.props; - // tslint:disable-next-line:no-unused-expression - onCancel && onCancel(); - this.resetForm(); - this.closeModalWrapper(); - } - - public resetForm = (resetFields?: string[]) => { - // tslint:disable-next-line:no-unused-expression - this.$formRef && this.$formRef.resetFields(resetFields || ''); - } - - public closeModalWrapper = () => { - const { onChangeVisible } = this.props; - // tslint:disable-next-line:no-unused-expression - onChangeVisible && onChangeVisible(false); - } -} diff --git a/kafka-manager-console/src/component/x-form/index.less b/kafka-manager-console/src/component/x-form/index.less deleted file mode 100644 index a08230a6..00000000 --- a/kafka-manager-console/src/component/x-form/index.less +++ /dev/null @@ -1,11 +0,0 @@ -.ant-input-number, .ant-form-item-children .ant-select { - width: 314px -} - -.footer-btn { - float: right; - - Button:first-child { - margin-right: 16px; - } -} \ No newline at end of file diff --git a/kafka-manager-console/src/component/x-form/index.tsx b/kafka-manager-console/src/component/x-form/index.tsx deleted file mode 100755 index dc435d0f..00000000 --- a/kafka-manager-console/src/component/x-form/index.tsx +++ /dev/null @@ -1,202 +0,0 @@ -import * as React from 'react'; -import { Select, Input, InputNumber, Form, Switch, Checkbox, DatePicker, Radio, Upload, Button, Icon, Tooltip } from 'component/antd'; -import Monacoeditor from 'component/editor/monacoEditor'; -import { searchProps } from 'constants/table'; -import { version } from 'store/version'; -import './index.less'; - -const TextArea = Input.TextArea; -const { RangePicker } = DatePicker; - -export enum FormItemType { - input = 'input', - inputPassword = 'input_password', - inputNumber = 'input_number', - textArea = 'text_area', - select = 'select', - _switch = '_switch', - custom = 'custom', - checkBox = 'check_box', - datePicker = 'date_picker', - rangePicker = 'range_picker', - radioGroup = 'radio_group', - upload = 'upload', - monacoEditor = 'monaco_editor', -} - -export interface IFormItem { - key: string; - label: string; - type: FormItemType; - value?: string; - // 内部组件属性注入 - attrs?: any; - // form属性注入 - formAttrs?: any; - defaultValue?: string | number | any[]; - rules?: any[]; - invisible?: boolean; - renderExtraElement?: () => JSX.Element; -} - -export interface IFormSelect extends IFormItem { - options: Array<{ key?: 
string | number, value: string | number, label: string, text?: string }>; -} - -interface IFormCustom extends IFormItem { - customFormItem: React.Component; -} - -interface IXFormProps { - formMap: IFormItem[]; - formData: any; - form: any; - formLayout?: any; - layout?: 'inline' | 'horizontal' | 'vertical'; -} - -class XForm extends React.Component { - - private defaultFormLayout = { - labelCol: { span: 6 }, - wrapperCol: { span: 16 }, - }; - - public onUploadFileChange = (e: any) => { - if (Array.isArray(e)) { - return e; - } - return e && e.fileList; - } - - public handleFormItem(formItem: any, formData: any) { - let initialValue = formData[formItem.key] === 0 ? 0 : (formData[formItem.key] || formItem.defaultValue || ''); - let valuePropName = 'value'; - - if (formItem.type === FormItemType.datePicker) { - initialValue = initialValue || null; - } - - // if (formItem.type === FormItemType.checkBox) { - // initialValue = formItem.defaultValue ? [formItem.defaultValue] : []; - // } - - if (formItem.type === FormItemType._switch) { - initialValue = false; - } - - // if (formItem.type === FormItemType.select && formItem.attrs - // && ['tags'].includes(formItem.attrs.mode)) { - // initialValue = formItem.defaultValue ? [formItem.defaultValue] : []; - // } - - if (formItem.type === FormItemType._switch) { - valuePropName = 'checked'; - } - - if (formItem.type === FormItemType.upload) { - valuePropName = 'fileList'; - } - - return { initialValue, valuePropName }; - } - - public render() { - const { form, formData, formMap, formLayout, layout } = this.props; - const { getFieldDecorator } = form; - return ( -
({})}> - {formMap.map(formItem => { - const { initialValue, valuePropName } = this.handleFormItem(formItem, formData); - const getFieldValue = { - initialValue, - rules: formItem.rules || [{ required: false, message: '' }], - valuePropName, - }; - if (formItem.type === FormItemType.upload) { - Object.assign(getFieldValue, { - getValueFromEvent: this.onUploadFileChange, - }); - } - return ( - !formItem.invisible && - - {getFieldDecorator(formItem.key, getFieldValue)( - this.renderFormItem(formItem), - )} - {formItem.renderExtraElement ? formItem.renderExtraElement() : null} - {/* 添加保存时间提示文案 */} - {formItem.attrs?.prompttype ? {formItem.attrs.prompttype} : null} - - ); - })} - - ); - } - - public renderFormItem(item: IFormItem) { - switch (item.type) { - default: - case FormItemType.input: - return ; - case FormItemType.inputPassword: - return ; - case FormItemType.inputNumber: - return ; - case FormItemType.textArea: - return