Compare commits

...

64 Commits

Author SHA1 Message Date
EricZeng
2672502c07 Merge pull request #174 from 17hao/issue-153-authority
Tracking delete account
2021-02-07 16:10:56 +08:00
EricZeng
83440cc3d9 Merge pull request #173 from 17hao/issue-153
Tracking changes applied to app
2021-02-07 16:10:01 +08:00
17hao
8e5f93be1c Tracking delete account 2021-02-07 15:54:41 +08:00
17hao
c1afc07955 Tracking changes applied to app 2021-02-07 15:16:26 +08:00
EricZeng
4a83e14878 Merge pull request #172 from 17hao/issue-153
Tracking changes applied to Kafka cluster
2021-02-07 14:38:38 +08:00
17hao
832320abc6 Improve code's cohesion && save jmx properties 2021-02-07 14:20:57 +08:00
17hao
70c237da72 Tracking changes applied to Kafka cluster 2021-02-07 13:23:22 +08:00
EricZeng
edfcc5c023 Merge pull request #169 from 17hao/issue-153
Record topic operation
2021-02-06 22:30:32 +08:00
17hao
0668debec6 Update pom.xml 2021-02-06 18:46:47 +08:00
17hao
02d6463faa Using JsonUtils instead of fastjson 2021-02-06 18:43:36 +08:00
17hao
1fdb85234c Record editing topic 2021-02-05 12:18:50 +08:00
EricZeng
44b7dd1808 Merge pull request #167 from ZHAOYINRUI/master
Update README and the cluster onboarding guide
2021-02-04 19:21:59 +08:00
ZHAOYINRUI
e983ee3101 Update README.md 2021-02-04 19:10:11 +08:00
ZHAOYINRUI
75e7e81c05 Add files via upload 2021-02-04 19:09:02 +08:00
ZHAOYINRUI
31ce3b9c08 Update add_cluster.md 2021-02-04 19:08:28 +08:00
EricZeng
ed93c50fef modify without logical cluster desc 2021-02-04 16:54:42 +08:00
EricZeng
4845660eb5 Merge pull request #163 from 17hao/issue-160
Issue#160: Remove __consumer_offsets from topic list
2021-02-04 16:42:41 +08:00
17hao
c7919210a2 Fix topic list filter condition 2021-02-04 16:32:31 +08:00
17hao
9491418f3b Update if statements 2021-02-04 12:33:32 +08:00
17hao
e8de403286 Hide __transaction_state in topic list && fix logic error 2021-02-04 12:14:44 +08:00
17hao
dfb625377b Using existing topic name constant 2021-02-03 22:30:38 +08:00
EricZeng
2c0f2a8be6 Merge pull request #166 from ZHAOYINRUI/master
Update the cluster onboarding and resource application docs
2021-02-03 21:02:38 +08:00
ZHAOYINRUI
787d3cb3e9 Update resource_apply.md 2021-02-03 20:52:44 +08:00
ZHAOYINRUI
96ca17d26c Add files via upload 2021-02-03 19:43:03 +08:00
ZHAOYINRUI
3dd0f7f2c3 Update add_cluster.md 2021-02-03 19:41:33 +08:00
ZHAOYINRUI
10ba0cf976 Update resource_apply.md 2021-02-03 18:18:02 +08:00
ZHAOYINRUI
276c15cc23 Delete docs/user_guide/resource_apply directory 2021-02-03 18:08:15 +08:00
ZHAOYINRUI
2584b848ad Update resource_apply.md 2021-02-03 18:07:34 +08:00
ZHAOYINRUI
6471efed5f Add files via upload 2021-02-03 18:04:40 +08:00
ZHAOYINRUI
5b7d7ad65d Create resource_apply.md 2021-02-03 18:01:42 +08:00
17hao
712851a8a5 Add braces 2021-02-03 16:06:16 +08:00
17hao
63d291cb47 Remove __consumer_offsets from topic list 2021-02-03 15:50:33 +08:00
EricZeng
f825c92111 Merge pull request #159 from didi/dev_2.2.1
storage support s3
2021-02-03 13:49:52 +08:00
EricZeng
419eb2ea41 Merge pull request #158 from didi/dev
change dockerfile and helm location
2021-02-03 10:09:43 +08:00
zengqiao
89b58dd64e storage support s3 2021-02-02 16:42:20 +08:00
zengqiao
6bc5f81440 change dockerfile and helm location 2021-02-02 15:58:46 +08:00
EricZeng
424f4b7b5e Merge pull request #157 from didi/master
merge master
2021-02-02 15:33:51 +08:00
mrazkong
9271a1caac Merge pull request #118 from yangvipguang/helm-dev
Add a Dockerfile and a simple Helm chart
2021-02-01 10:50:05 +08:00
杨光
0ee4df03f9 Update Dockerfile 2021-01-31 15:34:15 +08:00
杨光
8ac713ce32 Update Dockerfile 2021-01-31 15:30:18 +08:00
杨光
76b2489fe9 Delete docker-depends/agent/config directory 2021-01-31 15:29:50 +08:00
杨光
6786095154 Delete sources.list 2021-01-31 15:29:31 +08:00
杨光
2c5793ef37 Delete settings 2021-01-31 15:29:19 +08:00
杨光
d483f25b96 Add files via upload
add  jmx prometheus
2021-01-31 15:28:59 +08:00
EricZeng
7118368979 Merge pull request #136 from ZHAOYINRUI/patch-10
Create resource_apply.md
2021-01-29 10:54:11 +08:00
EricZeng
59256c2e80 modify jdbc url
modify jdbc url, add useSSL=false
2021-01-29 10:53:35 +08:00
EricZeng
1fb8a0db1e Merge pull request #146 from ZHAOYINRUI/patch-12
Update README.md
2021-01-29 10:03:48 +08:00
ZHAOYINRUI
07d0c8e8fa Update README.md 2021-01-28 22:02:49 +08:00
EricZeng
98452ead17 Merge pull request #145 from didi/dev
faq add how to resolve topic biz data not exist error desc
2021-01-28 16:20:42 +08:00
zengqiao
d8c9f40377 faq add how to resolve topic biz data not exist error desc 2021-01-28 15:50:31 +08:00
EricZeng
8148d5eec6 Merge pull request #144 from didi/dev
optimize result code
2021-01-28 14:11:00 +08:00
zengqiao
4c429ad604 optimize result code 2021-01-28 12:06:06 +08:00
EricZeng
a9c52de8d5 Merge pull request #143 from ZhaoXinlong/patch-1
correcting some typos
2021-01-27 20:28:32 +08:00
ZhaoXinlong
f648aa1f91 correcting some typos
Fix text errors
2021-01-27 16:31:42 +08:00
EricZeng
eaba388bdd Merge pull request #142 from didi/dev
add connect jmx failed desc
2021-01-27 16:06:39 +08:00
zengqiao
73e6afcbc6 add connect jmx failed desc 2021-01-27 16:01:18 +08:00
EricZeng
8c3b72adf2 Merge pull request #139 from didi/dev
optimize message when login failed
2021-01-26 19:57:50 +08:00
zengqiao
ae18ff4262 optimize login failed message 2021-01-26 16:15:08 +08:00
ZHAOYINRUI
1adc8af543 Create resource_apply.md 2021-01-25 19:27:31 +08:00
EricZeng
7413df6f1e Merge pull request #131 from ZHAOYINRUI/patch-9
Update alarm_rules.md
2021-01-25 18:36:06 +08:00
EricZeng
bda8559190 Merge pull request #135 from didi/master
merge master
2021-01-25 18:34:56 +08:00
ZHAOYINRUI
4e0114ab0d Update alarm_rules.md 2021-01-25 13:24:01 +08:00
杨光
a950be2d95 change password 2021-01-20 17:57:14 +08:00
杨光
ba6f5ab984 add helm and dockerfile 2021-01-20 17:49:56 +08:00
59 changed files with 1245 additions and 285 deletions

View File

@@ -5,66 +5,89 @@
**A one-stop `Apache Kafka` cluster metrics monitoring and operations management platform**
---
## Key Features
### Quick Trial
- Demo address: http://117.51.146.109:8080 account/password: admin/admin
### Cluster Monitoring
- Multi-version cluster management, supporting versions from `0.10.2` to `2.x`
- Historical and real-time key metrics for clusters, Topics, Brokers, and more
### Cluster Management
- Cluster operations, including managing clusters through logical Regions
- Broker operations, including preferred-replica election
- Topic operations, including creation, query, expansion, property modification, data sampling, migration, and more
- Consumer group operations, including resetting consume offsets to a specified time or a specified offset
Reading this README will tell you about DiDi Logi-KafkaManager's target users, product positioning, and more; through the demo address you can quickly experience the full workflow of Kafka cluster metrics monitoring and operations management.<br>If DiDi Logi-KafkaManager is already used in your company's production environment and you would like better official support and guidance, you can join the official community through the [`OCE certification`](http://obsuite.didiyun.com/open/openAuth).
### User Perspectives
## 1 Product Overview
DiDi Logi-KafkaManager grew out of years of Kafka operating practice inside DiDi. It is a shared, multi-tenant Kafka cloud platform built for Kafka users and Kafka operators, focused on core scenarios such as Kafka operations management, monitoring and alerting, and resource governance, and battle-tested on large-scale clusters with massive data volumes. It has reached an internal satisfaction rate of 90% and has commercial partnerships with several well-known companies.
- Separate views for Kafka users, Kafka developers, and Kafka operators
- Separate permissions for Kafka users, Kafka developers, and Kafka operators
### 1.1 Quick Trial Address
- Demo address: http://117.51.146.109:8080 account/password: admin/admin
### 1.2 Experience Maps
Unlike similar products with a single user perspective (mostly an administrator view), DiDi Logi-KafkaManager builds role-based, multi-scenario experience maps: the **user experience map, the operator experience map, and the platform-operations experience map**
#### 1.2.1 User Experience Map
- Platform tenant application: apply for an application (App) as the user identity in Kafka, authenticated by AppID + password
- Cluster resource application: apply on demand, use on demand; use the shared clusters provided by the platform, or apply for a dedicated cluster for an application
- Topic application: create Topics under your application (App), or apply for read/write permission on other Topics
- Topic operations: data sampling, quota adjustment, partition application, and more
- Metrics monitoring: latency statistics across each stage of a Topic's produce/consume path, with performance metrics at different percentiles
- Offset reset: reset consume offsets to a specified time or a specified position
#### 1.2.2 Operator Experience Map
- Multi-version cluster management: supports versions from `0.10.2` to `2.x`
- Metrics monitoring: historical and real-time key metrics for clusters, Topics, Brokers, and more, with a health-score system
- Cluster operations: group some Brokers into a Region, use Regions as the unit of resource partitioning, and divide logical clusters by business line and assurance capability
- Broker operations: including preferred-replica election and more
- Topic operations: including creation, query, expansion, property modification, migration, offlining, and more
## kafka-manager Architecture
#### 1.2.3 Platform-Operations Experience Map
- Resource governance: distilled resource-governance methods; frequent issues such as Topic partition hotspots and insufficient partitions are handled with expert-level governance
- Ticketing system: Topic creation, quota adjustment, partition application, and similar operations are approved by professional operators, standardizing resource usage and keeping the platform running smoothly
- Cost control: Topic and cluster resources are applied for and used on demand; costs are accounted from traffic, helping enterprises build a big-data cost-accounting system
### 1.3 Core Strengths
- Efficient problem locating: monitors many core metrics, aggregates different percentiles, and provides a rich set of metric reports to help users and operators locate problems quickly and efficiently
- Convenient cluster management: Regions define the unit of cluster resource partitioning, and logical clusters are divided by assurance level, easing resource isolation and improving scalability while keeping strong control over the server side
- Expert resource governance: years of operating practice inside DiDi distilled into resource-governance methods and a health-score system; frequent issues such as Topic partition hotspots and insufficient partitions are handled with expert-level governance
- Operations ecosystem: integrates with DiDi's Nightingale monitoring and alerting system, bringing monitoring/alerting, cluster deployment, and cluster upgrade capabilities into one operations ecosystem with distilled expert services for more efficient O&M
### 1.4 DiDi Logi-KafkaManager Architecture Diagram
![kafka-manager-arch](https://img-ys011.didistatic.com/static/dicloudpub/do1_xgDHNDLj2ChKxctSuf72)
## Related Documents
## 2 Related Documents
- [kafka-manager Installation Guide](docs/install_guide/install_guide_cn.md)
- [kafka-manager Cluster Onboarding](docs/user_guide/add_cluster/add_cluster.md)
- [kafka-manager User Guide](docs/user_guide/user_guide_cn.md)
- [kafka-manager FAQ](docs/user_guide/faq.md)
### 2.1 Product Documents
- [DiDi Logi-KafkaManager Installation Guide](docs/install_guide/install_guide_cn.md)
- [DiDi Logi-KafkaManager Cluster Onboarding](docs/user_guide/add_cluster/add_cluster.md)
- [DiDi Logi-KafkaManager User Guide](docs/user_guide/user_guide_cn.md)
- [DiDi Logi-KafkaManager FAQ](docs/user_guide/faq.md)
## DingTalk Group
### 2.2 Community Articles
- [Product introduction on the DiDi Cloud website](https://www.didiyun.com/production/logi-KafkaManager.html)
- [Seven years in the making: the DiDi Logi log service suite](https://mp.weixin.qq.com/s/-KQp-Qo3WKEOc9wIR2iFnw)
- [DiDi Logi-KafkaManager: a one-stop Kafka monitoring and management platform](https://mp.weixin.qq.com/s/9qSZIkqCnU6u9nLMvOOjIQ)
- [DiDi Logi-KafkaManager: the road to open source](https://xie.infoq.cn/article/0223091a99e697412073c0d64)
- [DiDi Logi-KafkaManager video tutorial series](https://mp.weixin.qq.com/s/9X7gH0tptHPtfjPPSdGO8g)
- [Kafka in practice (15): a study of DiDi's open-source Kafka management platform Logi-KafkaManager, by A叶子叶来](https://blog.csdn.net/yezonggang/article/details/113106244)
![dingding_group](./docs/assets/images/common/dingding_group.jpg)
## 3 DingTalk Group for DiDi Logi Open-Source Users
![dingding_group](./docs/assets/images/common/dingding_group.jpg)
DingTalk group ID: 32821440
## OCE Certification
OCE is a certification mechanism and exchange platform tailored for production users of Logi-KafkaManager. We provide OCE enterprises with better technical support, such as dedicated technical salons, one-on-one enterprise exchanges, and a dedicated Q&A group. If Logi-KafkaManager is running in production at your company, [come and join](http://obsuite.didiyun.com/open/openAuth)!
## 4 OCE Certification
OCE is a certification mechanism and exchange platform tailored for production users of DiDi Logi-KafkaManager. We provide OCE enterprises with better technical support, such as dedicated technical salons, one-on-one enterprise exchanges, and a dedicated Q&A group. If Logi-KafkaManager is running in production at your company, [come and join](http://obsuite.didiyun.com/open/openAuth)!
## Project Members
## 5 Project Members
### Internal Core Members
### 5.1 Internal Core Members
`iceyuhui``liuyaguang``limengmonty``zhangliangmike``nullhuangyiming``zengqiao``eilenexuzhe``huangjiaweihjw``zhaoyinrui``marzkonglingxu``joysunchao`
### External Contributors
### 5.2 External Contributors
`fangjunyu``zhoutaiyang`
## License
## 6 License
`kafka-manager` is distributed and used under the `Apache-2.0` license; see the [license file](./LICENSE) for more information

View File

@@ -0,0 +1,44 @@
FROM openjdk:8-jdk-alpine3.9
LABEL author="yangvipguang"
ENV VERSION 2.1.0
ENV JAR_PATH kafka-manager-web/target
COPY $JAR_PATH/kafka-manager-web-$VERSION-SNAPSHOT.jar /tmp/app.jar
COPY $JAR_PATH/application.yml /km/
RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories
RUN apk add --no-cache --virtual .build-deps \
font-adobe-100dpi \
ttf-dejavu \
fontconfig \
curl \
apr \
apr-util \
apr-dev \
tomcat-native \
&& apk del .build-deps
ENV AGENT_HOME /opt/agent/
WORKDIR /tmp
COPY docker-depends/config.yaml $AGENT_HOME
COPY docker-depends/jmx_prometheus_javaagent-0.14.0.jar $AGENT_HOME
ENV JAVA_AGENT="-javaagent:$AGENT_HOME/jmx_prometheus_javaagent-0.14.0.jar=9999:$AGENT_HOME/config.yaml"
ENV JAVA_HEAP_OPTS="-Xms1024M -Xmx1024M -Xmn100M "
ENV JAVA_OPTS="-verbose:gc \
-XX:+PrintGC -XX:+PrintGCDetails -XX:+PrintHeapAtGC -Xloggc:/tmp/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps \
-XX:MaxMetaspaceSize=256M -XX:+DisableExplicitGC -XX:+UseStringDeduplication \
-XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:-UseContainerSupport"
#-Xlog:gc -Xlog:gc* -Xlog:gc+heap=trace -Xlog:safepoint
EXPOSE 8080 9999
ENTRYPOINT ["sh","-c","java -jar $JAVA_HEAP_OPTS $JAVA_OPTS /tmp/app.jar --spring.config.location=/km/application.yml"]
## Prometheus JMX monitoring is off by default; if you need it, uncomment the ENTRYPOINT below and comment out the default ENTRYPOINT line above.
## ENTRYPOINT ["sh","-c","java -jar $JAVA_AGENT $JAVA_HEAP_OPTS $JAVA_OPTS /tmp/app.jar --spring.config.location=/km/application.yml"]
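A quick local smoke test of this image (a sketch; it assumes the jar was already built with `mvn package -DskipTests` so that `kafka-manager-web/target/kafka-manager-web-2.1.0-SNAPSHOT.jar` exists, and the tag `km:2.1.0` is an arbitrary choice):
```shell
# build the runtime image from the repo root; the Dockerfile copies the jar out of kafka-manager-web/target
docker build -t km:2.1.0 .
# run it, exposing the web UI (8080) and the JMX-exporter port (9999)
docker run -d --name km -p 8080:8080 -p 9999:9999 km:2.1.0
```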

View File

@@ -0,0 +1,5 @@
---
startDelaySeconds: 0
ssl: false
lowercaseOutputName: false
lowercaseOutputLabelNames: false

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

container/helm/Chart.yaml Normal file
View File

@@ -0,0 +1,24 @@
apiVersion: v2
name: didi-km
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "didi-km.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "didi-km.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "didi-km.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "didi-km.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

View File

@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "didi-km.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "didi-km.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "didi-km.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "didi-km.labels" -}}
helm.sh/chart: {{ include "didi-km.chart" . }}
{{ include "didi-km.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "didi-km.selectorLabels" -}}
app.kubernetes.io/name: {{ include "didi-km.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "didi-km.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "didi-km.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,88 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: km-cm
data:
application.yml: |
server:
port: 8080
tomcat:
accept-count: 1000
max-connections: 10000
max-threads: 800
min-spare-threads: 100
spring:
application:
name: kafkamanager
datasource:
kafka-manager:
jdbc-url: jdbc:mysql://xxxxx:3306/kafka-manager?characterEncoding=UTF-8&serverTimezone=GMT%2B8&useSSL=false
username: admin
password: admin
driver-class-name: com.mysql.jdbc.Driver
main:
allow-bean-definition-overriding: true
profiles:
active: dev
servlet:
multipart:
max-file-size: 100MB
max-request-size: 100MB
logging:
config: classpath:logback-spring.xml
custom:
idc: cn
jmx:
max-conn: 20
store-metrics-task:
community:
broker-metrics-enabled: true
topic-metrics-enabled: true
didi:
app-topic-metrics-enabled: false
topic-request-time-metrics-enabled: false
topic-throttled-metrics: false
save-days: 7
# task-related switches
task:
op:
sync-topic-enabled: false # periodically sync Topics not yet stored in the DB
account:
ldap:
kcm:
enabled: false
storage:
base-url: http://127.0.0.1
n9e:
base-url: http://127.0.0.1:8004
user-token: 12345678
timeout: 300
account: root
script-file: kcm_script.sh
monitor:
enabled: false
n9e:
nid: 2
user-token: 1234567890
mon:
base-url: http://127.0.0.1:8032
sink:
base-url: http://127.0.0.1:8006
rdb:
base-url: http://127.0.0.1:80
notify:
kafka:
cluster-id: 95
topic-name: didi-kafka-notify
order:
detail-url: http://127.0.0.1
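To load this configuration into a Kubernetes cluster and check it (a sketch; it assumes the manifest above is saved as `configmap.yaml` and that the `xxxxx` MySQL host placeholder has been replaced):
```shell
kubectl apply -f configmap.yaml
# confirm the application.yml key rendered as expected
kubectl get configmap km-cm -o yaml
```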

View File

@@ -0,0 +1,56 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "didi-km.fullname" . }}
labels:
{{- include "didi-km.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "didi-km.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "didi-km.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "didi-km.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 8080
protocol: TCP
- name: jmx-metrics
containerPort: 9999
protocol: TCP
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -0,0 +1,28 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "didi-km.fullname" . }}
labels:
{{- include "didi-km.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "didi-km.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,41 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "didi-km.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "didi-km.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "didi-km.fullname" . }}
labels:
{{- include "didi-km.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "didi-km.selectorLabels" . | nindent 4 }}

View File

@@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "didi-km.serviceAccountName" . }}
labels:
{{- include "didi-km.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "didi-km.fullname" . }}-test-connection"
labels:
{{- include "didi-km.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "didi-km.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never

View File

@@ -0,0 +1,79 @@
# Default values for didi-km.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: docker.io/yangvipguang/km
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "v18"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: "km"
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 8080
ingress:
enabled: false
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths: []
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources:
# Default limits/requests are specified below as a starting point; adjust them for
# your environment. (The upstream chart scaffold leaves this empty so that resource
# sizing stays a conscious choice for the user, e.g. on small environments such as
# Minikube.)
limits:
cpu: 50m
memory: 2048Mi
requests:
cpu: 10m
memory: 200Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
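To render and install the chart with these values (a sketch; it assumes Helm 3 and the chart directory `container/helm`, while the `km` release name and namespace are free choices):
```shell
# preview the rendered manifests without installing
helm template km ./container/helm
# install, overriding the image tag if needed
helm install km ./container/helm --namespace km --create-namespace --set image.tag=v18
```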

Binary file not shown.

After

Width:  |  Height:  |  Size: 382 KiB

View File

@@ -0,0 +1,101 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**A one-stop `Apache Kafka` cluster metrics monitoring and operations management platform**
---
## Fixing JMX Connection Failures
Once a cluster is onboarded to Logi-KafkaManager, the cluster's Broker list becomes visible. If, at that point, real-time Topic traffic or real-time Broker traffic cannot be viewed, the cause is most likely a JMX connection problem.
The steps below walk through the checks one by one.
### 1. Problems & Explanations
**Type 1: JMX is not enabled**
If it is not enabled, go straight to `2. Solution` to see how to enable it.
![check_jmx_opened](./assets/connect_jmx_failed/check_jmx_opened.jpg)
**Type 2: misconfiguration**
Even with the `JMX` port open, an incorrect configuration can still cause connection failures. Roughly speaking, the common causes are:
- Incorrect `JMX` configuration: see `2. Solution`
- A firewall or network restriction: from another machine with network access, try `telnet` to check whether the port is reachable.
- Username/password authentication is required: see `3. Solution: Authenticated JMX`
Example error logs:
```
# Error 1: the error message shows the real IP; in that case the JMX configuration itself is almost certainly wrong.
2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:192.168.0.1 port:9999.
java.rmi.ConnectException: Connection refused to host: 192.168.0.1; nested exception is:
# Error 2: the error message shows the IP 127.0.0.1; the machine's hostname configuration is probably wrong.
2021-01-27 10:06:20.730 ERROR 50901 --- [ics-Thread-1-62] c.x.k.m.c.utils.jmx.JmxConnectorWrap : JMX connect exception, host:127.0.0.1 port:9999.
java.rmi.ConnectException: Connection refused to host: 127.0.0.1;; nested exception is:
```
### 2. Solution
Only a fairly general fix is described here; if you have a better approach, feel free to share it.
Edit the `kafka-server-start.sh` file:
```
# add the JMX port configuration below this block
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
export JMX_PORT=9999 # add this line; the value does not have to be 9999
fi
```
&nbsp;
Edit the `kafka-run-class.sh` file:
```
# JMX settings
if [ -z "$KAFKA_JMX_OPTS" ]; then
KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=${当前机器的IP}"
fi
# JMX port to use
if [ $JMX_PORT ]; then
KAFKA_JMX_OPTS="$KAFKA_JMX_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT"
fi
```
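After restarting the Broker, connectivity can be verified from another machine (a sketch; `192.168.0.1` and `9999` stand in for your Broker IP and JMX port):
```shell
# the JMX port should accept remote TCP connections
telnet 192.168.0.1 9999
# or attach a JMX client directly; jconsole ships with the JDK
jconsole 192.168.0.1:9999
```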
### 3. Solution: Authenticated JMX
If you jumped straight to this section, it is recommended to read the previous section, `2. Solution`, first, to make sure the `JMX` configuration itself is correct.
If the JMX configuration is fine but the connection still fails because of authentication, the method below can be used.
**This part of the backend was only recently finished and may not be fully polished; reach out any time if you run into problems.**
Versions from `Logi-KafkaManager 2.2.0+` onward support authenticated `JMX` connections in the backend, but there is no UI for it yet; instead, write the `JMX` authentication info into the `jmx_properties` field of the `cluster` table.
The value is a `json`-formatted string, for example (the comments are illustrative only and must not appear in the real value):
```json
{
"maxConn": 10, # KM对单台Broker的最大JMX连接数
"username": "xxxxx", # 用户名
"password": "xxxx", # 密码
"openSSL": true, # 开启SSL, true表示开启ssl, false表示关闭
}
```
&nbsp;
SQL example:
```sql
UPDATE cluster SET jmx_properties='{ "maxConn": 10, "username": "xxxxx", "password": "xxxx", "openSSL": false }' where id={xxx};
```
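Before writing the credentials into the table, they can be sanity-checked against the Broker with a JMX client (a sketch; jconsole ships with the JDK and prompts for the username/password from `jmx_properties` in its connection dialog):
```shell
jconsole service:jmx:rmi:///jndi/rmi://192.168.0.1:9999/jmxrmi
```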

View File

@@ -15,7 +15,7 @@
Because `MySQL 8` and `MySQL 5.7` cannot both be supported at the same time, the default version in the code is still `MySQL 5.7`.
To use `MySQL 8`, make the small code changes described in the following steps.
To use `MySQL 8`, make the small code changes described in the following steps.
- Step 1. Change the MySQL driver class in application.yml
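The driver-class change itself is one line (a sketch; `com.mysql.cj.jdbc.Driver` is the Connector/J 8 driver class that replaces `com.mysql.jdbc.Driver`, and the `grep` path is an assumption about where the build keeps the config):
```shell
# locate the line to change, then set: driver-class-name: com.mysql.cj.jdbc.Driver
grep -n "driver-class-name" kafka-manager-web/src/main/resources/application.yml
```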

View File

@@ -5,16 +5,26 @@
**A one-stop `Apache Kafka` cluster metrics monitoring and operations management platform**
---
# Cluster Onboarding
Onboarding a cluster takes three steps in total:
1. Onboard the physical cluster
2. Create a Region
3. Create a logical cluster
## Key Concepts
For large-scale clusters and complex business scenarios, the concepts of Region and logical cluster are introduced:
- Region: a subset of Brokers grouped as a Region; Regions define the unit of resource partitioning, improving scalability and isolation, so that misbehaving Topics do not affect a large share of the Brokers
- Logical cluster: a logical cluster is composed of Regions, making it easier to manage large clusters by business line and assurance capability
![op_cluster_arch](assets/op_cluster_arch.png)
Note: steps 2 and 3 are needed because ordinary users only see logical clusters; without steps 2 and 3, ordinary users would see nothing.
Onboarding a cluster takes three steps in total:
1. Onboard the physical cluster: fill in host addresses, security protocol, and other settings to connect the real physical cluster
2. Create a Region: group a subset of Brokers into a Region
3. Create a logical cluster: a logical cluster is composed of Regions; create logical clusters according to business lines and assurance levels
![op_cluster_flow](assets/op_cluster_flow.png)
**Note: steps 2 and 3 are needed because ordinary users only see logical clusters; without steps 2 and 3, ordinary users would see nothing.**
## 1. Onboard the Physical Cluster
@@ -36,4 +46,4 @@
![op_add_logical_cluster](assets/op_add_logical_cluster.jpg)
As shown above, fill in the logical cluster info and click Confirm to finish creating the logical cluster.
As shown above, fill in the logical cluster info and click Confirm to finish creating the logical cluster.

Binary file not shown.

After

Width:  |  Height:  |  Size: 124 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 105 KiB

View File

@@ -1,4 +1,4 @@
![kafka-manager-logo](../assets/images/common/logo_name.png))
![kafka-manager-logo](../assets/images/common/logo_name.png)
**A one-stop `Apache Kafka` cluster metrics monitoring and operations management platform**

Binary file not shown.

After

Width:  |  Height:  |  Size: 119 KiB

View File

@@ -9,18 +9,41 @@
# FAQ
- 1. No cluster to choose when applying for a Topic
- 0. Fixing broken images on GitHub
- 1. No cluster to choose when applying for a Topic, creating a monitoring alert, etc.
- 2. What logical clusters & Regions are for
- 3. Login failure
- 4. No data for page traffic info, etc.
- 5. How to integrate with Nightingale (n9e) monitoring and alerting
- 6. How to use `MySQL 8`
- 7. How to fix `Jmx` connection failures?
- 8. The `topic biz data not exist` error and how to handle it
---
### 1. No cluster to choose when applying for a Topic
### 0. Fixing broken images on GitHub
- See the [kafka-manager Cluster Onboarding](docs/user_guide/add_cluster/add_cluster.md) guide; both the Region and the logical cluster there must be added
On your local machine, `ping github.com` to obtain the IP address behind `github.com`,
then bind that IP in the `/etc/hosts` file.
For example:
```shell
# add the following entry to the /etc/hosts file
140.82.113.3 github.com
```
---
### 1. No cluster to choose when applying for a Topic, creating a monitoring alert, etc.
This is caused by a missing logical cluster. The Topic Management, Monitoring & Alerting, and Cluster Management tabs all use the ordinary-user view, and ordinary users only see logical clusters, so operating under these three tabs requires a logical cluster to exist.
To create a logical cluster, see:
- the [kafka-manager Cluster Onboarding](docs/user_guide/add_cluster/add_cluster.md) guide; both the Region and the logical cluster there must be added.
---
@@ -29,7 +52,7 @@
Their main purposes are managing large clusters & hiding cluster details.
- Logical cluster: groups a cluster's Brokers by business line, making management easier
- Region: with Regions introduced and Topics created at Region granularity, connections between Brokers are reduced
- Region: with Regions introduced and Topics created at Region granularity, connections between Brokers are reduced
---
@@ -43,7 +66,7 @@
- 1. Check whether `Broker JMX` is enabled correctly.
If it is not enabled yet, search the web for how to enable it
If it is not enabled yet, search the web for how to enable it, or see: [JMX connection configuration & troubleshooting](../dev_guide/connect_jmx_failed.md)
![helpcenter](./assets/faq/jmx_check.jpg)
@@ -53,7 +76,7 @@
- 3. Database time zone problems.
Check whether MySQL's topic_metrics and broker_metrics tables contain data; if they do, then check whether the configured time zone is correct.
Check whether MySQL's topic table contains data; if it does, then check whether the configured time zone is correct.
---
@@ -66,3 +89,23 @@
### 6. How to use `MySQL 8`
- See the [kafka-manager with `MySQL 8`](../dev_guide/use_mysql_8.md) notes.
---
### 7. How to fix `Jmx` connection failures?
- See the [JMX connection configuration & troubleshooting](../dev_guide/connect_jmx_failed.md) notes.
---
### 8. The `topic biz data not exist` error and how to handle it
**Cause:**
This error can appear during permission approval. It occurs because the Topic's business information is not stored in the DB; more concretely, the Topic does not belong to any application. The fix is simply to attach such ownerless Topics to an application.
**Solution:**
Under `Operations -> Cluster List -> Topic Info`, edit the Topic whose permission is being applied for and select an application for it.
The above covers a single Topic; if you have a large number of Topics to initialize, you can instead add a configuration in Configuration Management that periodically syncs ownerless Topics; see [Dynamic Configuration Management - 1. Scheduled Topic sync task](../dev_guide/dynamic_config_manager.md)

View File

@@ -0,0 +1,32 @@
---
![kafka-manager-logo](../assets/images/common/logo_name.png)
**A one-stop `Apache Kafka` cluster metrics monitoring and operations management platform**
---
# Resource Application Guide
## Key Terms
- Application (App): the account in Kafka, identified by AppID + password
- Cluster: use the shared clusters provided by the platform, or apply for a dedicated cluster for a given application
- Topic: apply to create a Topic, or apply for produce/consume permission on other Topics; producing/consuming is authenticated by Topic + AppID
![production_consumption_flow](assets/resource_apply/production_consumption_flow.png)
## Applying for an Application
An application (App) is the account in Kafka, identified by AppID + password. Producing to or consuming from a Topic is authenticated by Topic + AppID.
A user applies for an application; once an operator approves it, the user receives the AppID and the secret
## Applying for a Cluster
The shared clusters provided by the platform can be used; for stronger isolation, stability, or produce/consume throughput requirements, a dedicated cluster can be requested for a given application
## Applying for a Topic
- A user can create Topics under an already-approved application; after creation, the application owner holds produce/consume and management permission on the Topic by default
- A user can also apply for produce/consume permission on other Topics; once the owner of the Topic's application approves, the permission is granted.

View File

@@ -104,5 +104,10 @@
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
</dependency>
</dependencies>
</project>

View File

@@ -12,125 +12,97 @@ public enum ResultStatus {
SUCCESS(Constant.SUCCESS, "success"),
LOGIN_FAILED(1, "login failed, please check username and password"),
/**
* internal dependency errors, [1000, 1200)
* operation errors, [1000, 2000)
* ------------------------------------------------------------------------------------------
*/
MYSQL_ERROR(1000, "operate database failed"),
CONNECT_ZOOKEEPER_FAILED(1000, "connect zookeeper failed"),
READ_ZOOKEEPER_FAILED(1000, "read zookeeper failed"),
READ_JMX_FAILED(1000, "read jmx failed"),
// internal dependency errors: Kafka-specific errors, [1000, 1100)
BROKER_NUM_NOT_ENOUGH(1000, "broker not enough"),
CONTROLLER_NOT_ALIVE(1000, "controller not alive"),
CLUSTER_METADATA_ERROR(1000, "cluster metadata error"),
TOPIC_CONFIG_ERROR(1000, "topic config error"),
/**
* external dependency errors, [1200, 1400)
* ------------------------------------------------------------------------------------------
*/
CALL_CLUSTER_TASK_AGENT_FAILED(1000, " call cluster task agent failed"),
CALL_MONITOR_SYSTEM_ERROR(1000, " call monitor-system failed"),
/**
* external user operation errors, [1400, 1600)
* ------------------------------------------------------------------------------------------
*/
PARAM_ILLEGAL(1400, "param illegal"),
OPERATION_FAILED(1401, "operation failed"),
OPERATION_FORBIDDEN(1402, "operation forbidden"),
API_CALL_EXCEED_LIMIT(1403, "api call exceed limit"),
USER_WITHOUT_AUTHORITY(1404, "user without authority"),
CHANGE_ZOOKEEPER_FORBIDDEN(1405, "change zookeeper forbidden"),
// resource does not exist
CLUSTER_NOT_EXIST(10000, "cluster not exist"),
BROKER_NOT_EXIST(10000, "broker not exist"),
TOPIC_NOT_EXIST(10000, "topic not exist"),
PARTITION_NOT_EXIST(10000, "partition not exist"),
ACCOUNT_NOT_EXIST(10000, "account not exist"),
APP_NOT_EXIST(1000, "app not exist"),
ORDER_NOT_EXIST(1000, "order not exist"),
CONFIG_NOT_EXIST(1000, "config not exist"),
IDC_NOT_EXIST(1000, "idc not exist"),
TASK_NOT_EXIST(1110, "task not exist"),
TOPIC_OPERATION_PARAM_NULL_POINTER(1450, "参数错误"),
TOPIC_OPERATION_PARTITION_NUM_ILLEGAL(1451, "分区数错误"),
TOPIC_OPERATION_BROKER_NUM_NOT_ENOUGH(1452, "Broker数不足错误"),
TOPIC_OPERATION_TOPIC_NAME_ILLEGAL(1453, "Topic名称非法"),
TOPIC_OPERATION_TOPIC_EXISTED(1454, "Topic已存在"),
TOPIC_OPERATION_UNKNOWN_TOPIC_PARTITION(1455, "Topic未知"),
TOPIC_OPERATION_TOPIC_CONFIG_ILLEGAL(1456, "Topic配置错误"),
TOPIC_OPERATION_TOPIC_IN_DELETING(1457, "Topic正在删除"),
TOPIC_OPERATION_UNKNOWN_ERROR(1458, "未知错误"),
AUTHORITY_NOT_EXIST(1000, "authority not exist"),
/**
* parameter errors, [2000, 3000)
* ------------------------------------------------------------------------------------------
*/
PARAM_ILLEGAL(2000, "param illegal"),
CG_LOCATION_ILLEGAL(2001, "consumer group location illegal"),
ORDER_ALREADY_HANDLED(2002, "order already handled"),
APP_ID_OR_PASSWORD_ILLEGAL(2003, "app or password illegal"),
SYSTEM_CODE_ILLEGAL(2004, "system code illegal"),
CLUSTER_TASK_HOST_LIST_ILLEGAL(2005, "主机列表错误,请检查主机列表"),
JSON_PARSER_ERROR(2006, "json parser error"),
MONITOR_NOT_EXIST(1110, "monitor not exist"),
BROKER_NUM_NOT_ENOUGH(2050, "broker not enough"),
CONTROLLER_NOT_ALIVE(2051, "controller not alive"),
CLUSTER_METADATA_ERROR(2052, "cluster metadata error"),
TOPIC_CONFIG_ERROR(2053, "topic config error"),
QUOTA_NOT_EXIST(1000, "quota not exist, please check clusterId, topicName and appId"),
/**
* parameter errors - resource check errors
* errors caused by external systems during operations, [7000, 8000)
* ------------------------------------------------------------------------------------------
*/
RESOURCE_NOT_EXIST(7100, "资源不存在"),
CLUSTER_NOT_EXIST(7101, "cluster not exist"),
BROKER_NOT_EXIST(7102, "broker not exist"),
TOPIC_NOT_EXIST(7103, "topic not exist"),
PARTITION_NOT_EXIST(7104, "partition not exist"),
ACCOUNT_NOT_EXIST(7105, "account not exist"),
APP_NOT_EXIST(7106, "app not exist"),
ORDER_NOT_EXIST(7107, "order not exist"),
CONFIG_NOT_EXIST(7108, "config not exist"),
IDC_NOT_EXIST(7109, "idc not exist"),
TASK_NOT_EXIST(7110, "task not exist"),
AUTHORITY_NOT_EXIST(7111, "authority not exist"),
MONITOR_NOT_EXIST(7112, "monitor not exist"),
QUOTA_NOT_EXIST(7113, "quota not exist, please check clusterId, topicName and appId"),
CONSUMER_GROUP_NOT_EXIST(7114, "consumerGroup not exist"),
TOPIC_BIZ_DATA_NOT_EXIST(7115, "topic biz data not exist, please sync topic to db"),
// resource does not exist / already exists / already in use
RESOURCE_NOT_EXIST(1200, "资源存在"),
RESOURCE_ALREADY_EXISTED(1200, "资源已经存在"),
RESOURCE_NAME_DUPLICATED(1200, "资源名称重复"),
RESOURCE_ALREADY_USED(1000, "资源早已被使用"),
// resource already exists
RESOURCE_ALREADY_EXISTED(7200, "资源已经存在"),
TOPIC_ALREADY_EXIST(7201, "topic already existed"),
// duplicate resource name
RESOURCE_NAME_DUPLICATED(7300, "资源名称重复"),
// resource already in use
RESOURCE_ALREADY_USED(7400, "资源早已被使用"),
/**
* resource parameter errors
* errors caused by external systems during operations, [8000, 9000)
* ------------------------------------------------------------------------------------------
*/
CG_LOCATION_ILLEGAL(10000, "consumer group location illegal"),
ORDER_ALREADY_HANDLED(1000, "order already handled"),
MYSQL_ERROR(8010, "operate database failed"),
APP_ID_OR_PASSWORD_ILLEGAL(1000, "app or password illegal"),
SYSTEM_CODE_ILLEGAL(1000, "system code illegal"),
ZOOKEEPER_CONNECT_FAILED(8020, "zookeeper connect failed"),
ZOOKEEPER_READ_FAILED(8021, "zookeeper read failed"),
CLUSTER_TASK_HOST_LIST_ILLEGAL(1000, "主机列表错误,请检查主机列表"),
// failed to call the agent of the cluster task
CALL_CLUSTER_TASK_AGENT_FAILED(8030, " call cluster task agent failed"),
// failed to call the monitoring system
CALL_MONITOR_SYSTEM_ERROR(8040, " call monitor-system failed"),
// storage-related call failures
STORAGE_UPLOAD_FILE_FAILED(8050, "upload file failed"),
STORAGE_FILE_TYPE_NOT_SUPPORT(8051, "File type not support"),
STORAGE_DOWNLOAD_FILE_FAILED(8052, "download file failed"),
///////////////////////////////////////////////////////////////
USER_WITHOUT_AUTHORITY(1000, "user without authority"),
JSON_PARSER_ERROR(1000, "json parser error"),
TOPIC_OPERATION_PARAM_NULL_POINTER(2, "参数错误"),
TOPIC_OPERATION_PARTITION_NUM_ILLEGAL(3, "分区数错误"),
TOPIC_OPERATION_BROKER_NUM_NOT_ENOUGH(4, "Broker数不足错误"),
TOPIC_OPERATION_TOPIC_NAME_ILLEGAL(5, "Topic名称非法"),
TOPIC_OPERATION_TOPIC_EXISTED(6, "Topic已存在"),
TOPIC_OPERATION_UNKNOWN_TOPIC_PARTITION(7, "Topic未知"),
TOPIC_OPERATION_TOPIC_CONFIG_ILLEGAL(8, "Topic配置错误"),
TOPIC_OPERATION_TOPIC_IN_DELETING(9, "Topic正在删除"),
TOPIC_OPERATION_UNKNOWN_ERROR(10, "未知错误"),
TOPIC_EXIST_CONNECT_CANNOT_DELETE(10, "topic exist connect cannot delete"),
EXIST_TOPIC_CANNOT_DELETE(10, "exist topic cannot delete"),
/**
* work orders
*/
CHANGE_ZOOKEEPER_FORBIDEN(100, "change zookeeper forbiden"),
// APP_EXIST_TOPIC_AUTHORITY_CANNOT_DELETE(1000, "app exist topic authority cannot delete"),
UPLOAD_FILE_FAIL(1000, "upload file fail"),
FILE_TYPE_NOT_SUPPORT(1000, "File type not support"),
DOWNLOAD_FILE_FAIL(1000, "download file fail"),
TOPIC_ALREADY_EXIST(17400, "topic already existed"),
CONSUMER_GROUP_NOT_EXIST(17411, "consumerGroup not exist"),
;
private int code;

View File

@@ -2,6 +2,7 @@ package com.xiaojukeji.kafka.manager.common.utils;
import org.apache.commons.lang.StringUtils;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -11,6 +12,20 @@ import java.util.Set;
* @date 20/4/16
*/
public class ValidateUtils {
/**
* returns true if any of the given objects is null
*/
public static boolean anyNull(Object... objects) {
return Arrays.stream(objects).anyMatch(ValidateUtils::isNull);
}
/**
* returns true if any of the given strings is blank (empty or null)
*/
public static boolean anyBlank(String... strings) {
return Arrays.stream(strings).anyMatch(StringUtils::isBlank);
}
/**
* is null
*/

View File

@@ -0,0 +1,18 @@
package com.xiaojukeji.kafka.manager.common.utils;
import org.junit.Assert;
import org.junit.Test;
import java.util.HashMap;
import java.util.Map;
public class JsonUtilsTest {
@Test
public void testMapToJsonString() {
Map<String, Object> map = new HashMap<>();
map.put("key", "value");
map.put("int", 1);
String expectRes = "{\"key\":\"value\",\"int\":1}";
Assert.assertEquals(expectRes, JsonUtils.toJSONString(map));
}
}

View File

@@ -43,7 +43,7 @@ public interface ClusterService {
ClusterNameDTO getClusterName(Long logicClusterId);
ResultStatus deleteById(Long clusterId);
ResultStatus deleteById(Long clusterId, String operator);
/**
* get the brokers preferred as controller candidates

View File

@@ -1,9 +1,12 @@
package com.xiaojukeji.kafka.manager.service.service;
import com.xiaojukeji.kafka.manager.common.bizenum.ModuleEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.OperateEnum;
import com.xiaojukeji.kafka.manager.common.entity.dto.rd.OperateRecordDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OperateRecordDO;
import java.util.List;
import java.util.Map;
/**
* @author zhongyuankai
@@ -12,5 +15,7 @@ import java.util.List;
public interface OperateRecordService {
int insert(OperateRecordDO operateRecordDO);
int insert(String operator, ModuleEnum module, String resourceName, OperateEnum operate, Map<String, String> content);
List<OperateRecordDO> queryByCondt(OperateRecordDTO dto);
}

View File

@@ -17,7 +17,7 @@ public interface AppService {
* @param appDO appDO
* @return int
*/
ResultStatus addApp(AppDO appDO);
ResultStatus addApp(AppDO appDO, String operator);
/**
* 删除数据

View File

@@ -60,10 +60,8 @@ public class AppServiceImpl implements AppService {
@Autowired
private OperateRecordService operateRecordService;
@Override
public ResultStatus addApp(AppDO appDO) {
public ResultStatus addApp(AppDO appDO, String operator) {
try {
if (appDao.insert(appDO) < 1) {
LOGGER.warn("class=AppServiceImpl||method=addApp||AppDO={}||msg=add fail,{}",appDO,ResultStatus.MYSQL_ERROR.getMessage());
@@ -75,6 +73,15 @@ public class AppServiceImpl implements AppService {
kafkaUserDO.setOperation(OperationStatusEnum.CREATE.getCode());
kafkaUserDO.setUserType(0);
kafkaUserDao.insert(kafkaUserDO);
Map<String, String> content = new HashMap<>();
content.put("appId", appDO.getAppId());
content.put("name", appDO.getName());
content.put("applicant", appDO.getApplicant());
content.put("password", appDO.getPassword());
content.put("principals", appDO.getPrincipals());
content.put("description", appDO.getDescription());
operateRecordService.insert(operator, ModuleEnum.APP, appDO.getName(), OperateEnum.ADD, content);
} catch (DuplicateKeyException e) {
LOGGER.error("class=AppServiceImpl||method=addApp||errMsg={}||appDO={}|", e.getMessage(), appDO, e);
return ResultStatus.RESOURCE_ALREADY_EXISTED;
@@ -141,6 +148,12 @@ public class AppServiceImpl implements AppService {
appDO.setDescription(dto.getDescription());
if (appDao.updateById(appDO) > 0) {
Map<String, String> content = new HashMap<>();
content.put("appId", appDO.getAppId());
content.put("name", appDO.getName());
content.put("principals", appDO.getPrincipals());
content.put("description", appDO.getDescription());
operateRecordService.insert(operator, ModuleEnum.APP, appDO.getName(), OperateEnum.EDIT, content);
return ResultStatus.SUCCESS;
}
} catch (DuplicateKeyException e) {

View File

@@ -1,7 +1,8 @@
package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.bizenum.DBStatusEnum;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.bizenum.ModuleEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.OperateEnum;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.ClusterDetailDTO;
@@ -16,10 +17,7 @@ import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
import com.xiaojukeji.kafka.manager.dao.ControllerDao;
import com.xiaojukeji.kafka.manager.service.cache.LogicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.cache.PhysicalClusterMetadataManager;
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import com.xiaojukeji.kafka.manager.service.service.ConsumerService;
import com.xiaojukeji.kafka.manager.service.service.RegionService;
import com.xiaojukeji.kafka.manager.service.service.ZookeeperService;
import com.xiaojukeji.kafka.manager.service.service.*;
import com.xiaojukeji.kafka.manager.service.utils.ConfigUtils;
import org.apache.zookeeper.ZooKeeper;
import org.slf4j.Logger;
@@ -66,15 +64,24 @@ public class ClusterServiceImpl implements ClusterService {
@Autowired
private ZookeeperService zookeeperService;
@Autowired
private OperateRecordService operateRecordService;
@Override
public ResultStatus addNew(ClusterDO clusterDO, String operator) {
if (ValidateUtils.isNull(clusterDO) || ValidateUtils.isNull(operator)) {
return ResultStatus.PARAM_ILLEGAL;
}
if (!isZookeeperLegal(clusterDO.getZookeeper())) {
return ResultStatus.CONNECT_ZOOKEEPER_FAILED;
return ResultStatus.ZOOKEEPER_CONNECT_FAILED;
}
try {
Map<String, String> content = new HashMap<>();
content.put("zk address", clusterDO.getZookeeper());
content.put("bootstrap servers", clusterDO.getBootstrapServers());
content.put("security properties", clusterDO.getSecurityProperties());
content.put("jmx properties", clusterDO.getJmxProperties());
operateRecordService.insert(operator, ModuleEnum.CLUSTER, clusterDO.getClusterName(), OperateEnum.ADD, content);
if (clusterDao.insert(clusterDO) <= 0) {
LOGGER.error("add new cluster failed, clusterDO:{}.", clusterDO);
return ResultStatus.MYSQL_ERROR;
@@ -102,9 +109,14 @@ public class ClusterServiceImpl implements ClusterService {
if (!originClusterDO.getZookeeper().equals(clusterDO.getZookeeper())) {
// changing the zk address is not allowed
return ResultStatus.CHANGE_ZOOKEEPER_FORBIDEN;
return ResultStatus.CHANGE_ZOOKEEPER_FORBIDDEN;
}
clusterDO.setStatus(originClusterDO.getStatus());
Map<String, String> content = new HashMap<>();
content.put("cluster id", clusterDO.getId().toString());
content.put("security properties", clusterDO.getSecurityProperties());
content.put("jmx properties", clusterDO.getJmxProperties());
operateRecordService.insert(operator, ModuleEnum.CLUSTER, clusterDO.getClusterName(), OperateEnum.EDIT, content);
return updateById(clusterDO);
}
@@ -255,12 +267,15 @@ public class ClusterServiceImpl implements ClusterService {
}
@Override
public ResultStatus deleteById(Long clusterId) {
public ResultStatus deleteById(Long clusterId, String operator) {
List<RegionDO> regionDOList = regionService.getByClusterId(clusterId);
if (!ValidateUtils.isEmptyList(regionDOList)) {
return ResultStatus.OPERATION_FORBIDDEN;
}
try {
Map<String, String> content = new HashMap<>();
content.put("cluster id", clusterId.toString());
operateRecordService.insert(operator, ModuleEnum.CLUSTER, getClusterName(clusterId).getPhysicalClusterName(), OperateEnum.DELETE, content);
if (clusterDao.deleteById(clusterId) <= 0) {
LOGGER.error("delete cluster failed, clusterId:{}.", clusterId);
return ResultStatus.MYSQL_ERROR;

View File

@@ -1,7 +1,10 @@
package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.bizenum.ModuleEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.OperateEnum;
import com.xiaojukeji.kafka.manager.common.entity.dto.rd.OperateRecordDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.OperateRecordDO;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.dao.OperateRecordDao;
import com.xiaojukeji.kafka.manager.service.service.OperateRecordService;
@@ -10,6 +13,7 @@ import org.springframework.stereotype.Service;
import java.util.Date;
import java.util.List;
import java.util.Map;
/**
* @author zhongyuankai
@@ -25,6 +29,17 @@ public class OperateRecordServiceImpl implements OperateRecordService {
return operateRecordDao.insert(operateRecordDO);
}
@Override
public int insert(String operator, ModuleEnum module, String resourceName, OperateEnum operate, Map<String, String> content) {
OperateRecordDO operateRecordDO = new OperateRecordDO();
operateRecordDO.setOperator(operator);
operateRecordDO.setModuleId(module.getCode());
operateRecordDO.setResource(resourceName);
operateRecordDO.setOperateId(operate.getCode());
operateRecordDO.setContent(JsonUtils.toJSONString(content));
return insert(operateRecordDO);
}
@Override
public List<OperateRecordDO> queryByCondt(OperateRecordDTO dto) {
return operateRecordDao.queryByCondt(

View File

@@ -1,7 +1,10 @@
package com.xiaojukeji.kafka.manager.service.service.impl;
import com.xiaojukeji.kafka.manager.common.bizenum.KafkaClientEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.ModuleEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.OperateEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.TopicAuthorityEnum;
import com.xiaojukeji.kafka.manager.common.constant.KafkaConstant;
import com.xiaojukeji.kafka.manager.common.constant.KafkaMetricsCollections;
import com.xiaojukeji.kafka.manager.common.constant.TopicCreationConstant;
import com.xiaojukeji.kafka.manager.common.entity.Result;
@@ -80,6 +83,9 @@ public class TopicManagerServiceImpl implements TopicManagerService {
@Autowired
private RegionService regionService;
@Autowired
private OperateRecordService operateRecordService;
@Override
public List<TopicDO> listAll() {
try {
@@ -293,6 +299,10 @@ public class TopicManagerServiceImpl implements TopicManagerService {
Map<String, TopicDO> topicMap) {
List<TopicDTO> dtoList = new ArrayList<>();
for (String topicName: PhysicalClusterMetadataManager.getTopicNameList(clusterDO.getId())) {
if (topicName.equals(KafkaConstant.COORDINATOR_TOPIC_NAME) || topicName.equals(KafkaConstant.TRANSACTION_TOPIC_NAME)) {
continue;
}
LogicalClusterDO logicalClusterDO = logicalClusterMetadataManager.getTopicLogicalCluster(
clusterDO.getId(),
topicName
@@ -336,6 +346,12 @@ public class TopicManagerServiceImpl implements TopicManagerService {
if (ValidateUtils.isNull(topicDO)) {
return ResultStatus.TOPIC_NOT_EXIST;
}
Map<String, Object> content = new HashMap<>(2);
content.put("clusterId", clusterId);
content.put("topicName", topicName);
recordOperation(content, topicName, operator);
topicDO.setDescription(description);
if (topicDao.updateByName(topicDO) > 0) {
return ResultStatus.SUCCESS;
@@ -359,6 +375,12 @@ public class TopicManagerServiceImpl implements TopicManagerService {
return ResultStatus.APP_NOT_EXIST;
}
Map<String, Object> content = new HashMap<>(4);
content.put("clusterId", clusterId);
content.put("topicName", topicName);
content.put("appId", appId);
recordOperation(content, topicName, operator);
TopicDO topicDO = topicDao.getByTopicName(clusterId, topicName);
if (ValidateUtils.isNull(topicDO)) {
// not present in the DB, so it needs to be inserted
@@ -389,6 +411,16 @@ public class TopicManagerServiceImpl implements TopicManagerService {
return ResultStatus.MYSQL_ERROR;
}
private void recordOperation(Map<String, Object> content, String topicName, String operator) {
OperateRecordDO operateRecordDO = new OperateRecordDO();
operateRecordDO.setModuleId(ModuleEnum.TOPIC.getCode());
operateRecordDO.setOperateId(OperateEnum.EDIT.getCode());
operateRecordDO.setResource(topicName);
operateRecordDO.setContent(JsonUtils.toJSONString(content));
operateRecordDO.setOperator(operator);
operateRecordService.insert(operateRecordDO);
}
@Override
public int deleteByTopicName(Long clusterId, String topicName) {
try {

View File

@@ -53,7 +53,7 @@ public class ZookeeperServiceImpl implements ZookeeperService {
}
ZkConfigImpl zkConfig = PhysicalClusterMetadataManager.getZKConfig(clusterId);
if (ValidateUtils.isNull(zkConfig)) {
return Result.buildFrom(ResultStatus.CONNECT_ZOOKEEPER_FAILED);
return Result.buildFrom(ResultStatus.ZOOKEEPER_CONNECT_FAILED);
}
try {
@@ -68,6 +68,6 @@ public class ZookeeperServiceImpl implements ZookeeperService {
} catch (Exception e) {
LOGGER.error("class=ZookeeperServiceImpl||method=getControllerPreferredCandidates||clusterId={}||errMsg={}", clusterId, e.getMessage());
}
return Result.buildFrom(ResultStatus.READ_ZOOKEEPER_FAILED);
return Result.buildFrom(ResultStatus.ZOOKEEPER_READ_FAILED);
}
}

View File

@@ -2,6 +2,7 @@ package com.xiaojukeji.kafka.manager.account;
import com.xiaojukeji.kafka.manager.account.common.EnterpriseStaff;
import com.xiaojukeji.kafka.manager.common.bizenum.AccountRoleEnum;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.account.Account;
import com.xiaojukeji.kafka.manager.common.entity.pojo.AccountDO;
@@ -25,14 +26,14 @@ public interface AccountService {
* @param username the username
* @return
*/
AccountDO getAccountDO(String username);
Result<AccountDO> getAccountDO(String username);
/**
* delete a user
* @param username the username
* @return
*/
ResultStatus deleteByName(String username);
ResultStatus deleteByName(String username, String operator);
/**
* update an account

View File

@@ -1,5 +1,6 @@
package com.xiaojukeji.kafka.manager.account;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ao.account.Account;
import com.xiaojukeji.kafka.manager.common.entity.dto.normal.LoginDTO;
@@ -11,7 +12,7 @@ import javax.servlet.http.HttpServletResponse;
* @date 20/8/20
*/
public interface LoginService {
Account login(HttpServletRequest request, HttpServletResponse response, LoginDTO dto);
Result<Account> login(HttpServletRequest request, HttpServletResponse response, LoginDTO dto);
void logout(HttpServletRequest request, HttpServletResponse response, Boolean needJump2LoginPage);

View File

@@ -1,5 +1,6 @@
package com.xiaojukeji.kafka.manager.account.component;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.dto.normal.LoginDTO;
import javax.servlet.http.HttpServletRequest;
@@ -18,7 +19,7 @@ public abstract class AbstractSingleSignOn {
protected static final String HEADER_REDIRECT_KEY = "location";
public abstract String loginAndGetLdap(HttpServletRequest request, HttpServletResponse response, LoginDTO dto);
public abstract Result<String> loginAndGetLdap(HttpServletRequest request, HttpServletResponse response, LoginDTO dto);
public abstract void logout(HttpServletRequest request, HttpServletResponse response, Boolean needJump2LoginPage);

View File

@@ -41,7 +41,14 @@ public class BaseEnterpriseStaffService extends AbstractEnterpriseStaffService {
@Override
public List<EnterpriseStaff> searchEnterpriseStaffByKeyWord(String keyWord) {
try {
List<AccountDO> doList = accountDao.searchByNamePrefix(keyWord);
List<AccountDO> doList = null;
if (ValidateUtils.isBlank(keyWord)) {
// when the keyword is empty, return all users
doList = accountDao.list();
} else {
doList = accountDao.searchByNamePrefix(keyWord);
}
if (ValidateUtils.isEmptyList(doList)) {
return new ArrayList<>();
}

View File

@@ -3,6 +3,7 @@ package com.xiaojukeji.kafka.manager.account.component.sso;
import com.xiaojukeji.kafka.manager.account.AccountService;
import com.xiaojukeji.kafka.manager.account.component.AbstractSingleSignOn;
import com.xiaojukeji.kafka.manager.common.constant.LoginConstant;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.dto.normal.LoginDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.AccountDO;
import com.xiaojukeji.kafka.manager.common.utils.EncryptUtil;
@@ -23,18 +24,21 @@ public class BaseSessionSignOn extends AbstractSingleSignOn {
private AccountService accountService;
@Override
public String loginAndGetLdap(HttpServletRequest request, HttpServletResponse response, LoginDTO dto) {
public Result<String> loginAndGetLdap(HttpServletRequest request, HttpServletResponse response, LoginDTO dto) {
if (ValidateUtils.isBlank(dto.getUsername()) || ValidateUtils.isNull(dto.getPassword())) {
return null;
}
AccountDO accountDO = accountService.getAccountDO(dto.getUsername());
if (ValidateUtils.isNull(accountDO)) {
return null;
Result<AccountDO> accountResult = accountService.getAccountDO(dto.getUsername());
if (ValidateUtils.isNull(accountResult) || accountResult.failed()) {
return new Result<>(accountResult.getCode(), accountResult.getMessage());
}
if (!accountDO.getPassword().equals(EncryptUtil.md5(dto.getPassword()))) {
return null;
if (ValidateUtils.isNull(accountResult.getData())) {
return Result.buildFailure("username illegal");
}
return dto.getUsername();
if (!accountResult.getData().getPassword().equals(EncryptUtil.md5(dto.getPassword()))) {
return Result.buildFailure("password illegal");
}
return Result.buildSuc(accountResult.getData().getUsername());
}
@Override

View File

@@ -6,7 +6,10 @@ import com.xiaojukeji.kafka.manager.account.AccountService;
import com.xiaojukeji.kafka.manager.account.common.EnterpriseStaff;
import com.xiaojukeji.kafka.manager.account.component.AbstractEnterpriseStaffService;
import com.xiaojukeji.kafka.manager.common.bizenum.AccountRoleEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.ModuleEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.OperateEnum;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.account.Account;
import com.xiaojukeji.kafka.manager.common.entity.pojo.AccountDO;
@@ -14,6 +17,7 @@ import com.xiaojukeji.kafka.manager.common.utils.EncryptUtil;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.dao.AccountDao;
import com.xiaojukeji.kafka.manager.service.service.ConfigService;
import com.xiaojukeji.kafka.manager.service.service.OperateRecordService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@@ -47,6 +51,9 @@ public class AccountServiceImpl implements AccountService {
@Autowired
private AbstractEnterpriseStaffService enterpriseStaffService;
@Autowired
private OperateRecordService operateRecordService;
/**
* user organization info
* <username, Staff>
@@ -81,9 +88,12 @@ public class AccountServiceImpl implements AccountService {
}
@Override
public ResultStatus deleteByName(String username) {
public ResultStatus deleteByName(String username, String operator) {
try {
if (accountDao.deleteByName(username) > 0) {
Map<String, String> content = new HashMap<>();
content.put("username", username);
operateRecordService.insert(operator, ModuleEnum.AUTHORITY, username, OperateEnum.DELETE, content);
return ResultStatus.SUCCESS;
}
} catch (Exception e) {
@@ -101,7 +111,7 @@ public class AccountServiceImpl implements AccountService {
return ResultStatus.ACCOUNT_NOT_EXIST;
}
if (!ValidateUtils.isNull(accountDO.getPassword())) {
if (!ValidateUtils.isBlank(accountDO.getPassword())) {
accountDO.setPassword(EncryptUtil.md5(accountDO.getPassword()));
} else {
accountDO.setPassword(oldAccountDO.getPassword());
@@ -117,8 +127,13 @@ public class AccountServiceImpl implements AccountService {
}
@Override
public AccountDO getAccountDO(String username) {
return accountDao.getByName(username);
public Result<AccountDO> getAccountDO(String username) {
try {
return Result.buildSuc(accountDao.getByName(username));
} catch (Exception e) {
LOGGER.warn("class=AccountServiceImpl||method=getAccountDO||username={}||errMsg={}||msg=get account fail", username, e.getMessage());
}
return Result.buildFrom(ResultStatus.MYSQL_ERROR);
}
@Override
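
deleteByName now writes an audit trail through OperateRecordService before returning. Its implementation is not part of this comparison; a hypothetical minimal version consistent with the call site above (the DAO, the DO, and the accessor names are invented for illustration) could serialize the content map and persist one row:

import com.xiaojukeji.kafka.manager.common.bizenum.ModuleEnum;
import com.xiaojukeji.kafka.manager.common.bizenum.OperateEnum;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.service.service.OperateRecordService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.util.Map;

// Hypothetical sketch only; the real implementation in this repo may differ.
@Service
public class OperateRecordServiceImpl implements OperateRecordService {
    @Autowired
    private OperateRecordDao operateRecordDao;            // hypothetical DAO

    @Override
    public void insert(String operator, ModuleEnum module, String resource,
                       OperateEnum operate, Map<String, String> content) {
        OperateRecordDO recordDO = new OperateRecordDO(); // hypothetical DO
        recordDO.setOperator(operator);
        recordDO.setModuleId(module.getCode());           // accessor names are guesses
        recordDO.setOperateId(operate.getCode());
        recordDO.setResource(resource);
        recordDO.setContent(JsonUtils.toJSONString(content)); // method name is a guess
        operateRecordDao.insert(recordDO);
    }
}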

View File

@@ -6,6 +6,7 @@ import com.xiaojukeji.kafka.manager.account.LoginService;
import com.xiaojukeji.kafka.manager.common.bizenum.AccountRoleEnum;
import com.xiaojukeji.kafka.manager.common.constant.ApiPrefix;
import com.xiaojukeji.kafka.manager.common.constant.LoginConstant;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ao.account.Account;
import com.xiaojukeji.kafka.manager.common.entity.dto.normal.LoginDTO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
@@ -34,15 +35,15 @@ public class LoginServiceImpl implements LoginService {
private AbstractSingleSignOn singleSignOn;
@Override
public Account login(HttpServletRequest request, HttpServletResponse response, LoginDTO loginDTO) {
String username = singleSignOn.loginAndGetLdap(request, response, loginDTO);
if (ValidateUtils.isBlank(username)) {
public Result<Account> login(HttpServletRequest request, HttpServletResponse response, LoginDTO loginDTO) {
Result<String> userResult = singleSignOn.loginAndGetLdap(request, response, loginDTO);
if (ValidateUtils.isNull(userResult) || userResult.failed()) {
logout(request, response, false);
return null;
return new Result<>(userResult.getCode(), userResult.getMessage());
}
Account account = accountService.getAccountFromCache(username);
Account account = accountService.getAccountFromCache(userResult.getData());
initLoginContext(request, response, account);
return account;
return Result.buildSuc(account);
}
private void initLoginContext(HttpServletRequest request, HttpServletResponse response, Account account) {

View File

@@ -87,6 +87,6 @@ public class ApplyAppOrder extends AbstractAppOrder {
appDO.setDescription(orderDO.getDescription());
appDO.generateAppIdAndPassword(orderDO.getId(), configUtils.getIdc());
appDO.setType(0);
return appService.addApp(appDO);
return appService.addApp(appDO, userName);
}
}

View File

@@ -95,7 +95,7 @@ public class ApplyAuthorityOrder extends AbstractAuthorityOrder {
}
TopicDO topicDO = topicManagerService.getByTopicName(physicalClusterId, orderExtensionDTO.getTopicName());
if (ValidateUtils.isNull(topicDO)) {
return ResultStatus.TOPIC_NOT_EXIST;
return ResultStatus.TOPIC_BIZ_DATA_NOT_EXIST;
}
AppDO appDO = appService.getByAppId(topicDO.getAppId());
if (!appDO.getPrincipals().contains(userName)) {

View File

@@ -68,5 +68,10 @@
<artifactId>spring-test</artifactId>
<version>${spring-version}</version>
</dependency>
<dependency>
<groupId>io.minio</groupId>
<artifactId>minio</artifactId>
</dependency>
</dependencies>
</project>
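
The <version> tag is deliberately absent here: the MinIO version (7.1.0) is pinned centrally in the root pom's <dependencyManagement> block, shown at the end of this comparison.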

View File

@@ -4,6 +4,7 @@ import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.dto.normal.KafkaFileDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.KafkaFileDO;
import org.springframework.web.multipart.MultipartFile;
import java.util.List;
@@ -24,7 +25,7 @@ public interface KafkaFileService {
KafkaFileDO getFileByFileName(String fileName);
Result<String> downloadKafkaConfigFile(Long fileId);
Result<MultipartFile> downloadKafkaFile(Long fileId);
String getDownloadBaseUrl();
}

View File

@@ -10,13 +10,20 @@ import org.springframework.web.multipart.MultipartFile;
public abstract class AbstractStorageService {
/**
* Upload a file
* @param fileName file name
* @param fileMd5 file MD5
* @param uploadFile the file to upload
* @return upload result
*/
public abstract boolean upload(String fileName, String fileMd5, MultipartFile uploadFile);
/**
* Download
* Download a file
* @param fileName file name
* @param fileMd5 file MD5
* @return the downloaded file
*/
public abstract Result<String> download(String fileName, String fileMd5);
public abstract Result<MultipartFile> download(String fileName, String fileMd5);
/**
* Download base URL

View File

@@ -1,33 +0,0 @@
package com.xiaojukeji.kafka.manager.kcm.component.storage.local;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import com.xiaojukeji.kafka.manager.kcm.component.storage.AbstractStorageService;
import org.springframework.web.multipart.MultipartFile;
/**
* @author zengqiao
* @date 20/9/17
*/
@Service("storageService")
public class Local extends AbstractStorageService {
@Value("${kcm.storage.base-url}")
private String baseUrl;
@Override
public boolean upload(String fileName, String fileMd5, MultipartFile uploadFile) {
return false;
}
@Override
public Result<String> download(String fileName, String fileMd5) {
return Result.buildFrom(ResultStatus.DOWNLOAD_FILE_FAIL);
}
@Override
public String getDownloadBaseUrl() {
return baseUrl;
}
}
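
The Local stub removed above never actually stored anything (upload returned false, download always failed). Under the new Result<MultipartFile> contract, a working local-disk variant is straightforward; the sketch below is illustrative only (the class name and base-dir property are invented), and it reuses MockMultipartFile exactly the way the new S3Service in the next file does:

import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.kcm.component.storage.AbstractStorageService;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.mock.web.MockMultipartFile;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;

// Hypothetical replacement; only one bean named "storageService" may be active,
// so this would conflict with S3Service unless one of the two is excluded.
@Service("storageService")
public class LocalStorageService extends AbstractStorageService {
    @Value("${kcm.storage.base-url:}")
    private String baseUrl;

    @Value("${kcm.storage.base-dir:/tmp/kcm}")   // invented property
    private String baseDir;

    @Override
    public boolean upload(String fileName, String fileMd5, MultipartFile uploadFile) {
        try {
            new File(baseDir).mkdirs();          // ensure the target directory exists
            uploadFile.transferTo(new File(baseDir, fileName));
            return true;
        } catch (Exception e) {
            return false;
        }
    }

    @Override
    public Result<MultipartFile> download(String fileName, String fileMd5) {
        try (InputStream is = new FileInputStream(new File(baseDir, fileName))) {
            return Result.buildSuc(new MockMultipartFile(fileName, fileName,
                    "application/octet-stream", is));
        } catch (Exception e) {
            return Result.buildFrom(ResultStatus.STORAGE_DOWNLOAD_FILE_FAILED);
        }
    }

    @Override
    public String getDownloadBaseUrl() {
        return baseUrl;
    }
}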

View File

@@ -0,0 +1,125 @@
package com.xiaojukeji.kafka.manager.kcm.component.storage.s3;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.kcm.component.storage.AbstractStorageService;
import io.minio.*;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.mock.web.MockMultipartFile;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.annotation.PostConstruct;
import java.io.IOException;
import java.io.InputStream;
@Service("storageService")
public class S3Service extends AbstractStorageService {
private final static Logger LOGGER = LoggerFactory.getLogger(S3Service.class);
@Value("${kcm.s3.endpoint:}")
private String endpoint;
@Value("${kcm.s3.access-key:}")
private String accessKey;
@Value("${kcm.s3.secret-key:}")
private String secretKey;
@Value("${kcm.s3.bucket:}")
private String bucket;
private MinioClient minioClient;
@PostConstruct
public void init() {
try {
if (ValidateUtils.anyBlank(this.endpoint, this.accessKey, this.secretKey, this.bucket)) {
// s3 not configured, skip client initialization
return;
}
minioClient = new MinioClient(endpoint, accessKey, secretKey);
} catch (Exception e) {
LOGGER.error("class=S3Service||method=init||fields={}||errMsg={}", this.toString(), e.getMessage());
}
}
@Override
public boolean upload(String fileName, String fileMd5, MultipartFile uploadFile) {
InputStream inputStream = null;
try {
if (!createBucketIfNotExist()) {
return false;
}
inputStream = uploadFile.getInputStream();
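// NOTE: available() is only an estimate of the stream length;
// uploadFile.getSize() would be the more reliable object size to pass here.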
minioClient.putObject(PutObjectArgs.builder()
.bucket(this.bucket)
.object(fileName)
.stream(inputStream, inputStream.available(), -1)
.build()
);
return true;
} catch (Exception e) {
LOGGER.error("class=S3Service||method=upload||fileName={}||errMsg={}||msg=upload failed", fileName, e.getMessage());
} finally {
if (inputStream != null) {
try {
inputStream.close();
} catch (IOException e) {
// ignore close failure
}
}
}
return false;
}
@Override
public Result<MultipartFile> download(String fileName, String fileMd5) {
try {
final ObjectStat stat = minioClient.statObject(this.bucket, fileName);
InputStream is = minioClient.getObject(this.bucket, fileName);
return Result.buildSuc(new MockMultipartFile(fileName, fileName, stat.contentType(), is));
} catch (Exception e) {
LOGGER.error("class=S3Service||method=download||fileName={}||errMsg={}||msg=download failed", fileName, e.getMessage());
}
return Result.buildFrom(ResultStatus.STORAGE_DOWNLOAD_FILE_FAILED);
}
@Override
public String getDownloadBaseUrl() {
return this.endpoint + "/" + this.bucket;
}
private boolean createBucketIfNotExist() {
try {
boolean found = minioClient.bucketExists(BucketExistsArgs.builder().bucket(this.bucket).build());
if (!found) {
minioClient.makeBucket(MakeBucketArgs.builder().bucket(this.bucket).build());
}
LOGGER.info("class=S3Service||method=createBucketIfNotExist||bucket={}||msg=check and create bucket success", this.bucket);
return true;
} catch (Exception e) {
LOGGER.error("class=S3Service||method=createBucketIfNotExist||bucket={}||errMsg={}||msg=create bucket failed", this.bucket, e.getMessage());
}
return false;
}
@Override
public String toString() {
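// NOTE: init() logs this.toString() on failure, so secretKey ends up
// in the log in clear text; masking it here would be safer.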
return "S3Service{" +
"endpoint='" + endpoint + '\'' +
", accessKey='" + accessKey + '\'' +
", secretKey='" + secretKey + '\'' +
", bucket='" + bucket + '\'' +
'}';
}
}

View File

@@ -4,17 +4,18 @@ import com.xiaojukeji.kafka.manager.common.bizenum.KafkaFileEnum;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.dto.normal.KafkaFileDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.KafkaFileDO;
import com.xiaojukeji.kafka.manager.common.utils.CopyUtils;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.dao.KafkaFileDao;
import com.xiaojukeji.kafka.manager.common.entity.pojo.KafkaFileDO;
import com.xiaojukeji.kafka.manager.kcm.component.storage.AbstractStorageService;
import com.xiaojukeji.kafka.manager.kcm.KafkaFileService;
import com.xiaojukeji.kafka.manager.kcm.component.storage.AbstractStorageService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DuplicateKeyException;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;
import java.util.ArrayList;
import java.util.List;
@@ -52,7 +53,7 @@ public class KafkaFileServiceImpl implements KafkaFileService {
kafkaFileDTO.getUploadFile())
) {
kafkaFileDao.deleteById(kafkaFileDO.getId());
return ResultStatus.UPLOAD_FILE_FAIL;
return ResultStatus.STORAGE_UPLOAD_FILE_FAILED;
}
return ResultStatus.SUCCESS;
} catch (DuplicateKeyException e) {
@@ -113,7 +114,7 @@ public class KafkaFileServiceImpl implements KafkaFileService {
if (kafkaFileDao.updateById(kafkaFileDO) <= 0) {
return ResultStatus.MYSQL_ERROR;
}
return ResultStatus.UPLOAD_FILE_FAIL;
return ResultStatus.STORAGE_UPLOAD_FILE_FAILED;
} catch (Exception e) {
LOGGER.error("rollback modify kafka file failed, kafkaFileDTO:{}.", kafkaFileDTO, e);
}
@@ -163,13 +164,13 @@ public class KafkaFileServiceImpl implements KafkaFileService {
}
@Override
public Result<String> downloadKafkaConfigFile(Long fileId) {
public Result<MultipartFile> downloadKafkaFile(Long fileId) {
KafkaFileDO kafkaFileDO = kafkaFileDao.getById(fileId);
if (ValidateUtils.isNull(kafkaFileDO)) {
return Result.buildFrom(ResultStatus.RESOURCE_NOT_EXIST);
}
if (KafkaFileEnum.PACKAGE.getCode().equals(kafkaFileDO.getFileType())) {
return Result.buildFrom(ResultStatus.FILE_TYPE_NOT_SUPPORT);
return Result.buildFrom(ResultStatus.STORAGE_FILE_TYPE_NOT_SUPPORT);
}
return storageService.download(kafkaFileDO.getFileName(), kafkaFileDO.getFileMd5());

View File

@@ -1,8 +1,6 @@
package com.xiaojukeji.kafka.manager.web.api.versionone;
import com.xiaojukeji.kafka.manager.common.constant.Constant;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.account.Account;
import com.xiaojukeji.kafka.manager.common.entity.dto.normal.LoginDTO;
import com.xiaojukeji.kafka.manager.common.entity.vo.common.AccountVO;
@@ -11,8 +9,6 @@ import com.xiaojukeji.kafka.manager.account.LoginService;
import com.xiaojukeji.kafka.manager.common.constant.ApiPrefix;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
@@ -28,26 +24,22 @@ import javax.servlet.http.HttpServletResponse;
@RestController
@RequestMapping(ApiPrefix.API_V1_SSO_PREFIX)
public class LoginController {
private static final Logger LOGGER = LoggerFactory.getLogger(LoginController.class);
@Autowired
private LoginService loginService;
@ApiOperation(value = "登陆", notes = "")
@RequestMapping(value = "login", method = RequestMethod.POST)
@ResponseBody
public Result<AccountVO> login(HttpServletRequest request,
HttpServletResponse response,
@RequestBody LoginDTO dto){
Account account = loginService.login(request, response, dto);
if (ValidateUtils.isNull(account)) {
return Result.buildFrom(ResultStatus.LOGIN_FAILED);
public Result<AccountVO> login(HttpServletRequest request, HttpServletResponse response, @RequestBody LoginDTO dto){
Result<Account> accountResult = loginService.login(request, response, dto);
if (ValidateUtils.isNull(accountResult) || accountResult.failed()) {
return new Result<>(accountResult.getCode(), accountResult.getMessage());
}
AccountVO vo = new AccountVO();
vo.setUsername(account.getUsername());
vo.setChineseName(account.getChineseName());
vo.setDepartment(account.getDepartment());
vo.setRole(account.getAccountRoleEnum().getRole());
vo.setUsername(accountResult.getData().getUsername());
vo.setChineseName(accountResult.getData().getChineseName());
vo.setDepartment(accountResult.getData().getDepartment());
vo.setRole(accountResult.getData().getAccountRoleEnum().getRole());
return new Result<>(vo);
}
@@ -58,28 +50,4 @@ public class LoginController {
loginService.logout(request, response, true);
return new Result();
}
@Deprecated
@ApiOperation(value = "登录检查", notes = "检查SSO返回的Code")
@RequestMapping(value = "xiaojukeji/login-check", method = RequestMethod.POST)
@ResponseBody
public Result<AccountVO> checkCodeAndGetStaffInfo(HttpServletRequest request,
HttpServletResponse response,
@RequestBody LoginDTO dto) {
Result<AccountVO> ra = login(request, response, dto);
if (!Constant.SUCCESS.equals(ra.getCode())) {
LOGGER.info("user login failed, req:{} result:{}.", dto, ra);
} else {
LOGGER.info("user login success, req:{} result:{}.", dto, ra);
}
return ra;
}
@Deprecated
@ApiOperation(value = "登出", notes = "")
@RequestMapping(value = "xiaojukeji/logout", method = RequestMethod.DELETE)
@ResponseBody
public Result logout(HttpServletRequest request, HttpServletResponse response) {
return logoff(request, response);
}
}
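
One thing to watch in the new guards, both here and in LoginServiceImpl above: ValidateUtils.isNull(accountResult) short-circuits into a branch that immediately calls accountResult.getCode(), so an actual null would still throw a NullPointerException. A hedged defensive variant (the helper name is invented; ResultStatus.LOGIN_FAILED comes from the code removed above):

// Sketch: centralize the guard so a null Result is never dereferenced.
private static <T> Result<T> failureOf(Result<?> result) {
    if (ValidateUtils.isNull(result)) {
        return Result.buildFrom(ResultStatus.LOGIN_FAILED); // fallback status is a judgment call
    }
    return new Result<>(result.getCode(), result.getMessage());
}

The guard then becomes if (ValidateUtils.isNull(accountResult) || accountResult.failed()) { return failureOf(accountResult); } and stays correct even when login returns null.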

View File

@@ -43,7 +43,7 @@ public class OpClusterController {
@RequestMapping(value = "clusters", method = RequestMethod.DELETE)
@ResponseBody
public Result delete(@RequestParam(value = "clusterId") Long clusterId) {
return Result.buildFrom(clusterService.deleteById(clusterId));
return Result.buildFrom(clusterService.deleteById(clusterId, SpringTool.getUserName()));
}
@ApiOperation(value = "修改集群信息")

View File

@@ -2,6 +2,7 @@ package com.xiaojukeji.kafka.manager.web.api.versionone.rd;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.vo.common.AccountVO;
import com.xiaojukeji.kafka.manager.common.utils.SpringTool;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.common.constant.ApiPrefix;
import com.xiaojukeji.kafka.manager.web.converters.AccountConverter;
@@ -35,7 +36,7 @@ public class RdAccountController {
@RequestMapping(value = "accounts", method = RequestMethod.POST)
@ResponseBody
public Result addAccount(@RequestBody AccountDTO dto) {
if (!dto.legal() || ValidateUtils.isNull(dto.getPassword())) {
if (!dto.legal() || ValidateUtils.isBlank(dto.getPassword())) {
return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
}
ResultStatus rs = accountService.createAccount(AccountConverter.convert2AccountDO(dto));
@@ -46,7 +47,7 @@ public class RdAccountController {
@RequestMapping(value = "accounts", method = RequestMethod.DELETE)
@ResponseBody
public Result deleteAccount(@RequestParam("username") String username) {
ResultStatus rs = accountService.deleteByName(username);
ResultStatus rs = accountService.deleteByName(username, SpringTool.getUserName());
return Result.buildFrom(rs);
}

View File

@@ -1,23 +1,30 @@
package com.xiaojukeji.kafka.manager.web.api.versionone.rd;
import com.xiaojukeji.kafka.manager.common.bizenum.KafkaFileEnum;
import com.xiaojukeji.kafka.manager.common.constant.ApiPrefix;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.dto.normal.KafkaFileDTO;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.KafkaFileVO;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.kcm.component.storage.common.StorageEnum;
import com.xiaojukeji.kafka.manager.common.entity.pojo.KafkaFileDO;
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import com.xiaojukeji.kafka.manager.kcm.KafkaFileService;
import com.xiaojukeji.kafka.manager.common.entity.vo.rd.KafkaFileVO;
import com.xiaojukeji.kafka.manager.common.utils.JsonUtils;
import com.xiaojukeji.kafka.manager.common.utils.SpringTool;
import com.xiaojukeji.kafka.manager.common.constant.ApiPrefix;
import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils;
import com.xiaojukeji.kafka.manager.kcm.KafkaFileService;
import com.xiaojukeji.kafka.manager.kcm.component.storage.common.StorageEnum;
import com.xiaojukeji.kafka.manager.service.service.ClusterService;
import com.xiaojukeji.kafka.manager.web.converters.KafkaFileConverter;
import io.swagger.annotations.Api;
import io.swagger.annotations.ApiOperation;
import org.apache.tomcat.util.http.fileupload.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;
import javax.servlet.http.HttpServletResponse;
import java.io.InputStream;
import java.net.URLEncoder;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -30,6 +37,8 @@ import java.util.Map;
@RestController
@RequestMapping(ApiPrefix.API_V1_RD_PREFIX)
public class RdKafkaFileController {
private final static Logger LOGGER = LoggerFactory.getLogger(RdKafkaFileController.class);
@Autowired
private ClusterService clusterService;
@@ -71,9 +80,33 @@ public class RdKafkaFileController {
return new Result<>(KafkaFileConverter.convertKafkaFileVOList(kafkaFileDOList, clusterService));
}
@ApiOperation(value = "文件预览", notes = "")
@Deprecated
@ApiOperation(value = "文件下载", notes = "")
@RequestMapping(value = "kafka-files/{fileId}/config-files", method = RequestMethod.GET)
public Result<String> previewKafkaFile(@PathVariable("fileId") Long fileId) {
return kafkaFileService.downloadKafkaConfigFile(fileId);
public Result downloadKafkaFile(@PathVariable("fileId") Long fileId, HttpServletResponse response) {
Result<MultipartFile> multipartFileResult = kafkaFileService.downloadKafkaFile(fileId);
if (multipartFileResult.failed() || ValidateUtils.isNull(multipartFileResult.getData())) {
return multipartFileResult;
}
InputStream is = null;
try {
response.setContentType(multipartFileResult.getData().getContentType());
response.setCharacterEncoding("UTF-8");
response.setHeader("Content-Disposition", "attachment;filename=" + URLEncoder.encode(multipartFileResult.getData().getOriginalFilename(), "UTF-8"));
is = multipartFileResult.getData().getInputStream();
IOUtils.copy(is, response.getOutputStream());
} catch (Exception e) {
LOGGER.error("class=RdKafkaFileController||method=downloadKafkaFile||fileId={}||errMsg={}||msg=modify response failed", fileId, e.getMessage());
} finally {
try {
if (is != null) {
is.close();
}
} catch (Exception e) {
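// close failure is deliberately swallowed; nothing useful to do here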
}
}
return Result.buildSuc();
}
}
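
A small caveat in the download handler above: URLEncoder.encode turns spaces into '+', which many browsers keep literally in the saved filename; appending .replace("+", "%20") to the encoded name is a common workaround.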

View File

@@ -39,10 +39,10 @@ public class SwaggerConfig implements WebMvcConfigurer {
private ApiInfo apiInfo() {
return new ApiInfoBuilder()
.title("Kafka云平台-接口文档")
.description("欢迎使用滴滴出行开源kafka-manager")
.title("Logi-KafkaManager 接口文档")
.description("欢迎使用滴滴Logi-KafkaManager")
.contact("huangyiminghappy@163.com")
.version("2.0")
.version("2.2.0")
.build();
}

View File

@@ -11,7 +11,7 @@ spring:
name: kafkamanager
datasource:
kafka-manager:
jdbc-url: jdbc:mysql://127.0.0.1:3306/logi_kafka_manager?characterEncoding=UTF-8&serverTimezone=GMT%2B8
jdbc-url: jdbc:mysql://127.0.0.1:3306/logi_kafka_manager?characterEncoding=UTF-8&useSSL=false&serverTimezone=GMT%2B8
username: admin
password: admin
driver-class-name: com.mysql.jdbc.Driver
@@ -52,8 +52,11 @@ account:
kcm:
enabled: false
storage:
base-url: http://127.0.0.1
s3:
endpoint: 127.0.0.1
access-key: 1234567890
secret-key: 0987654321
bucket: logi-kafka
n9e:
base-url: http://127.0.0.1:8004
user-token: 12345678
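
The new kcm.s3.* keys map one-to-one onto the @Value placeholders in S3Service above. Since each placeholder defaults to empty and S3Service.init() returns early via ValidateUtils.anyBlank when any of them is blank, leaving these keys unset simply disables the S3 storage backend.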

View File

@@ -223,6 +223,12 @@
<artifactId>curator-recipes</artifactId>
<version>2.10.0</version>
</dependency>
<dependency>
<groupId>io.minio</groupId>
<artifactId>minio</artifactId>
<version>7.1.0</version>
</dependency>
</dependencies>
</dependencyManagement>
</project>