Adjust containerized deployment configuration for v3.2

zengqiao
2022-12-16 13:39:51 +08:00
parent 860d0b92e2
commit edfa6a9f71
10 changed files with 497 additions and 9 deletions

View File

@@ -0,0 +1,119 @@
server:
  port: 80 # Service port
  tomcat:
    accept-count: 1000
    max-connections: 10000
spring:
  application:
    name: know-streaming
  profiles:
    active: dev
  main:
    allow-bean-definition-overriding: true
  jackson:
    time-zone: GMT+8
  datasource:
    know-streaming: # Database configuration for know-streaming itself
      jdbc-url: jdbc:mariadb://SERVER_MYSQL_ADDRESS/SERVER_MYSQL_DB?useUnicode=true&characterEncoding=utf8&jdbcCompliantTruncation=true&allowMultiQueries=true&useSSL=false&alwaysAutoGeneratedKeys=true&serverTimezone=GMT%2B8&allowPublicKeyRetrieval=true
      username: SERVER_MYSQL_USER
      password: SERVER_MYSQL_PASSWORD
      driver-class-name: org.mariadb.jdbc.Driver
      maximum-pool-size: 20
      idle-timeout: 30000
      connection-test-query: SELECT 1
  logi-job: # Database configuration for the logi-job module that know-streaming depends on; by default this can stay identical to the know-streaming database configuration
    enable: true
    jdbc-url: jdbc:mariadb://SERVER_MYSQL_ADDRESS/SERVER_MYSQL_DB?useUnicode=true&characterEncoding=utf8&jdbcCompliantTruncation=true&allowMultiQueries=true&useSSL=false&alwaysAutoGeneratedKeys=true&serverTimezone=GMT%2B8&allowPublicKeyRetrieval=true
    username: SERVER_MYSQL_USER
    password: SERVER_MYSQL_PASSWORD
    driver-class-name: org.mariadb.jdbc.Driver
    max-lifetime: 60000
    init-sql: true
    init-thread-num: 20
    max-thread-num: 50
    log-expire: 3 # Log retention period, in days
    app-name: know-streaming
    claim-strategy: com.didiglobal.logi.job.core.consensual.RandomConsensual
  logi-security: # Database configuration for the logi-security module that know-streaming depends on; by default this can stay identical to the know-streaming database configuration
    jdbc-url: jdbc:mariadb://SERVER_MYSQL_ADDRESS/SERVER_MYSQL_DB?useUnicode=true&characterEncoding=utf8&jdbcCompliantTruncation=true&allowMultiQueries=true&useSSL=false&alwaysAutoGeneratedKeys=true&serverTimezone=GMT%2B8&allowPublicKeyRetrieval=true
    username: SERVER_MYSQL_USER
    password: SERVER_MYSQL_PASSWORD
    driver-class-name: org.mariadb.jdbc.Driver
    app-name: know-streaming
    resource-extend-bean-name: myResourceExtendImpl
    login-extend-bean-name: logiSecurityDefaultLoginExtendImpl
logging:
  config: classpath:logback-spring.xml
# Thread pool sizing
thread-pool:
  scheduled:
    thread-num: 2 # Thread pool size for @Scheduled tasks; the default is 1
  collector: # Collection module configuration
    future-util: # Collection module thread pools
      num: 3 # Number of thread pools
      thread-num: 64 # Core threads per pool
      queue-size: 10000 # Queue size per pool
      select-suitable-enable: true # Whether tasks automatically pick a suitable pool; minor setting, normally left unchanged
      suitable-queue-size: 1000 # Ideal queue size for a pool; minor setting, normally left unchanged
  task: # Task module configuration
    metrics: # Metrics collection task configuration
      thread-num: 18 # Core threads of the metrics collection pool
      queue-size: 180 # Queue size of the metrics collection pool
    metadata: # Metadata sync task configuration
      thread-num: 27 # Core threads of the metadata sync pool
      queue-size: 270 # Queue size of the metadata sync pool
    common: # Configuration for all remaining tasks
      thread-num: 15 # Core threads of the remaining-task pool
      queue-size: 150 # Queue size of the remaining-task pool
  es:
    search: # ES query thread pool
      thread-num: 20 # Pool size
      queue-size: 10000 # Queue size
# Client pool sizing
client-pool:
  kafka-consumer:
    min-idle-client-num: 2 # Minimum idle clients
    max-idle-client-num: 20 # Maximum idle clients
    max-total-client-num: 20 # Maximum total clients
    borrow-timeout-unit-ms: 5000 # Borrow timeout, in milliseconds
  kafka-admin:
    client-cnt: 1 # Number of KafkaAdminClient instances created per Kafka cluster
# ES client configuration
es:
  client:
    address: SERVER_ES_ADDRESS
    client-cnt: 10
    io-thread-cnt: 2
    max-retry-cnt: 5
  index:
    expire: 15 # Index retention in days; 15 means indices older than 15 days are deleted by KS
# Prometheus metrics export configuration
management:
  endpoints:
    web:
      base-path: /metrics
      exposure:
        include: '*'
  health:
    elasticsearch:
      enabled: false
  metrics:
    export:
      prometheus:
        descriptions: true
        enabled: true
    tags:
      application: know-streaming
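The SERVER_MYSQL_* and SERVER_ES_ADDRESS tokens above are placeholders that are expected to be substituted from environment variables when the container starts. A minimal sketch of supplying them with Docker Compose, assuming the image resolves identically named environment variables (the service names and credentials below are hypothetical examples):

# docker-compose.yml fragment (hypothetical values)
services:
  knowstreaming-manager:
    image: knowstreaming/knowstreaming-manager:0.7.0
    ports:
      - "80:80"   # matches server.port above
    environment:
      SERVER_MYSQL_ADDRESS: "ksmysql:3306"    # host:port of the MariaDB/MySQL instance
      SERVER_MYSQL_DB: "know_streaming"       # database initialized with the DDL scripts below
      SERVER_MYSQL_USER: "root"
      SERVER_MYSQL_PASSWORD: "admin2022_"
      SERVER_ES_ADDRESS: "elasticsearch:9200" # substituted into es.client.address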

View File

@@ -0,0 +1,236 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="10 seconds">
<contextName>logback</contextName>
<property name="log.path" value="./logs" />
<!-- Colored logging -->
<!-- Converter classes that colored logging depends on -->
<conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter" />
<conversionRule conversionWord="wex" converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter" />
<conversionRule conversionWord="wEx" converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter" />
<!-- Colored log pattern -->
<property name="CONSOLE_LOG_PATTERN" value="${CONSOLE_LOG_PATTERN:-%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>
<!-- Console output -->
<appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
<filter class="ch.qos.logback.classic.filter.ThresholdFilter">
<level>info</level>
</filter>
<encoder>
<Pattern>${CONSOLE_LOG_PATTERN}</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<!-- File output -->
<!-- Time-based rolling output for DEBUG-level logs -->
<appender name="DEBUG_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/log_debug.log</file>
<!-- Log file output pattern -->
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset> <!-- Set the character set -->
</encoder>
<!-- Rolling policy: roll by date and by size -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- Log archiving -->
<fileNamePattern>${log.path}/log_debug_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<!-- Days of log files to keep -->
<maxHistory>2</maxHistory>
<totalSizeCap>5GB</totalSizeCap>
</rollingPolicy>
<!-- This file records only DEBUG-level logs -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>debug</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- Time-based rolling output for INFO-level logs -->
<appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Path and name of the log file currently being written -->
<file>${log.path}/log_info.log</file>
<!-- Log file output pattern -->
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<!-- Rolling policy: roll by date and by size -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<!-- Daily archive path and naming pattern -->
<fileNamePattern>${log.path}/log_info_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<!-- Days of log files to keep -->
<maxHistory>7</maxHistory>
</rollingPolicy>
<!-- This file records only INFO-level logs -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>info</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- Time-based rolling output for WARN-level logs -->
<appender name="WARN_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Path and name of the log file currently being written -->
<file>${log.path}/log_warn.log</file>
<!-- Log file output pattern -->
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset> <!-- Set the character set here -->
</encoder>
<!-- Rolling policy: roll by date and by size -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${log.path}/log_warn_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<!-- Days of log files to keep -->
<maxHistory>7</maxHistory>
</rollingPolicy>
<!-- This file records only WARN-level logs -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>warn</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- Time-based rolling output for ERROR-level logs -->
<appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
<!-- Path and name of the log file currently being written -->
<file>${log.path}/log_error.log</file>
<!-- Log file output pattern -->
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset> <!-- Set the character set here -->
</encoder>
<!-- Rolling policy: roll by date and by size -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${log.path}/log_error_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<!-- Days of log files to keep -->
<maxHistory>7</maxHistory>
</rollingPolicy>
<!-- This file records only ERROR-level logs -->
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
</appender>
<!-- ES access log -->
<appender name="ES_LOGGER" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/es/es.log</file>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${log.path}/es/es_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<maxHistory>3</maxHistory>
</rollingPolicy>
</appender>
<!-- Metrics collection log -->
<appender name="METRIC_LOGGER" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/metric/metrics.log</file>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${log.path}/metric/metrics_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<maxHistory>3</maxHistory>
</rollingPolicy>
</appender>
<!-- Task log -->
<appender name="TASK_LOGGER" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/task/task.log</file>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${log.path}/task/task_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<maxFileSize>100MB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
<maxHistory>3</maxHistory>
</rollingPolicy>
</appender>
<!-- logi-job log, time-based rolling output -->
<appender name="logIJobLogger" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${log.path}/logIJob/logIJob.log</file>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<FileNamePattern>${log.path}/logIJob/logIJob.log.%d{yyyy-MM-dd}</FileNamePattern>
<maxHistory>${maxHistory:-7}</maxHistory> <!-- falls back to 7 days when maxHistory is not defined -->
</rollingPolicy>
<encoder>
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<logger name="ES_LOGGER" level="ERROR" additivity="false">
<appender-ref ref="ES_LOGGER"/>
</logger>
<logger name="METRIC_LOGGER" level="ERROR" additivity="false">
<appender-ref ref="METRIC_LOGGER"/>
</logger>
<logger name="TASK_LOGGER" level="ERROR" additivity="false">
<appender-ref ref="TASK_LOGGER"/>
</logger>
<logger name="com.didiglobal.logi.job" level="ERROR" additivity="false">
<appender-ref ref="logIJobLogger" />
</logger>
<logger name="org.apache.zookeeper" level="WARN" additivity="false" />
<logger name="org.apache.ibatis" level="INFO" additivity="false" />
<logger name="org.mybatis.spring" level="INFO" additivity="false" />
<logger name="com.github.miemiedev.mybatis.paginator" level="INFO" additivity="false" />
<root level="debug">
<appender-ref ref="CONSOLE" />
<appender-ref ref="DEBUG_FILE" />
<appender-ref ref="INFO_FILE" />
<appender-ref ref="WARN_FILE" />
<appender-ref ref="ERROR_FILE" />
<!--<appender-ref ref="METRICS_LOG" />-->
</root>
<!-- Production profile: log to files -->
<!--<springProfile name="pro">-->
<!--<root level="info">-->
<!--<appender-ref ref="CONSOLE" />-->
<!--<appender-ref ref="DEBUG_FILE" />-->
<!--<appender-ref ref="INFO_FILE" />-->
<!--<appender-ref ref="ERROR_FILE" />-->
<!--<appender-ref ref="WARN_FILE" />-->
<!--</root>-->
<!--</springProfile>-->
</configuration>
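Because the application points logging.config at this file, individual logger levels can still be tuned from application.yml without editing the XML; Spring Boot applies logging.level entries on top of the logback configuration. A small sketch (the logger names are illustrative):

logging:
  config: classpath:logback-spring.xml
  level:
    com.didiglobal.logi.job: warn   # quiet the logi-job scheduler
    org.apache.kafka: error         # only surface Kafka client errors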

View File

@@ -1,3 +1,107 @@
-- SQL for KS-KM itself. KS-KM depends on Logi-Job and Logi-Security, so the two corresponding DDL SQL files must be executed as well.
DROP TABLE IF EXISTS `ks_kc_connect_cluster`;
CREATE TABLE `ks_kc_connect_cluster` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'Connect cluster ID',
`kafka_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Kafka cluster ID',
`name` varchar(128) NOT NULL DEFAULT '' COMMENT 'Cluster name',
`group_name` varchar(128) NOT NULL DEFAULT '' COMMENT 'Cluster group name',
`cluster_url` varchar(1024) NOT NULL DEFAULT '' COMMENT 'Cluster URL',
`member_leader_url` varchar(1024) NOT NULL DEFAULT '' COMMENT 'Leader member URL',
`version` varchar(64) NOT NULL DEFAULT '' COMMENT 'Connect version',
`jmx_properties` text COMMENT 'JMX configuration',
`state` tinyint(4) NOT NULL DEFAULT '1' COMMENT 'State of the consumer group used by the cluster, which also represents the cluster state: -1 Unknown, 0 Rebalance, 1 Active, 2 Dead, 3 Empty',
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Onboarding time',
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Update time',
PRIMARY KEY (`id`),
UNIQUE KEY `uniq_id_group_name` (`id`,`group_name`),
UNIQUE KEY `uniq_name_kafka_cluster` (`name`,`kafka_cluster_phy_id`),
KEY `idx_kafka_cluster_phy_id` (`kafka_cluster_phy_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Connect cluster information table';
DROP TABLE IF EXISTS `ks_kc_connector`;
CREATE TABLE `ks_kc_connector` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'ID',
`kafka_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Kafka cluster ID',
`connect_cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Connect cluster ID',
`connector_name` varchar(512) NOT NULL DEFAULT '' COMMENT 'Connector name',
`connector_class_name` varchar(512) NOT NULL DEFAULT '' COMMENT 'Connector class',
`connector_type` varchar(32) NOT NULL DEFAULT '' COMMENT 'Connector type',
`state` varchar(45) NOT NULL DEFAULT '' COMMENT 'State',
`topics` text COMMENT 'Topics accessed',
`task_count` int(11) NOT NULL DEFAULT '0' COMMENT 'Task count',
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Creation time',
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Update time',
PRIMARY KEY (`id`),
UNIQUE KEY `uniq_connect_cluster_id_connector_name` (`connect_cluster_id`,`connector_name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Connector information table';
DROP TABLE IF EXISTS `ks_kc_worker`;
CREATE TABLE `ks_kc_worker` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'ID',
`kafka_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Kafka cluster ID',
`connect_cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Connect cluster ID',
`member_id` varchar(512) NOT NULL DEFAULT '' COMMENT 'Member ID',
`host` varchar(128) NOT NULL DEFAULT '' COMMENT 'Hostname',
`jmx_port` int(16) NOT NULL DEFAULT '-1' COMMENT 'JMX port',
`url` varchar(1024) NOT NULL DEFAULT '' COMMENT 'URL',
`leader_url` varchar(1024) NOT NULL DEFAULT '' COMMENT 'Leader URL',
`leader` int(16) NOT NULL DEFAULT '0' COMMENT 'Leader flag: 1 is leader, 0 is not leader',
`worker_id` varchar(128) NOT NULL COMMENT 'Worker address',
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Creation time',
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Update time',
PRIMARY KEY (`id`),
UNIQUE KEY `uniq_cluster_id_member_id` (`connect_cluster_id`,`member_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Worker information table';
DROP TABLE IF EXISTS `ks_kc_worker_connector`;
CREATE TABLE `ks_kc_worker_connector` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'ID',
`kafka_cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Kafka cluster ID',
`connect_cluster_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Connect cluster ID',
`connector_name` varchar(512) NOT NULL DEFAULT '' COMMENT 'Connector name',
`worker_member_id` varchar(256) NOT NULL DEFAULT '' COMMENT 'Worker member ID',
`task_id` int(16) NOT NULL DEFAULT '-1' COMMENT 'Task ID',
`state` varchar(128) DEFAULT NULL COMMENT 'Task state',
`worker_id` varchar(128) DEFAULT NULL COMMENT 'Worker information',
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Creation time',
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Update time',
PRIMARY KEY (`id`),
UNIQUE KEY `uniq_relation` (`connect_cluster_id`,`connector_name`,`task_id`,`worker_member_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Worker-Connector relationship table';
DROP TABLE IF EXISTS `ks_km_zookeeper`;
CREATE TABLE `ks_km_zookeeper` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'ID',
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Physical cluster ID',
`host` varchar(128) NOT NULL DEFAULT '' COMMENT 'ZooKeeper hostname',
`port` int(16) NOT NULL DEFAULT '-1' COMMENT 'ZooKeeper port',
`role` varchar(16) NOT NULL DEFAULT '' COMMENT 'Role: leader, follower, observer',
`version` varchar(128) NOT NULL DEFAULT '' COMMENT 'ZooKeeper version',
`status` int(16) NOT NULL DEFAULT '0' COMMENT 'Status: 1 alive, 0 not alive, 11 alive but four-letter-word commands unavailable',
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Creation time',
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Update time',
PRIMARY KEY (`id`),
UNIQUE KEY `uniq_cluster_phy_id_host_port` (`cluster_phy_id`,`host`, `port`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='ZooKeeper information table';
DROP TABLE IF EXISTS `ks_km_group`;
CREATE TABLE `ks_km_group` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'ID',
`cluster_phy_id` bigint(20) NOT NULL DEFAULT '-1' COMMENT 'Cluster ID',
`name` varchar(192) COLLATE utf8_bin NOT NULL DEFAULT '' COMMENT 'Group name',
`member_count` int(11) unsigned NOT NULL DEFAULT '0' COMMENT 'Member count',
`topic_members` text CHARACTER SET utf8 COMMENT 'List of topics consumed by the group',
`partition_assignor` varchar(255) CHARACTER SET utf8 NOT NULL COMMENT 'Partition assignment strategy',
`coordinator_id` int(11) NOT NULL COMMENT 'Broker ID of the group coordinator',
`type` int(11) NOT NULL COMMENT 'Group type: 0 consumer, 1 connector',
`state` varchar(64) CHARACTER SET utf8 NOT NULL DEFAULT '' COMMENT 'State',
`create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'Creation time',
`update_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Update time',
PRIMARY KEY (`id`),
UNIQUE KEY `uniq_cluster_phy_id_name` (`cluster_phy_id`,`name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Group information table';
DROP TABLE IF EXISTS `ks_km_broker`;
CREATE TABLE `ks_km_broker` (
`id` bigint(20) unsigned NOT NULL AUTO_INCREMENT COMMENT 'ID',
@@ -255,6 +359,7 @@ CREATE TABLE `ks_km_physical_cluster` (
`kafka_version` varchar(32) NOT NULL DEFAULT '' COMMENT 'Kafka version',
`client_properties` text COMMENT 'Kafka client configuration',
`jmx_properties` text COMMENT 'JMX configuration',
`zk_properties` text COMMENT 'ZK configuration',
`description` text COMMENT 'Remarks',
`auth_type` int(11) NOT NULL DEFAULT '0' COMMENT 'Auth type: -1 unknown, 0 no auth',
`run_state` tinyint(4) NOT NULL DEFAULT '1' COMMENT 'Run state: 0 not monitored, 1 monitored with ZK, 2 monitored without ZK',
@@ -352,7 +457,6 @@ CREATE TABLE `ks_km_app_node` (
PRIMARY KEY (`id`),
KEY `idx_app_host` (`app_name`,`host_name`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='Node information of the KM cluster deployment';
-- SQL for the Logi-Job module; it must be executed when installing KS-KM
@@ -677,13 +781,21 @@ CREATE TABLE `logi_security_config`
KEY `idx_group_name` (`value_group`,`value_name`)
) ENGINE=InnoDB AUTO_INCREMENT=1592 DEFAULT CHARSET=utf8 COMMENT='Logi configuration items';
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_CONNECTOR_FAILED_TASK_COUNT', '{\"value\" : 1}', 'Number of connector tasks in failed state', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_CONNECTOR_UNASSIGNED_TASK_COUNT', '{\"value\" : 1}', 'Number of unassigned connector tasks', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_CONNECT_CLUSTER_TASK_STARTUP_FAILURE_PERCENTAGE', '{\"value\" : 0.05}', 'Connect cluster task startup failure rate', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_CLUSTER_NO_CONTROLLER','{ \"value\": 1, \"weight\": 30 } ','Cluster controller count is normal','know-streaming');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_BROKER_REQUEST_QUEUE_FULL','{ \"value\": 10, \"weight\": 20 } ','Broker RequestQueueSize metric','know-streaming');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_BROKER_NETWORK_PROCESSOR_AVG_IDLE_TOO_LOW','{ \"value\": 0.8, \"weight\": 20 } ','Broker NetworkProcessorAvgIdlePercent metric','know-streaming');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_GROUP_RE_BALANCE_TOO_FREQUENTLY','{\n \"latestMinutes\": 10,\n \"detectedTimes\": 8,\n \"weight\": 10\n}\n','Group rebalance frequency','know-streaming');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_TOPIC_NO_LEADER','{ \"value\": 1, \"weight\": 10 } ','Number of topics without a leader','know-streaming');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`,`value_group`,`value_name`,`value`,`description`,`operator`) VALUES (-1,'HEALTH','HC_TOPIC_UNDER_REPLICA_TOO_LONG','{ \"latestMinutes\": 10, \"detectedTimes\": 8, \"weight\": 10 } ','Topic under-replicated duration','know-streaming');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_BRAIN_SPLIT', '{ \"value\": 1} ', 'ZK split-brain', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_OUTSTANDING_REQUESTS', '{ \"amount\": 100, \"ratio\":0.8} ', 'ZK outstanding request backlog', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_WATCH_COUNT', '{ \"amount\": 100000, \"ratio\": 0.8 } ', 'ZK watch count', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_ALIVE_CONNECTIONS', '{ \"amount\": 10000, \"ratio\": 0.8 } ', 'ZK connection count', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_APPROXIMATE_DATA_SIZE', '{ \"amount\": 524288000, \"ratio\": 0.8 } ', 'ZK data size (bytes)', 'admin');
INSERT INTO `ks_km_platform_cluster_config` (`cluster_id`, `value_group`, `value_name`, `value`, `description`, `operator`) VALUES ('-1', 'HEALTH', 'HC_ZK_SENT_RATE', '{ \"amount\": 500000, \"ratio\": 0.8 } ', 'ZK packets sent', 'admin');
-- Initialize permissions
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1593', 'Multi-cluster management', '0', '0', '1', 'Multi-cluster management', '0', 'know-streaming');
INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('1595', 'System management', '0', '0', '1', 'System management', '0', 'know-streaming');
@@ -734,6 +846,7 @@ INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `l
-- Initialize users
-- INSERT INTO `logi_security_user` (`id`, `user_name`, `pw`, `real_name`, `is_delete`, `app_name`) VALUES ('1', 'admin', 'V1ZkU2RHRlhOSGxOUkVsNVdETjBRVlp0Y0V0T1IwWnlaVEZ6YWxGRVJrRkpNVEU1VTJwYVUySkhlRzlSU0RBOWUwQldha28wWVd0N1d5TkFNa0FqWFgxS05sSnNiR2hBZlE9PXtAVmpKNGFre1sjQDNAI119SjZSbGxoQH0=Mv{#cdRgJ45Lqx}3IubEW87!==', 'System administrator', '0', 'know-streaming');
INSERT INTO `logi_security_user` (`id`, `user_name`, `pw`, `real_name`, `is_delete`, `app_name`) VALUES ('1', 'admin', 'V1ZkU2RHRlhOVGRSUmxweFUycFNhR0V6ZEdKSk1FRjRVVU5PWkdaVmJ6SlZiWGh6WVVWQ09YdEFWbXBLTkdGcmUxc2pRREpBSTExOVNqWlNiR3hvUUgwPXtAVmpKNGFre1sjQDNAI119SjZSbGxoQH0=Mv{#cdRgJ45Lqx}3IubEW87!==', 'System administrator', '0', 'know-streaming');
-- Initialize roles
@@ -782,3 +895,4 @@ INSERT INTO `logi_security_config`
(`value_group`,`value_name`,`value`,`edit`,`status`,`memo`,`is_delete`,`app_name`,`operator`)
VALUES
('SECURITY.LOGIN','SECURITY.TRICK_USERS','[\n \"admin\"\n]',1,1,'Users allowed to skip login',0,'know-streaming','admin');
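These scripts must run against the configured database before first startup. In the Helm deployment below they ship with the knowstreaming-mysql image; for a manual Kubernetes install, a one-shot Job is one way to apply them. A sketch, assuming the DDL is packaged in a ConfigMap named ks-ddl-sql and the MySQL root password in a Secret named ksmysql-secret (both names hypothetical):

apiVersion: batch/v1
kind: Job
metadata:
  name: ks-init-ddl
spec:
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: init-ddl
          image: mysql:5.7
          # MYSQL_PWD is read implicitly by the mysql client
          command: ["sh", "-c", "mysql -h ksmysql -u root know_streaming < /sql/ddl-ks-km.sql"]
          env:
            - name: MYSQL_PWD
              valueFrom:
                secretKeyRef:
                  name: ksmysql-secret
                  key: password
          volumeMounts:
            - name: sql
              mountPath: /sql
      volumes:
        - name: sql
          configMap:
            name: ks-ddl-sql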

View File

@@ -1,16 +1,16 @@
apiVersion: v2
name: knowstreaming-manager
description: knowstreaming-manager Helm chart
type: application
-version: 0.1.5
+version: 0.1.9
maintainers:
- email: didicloud@didiglobal.com
name: didicloud
appVersion: "3.0.0-beta.3"
appVersion: "3.2.0"
dependencies:
- name: knowstreaming-web

View File

@@ -175,6 +175,7 @@ antiAffinityTopologyKey: "kubernetes.io/hostname"
# and that they will never end up on the same node. Setting this to soft will do this "best effort"
antiAffinity: ""
#antiAffinity: "hard"
# This is the node affinity settings as defined in
# https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
nodeAffinity: {}
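The empty nodeAffinity above accepts any standard Kubernetes node-affinity block. For example, to keep the Elasticsearch pods on a dedicated node pool (the label key and value are hypothetical):

nodeAffinity:
  requiredDuringSchedulingIgnoredDuringExecution:
    nodeSelectorTerms:
      - matchExpressions:
          - key: node-role.kubernetes.io/elasticsearch
            operator: In
            values:
              - "true"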

View File

@@ -8,7 +8,7 @@ image:
repository: knowstreaming/knowstreaming-ui
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "latest"
tag: "0.1.0"
imagePullSecrets: []
nameOverride: ""
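If the images are mirrored to a private registry, imagePullSecrets above can reference a pre-created docker-registry secret; a hypothetical example:

imagePullSecrets:
  - name: regcred   # created beforehand with: kubectl create secret docker-registry regcred ...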

View File

@@ -21,7 +21,7 @@ spec:
{{- include "ksmysql.selectorLabels" . | nindent 8 }}
spec:
containers:
- - image: knowstreaming/knowstreaming-mysql:latest
+ - image: knowstreaming/knowstreaming-mysql:0.7.0
name: {{ .Chart.Name }}
env:
- name: MYSQL_DATABASE

View File

@@ -41,6 +41,7 @@ data:
idle-timeout: 30000
connection-test-query: SELECT 1
logi-job:
enable: true
{{ if .Values.ksmysql.enabled }}
jdbc-url: jdbc:mariadb://{{ .Values.ksmysql.service.name }}:{{ .Values.ksmysql.service.port }}/{{ .Values.ksmysql.mysql.dbname }}?useUnicode=true&characterEncoding=utf8&jdbcCompliantTruncation=true&allowMultiQueries=true&useSSL=false&alwaysAutoGeneratedKeys=true&serverTimezone=GMT%2B8&allowPublicKeyRetrieval=true
username: {{ .Values.ksmysql.mysql.username }}
@@ -77,6 +78,10 @@ data:
config: classpath:logback-spring.xml
thread-pool:
es:
search: # ES query thread pool
thread-num: 20 # Pool size
queue-size: 10000 # Queue size
scheduled:
thread-num: 2 # Thread pool size for @Scheduled tasks; the default is 1
collector: # Collection module configuration
@@ -104,18 +109,25 @@ data:
max-idle-client-num: 20 # Maximum idle clients
max-total-client-num: 20 # Maximum total clients
borrow-timeout-unit-ms: 5000 # Borrow timeout, in milliseconds
kafka-admin:
client-cnt: 1 # Number of KafkaAdminClient instances created per Kafka cluster
es:
index:
expire: 15 # Index retention in days; 15 means indices older than 15 days are deleted by KS
client:
{{ if .Values.elasticsearch.enabled }}
address: elasticsearch-master:9200
{{- else }}
address: {{ .Values.elasticsearch.esClientAddress }}:{{ .Values.elasticsearch.esProt }}
pass: {{ .Values.elasticsearch.userPass }}
{{- end }}
client-cnt: 10
io-thread-cnt: 2
max-retry-cnt: 5
# Prometheus metrics export configuration
management:
endpoints:
@@ -151,6 +163,7 @@ data:
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_group_metric -d '{"order":10,"index_patterns":["ks_kafka_group_metric*"],"settings":{"index":{"number_of_shards":"10"}},"mappings":{"properties":{"group":{"type":"keyword"},"partitionId":{"type":"long"},"routingValue":{"type":"text","fields":{"keyword":{"ignore_above":256,"type":"keyword"}}},"clusterPhyId":{"type":"long"},"topic":{"type":"keyword"},"metrics":{"properties":{"HealthScore":{"type":"float"},"Lag":{"type":"float"},"OffsetConsumed":{"type":"float"},"HealthCheckTotal":{"type":"float"},"HealthCheckPassed":{"type":"float"}}},"groupMetric":{"type":"keyword"},"key":{"type":"text","fields":{"keyword":{"ignore_above":256,"type":"keyword"}}},"timestamp":{"format":"yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis","index":true,"type":"date","doc_values":true}}},"aliases":{}}' && \
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_partition_metric -d '{"order":10,"index_patterns":["ks_kafka_partition_metric*"],"settings":{"index":{"number_of_shards":"10"}},"mappings":{"properties":{"brokerId":{"type":"long"},"partitionId":{"type":"long"},"routingValue":{"type":"text","fields":{"keyword":{"ignore_above":256,"type":"keyword"}}},"clusterPhyId":{"type":"long"},"topic":{"type":"keyword"},"metrics":{"properties":{"LogStartOffset":{"type":"float"},"Messages":{"type":"float"},"LogEndOffset":{"type":"float"}}},"key":{"type":"text","fields":{"keyword":{"ignore_above":256,"type":"keyword"}}},"timestamp":{"format":"yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis","index":true,"type":"date","doc_values":true}}},"aliases":{}}' && \
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_replication_metric -d '{"order":10,"index_patterns":["ks_kafka_replication_metric*"],"settings":{"index":{"number_of_shards":"10"}},"mappings":{"properties":{"timestamp":{"format":"yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis","index":true,"type":"date","doc_values":true}}},"aliases":{}}' && \
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_zookeeper_metric -d '{"order":10,"index_patterns":["ks_kafka_zookeeper_metric*"],"settings":{"index":{"number_of_shards":"10"}},"mappings":{"properties":{"routingValue":{"type":"text","fields":{"keyword":{"ignore_above":256,"type":"keyword"}}},"clusterPhyId":{"type":"long"},"metrics":{"properties":{"AvgRequestLatency":{"type":"double"},"MinRequestLatency":{"type":"double"},"MaxRequestLatency":{"type":"double"},"OutstandingRequests":{"type":"double"},"NodeCount":{"type":"double"},"WatchCount":{"type":"double"},"NumAliveConnections":{"type":"double"},"PacketsReceived":{"type":"double"},"PacketsSent":{"type":"double"},"EphemeralsCount":{"type":"double"},"ApproximateDataSize":{"type":"double"},"OpenFileDescriptorCount":{"type":"double"},"MaxFileDescriptorCount":{"type":"double"}}},"key":{"type":"text","fields":{"keyword":{"ignore_above":256,"type":"keyword"}}},"timestamp":{"format":"yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis","type":"date"}}},"aliases":{}}' && \
curl -s -o /dev/null -X POST -H 'cache-control: no-cache' -H 'content-type: application/json' http://${esaddr}:${port}/_template/ks_kafka_topic_metric -d '{"order":10,"index_patterns":["ks_kafka_topic_metric*"],"settings":{"index":{"number_of_shards":"10"}},"mappings":{"properties":{"brokerId":{"type":"long"},"routingValue":{"type":"text","fields":{"keyword":{"ignore_above":256,"type":"keyword"}}},"topic":{"type":"keyword"},"clusterPhyId":{"type":"long"},"metrics":{"properties":{"BytesIn_min_15":{"type":"float"},"Messages":{"type":"float"},"BytesRejected":{"type":"float"},"PartitionURP":{"type":"float"},"HealthCheckTotal":{"type":"float"},"ReplicationCount":{"type":"float"},"ReplicationBytesOut":{"type":"float"},"ReplicationBytesIn":{"type":"float"},"FailedFetchRequests":{"type":"float"},"BytesIn_min_5":{"type":"float"},"HealthScore":{"type":"float"},"LogSize":{"type":"float"},"BytesOut":{"type":"float"},"BytesOut_min_15":{"type":"float"},"FailedProduceRequests":{"type":"float"},"BytesIn":{"type":"float"},"BytesOut_min_5":{"type":"float"},"MessagesIn":{"type":"float"},"TotalProduceRequests":{"type":"float"},"HealthCheckPassed":{"type":"float"}}},"brokerAgg":{"type":"keyword"},"key":{"type":"text","fields":{"keyword":{"ignore_above":256,"type":"keyword"}}},"timestamp":{"format":"yyyy-MM-dd HH:mm:ss Z||yyyy-MM-dd HH:mm:ss||yyyy-MM-dd HH:mm:ss.SSS Z||yyyy-MM-dd HH:mm:ss.SSS||yyyy-MM-dd HH:mm:ss,SSS||yyyy/MM/dd HH:mm:ss||yyyy-MM-dd HH:mm:ss,SSS Z||yyyy/MM/dd HH:mm:ss,SSS Z||epoch_millis","index":true,"type":"date","doc_values":true}}},"aliases":{}}' || \
exit 1
for i in {0..6};do
@@ -160,6 +173,8 @@ data:
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_group_metric${logdate} && \
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_partition_metric${logdate} && \
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_replication_metric${logdate} && \
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_zookeeper_metric${logdate} && \
curl -s -o /dev/null -X PUT http://${esaddr}:${port}/ks_kafka_topic_metric${logdate} || \
exit 2
done

View File

@@ -33,6 +33,8 @@ spec:
- name: init-config
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
command: ['/bin/bash', '/conf/init_es_index.sh']
resources:
{{- toYaml .Values.resources | nindent 10 }}
volumeMounts:
- name: configmap
mountPath: /conf
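The added resources block copies .Values.resources into the init-config container, so the ES index-initialization step runs under the same requests and limits as the manager itself. A hypothetical values.yaml entry that the toYaml call above would render:

resources:
  requests:
    cpu: 500m
    memory: 512Mi
  limits:
    cpu: "1"
    memory: 1Gi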

View File

@@ -3,7 +3,7 @@ replicaCount: 2
image:
repository: knowstreaming/knowstreaming-manager
pullPolicy: IfNotPresent
tag: "latest"
tag: "0.7.0"
imagePullSecrets: []
nameOverride: ""
@@ -73,7 +73,7 @@ knowstreaming-web:
image:
repository: knowstreaming/knowstreaming-ui
pullPolicy: IfNotPresent
tag: "latest"
tag: "0.7.0"
service:
type: NodePort
@@ -93,6 +93,7 @@ elasticsearch:
#------------------------------------------------------------------
esClientAddress: 10.96.64.13
esProt: 8061
userPass: "username:password" #ES账号密码如果有账号密码按照username:password 的格式填写,没有则不需要填写
#------------------------------------------------------------------
replicas: 3
minimumMasterNodes: 2
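With the keys above, pointing the chart at an existing Elasticsearch cluster instead of the bundled one only requires a values override; a hypothetical override file:

# my-values.yaml (hypothetical): use an external ES with basic auth
elasticsearch:
  enabled: false
  esClientAddress: 10.96.64.13
  esProt: 8061
  userPass: "elastic:changeme"   # username:password; omit if the cluster has no auth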