diff --git a/distribution/conf/application.yml.example b/distribution/conf/application.yml.example index ee0290d8..f777ce31 100644 --- a/distribution/conf/application.yml.example +++ b/distribution/conf/application.yml.example @@ -85,6 +85,9 @@ client-pool: borrow-timeout-unit-ms: 3000 # 租借超时时间,单位毫秒 account: + jump-login: + gateway-api: false # 网关接口 + third-part-api: false # 第三方接口 ldap: enabled: false url: ldap://127.0.0.1:389/ @@ -98,19 +101,20 @@ account: auth-user-registration: true auth-user-registration-role: normal -kcm: - enabled: false - s3: +kcm: # 集群安装部署,仅安装broker + enabled: false # 是否开启 + s3: # s3 存储服务 endpoint: s3.didiyunapi.com access-key: 1234567890 secret-key: 0987654321 bucket: logi-kafka - n9e: - base-url: http://127.0.0.1:8004 - user-token: 12345678 - timeout: 300 - account: root - script-file: kcm_script.sh + n9e: # 夜莺 + base-url: http://127.0.0.1:8004 # 夜莺job服务地址 + user-token: 12345678 # 用户的token + timeout: 300 # 当台操作的超时时间 + account: root # 操作时使用的账号 + script-file: kcm_script.sh # 脚本,已内置好,在源码的kcm模块内,此处配置无需修改 + logikm-url: http://127.0.0.1:8080 # logikm部署地址,部署时kcm_script.sh会调用logikm检查部署中的一些状态 monitor: enabled: false diff --git a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ApiPrefix.java b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ApiPrefix.java index b0f84405..5422076c 100644 --- a/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ApiPrefix.java +++ b/kafka-manager-common/src/main/java/com/xiaojukeji/kafka/manager/common/constant/ApiPrefix.java @@ -20,12 +20,6 @@ public class ApiPrefix { // open public static final String API_V1_THIRD_PART_PREFIX = API_V1_PREFIX + "third-part/"; - // 开放给OP的接口, 后续对 应的接口的集群都需要是物理集群 - public static final String API_V1_THIRD_PART_OP_PREFIX = API_V1_THIRD_PART_PREFIX + "op/"; - - // 开放给Normal的接口, 后续对应的接口的集群,都需要是逻辑集群 - public static final String API_V1_THIRD_PART_NORMAL_PREFIX = API_V1_THIRD_PART_PREFIX + "normal/"; - 
// gateway public static final String GATEWAY_API_V1_PREFIX = "/gateway" + API_V1_PREFIX; diff --git a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/LoginServiceImpl.java b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/LoginServiceImpl.java index f49f7dca..f0299d87 100644 --- a/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/LoginServiceImpl.java +++ b/kafka-manager-extends/kafka-manager-account/src/main/java/com/xiaojukeji/kafka/manager/account/impl/LoginServiceImpl.java @@ -14,6 +14,7 @@ import com.xiaojukeji.kafka.manager.common.utils.ValidateUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.beans.factory.annotation.Value; import org.springframework.stereotype.Service; import javax.servlet.http.Cookie; @@ -27,7 +28,13 @@ import javax.servlet.http.HttpSession; */ @Service("loginService") public class LoginServiceImpl implements LoginService { - private final static Logger LOGGER = LoggerFactory.getLogger(LoginServiceImpl.class); + private static final Logger LOGGER = LoggerFactory.getLogger(LoginServiceImpl.class); + + @Value(value = "${account.jump-login.gateway-api:false}") + private Boolean jumpLoginGatewayApi; + + @Value(value = "${account.jump-login.third-part-api:false}") + private Boolean jumpLoginThirdPartApi; @Autowired private AccountService accountService; @@ -75,8 +82,10 @@ public class LoginServiceImpl implements LoginService { return false; } - if (classRequestMappingValue.equals(ApiPrefix.API_V1_SSO_PREFIX)) { - // 白名单接口直接true + if (classRequestMappingValue.equals(ApiPrefix.API_V1_SSO_PREFIX) || + (jumpLoginGatewayApi != null && jumpLoginGatewayApi && classRequestMappingValue.equals(ApiPrefix.GATEWAY_API_V1_PREFIX)) || + (jumpLoginThirdPartApi != null && jumpLoginThirdPartApi && 
classRequestMappingValue.equals(ApiPrefix.API_V1_THIRD_PART_PREFIX))) { + // 登录接口 or 允许跳过且是跳过类型的接口,则直接跳过登录 return true; } diff --git a/kafka-manager-extends/kafka-manager-kcm/src/main/java/com/xiaojukeji/kafka/manager/kcm/component/agent/n9e/N9e.java b/kafka-manager-extends/kafka-manager-kcm/src/main/java/com/xiaojukeji/kafka/manager/kcm/component/agent/n9e/N9e.java index 6e3fa677..d0a2503b 100644 --- a/kafka-manager-extends/kafka-manager-kcm/src/main/java/com/xiaojukeji/kafka/manager/kcm/component/agent/n9e/N9e.java +++ b/kafka-manager-extends/kafka-manager-kcm/src/main/java/com/xiaojukeji/kafka/manager/kcm/component/agent/n9e/N9e.java @@ -37,21 +37,24 @@ import java.util.Map; public class N9e extends AbstractAgent { private static final Logger LOGGER = LoggerFactory.getLogger(N9e.class); - @Value("${kcm.n9e.base-url}") + @Value("${kcm.n9e.base-url:}") private String baseUrl; - @Value("${kcm.n9e.user-token}") + @Value("${kcm.n9e.user-token:12345678}") private String userToken; - @Value("${kcm.n9e.account}") + @Value("${kcm.n9e.account:root}") private String account; - @Value("${kcm.n9e.timeout}") + @Value("${kcm.n9e.timeout:300}") private Integer timeout; - @Value("${kcm.n9e.script-file}") + @Value("${kcm.n9e.script-file:kcm_script.sh}") private String scriptFile; + @Value("${kcm.n9e.logikm-url:}") + private String logiKMUrl; + private String script; private static final String CREATE_TASK_URI = "/api/job-ce/tasks"; @@ -219,7 +222,8 @@ public class N9e extends AbstractAgent { sb.append(creationTaskData.getKafkaPackageUrl()).append(",,"); sb.append(creationTaskData.getServerPropertiesName().replace(KafkaFileEnum.SERVER_CONFIG.getSuffix(), "")).append(",,"); sb.append(creationTaskData.getServerPropertiesMd5()).append(",,"); - sb.append(creationTaskData.getServerPropertiesUrl()); + sb.append(creationTaskData.getServerPropertiesUrl()).append(",,"); + sb.append(this.logiKMUrl); N9eCreationTask n9eCreationTask = new N9eCreationTask(); 
n9eCreationTask.setTitle(Constant.TASK_TITLE_PREFIX + "-集群ID:" + creationTaskData.getClusterId()); diff --git a/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh b/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh index ffd54a20..16ffb80c 100644 --- a/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh +++ b/kafka-manager-extends/kafka-manager-kcm/src/main/resources/kcm_script.sh @@ -18,12 +18,13 @@ p_kafka_server_properties_name=${7} #server配置名 p_kafka_server_properties_md5=${8} #server配置MD5 p_kafka_server_properties_url=${9} #server配置文件下载地址 +p_kafka_manager_url=${10} #LogiKM地址 + #----------------------------------------配置信息------------------------------------------------------# g_base_dir='/home' g_cluster_task_dir=${g_base_dir}"/kafka_cluster_task/task_${p_task_id}" #部署升级路径 g_rollback_version=${g_cluster_task_dir}"/rollback_version" #回滚版本 g_new_kafka_package_name='' #最终的包名 -g_kafka_manager_addr='' #kafka-manager地址 g_local_ip=`ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:"` g_hostname=${g_local_ip} @@ -47,7 +48,7 @@ function dchat_alarm() { # 检查并初始化环境 function check_and_init_env() { - if [ -z "${p_task_id}" -o -z "${p_cluster_task_type}" -o -z "${p_kafka_package_url}" -o -z "${p_cluster_id}" -o -z "${p_kafka_package_name}" -o -z "${p_kafka_package_md5}" -o -z "${p_kafka_server_properties_name}" -o -z "${p_kafka_server_properties_md5}" ]; then + if [ -z "${p_task_id}" -o -z "${p_cluster_task_type}" -o -z "${p_kafka_package_url}" -o -z "${p_cluster_id}" -o -z "${p_kafka_package_name}" -o -z "${p_kafka_package_md5}" -o -z "${p_kafka_server_properties_name}" -o -z "${p_kafka_server_properties_md5}" -o -z "${p_kafka_manager_url}" ]; then ECHO_LOG "存在为空的参数不合法, 退出集群任务" dchat_alarm "存在为空的参数不合法, 退出集群任务" exit 1 @@ -72,11 +73,11 @@ function check_and_init_env() { # 检查并等待集群所有的副本处于同步的状态 function check_and_wait_broker_stabled() { - under_replication_count=`curl -s -G -d 
"hostname="${g_hostname} ${g_kafka_manager_addr}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l` + under_replication_count=`curl -s -G -d "hostname="${g_hostname} ${p_kafka_manager_url}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l` while [ "$under_replication_count" -ne 1 ]; do ECHO_LOG "存在${under_replication_count}个副本未同步, sleep 10s" sleep 10 - under_replication_count=`curl -s -G -d "hostname="${g_hostname} ${g_kafka_manager_addr}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l` + under_replication_count=`curl -s -G -d "hostname="${g_hostname} ${p_kafka_manager_url}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l` done ECHO_LOG "集群副本都已经处于同步的状态, 可以进行集群升级" } @@ -324,6 +325,7 @@ ECHO_LOG " p_kafka_package_name=${p_kafka_package_name}" ECHO_LOG " p_kafka_package_md5=${p_kafka_package_md5}" ECHO_LOG " p_kafka_server_properties_name=${p_kafka_server_properties_name}" ECHO_LOG " p_kafka_server_properties_md5=${p_kafka_server_properties_md5}" +ECHO_LOG " p_kafka_manager_url=${p_kafka_manager_url}" @@ -342,7 +344,7 @@ fi ECHO_LOG "停kafka服务" stop_kafka_server -ECHO_LOG "停5秒, 确保" +ECHO_LOG "再停5秒, 确保端口已释放" sleep 5 if [ "${p_cluster_task_type}" == "0" ];then diff --git a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/api/versionone/thirdpart/ThirdPartBrokerController.java b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/api/versionone/thirdpart/ThirdPartBrokerController.java index 790b85be..8469afec 100644 --- a/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/api/versionone/thirdpart/ThirdPartBrokerController.java +++ b/kafka-manager-web/src/main/java/com/xiaojukeji/kafka/manager/web/api/versionone/thirdpart/ThirdPartBrokerController.java @@ -32,7 +32,7 @@ import java.util.stream.Collectors; */ @Api(tags = "开放接口-Broker相关接口(REST)") @RestController 
-@RequestMapping(ApiPrefix.API_V1_THIRD_PART_OP_PREFIX) +@RequestMapping(ApiPrefix.API_V1_THIRD_PART_PREFIX) public class ThirdPartBrokerController { @Autowired private BrokerService brokerService; @@ -44,7 +44,7 @@ public class ThirdPartBrokerController { private ClusterService clusterService; @ApiOperation(value = "Broker信息概览", notes = "") - @RequestMapping(value = "{clusterId}/brokers/{brokerId}/overview", method = RequestMethod.GET) + @GetMapping(value = "{clusterId}/brokers/{brokerId}/overview") @ResponseBody public Result getBrokerOverview(@PathVariable Long clusterId, @PathVariable Integer brokerId) { @@ -70,7 +70,7 @@ public class ThirdPartBrokerController { } @ApiOperation(value = "BrokerRegion信息", notes = "所有集群的") - @RequestMapping(value = "broker-regions", method = RequestMethod.GET) + @GetMapping(value = "broker-regions") @ResponseBody public Result> getBrokerRegions() { List clusterDOList = clusterService.list(); diff --git a/kafka-manager-web/src/main/resources/application.yml b/kafka-manager-web/src/main/resources/application.yml index 4f83afb5..9cd51d46 100644 --- a/kafka-manager-web/src/main/resources/application.yml +++ b/kafka-manager-web/src/main/resources/application.yml @@ -79,6 +79,9 @@ client-pool: borrow-timeout-unit-ms: 3000 # 租借超时时间,单位毫秒 account: + jump-login: + gateway-api: false # 网关接口 + third-part-api: false # 第三方接口 ldap: enabled: false url: ldap://127.0.0.1:389/ @@ -92,19 +95,20 @@ account: auth-user-registration: true auth-user-registration-role: normal -kcm: - enabled: false - s3: +kcm: # 集群安装部署,仅安装broker + enabled: false # 是否开启 + s3: # s3 存储服务 endpoint: s3.didiyunapi.com access-key: 1234567890 secret-key: 0987654321 bucket: logi-kafka - n9e: - base-url: http://127.0.0.1:8004 - user-token: 12345678 - timeout: 300 - account: root - script-file: kcm_script.sh + n9e: # 夜莺 + base-url: http://127.0.0.1:8004 # 夜莺job服务地址 + user-token: 12345678 # 用户的token + timeout: 300 # 单台操作的超时时间 + account: root # 操作时使用的账号 + script-file: kcm_script.sh # 
脚本,已内置好,在源码的kcm模块内,此处配置无需修改 + logikm-url: http://127.0.0.1:8080 # logikm部署地址,部署时kcm_script.sh会调用logikm检查部署中的一些状态 monitor: enabled: false