v2.1版本更新

This commit is contained in:
zengqiao
2020-12-19 00:27:16 +08:00
parent 3fea5c9c8c
commit 49280a8617
75 changed files with 1098 additions and 148 deletions

View File

@@ -94,6 +94,7 @@ public class N9e extends AbstractAgent {
);
N9eResult zr = JSON.parseObject(response, N9eResult.class);
if (!ValidateUtils.isBlank(zr.getErr())) {
LOGGER.warn("class=N9e||method=createTask||param={}||errMsg={}||msg=call create task fail", JsonUtils.toJSONString(param),zr.getErr());
return null;
}
return Long.valueOf(zr.getDat().toString());
@@ -110,7 +111,7 @@ public class N9e extends AbstractAgent {
String response = null;
try {
- response = HttpUtils.postForString(
+ response = HttpUtils.putForString(
baseUrl + ACTION_TASK_URI.replace("{taskId}", taskId.toString()),
JSON.toJSONString(param),
buildHeader()
@@ -119,6 +120,7 @@ public class N9e extends AbstractAgent {
if (ValidateUtils.isBlank(zr.getErr())) {
return true;
}
LOGGER.warn("class=N9e||method=actionTask||param={}||errMsg={}||msg=call action task fail", JSON.toJSONString(param),zr.getErr());
return false;
} catch (Exception e) {
LOGGER.error("action task failed, taskId:{}, action:{}.", taskId, action, e);
@@ -134,7 +136,7 @@ public class N9e extends AbstractAgent {
String response = null;
try {
- response = HttpUtils.postForString(
+ response = HttpUtils.putForString(
baseUrl + ACTION_HOST_TASK_URI.replace("{taskId}", taskId.toString()),
JSON.toJSONString(param),
buildHeader()
@@ -143,6 +145,7 @@ public class N9e extends AbstractAgent {
if (ValidateUtils.isBlank(zr.getErr())) {
return true;
}
LOGGER.warn("class=N9e||method=actionHostTask||param={}||errMsg={}||msg=call action host task fail", JSON.toJSONString(param),zr.getErr());
return false;
} catch (Exception e) {
LOGGER.error("action task failed, taskId:{} action:{} hostname:{}.", taskId, action, hostname, e);
@@ -265,6 +268,7 @@ public class N9e extends AbstractAgent {
while ((line = bufferedReader.readLine()) != null) {
stringBuilder.append(line);
stringBuilder.append("\n");
}
return stringBuilder.toString();
} catch (IOException e) {

View File

@@ -2,6 +2,7 @@ package com.xiaojukeji.kafka.manager.kcm.component.storage.local;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import com.xiaojukeji.kafka.manager.kcm.component.storage.AbstractStorageService;
import org.springframework.web.multipart.MultipartFile;
@@ -12,6 +13,9 @@ import org.springframework.web.multipart.MultipartFile;
*/
@Service("storageService")
public class Local extends AbstractStorageService {
@Value("${kcm.storage.base-url}")
private String baseUrl;
@Override
public boolean upload(String fileName, String fileMd5, MultipartFile uploadFile) {
return false;
@@ -24,6 +28,6 @@ public class Local extends AbstractStorageService {
@Override
public String getDownloadBaseUrl() {
- return "";
+ return baseUrl;
}
}

View File

@@ -56,6 +56,7 @@ public class KafkaFileServiceImpl implements KafkaFileService {
}
return ResultStatus.SUCCESS;
} catch (DuplicateKeyException e) {
LOGGER.error("class=KafkaFileServiceImpl||method=uploadKafkaFile||errMsg={}||kafkaFileDTO={}||username={}", e.getMessage(), kafkaFileDTO, username, e);
return ResultStatus.RESOURCE_ALREADY_EXISTED;
} catch (Exception e) {
LOGGER.error("upload kafka file failed, kafkaFileDTO:{}.", kafkaFileDTO, e);
@@ -93,6 +94,7 @@ public class KafkaFileServiceImpl implements KafkaFileService {
return ResultStatus.MYSQL_ERROR;
}
} catch (DuplicateKeyException e) {
LOGGER.error("class=KafkaFileServiceImpl||method=modifyKafkaFile||errMsg={}||kafkaFileDTO={}||userName={}", e.getMessage(), kafkaFileDTO, userName, e);
return ResultStatus.RESOURCE_NAME_DUPLICATED;
} catch (Exception e) {
LOGGER.error("modify kafka file failed, kafkaFileDTO:{}.", kafkaFileDTO, e);

View File

@@ -24,7 +24,7 @@ public class ClusterHostTaskService extends AbstractClusterTaskService {
CreationTaskData dto = new CreationTaskData();
for (String hostname: clusterHostTaskDTO.getHostList()) {
if (!NetUtils.hostnameLegal(hostname)) {
- return Result.buildFrom(ResultStatus.PARAM_ILLEGAL);
+ return Result.buildFrom(ResultStatus.CLUSTER_TASK_HOST_LIST_ILLEGAL);
}
}
dto.setHostList(clusterHostTaskDTO.getHostList());

View File

@@ -19,12 +19,13 @@ p_kafka_server_properties_md5=${8} #server配置MD5
p_kafka_server_properties_url=${9} #server配置文件下载地址
#----------------------------------------配置信息------------------------------------------------------#
g_hostname=`hostname`
- g_base_dir='/home/km'
+ g_base_dir='/home'
g_cluster_task_dir=${g_base_dir}"/kafka_cluster_task/task_${p_task_id}" #部署升级路径
g_rollback_version=${g_cluster_task_dir}"/rollback_version" #回滚版本
g_new_kafka_package_name='' #最终的包名
g_kafka_manager_addr='' #kafka-manager地址
g_local_ip=`ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:"`
g_hostname=${g_local_ip}
#----------------------------------------操作函数------------------------------------------------------#
@@ -71,11 +72,11 @@ function check_and_init_env() {
# 检查并等待集群所有的副本处于同步的状态
function check_and_wait_broker_stabled() {
- under_replication_count=`curl -s -G -d "hostname="#{g_hostname} ${g_kafka_manager_addr}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l`
+ under_replication_count=`curl -s -G -d "hostname="${g_hostname} ${g_kafka_manager_addr}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l`
while [ "$under_replication_count" -ne 1 ]; do
ECHO_LOG "存在${under_replication_count}个副本未同步, sleep 10s"
sleep 10
- under_replication_count=`curl -s ${g_kafka_manager_addr}/api/v1/${p_cluster_id}/overview | python -m json.tool | grep false |wc -l`
+ under_replication_count=`curl -s -G -d "hostname="${g_hostname} ${g_kafka_manager_addr}/api/v1/third-part/${p_cluster_id}/broker-stabled | python -m json.tool | grep true |wc -l`
done
ECHO_LOG "集群副本都已经处于同步的状态, 可以进行集群升级"
}
@@ -137,6 +138,9 @@ function prepare_cluster_task_files() {
exit 1
fi
# listeners配置换成当前机器的IP写到server.properties最后一行
echo "listeners=SASL_PLAINTEXT://${g_local_ip}:9093,PLAINTEXT://${g_local_ip}:9092" >> "${g_cluster_task_dir}/${p_kafka_package_name}/config/server.properties"
# 将MD5信息写到包中
echo "package_md5:${p_kafka_package_md5} server_properties_md5:${p_kafka_package_md5}" > "${g_cluster_task_dir}/${p_kafka_package_name}/package_and_properties.md5"
}