This commit is contained in:
zengqiao
2020-03-19 17:59:34 +08:00
commit 229140f067
407 changed files with 46207 additions and 0 deletions

47
dao/pom.xml Normal file
View File

@@ -0,0 +1,47 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>kafka-manager-dao</artifactId>
<version>1.0.0-SNAPSHOT</version>
<packaging>jar</packaging>
<parent>
<artifactId>kafka-manager</artifactId>
<groupId>com.xiaojukeji.kafka</groupId>
<version>1.0.0-SNAPSHOT</version>
</parent>
<properties>
<!-- maven properties -->
<maven.test.skip>true</maven.test.skip>
<downloadSources>true</downloadSources>
<!-- compiler settings properties -->
<java_source_version>1.8</java_source_version>
<java_target_version>1.8</java_target_version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<file_encoding>UTF-8</file_encoding>
</properties>
<dependencies>
<dependency>
<groupId>com.xiaojukeji.kafka</groupId>
<artifactId>kafka-manager-common</artifactId>
<version>${project.parent.version}</version>
</dependency>
<dependency>
<groupId>org.mybatis.spring.boot</groupId>
<artifactId>mybatis-spring-boot-starter</artifactId>
<version>1.3.2</version>
</dependency>
<dependency>
<groupId>org.mariadb.jdbc</groupId>
<artifactId>mariadb-java-client</artifactId>
<version>2.5.4</version>
</dependency>
</dependencies>
</project>

View File

@@ -0,0 +1,21 @@
package com.xiaojukeji.kafka.manager.dao;
import com.xiaojukeji.kafka.manager.common.entity.po.AccountDO;
import java.util.List;
/**
 * DAO for user accounts (backed by the "AccountDao" MyBatis mapper namespace).
 * @author zengqiao
 * @date 19/5/3
 */
public interface AccountDao {
/** Adds a new account; returns the affected row count. */
int addNewAccount(AccountDO userDO);
/** Deletes the account with the given username; returns the affected row count. */
int deleteByName(String username);
/** Updates an existing account; returns the affected row count. */
int updateAccount(AccountDO userDO);
/** Lists all accounts. */
List<AccountDO> list();
/** Returns the account with the given username, or null if none matches. */
AccountDO getByName(String username);
}

View File

@@ -0,0 +1,17 @@
package com.xiaojukeji.kafka.manager.dao;
import com.xiaojukeji.kafka.manager.common.entity.po.AlarmRuleDO;
import java.util.List;
/**
 * DAO for alarm rules (backed by the "AlarmRuleDao" MyBatis mapper namespace).
 */
public interface AlarmRuleDao {
/** Inserts a new alarm rule; returns the affected row count. */
int insert(AlarmRuleDO alarmRuleDO);
/** Deletes the rule with the given id; returns the affected row count. */
int deleteById(Long id);
/** Updates the rule identified by its id; returns the affected row count. */
int updateById(AlarmRuleDO alarmRuleDO);
/** Returns the rule with the given id, or null if none matches. */
AlarmRuleDO getById(Long id);
/** Lists all alarm rules. */
List<AlarmRuleDO> listAll();
}

View File

@@ -0,0 +1,17 @@
package com.xiaojukeji.kafka.manager.dao;
import com.xiaojukeji.kafka.manager.common.entity.po.BrokerDO;
import java.util.List;
/**
 * DAO for broker records.
 * @author zengqiao
 * @date 19/4/21
 */
public interface BrokerDao {
/** Insert-or-update (SQL REPLACE semantics) a broker record; returns the affected row count. */
int replace(BrokerDO brokerInfoDO);
/** Deletes the broker identified by (clusterId, brokerId); returns the affected row count. */
int deleteById(Long clusterId, Integer brokerId);
/** Returns the cluster's brokers matched by the "BrokerDao.getDead" statement (presumably dead brokers — see mapper). */
List<BrokerDO> getDead(Long clusterId);
}

View File

@@ -0,0 +1,25 @@
package com.xiaojukeji.kafka.manager.dao;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import java.util.Date;
import java.util.List;
/**
 * DAO for broker metrics time-series rows.
 * @author tukun
 * @date 2015/11/6.
 */
public interface BrokerMetricsDao {
/**
 * Batch-inserts metric rows; returns the affected row count.
 */
int batchAdd(List<BrokerMetrics> brokerMetricsList);
/**
 * Fetches one broker's metrics within the given time range.
 */
List<BrokerMetrics> getBrokerMetricsByInterval(Long clusterId, Integer brokerId, Date startTime, Date endTime);
/** Deletes metric rows older than endTime; returns the affected row count. */
int deleteBeforeTime(Date endTime);
}

View File

@@ -0,0 +1,17 @@
package com.xiaojukeji.kafka.manager.dao;
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
import java.util.List;
/**
 * DAO for Kafka cluster records.
 */
public interface ClusterDao {
/** Inserts a cluster; returns the affected row count. */
int insert(ClusterDO clusterDO);
/** Deletes the cluster with the given id; returns the affected row count. */
int deleteById(Long id);
/** Updates the cluster identified by its id; returns the affected row count. */
int updateById(ClusterDO clusterDO);
/** Returns the cluster with the given id, or null if none matches. */
ClusterDO getById(Long id);
/** Lists all clusters. */
List<ClusterDO> listAll();
}

View File

@@ -0,0 +1,14 @@
package com.xiaojukeji.kafka.manager.dao;
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterMetricsDO;
import java.util.Date;
import java.util.List;
/**
 * DAO for cluster-level metrics time-series rows.
 */
public interface ClusterMetricsDao {
/** Batch-inserts metric rows; returns the affected row count. */
int batchAdd(List<ClusterMetricsDO> clusterMetricsList);
/** Fetches one cluster's metrics within the given time range. */
List<ClusterMetricsDO> getClusterMetricsByInterval(long clusterId, Date startTime, Date endTime);
/** Deletes metric rows older than endTime; returns the affected row count. */
int deleteBeforeTime(Date endTime);
}

View File

@@ -0,0 +1,11 @@
package com.xiaojukeji.kafka.manager.dao;
import com.xiaojukeji.kafka.manager.common.entity.po.ControllerDO;
import java.util.List;
/**
 * DAO for Kafka controller records.
 */
public interface ControllerDao {
/** Inserts a controller record; returns the affected row count. */
int insert(ControllerDO controllerDO);
/** Returns all controller records for the given cluster. */
List<ControllerDO> getByClusterId(Long clusterId);
}

View File

@@ -0,0 +1,38 @@
package com.xiaojukeji.kafka.manager.dao;
import com.xiaojukeji.kafka.manager.common.entity.po.MigrationTaskDO;
import java.util.List;
/**
 * Topic migration task DAO.
 * @author zengqiao_cn@163.com
 * @date 19/4/16
 */
public interface MigrationTaskDao {
/**
 * Adds a migration task; returns the affected row count.
 */
int addMigrationTask(MigrationTaskDO migrationTaskDO);
/**
 * Returns the migration task with the given id, or null if none matches.
 */
MigrationTaskDO getById(Long id);
/**
 * Lists all migration tasks.
 */
List<MigrationTaskDO> listAll();
/**
 * Returns the migration tasks in the given status.
 */
List<MigrationTaskDO> getByStatus(Integer status);
/**
 * Updates status and throttle of the task with the given id; returns the affected row count.
 */
int updateById(Long id, Integer status, Long throttle);
}

View File

@@ -0,0 +1,11 @@
package com.xiaojukeji.kafka.manager.dao;
import com.xiaojukeji.kafka.manager.common.entity.po.OperationHistoryDO;
/**
 * DAO for operation (audit) history records.
 * @author arthur
 * @date 2017/7/20.
 */
public interface OperationHistoryDao {
/** Appends one operation-history record; returns the affected row count. */
int insert(OperationHistoryDO operationHistoryDO);
}

View File

@@ -0,0 +1,17 @@
package com.xiaojukeji.kafka.manager.dao;
import com.xiaojukeji.kafka.manager.common.entity.po.OrderPartitionDO;
import java.util.List;
/**
 * DAO for partition-order (partition application) records.
 */
public interface OrderPartitionDao {
/** Inserts an order record; returns the affected row count. */
int insert(OrderPartitionDO orderPartitionDO);
/** Deletes the order with the given id; returns the affected row count. */
int deleteById(Long id);
/** Updates the order identified by its id; returns the affected row count. */
int updateById(OrderPartitionDO orderPartitionDO);
/** Returns the order with the given id, or null if none matches. */
OrderPartitionDO getById(Long id);
/** Lists all partition orders. */
List<OrderPartitionDO> list();
}

View File

@@ -0,0 +1,19 @@
package com.xiaojukeji.kafka.manager.dao;
import com.xiaojukeji.kafka.manager.common.entity.po.OrderTopicDO;
import java.util.List;
/**
 * DAO for topic-order (topic application) records.
 */
public interface OrderTopicDao {
/** Inserts an order record; returns the affected row count. */
int insert(OrderTopicDO orderTopicDO);
/** Deletes the order with the given id; returns the affected row count. */
int deleteById(Long id);
/** Updates the order identified by its id; returns the affected row count. */
int updateById(OrderTopicDO orderTopicDO);
/** Returns the order with the given id, or null if none matches. */
OrderTopicDO getById(Long id);
/** Lists all topic orders. */
List<OrderTopicDO> list();
/** Returns the topic orders submitted by the given user. */
List<OrderTopicDO> getByUsername(String username);
}

View File

@@ -0,0 +1,19 @@
package com.xiaojukeji.kafka.manager.dao;
import com.xiaojukeji.kafka.manager.common.entity.po.RegionDO;
import java.util.List;
/**
 * DAO for region (broker grouping) records.
 */
public interface RegionDao {
/** Inserts a region; returns the affected row count. */
int insert(RegionDO regionDO);
/** Deletes the region with the given id; returns the affected row count. */
int deleteById(Long id);
/** Updates the region identified by its id; returns the affected row count. */
int updateById(RegionDO regionDO);
/** Returns the region with the given id, or null if none matches. */
RegionDO getById(Long id);
/** Returns all regions of the given cluster. */
List<RegionDO> getByClusterId(Long clusterId);
/** Lists all regions across clusters. */
List<RegionDO> listAll();
}

View File

@@ -0,0 +1,19 @@
package com.xiaojukeji.kafka.manager.dao;
import com.xiaojukeji.kafka.manager.common.entity.po.TopicDO;
import java.util.List;
/**
 * DAO for topic records.
 */
public interface TopicDao {
/** Insert-or-update (SQL REPLACE semantics) a topic record; returns the affected row count. */
int replace(TopicDO topicDO);
/** Deletes the topic row with the given id; returns the affected row count. */
int deleteById(Long id);
/** Deletes the topic identified by (clusterId, topicName); returns the affected row count. */
int deleteByName(Long clusterId, String topicName);
/** Returns the topic identified by (clusterId, topicName), or null if none matches. */
TopicDO getByTopicName(Long clusterId, String topicName);
/** Returns all topics of the given cluster. */
List<TopicDO> getByClusterId(Long clusterId);
/** Lists all topics across clusters. */
List<TopicDO> list();
}

View File

@@ -0,0 +1,15 @@
package com.xiaojukeji.kafka.manager.dao;
import com.xiaojukeji.kafka.manager.common.entity.po.TopicFavoriteDO;
import java.util.List;
/**
 * DAO for per-user topic favorites.
 */
public interface TopicFavoriteDao {
/** Batch-inserts favorite rows; returns the affected row count. */
int batchAdd(List<TopicFavoriteDO> topicFavoriteDOList);
/** Deletes all given ids atomically; Boolean.FALSE means the batch was rolled back. */
Boolean batchDelete(List<Long> idList);
/** Returns all favorites of the given user. */
List<TopicFavoriteDO> getByUserName(String username);
/** Returns the user's favorites restricted to one cluster. */
List<TopicFavoriteDO> getByUserNameAndClusterId(String username, Long clusterId);
}

View File

@@ -0,0 +1,24 @@
package com.xiaojukeji.kafka.manager.dao;
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
import java.util.Date;
import java.util.List;
/**
 * DAO for topic metrics time-series rows.
 * @author tukun
 * @date 2015/11/11.
 */
public interface TopicMetricsDao {
/**
 * Batch-inserts metric rows; returns the affected row count.
 */
int batchAdd(List<TopicMetrics> topicMetricsDOList);
/**
 * Fetches one topic's metrics within the given time range.
 */
List<TopicMetrics> getTopicMetricsByInterval(Long clusterId, String topicName, Date startTime, Date endTime);
/** Deletes metric rows older than endTime; returns the affected row count. */
int deleteBeforeTime(Date endTime);
}

View File

@@ -0,0 +1,51 @@
package com.xiaojukeji.kafka.manager.dao.impl;
import com.xiaojukeji.kafka.manager.common.entity.bizenum.DBStatusEnum;
import com.xiaojukeji.kafka.manager.common.entity.po.AccountDO;
import com.xiaojukeji.kafka.manager.dao.AccountDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.util.List;
/**
 * MyBatis-backed implementation of AccountDao; delegates to statements in the
 * "AccountDao" mapper namespace.
 * @author zengqiao
 * @date 19/5/3
 */
@Repository("accountDao")
public class AccountDaoImpl implements AccountDao {
@Autowired
private SqlSessionTemplate sqlSession;
// Setter retained alongside field injection for manual wiring/tests.
public void setSqlSession(SqlSessionTemplate sqlSession) {
this.sqlSession = sqlSession;
}
@Override
public int addNewAccount(AccountDO accountDO) {
// New accounts are always forced to NORMAL status regardless of caller input.
accountDO.setStatus(DBStatusEnum.NORMAL.getStatus());
return sqlSession.insert("AccountDao.insert", accountDO);
}
@Override
public int deleteByName(String username) {
return sqlSession.delete("AccountDao.deleteByName", username);
}
@Override
public int updateAccount(AccountDO accountDO) {
// NOTE(review): reuses the "AccountDao.insert" statement, which is a SQL
// REPLACE in the mapper, so this behaves as an upsert rather than a plain
// UPDATE (the mapper defines no dedicated update statement). Also note the
// caller-supplied status is NOT overwritten here, unlike addNewAccount.
// Confirm both behaviors are intended.
return sqlSession.insert("AccountDao.insert", accountDO);
}
@Override
public List<AccountDO> list() {
return sqlSession.selectList("AccountDao.list");
}
@Override
public AccountDO getByName(String username) {
return sqlSession.selectOne("AccountDao.getByName", username);
}
}

View File

@@ -0,0 +1,55 @@
package com.xiaojukeji.kafka.manager.dao.impl;
import com.xiaojukeji.kafka.manager.common.entity.po.AlarmRuleDO;
import com.xiaojukeji.kafka.manager.common.entity.po.query.AlarmRuleQueryOption;
import com.xiaojukeji.kafka.manager.dao.AlarmRuleDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.util.List;

/**
 * MyBatis-backed {@link AlarmRuleDao}; every call delegates to a statement in
 * the "AlarmRuleDao" mapper namespace.
 *
 * @author zengqiao
 * @date 19/6/23
 */
@Repository("alarmRuleDao")
public class AlarmRuleDaoImpl implements AlarmRuleDao {
    @Autowired
    private SqlSessionTemplate sqlSession;

    /** Setter retained so the template can also be wired manually (e.g. in tests). */
    public void setSqlSession(SqlSessionTemplate sqlSession) {
        this.sqlSession = sqlSession;
    }

    @Override
    public int insert(AlarmRuleDO alarmRuleDO) {
        return sqlSession.insert("AlarmRuleDao.insert", alarmRuleDO);
    }

    @Override
    public int deleteById(Long id) {
        return sqlSession.delete("AlarmRuleDao.deleteById", id);
    }

    @Override
    public int updateById(AlarmRuleDO alarmRuleDO) {
        return sqlSession.update("AlarmRuleDao.updateById", alarmRuleDO);
    }

    @Override
    public AlarmRuleDO getById(Long id) {
        // The mapper exposes no dedicated getById statement, so query via the
        // option object with only the id filter populated.
        AlarmRuleQueryOption option = new AlarmRuleQueryOption();
        option.setId(id);
        List<AlarmRuleDO> matched = sqlSession.selectList("AlarmRuleDao.getByOption", option);
        return (matched == null || matched.isEmpty()) ? null : matched.get(0);
    }

    @Override
    public List<AlarmRuleDO> listAll() {
        // An empty option object applies no filters, i.e. selects every rule.
        return sqlSession.selectList("AlarmRuleDao.getByOption", new AlarmRuleQueryOption());
    }
}

View File

@@ -0,0 +1,43 @@
package com.xiaojukeji.kafka.manager.dao.impl;
import com.xiaojukeji.kafka.manager.common.entity.po.BrokerDO;
import com.xiaojukeji.kafka.manager.dao.BrokerDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * MyBatis-backed {@link BrokerDao}; delegates to the "BrokerDao" mapper namespace.
 *
 * @author zengqiao_cn@163.com
 * @date 19/4/21
 */
@Repository("brokerDao")
public class BrokerDaoImpl implements BrokerDao {
    @Autowired
    private SqlSessionTemplate sqlSession;

    /** Setter retained so the template can also be wired manually (e.g. in tests). */
    public void setSqlSession(SqlSessionTemplate sqlSession) {
        this.sqlSession = sqlSession;
    }

    @Override
    public int replace(BrokerDO brokerInfoDO) {
        // "replace" statement — insert-or-update semantics at the SQL level.
        return sqlSession.insert("BrokerDao.replace", brokerInfoDO);
    }

    @Override
    public int deleteById(Long clusterId, Integer brokerId) {
        // Brokers are keyed by the (clusterId, brokerId) pair.
        Map<String, Object> queryParams = new HashMap<>(2);
        queryParams.put("clusterId", clusterId);
        queryParams.put("brokerId", brokerId);
        return sqlSession.delete("BrokerDao.deleteById", queryParams);
    }

    @Override
    public List<BrokerDO> getDead(Long clusterId) {
        return sqlSession.selectList("BrokerDao.getDead", clusterId);
    }
}

View File

@@ -0,0 +1,46 @@
package com.xiaojukeji.kafka.manager.dao.impl;
import com.xiaojukeji.kafka.manager.common.entity.metrics.BrokerMetrics;
import com.xiaojukeji.kafka.manager.dao.BrokerMetricsDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * MyBatis-backed {@link BrokerMetricsDao}; delegates to the "BrokerMetricsDao"
 * mapper namespace.
 * NOTE(review): class name lacks the "Dao" suffix used by the sibling
 * implementations (expected BrokerMetricsDaoImpl); renaming would touch
 * callers, so it is only flagged here.
 *
 * @author tukun
 * @date 2015/11/6.
 */
@Repository("brokerMetricsDao")
public class BrokerMetricsImpl implements BrokerMetricsDao {
    @Autowired
    private SqlSessionTemplate sqlSession;

    @Override
    public int batchAdd(List<BrokerMetrics> brokerMetricsList) {
        return sqlSession.insert("BrokerMetricsDao.batchAdd", brokerMetricsList);
    }

    @Override
    public List<BrokerMetrics> getBrokerMetricsByInterval(Long clusterId,
                                                          Integer brokerId,
                                                          Date startTime,
                                                          Date endTime) {
        Map<String, Object> queryParams = new HashMap<>();
        queryParams.put("clusterId", clusterId);
        queryParams.put("brokerId", brokerId);
        queryParams.put("startTime", startTime);
        queryParams.put("endTime", endTime);
        return sqlSession.selectList("BrokerMetricsDao.getBrokerMetricsByInterval", queryParams);
    }

    @Override
    public int deleteBeforeTime(Date endTime) {
        return sqlSession.delete("BrokerMetricsDao.deleteBeforeTime", endTime);
    }
}

View File

@@ -0,0 +1,51 @@
package com.xiaojukeji.kafka.manager.dao.impl;
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO;
import com.xiaojukeji.kafka.manager.common.entity.po.query.ClusterQueryOption;
import com.xiaojukeji.kafka.manager.dao.ClusterDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.util.List;

/**
 * MyBatis-backed {@link ClusterDao}; delegates to the "ClusterDao" mapper namespace.
 *
 * @author zengqiao
 * @date 19/7/26
 */
@Repository("clusterDao")
public class ClusterDaoImpl implements ClusterDao {
    @Autowired
    private SqlSessionTemplate sqlSession;

    /** Setter retained so the template can also be wired manually (e.g. in tests). */
    public void setSqlSession(SqlSessionTemplate sqlSession) {
        this.sqlSession = sqlSession;
    }

    @Override
    public int insert(ClusterDO clusterDO) {
        return sqlSession.insert("ClusterDao.insert", clusterDO);
    }

    @Override
    public int deleteById(Long id) {
        return sqlSession.delete("ClusterDao.deleteById", id);
    }

    @Override
    public int updateById(ClusterDO clusterDO) {
        return sqlSession.update("ClusterDao.updateById", clusterDO);
    }

    @Override
    public ClusterDO getById(Long id) {
        // No dedicated getById statement; query via an option object carrying
        // only the id filter.
        ClusterQueryOption option = new ClusterQueryOption();
        option.setId(id);
        return sqlSession.selectOne("ClusterDao.getByOption", option);
    }

    @Override
    public List<ClusterDO> listAll() {
        // An empty option object applies no filters, i.e. selects every cluster.
        return sqlSession.selectList("ClusterDao.getByOption", new ClusterQueryOption());
    }
}

View File

@@ -0,0 +1,41 @@
package com.xiaojukeji.kafka.manager.dao.impl;
import com.xiaojukeji.kafka.manager.common.entity.po.ClusterMetricsDO;
import com.xiaojukeji.kafka.manager.dao.ClusterMetricsDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * MyBatis-backed {@link ClusterMetricsDao}; delegates to the
 * "ClusterMetricsDao" mapper namespace.
 */
@Repository("clusterMetricDao")
public class ClusterMetricsDaoImpl implements ClusterMetricsDao {
    @Autowired
    private SqlSessionTemplate sqlSession;

    /** Setter retained so the template can also be wired manually (e.g. in tests). */
    public void setSqlSession(SqlSessionTemplate sqlSession) {
        this.sqlSession = sqlSession;
    }

    @Override
    public int batchAdd(List<ClusterMetricsDO> clusterMetricsList) {
        return sqlSession.insert("ClusterMetricsDao.batchAdd", clusterMetricsList);
    }

    @Override
    public List<ClusterMetricsDO> getClusterMetricsByInterval(long clusterId, Date startTime, Date endTime) {
        Map<String, Object> queryParams = new HashMap<>();
        queryParams.put("clusterId", clusterId);
        queryParams.put("startTime", startTime);
        queryParams.put("endTime", endTime);
        return sqlSession.selectList("ClusterMetricsDao.getClusterMetricsByInterval", queryParams);
    }

    @Override
    public int deleteBeforeTime(Date endTime) {
        return sqlSession.delete("ClusterMetricsDao.deleteBeforeTime", endTime);
    }
}

View File

@@ -0,0 +1,33 @@
package com.xiaojukeji.kafka.manager.dao.impl;
import com.xiaojukeji.kafka.manager.common.entity.po.ControllerDO;
import com.xiaojukeji.kafka.manager.dao.ControllerDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.util.List;
/**
 * MyBatis-backed implementation of ControllerDao; delegates to the
 * "ControllerDao" mapper namespace.
 * @author zengqiao
 * @date 19/7/15
 */
@Repository("controllerDao")
public class ControllerDaoImpl implements ControllerDao {
@Autowired
private SqlSessionTemplate sqlSession;
// Setter retained alongside field injection for manual wiring/tests.
public void setSqlSession(SqlSessionTemplate sqlSession) {
this.sqlSession = sqlSession;
}
@Override
public int insert(ControllerDO controllerDO) {
return sqlSession.insert("ControllerDao.insert", controllerDO);
}
@Override
public List<ControllerDO> getByClusterId(Long clusterId) {
return sqlSession.selectList("ControllerDao.getByClusterId", clusterId);
}
}

View File

@@ -0,0 +1,55 @@
package com.xiaojukeji.kafka.manager.dao.impl;
import com.xiaojukeji.kafka.manager.common.entity.po.MigrationTaskDO;
import com.xiaojukeji.kafka.manager.dao.MigrationTaskDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * MyBatis-backed {@link MigrationTaskDao} for topic-migration tasks; delegates
 * to the "MigrationTaskDao" mapper namespace.
 *
 * @author zengqiao_cn@163.com
 * @date 19/4/16
 */
@Repository("migrationTaskDao")
public class MigrationTaskDaoImpl implements MigrationTaskDao {
    @Autowired
    private SqlSessionTemplate sqlSession;

    /** Setter retained so the template can also be wired manually (e.g. in tests). */
    public void setSqlSession(SqlSessionTemplate sqlSession) {
        this.sqlSession = sqlSession;
    }

    @Override
    public int addMigrationTask(MigrationTaskDO migrationTaskDO) {
        return sqlSession.insert("MigrationTaskDao.addMigrationTask", migrationTaskDO);
    }

    @Override
    public MigrationTaskDO getById(Long id) {
        return sqlSession.selectOne("MigrationTaskDao.getById", id);
    }

    @Override
    public List<MigrationTaskDO> listAll() {
        return sqlSession.selectList("MigrationTaskDao.listAll");
    }

    @Override
    public List<MigrationTaskDO> getByStatus(Integer status) {
        return sqlSession.selectList("MigrationTaskDao.getByStatus", status);
    }

    @Override
    public int updateById(Long id, Integer status, Long throttle) {
        // Only status and throttle are updatable; both plus the key are passed
        // to the statement as a parameter map.
        Map<String, Object> updateParams = new HashMap<>();
        updateParams.put("id", id);
        updateParams.put("status", status);
        updateParams.put("throttle", throttle);
        return sqlSession.update("MigrationTaskDao.updateById", updateParams);
    }
}

View File

@@ -0,0 +1,26 @@
package com.xiaojukeji.kafka.manager.dao.impl;
import com.xiaojukeji.kafka.manager.common.entity.po.OperationHistoryDO;
import com.xiaojukeji.kafka.manager.dao.OperationHistoryDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
/**
 * MyBatis-backed implementation of OperationHistoryDao; delegates to the
 * "OperationHistoryDao" mapper namespace.
 * @author arthur
 * @date 2017/7/20.
 */
@Repository("operationHistoryDao")
public class OperationHistoryDaoImpl implements OperationHistoryDao {
@Autowired
private SqlSessionTemplate sqlSession;
// Setter retained alongside field injection for manual wiring/tests.
public void setSqlSession(SqlSessionTemplate sqlSession) {
this.sqlSession = sqlSession;
}
@Override
public int insert(OperationHistoryDO operationHistoryDO) {
return sqlSession.insert("OperationHistoryDao.insert", operationHistoryDO);
}
}

View File

@@ -0,0 +1,48 @@
package com.xiaojukeji.kafka.manager.dao.impl;
import com.xiaojukeji.kafka.manager.common.entity.po.OrderPartitionDO;
import com.xiaojukeji.kafka.manager.dao.OrderPartitionDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.util.List;
/**
 * MyBatis-backed implementation of OrderPartitionDao; delegates to the
 * "OrderPartitionDao" mapper namespace.
 * @author zengqiao
 * @date 19/6/21
 */
@Repository("orderPartitionDao")
public class OrderPartitionDaoImpl implements OrderPartitionDao {
@Autowired
private SqlSessionTemplate sqlSession;
// Setter retained alongside field injection for manual wiring/tests.
public void setSqlSession(SqlSessionTemplate sqlSession) {
this.sqlSession = sqlSession;
}
@Override
public int insert(OrderPartitionDO orderPartitionDO) {
return sqlSession.insert("OrderPartitionDao.insert", orderPartitionDO);
}
@Override
public int deleteById(Long id) {
return sqlSession.delete("OrderPartitionDao.deleteById", id);
}
@Override
public int updateById(OrderPartitionDO orderPartitionDO) {
return sqlSession.update("OrderPartitionDao.updateById", orderPartitionDO);
}
@Override
public OrderPartitionDO getById(Long id) {
return sqlSession.selectOne("OrderPartitionDao.getById", id);
}
@Override
public List<OrderPartitionDO> list() {
return sqlSession.selectList("OrderPartitionDao.list");
}
}

View File

@@ -0,0 +1,53 @@
package com.xiaojukeji.kafka.manager.dao.impl;
import com.xiaojukeji.kafka.manager.common.entity.po.OrderTopicDO;
import com.xiaojukeji.kafka.manager.dao.OrderTopicDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.util.List;
/**
 * MyBatis-backed implementation of OrderTopicDao; delegates to the
 * "OrderTopicDao" mapper namespace.
 * @author zengqiao
 * @date 19/6/21
 */
@Repository("orderTopicDao")
public class OrderTopicDaoImpl implements OrderTopicDao {
@Autowired
private SqlSessionTemplate sqlSession;
// Setter retained alongside field injection for manual wiring/tests.
public void setSqlSession(SqlSessionTemplate sqlSession) {
this.sqlSession = sqlSession;
}
@Override
public int insert(OrderTopicDO orderTopicDO) {
return sqlSession.insert("OrderTopicDao.insert", orderTopicDO);
}
@Override
public int deleteById(Long id) {
return sqlSession.delete("OrderTopicDao.deleteById", id);
}
@Override
public int updateById(OrderTopicDO orderTopicDO) {
return sqlSession.update("OrderTopicDao.updateById", orderTopicDO);
}
@Override
public OrderTopicDO getById(Long id) {
return sqlSession.selectOne("OrderTopicDao.getById", id);
}
@Override
public List<OrderTopicDO> list() {
return sqlSession.selectList("OrderTopicDao.list");
}
@Override
public List<OrderTopicDO> getByUsername(String username) {
return sqlSession.selectList("OrderTopicDao.getByUsername", username);
}
}

View File

@@ -0,0 +1,53 @@
package com.xiaojukeji.kafka.manager.dao.impl;
import com.xiaojukeji.kafka.manager.common.entity.po.RegionDO;
import com.xiaojukeji.kafka.manager.dao.RegionDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.util.List;
/**
 * MyBatis-backed implementation of RegionDao; delegates to the "RegionDao"
 * mapper namespace.
 * @author zengqiao
 * @date 19/6/23
 */
@Repository("regionDao")
public class RegionDaoImpl implements RegionDao {
@Autowired
private SqlSessionTemplate sqlSession;
// Setter retained alongside field injection for manual wiring/tests.
public void setSqlSession(SqlSessionTemplate sqlSession) {
this.sqlSession = sqlSession;
}
@Override
public int insert(RegionDO regionDO) {
return sqlSession.insert("RegionDao.insert", regionDO);
}
@Override
public int deleteById(Long id) {
return sqlSession.delete("RegionDao.deleteById", id);
}
@Override
public int updateById(RegionDO regionDO) {
return sqlSession.update("RegionDao.updateById", regionDO);
}
@Override
public RegionDO getById(Long id) {
return sqlSession.selectOne("RegionDao.getById", id);
}
@Override
public List<RegionDO> getByClusterId(Long clusterId) {
return sqlSession.selectList("RegionDao.getByClusterId", clusterId);
}
@Override
public List<RegionDO> listAll() {
return sqlSession.selectList("RegionDao.listAll");
}
}

View File

@@ -0,0 +1,61 @@
package com.xiaojukeji.kafka.manager.dao.impl;
import com.xiaojukeji.kafka.manager.common.entity.po.TopicDO;
import com.xiaojukeji.kafka.manager.dao.TopicDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * MyBatis-backed {@link TopicDao}; delegates to the "TopicDao" mapper namespace.
 * NOTE(review): bean name "TopicDao" is capitalized unlike the sibling beans
 * ("topicDao" pattern); renaming could break by-name lookups, so only flagged.
 *
 * @author zengqiao
 * @date 19/7/12
 */
@Repository("TopicDao")
public class TopicDaoImpl implements TopicDao {
    @Autowired
    private SqlSessionTemplate sqlSession;

    /** Setter retained so the template can also be wired manually (e.g. in tests). */
    public void setSqlSession(SqlSessionTemplate sqlSession) {
        this.sqlSession = sqlSession;
    }

    /** Builds the (clusterId, topicName) parameter map used by name-keyed statements. */
    private static Map<String, Object> topicKey(Long clusterId, String topicName) {
        Map<String, Object> params = new HashMap<>();
        params.put("clusterId", clusterId);
        params.put("topicName", topicName);
        return params;
    }

    @Override
    public int replace(TopicDO topicDO) {
        // "replace" statement — insert-or-update semantics at the SQL level.
        return sqlSession.insert("TopicDao.replace", topicDO);
    }

    @Override
    public int deleteById(Long id) {
        return sqlSession.delete("TopicDao.deleteById", id);
    }

    @Override
    public int deleteByName(Long clusterId, String topicName) {
        return sqlSession.delete("TopicDao.deleteByName", topicKey(clusterId, topicName));
    }

    @Override
    public TopicDO getByTopicName(Long clusterId, String topicName) {
        return sqlSession.selectOne("TopicDao.getByTopicName", topicKey(clusterId, topicName));
    }

    @Override
    public List<TopicDO> getByClusterId(Long clusterId) {
        return sqlSession.selectList("TopicDao.getByClusterId", clusterId);
    }

    @Override
    public List<TopicDO> list() {
        return sqlSession.selectList("TopicDao.list");
    }
}

View File

@@ -0,0 +1,78 @@
package com.xiaojukeji.kafka.manager.dao.impl;
import com.xiaojukeji.kafka.manager.common.entity.po.TopicFavoriteDO;
import com.xiaojukeji.kafka.manager.dao.TopicFavoriteDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import org.springframework.transaction.support.TransactionTemplate;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * MyBatis-backed {@link TopicFavoriteDao}; delegates to the "TopicFavoriteDao"
 * mapper namespace. Batch deletion is wrapped in a programmatic transaction.
 *
 * @author zengqiao
 * @date 19/6/23
 */
@Repository("topicFavoriteDao")
public class TopicFavoriteDaoImpl implements TopicFavoriteDao {
    @Autowired
    private SqlSessionTemplate sqlSession;

    @Autowired
    private TransactionTemplate transactionTemplate;

    /** Setter retained so the template can also be wired manually (e.g. in tests). */
    public void setSqlSession(SqlSessionTemplate sqlSession) {
        this.sqlSession = sqlSession;
    }

    @Override
    public int batchAdd(List<TopicFavoriteDO> topicFavoriteDOList) {
        return sqlSession.insert("TopicFavoriteDao.batchAdd", topicFavoriteDOList);
    }

    @Override
    public Boolean batchDelete(List<Long> idList) {
        // All ids are deleted inside one transaction: a null list is treated as
        // a trivially-successful batch; any miss or error rolls everything back.
        return transactionTemplate.execute(status -> {
            if (idList == null) {
                return Boolean.TRUE;
            }
            for (Long favoriteId : idList) {
                try {
                    if (deleteById(favoriteId) <= 0) {
                        status.setRollbackOnly();
                        return Boolean.FALSE;
                    }
                } catch (Throwable t) {
                    // Deliberate best-effort contract: signal failure via the
                    // return value instead of propagating.
                    status.setRollbackOnly();
                    return Boolean.FALSE;
                }
            }
            return Boolean.TRUE;
        });
    }

    /** Deletes a single favorite row by primary key; returns the affected row count. */
    private int deleteById(Long id) {
        return sqlSession.delete("TopicFavoriteDao.deleteById", id);
    }

    @Override
    public List<TopicFavoriteDO> getByUserName(String username) {
        return sqlSession.selectList("TopicFavoriteDao.getByUserName", username);
    }

    @Override
    public List<TopicFavoriteDO> getByUserNameAndClusterId(String username, Long clusterId) {
        Map<String, Object> queryParams = new HashMap<>();
        queryParams.put("username", username);
        queryParams.put("clusterId", clusterId);
        return sqlSession.selectList("TopicFavoriteDao.getByUserNameAndClusterId", queryParams);
    }
}

View File

@@ -0,0 +1,47 @@
package com.xiaojukeji.kafka.manager.dao.impl;
import com.xiaojukeji.kafka.manager.common.entity.metrics.TopicMetrics;
import com.xiaojukeji.kafka.manager.dao.TopicMetricsDao;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Repository;
import java.util.*;

/**
 * MyBatis-backed {@link TopicMetricsDao}; delegates to the "TopicMetricsDao"
 * mapper namespace.
 *
 * @author tukun
 * @date 2015/11/11.
 */
@Repository("topicMetricDao")
public class TopicMetricsDaoImpl implements TopicMetricsDao {
    @Autowired
    private SqlSessionTemplate sqlSession;

    /** Setter retained so the template can also be wired manually (e.g. in tests). */
    public void setSqlSession(SqlSessionTemplate sqlSession) {
        this.sqlSession = sqlSession;
    }

    @Override
    public int batchAdd(List<TopicMetrics> topicMetricsList) {
        return sqlSession.insert("TopicMetricsDao.batchAdd", topicMetricsList);
    }

    @Override
    public List<TopicMetrics> getTopicMetricsByInterval(Long clusterId,
                                                        String topicName,
                                                        Date startTime,
                                                        Date endTime) {
        Map<String, Object> queryParams = new HashMap<>();
        queryParams.put("clusterId", clusterId);
        queryParams.put("topicName", topicName);
        queryParams.put("startTime", startTime);
        queryParams.put("endTime", endTime);
        return sqlSession.selectList("TopicMetricsDao.getTopicMetricsByInterval", queryParams);
    }

    @Override
    public int deleteBeforeTime(Date endTime) {
        return sqlSession.delete("TopicMetricsDao.deleteBeforeTime", endTime);
    }
}

View File

@@ -0,0 +1,54 @@
package com.xiaojukeji.kafka.manager.vfs;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.URL;
import java.util.ArrayList;
import java.util.List;
import org.apache.ibatis.io.VFS;
import org.springframework.core.io.Resource;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.core.io.support.ResourcePatternResolver;

/**
 * MyBatis {@link VFS} implementation that resolves classpath resources through
 * Spring, so mapped domain classes are found when running from an executable
 * Spring Boot jar (where the default VFS cannot list nested-jar entries).
 *
 * @author huangyiminghappy@163.com
 * @date 2019-04-28
 */
public class SpringBootVFS extends VFS {
    private final ResourcePatternResolver resourceResolver;

    public SpringBootVFS() {
        // Use this class's own loader so nested-jar URLs resolve correctly.
        this.resourceResolver = new PathMatchingResourcePatternResolver(getClass().getClassLoader());
    }

    @Override
    public boolean isValid() {
        // Always usable: resolution is delegated entirely to Spring.
        return true;
    }

    @Override
    protected List<String> list(URL url, String path) throws IOException {
        String urlString = url.toString();
        String baseUrlString = urlString.endsWith("/") ? urlString : urlString + "/";
        // Find every .class entry under the given URL, recursively.
        Resource[] resources = resourceResolver.getResources(baseUrlString + "**/*.class");
        List<String> entryNames = new ArrayList<>(resources.length);
        for (Resource resource : resources) {
            entryNames.add(preserveSubpackageName(baseUrlString, resource, path));
        }
        return entryNames;
    }

    /**
     * Rebuilds the entry name as rootPath + the resource's path relative to
     * baseUrlString, keeping sub-package directories intact.
     */
    private static String preserveSubpackageName(final String baseUrlString,
                                                 final Resource resource,
                                                 final String rootPath) {
        try {
            String relative = resource.getURL().toString().substring(baseUrlString.length());
            return rootPath + (rootPath.endsWith("/") ? "" : "/") + relative;
        } catch (IOException e) {
            // Callers of VFS.list expect unchecked failures from the mapping step.
            throw new UncheckedIOException(e);
        }
    }
}

View File

@@ -0,0 +1,40 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- SQL mapping for the account table; statement ids are referenced from
     AccountDaoImpl via the "AccountDao" namespace. -->
<mapper namespace="AccountDao">
<resultMap id="AccountMap" type="AccountDO">
<id property="id" column="id" />
<result property="username" column="username" />
<result property="password" column="password" />
<result property="role" column="role" />
<result property="status" column="status" />
<result property="gmtCreate" column="gmt_create" />
<result property="gmtModify" column="gmt_modify" />
</resultMap>
<!-- REPLACE gives this statement upsert semantics: an existing row with the
     same unique key is replaced. AccountDaoImpl uses this single statement
     for both addNewAccount and updateAccount. -->
<insert id="insert" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.AccountDO">
<![CDATA[
REPLACE account
(username, password, role, status)
VALUES
(#{username}, #{password}, #{role}, #{status})
]]>
</insert>
<delete id="deleteByName" parameterType="java.lang.String">
DELETE FROM account WHERE username = #{username}
</delete>
<!-- status = 0 filters to active accounts (presumably DBStatusEnum.NORMAL;
     verify against the enum definition). -->
<select id="getByName" parameterType="java.lang.String" resultMap="AccountMap">
<![CDATA[
SELECT * FROM account WHERE username = #{username} AND status=0
]]>
</select>
<!-- Lists active accounts only (same status filter as getByName). -->
<select id="list" resultMap="AccountMap">
<![CDATA[
SELECT * FROM account WHERE status = 0
]]>
</select>
</mapper>

View File

@@ -0,0 +1,57 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- SQL mapping for the alarm_rule table. Reads keep rows with status >= 0;
     updateById can set status, which is how rules are soft-deleted/disabled. -->
<mapper namespace="AlarmRuleDao">
<resultMap id="AlarmRuleMap" type="AlarmRuleDO">
<id column="id" jdbcType="BIGINT" property="id" />
<!-- NOTE(review): status is declared with <id>, making it part of the row
     identity for result grouping; it is probably meant to be <result> -
     confirm before changing. -->
<id column="status" jdbcType="INTEGER" property="status" />
<result column="gmt_create" jdbcType="TIMESTAMP" property="gmtCreate" />
<result column="gmt_modify" jdbcType="TIMESTAMP" property="gmtModify" />
<result column="alarm_name" jdbcType="VARCHAR" property="alarmName" />
<result column="strategy_expressions" jdbcType="VARCHAR" property="strategyExpressions" />
<result column="strategy_filters" jdbcType="VARCHAR" property="strategyFilters" />
<result column="strategy_actions" jdbcType="VARCHAR" property="strategyActions" />
<result column="principals" jdbcType="VARCHAR" property="principals" />
</resultMap>
<insert id="insert" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.AlarmRuleDO">
<![CDATA[
INSERT INTO alarm_rule
(alarm_name, strategy_expressions, strategy_filters, strategy_actions, principals)
VALUES (
#{alarmName}, #{strategyExpressions}, #{strategyFilters}, #{strategyActions}, #{principals})
]]>
</insert>
<!-- Hard delete; contrast with updateById, which can soft-delete via status. -->
<delete id="deleteById" parameterType="java.lang.Long">
<![CDATA[
DELETE FROM alarm_rule WHERE id = #{id}
]]>
</delete>
<update id="updateById" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.AlarmRuleDO">
<![CDATA[
UPDATE alarm_rule SET
alarm_name=#{alarmName},
strategy_expressions=#{strategyExpressions},
strategy_filters=#{strategyFilters},
strategy_actions=#{strategyActions},
principals=#{principals},
status=#{status}
WHERE id = #{id}
]]>
</update>
<!-- Optional filters: each non-null query field appends an AND clause; the
     leading "status >= 0" keeps the AND prefix syntactically valid. -->
<select id="getByOption" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.query.AlarmRuleQueryOption" resultMap="AlarmRuleMap">
SELECT * FROM alarm_rule where status >= 0
<trim>
<if test="id != null">
AND id=#{id}
</if>
<if test="alarmName != null">
AND alarm_name=#{alarmName}
</if>
</trim>
</select>
</mapper>

View File

@@ -0,0 +1,30 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- SQL mapping for the broker table (one row per cluster/broker pair). -->
<mapper namespace="BrokerDao">
<resultMap id="BrokerDOMap" type="BrokerDO">
<id column="id" jdbcType="BIGINT" property="id" />
<result column="cluster_id" jdbcType="BIGINT" property="clusterId" />
<result column="broker_id" jdbcType="BIGINT" property="brokerId" />
<result column="host" jdbcType="VARCHAR" property="host" />
<result column="port" jdbcType="INTEGER" property="port" />
<result column="timestamp" jdbcType="BIGINT" property="timestamp" />
<result column="status" jdbcType="INTEGER" property="status" />
<result column="gmt_create" jdbcType="TIMESTAMP" property="gmtCreate" />
<result column="gmt_modify" jdbcType="TIMESTAMP" property="gmtModify" />
</resultMap>
<!-- REPLACE acts as an upsert: on a unique-key conflict the old row is
     deleted and re-inserted, which resets gmt_create. -->
<insert id="replace" parameterType="BrokerDO">
REPLACE broker
(cluster_id, broker_id, host, port, timestamp, status)
VALUES
(#{clusterId}, #{brokerId}, #{host}, #{port}, #{timestamp}, #{status})
</insert>
<!-- Map parameter must supply clusterId and brokerId. -->
<delete id="deleteById" parameterType="java.util.Map">
DELETE FROM broker WHERE cluster_id = #{clusterId} AND broker_id = #{brokerId}
</delete>
<!-- status = -1 marks brokers no longer alive (per the statement name). -->
<select id="getDead" parameterType="java.lang.Long" resultMap="BrokerDOMap">
SELECT * from broker where cluster_id = #{clusterId} AND status = -1
</select>
</mapper>

View File

@@ -0,0 +1,48 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- SQL mapping for the broker_metrics time-series table; rows are appended
     in batches and pruned by deleteBeforeTime. -->
<mapper namespace="BrokerMetricsDao">
<resultMap id="BrokerMetricsMap" type="BrokerMetrics">
<id property="id" column="id" />
<result property="clusterId" column="cluster_id" />
<result property="brokerId" column="broker_id" />
<result property="bytesInPerSec" column="bytes_in" />
<result property="bytesOutPerSec" column="bytes_out" />
<result property="messagesInPerSec" column="messages_in" />
<result property="bytesRejectedPerSec" column="bytes_rejected" />
<result property="failFetchRequestPerSec" column="fail_fetch_request" />
<result property="failProduceRequestPerSec" column="fail_produce_request" />
<result property="fetchConsumerRequestPerSec" column="fetch_consumer_request" />
<result property="produceRequestPerSec" column="produce_request" />
<result property="requestHandlerAvgIdlePercent" column="request_handler_idl_percent" />
<result property="networkProcessorAvgIdlePercent" column="network_processor_idl_percent" />
<result property="requestQueueSize" column="request_queue_size" />
<result property="responseQueueSize" column="response_queue_size" />
<result property="logFlushRateAndTimeMs" column="log_flush_time" />
<result property="totalTimeProduceMean" column="total_time_produce_mean" />
<result property="totalTimeProduce99Th" column="total_time_produce_99th" />
<result property="totalTimeFetchConsumerMean" column="total_time_fetch_consumer_mean" />
<result property="totalTimeFetchConsumer99Th" column="total_time_fetch_consumer_99th" />
<result property="gmtCreate" column="gmt_create" />
</resultMap>
<!-- Multi-row insert; gmt_create is stamped with now() server-side. -->
<insert id="batchAdd" parameterType="java.util.List">
INSERT INTO broker_metrics (cluster_id, broker_id, bytes_in, bytes_out, messages_in, bytes_rejected, fail_fetch_request, fail_produce_request, fetch_consumer_request, produce_request, request_handler_idl_percent, network_processor_idl_percent, request_queue_size, response_queue_size, log_flush_time, total_time_produce_mean, total_time_produce_99th, total_time_fetch_consumer_mean, total_time_fetch_consumer_99th, gmt_create)
VALUES
<foreach item="BrokerMetrics" index="index" collection="list" separator=",">
(#{BrokerMetrics.clusterId}, #{BrokerMetrics.brokerId}, #{BrokerMetrics.bytesInPerSec}, #{BrokerMetrics.bytesOutPerSec}, #{BrokerMetrics.messagesInPerSec}, #{BrokerMetrics.bytesRejectedPerSec}, #{BrokerMetrics.failFetchRequestPerSec}, #{BrokerMetrics.failProduceRequestPerSec}, #{BrokerMetrics.fetchConsumerRequestPerSec}, #{BrokerMetrics.produceRequestPerSec}, #{BrokerMetrics.requestHandlerAvgIdlePercent}, #{BrokerMetrics.networkProcessorAvgIdlePercent}, #{BrokerMetrics.requestQueueSize}, #{BrokerMetrics.responseQueueSize}, #{BrokerMetrics.logFlushRateAndTimeMs}, #{BrokerMetrics.totalTimeProduceMean}, #{BrokerMetrics.totalTimeProduce99Th}, #{BrokerMetrics.totalTimeFetchConsumerMean}, #{BrokerMetrics.totalTimeFetchConsumer99Th}, now())
</foreach>
</insert>
<!-- Time-window query for one broker, oldest row first. -->
<select id="getBrokerMetricsByInterval" parameterType="java.util.Map" resultMap="BrokerMetricsMap">
<![CDATA[
SELECT * FROM broker_metrics WHERE cluster_id = #{clusterId} AND broker_id = #{brokerId} AND gmt_create BETWEEN #{startTime} AND #{endTime} ORDER BY gmt_create ASC
]]>
</select>
<!-- LIMIT bounds each delete batch, presumably to keep lock time short;
     the caller is expected to invoke this repeatedly - TODO confirm. -->
<delete id="deleteBeforeTime" parameterType="java.util.Date">
<![CDATA[
DELETE FROM broker_metrics WHERE gmt_create < #{endTime} LIMIT 1000
]]>
</delete>
</mapper>

View File

@@ -0,0 +1,59 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- SQL mapping for the cluster table. Reads keep rows with status >= 0. -->
<mapper namespace="ClusterDao">
<resultMap id="ClusterMap" type="ClusterDO">
<id column="id" jdbcType="BIGINT" property="id" />
<!-- FIX: status was declared with <id>, making it part of the row identity;
     it is an ordinary attribute. -->
<result column="status" jdbcType="INTEGER" property="status" />
<result column="gmt_create" jdbcType="TIMESTAMP" property="gmtCreate" />
<!-- FIX: gmtModify was previously mapped from the gmt_create column. -->
<result column="gmt_modify" jdbcType="TIMESTAMP" property="gmtModify" />
<result column="cluster_name" jdbcType="VARCHAR" property="clusterName" />
<result column="zookeeper" jdbcType="VARCHAR" property="zookeeper" />
<result column="bootstrap_servers" jdbcType="VARCHAR" property="bootstrapServers" />
<result column="kafka_version" jdbcType="VARCHAR" property="kafkaVersion" />
<result column="alarm_flag" jdbcType="INTEGER" property="alarmFlag" />
<result column="security_protocol" jdbcType="VARCHAR" property="securityProtocol" />
<result column="sasl_mechanism" jdbcType="VARCHAR" property="saslMechanism" />
<result column="sasl_jaas_config" jdbcType="VARCHAR" property="saslJaasConfig" />
</resultMap>
<!-- The generated primary key is written back into ClusterDO.id. -->
<insert id="insert" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO" useGeneratedKeys="true" keyProperty="id">
INSERT INTO cluster (
cluster_name, zookeeper, bootstrap_servers, kafka_version, alarm_flag,
security_protocol, sasl_mechanism, sasl_jaas_config
) VALUES (
#{clusterName}, #{zookeeper}, #{bootstrapServers}, #{kafkaVersion}, #{alarmFlag},
#{securityProtocol}, #{saslMechanism}, #{saslJaasConfig}
)
</insert>
<!-- FIX: the WHERE keyword was missing ("DELETE FROM cluster id = #{id}"),
     which is a SQL syntax error. -->
<delete id="deleteById" parameterType="java.lang.Long">
DELETE FROM cluster WHERE id = #{id}
</delete>
<update id="updateById" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.ClusterDO">
UPDATE cluster SET
cluster_name=#{clusterName},
zookeeper=#{zookeeper},
bootstrap_servers=#{bootstrapServers},
kafka_version=#{kafkaVersion},
alarm_flag=#{alarmFlag},
security_protocol=#{securityProtocol},
sasl_mechanism=#{saslMechanism},
sasl_jaas_config=#{saslJaasConfig}
WHERE id = #{id}
</update>
<!-- Optional filters: each non-null query field appends an AND clause. -->
<select id="getByOption" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.query.ClusterQueryOption" resultMap="ClusterMap">
SELECT * FROM cluster where status >= 0
<trim>
<if test="id != null">
AND id=#{id}
</if>
<if test="clusterName != null">
AND cluster_name=#{clusterName}
</if>
</trim>
</select>
</mapper>

View File

@@ -0,0 +1,37 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- SQL mapping for the cluster_metrics time-series table; rows are appended
     in batches and pruned by deleteBeforeTime. -->
<mapper namespace="ClusterMetricsDao">
<resultMap id="ClusterMetricsMap" type="ClusterMetricsDO">
<id property="id" column="id" />
<result property="clusterId" column="cluster_id" />
<result property="topicNum" column="topic_num" />
<result property="partitionNum" column="partition_num" />
<result property="brokerNum" column="broker_num" />
<result property="bytesInPerSec" column="bytes_in" />
<result property="bytesOutPerSec" column="bytes_out" />
<result property="bytesRejectedPerSec" column="bytes_rejected" />
<result property="messagesInPerSec" column="messages_in" />
<result property="gmtCreate" column="gmt_create" />
</resultMap>
<!-- Time-window query for one cluster, oldest row first. -->
<select id="getClusterMetricsByInterval" parameterType="java.util.Map" resultMap="ClusterMetricsMap">
<![CDATA[
SELECT * FROM cluster_metrics WHERE cluster_id = #{clusterId} AND gmt_create BETWEEN #{startTime} AND #{endTime} ORDER BY gmt_create ASC
]]>
</select>
<!-- Multi-row insert; gmt_create is stamped with now() server-side. -->
<insert id="batchAdd" parameterType="java.util.List">
INSERT INTO cluster_metrics (cluster_id, topic_num, partition_num, broker_num, bytes_in, bytes_out, bytes_rejected, messages_in, gmt_create)
VALUES
<foreach item="ClusterMetricsDO" index="index" collection="list" separator=",">
(#{ClusterMetricsDO.clusterId}, #{ClusterMetricsDO.topicNum}, #{ClusterMetricsDO.partitionNum}, #{ClusterMetricsDO.brokerNum}, #{ClusterMetricsDO.bytesInPerSec}, #{ClusterMetricsDO.bytesOutPerSec}, #{ClusterMetricsDO.bytesRejectedPerSec}, #{ClusterMetricsDO.messagesInPerSec}, now())
</foreach>
</insert>
<!-- LIMIT bounds each delete batch; caller presumably loops until no rows
     remain - TODO confirm. -->
<delete id="deleteBeforeTime" parameterType="java.util.Date">
<![CDATA[
DELETE FROM cluster_metrics WHERE gmt_create < #{endTime} LIMIT 100
]]>
</delete>
</mapper>

View File

@@ -0,0 +1,28 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- SQL mapping for the controller table: an append-only record of Kafka
     controller changes per cluster. -->
<mapper namespace="ControllerDao">
<resultMap id="ControllerMap" type="ControllerDO">
<id column="id" jdbcType="BIGINT" property="id" />
<result column="gmt_create" jdbcType="TIMESTAMP" property="gmtCreate" />
<result column="cluster_id" jdbcType="BIGINT" property="clusterId" />
<result column="broker_id" jdbcType="INTEGER" property="brokerId" />
<result column="host" jdbcType="VARCHAR" property="host" />
<result column="timestamp" jdbcType="BIGINT" property="timestamp" />
<result column="version" jdbcType="INTEGER" property="version" />
</resultMap>
<insert id="insert" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.ControllerDO">
INSERT INTO controller (
cluster_id, broker_id, host, timestamp, version
) VALUES (
#{clusterId}, #{brokerId}, #{host}, #{timestamp}, #{version}
)
</insert>
<!-- Full change history for a cluster, newest record first. -->
<select id="getByClusterId" parameterType="java.lang.Long" resultMap="ControllerMap">
SELECT * FROM controller WHERE cluster_id=#{clusterId} ORDER BY gmt_create DESC
</select>
</mapper>

View File

@@ -0,0 +1,46 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- SQL mapping for the migration_task table (partition-reassignment tasks).
     NOTE(review): the resultMap omits the operator and description columns;
     presumably MyBatis partial auto-mapping fills them in - confirm. -->
<mapper namespace="MigrationTaskDao">
<resultMap id="MigrationTaskDOMap" type="MigrationTaskDO">
<id property="id" column="id" />
<result property="clusterId" column="cluster_id" />
<result property="topicName" column="topic_name" />
<result property="reassignmentJson" column="reassignment_json" />
<result property="throttle" column="real_throttle" />
<result property="status" column="status" />
<result property="gmtCreate" column="gmt_create" />
<result property="gmtModify" column="gmt_modify" />
</resultMap>
<!-- After the insert, LAST_INSERT_ID() is written back into the DO's id. -->
<insert id="addMigrationTask" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.MigrationTaskDO">
<selectKey resultType="java.lang.Long" order="AFTER" keyProperty="id">
SELECT LAST_INSERT_ID()
</selectKey>
<![CDATA[
INSERT INTO migration_task
(cluster_id, topic_name, reassignment_json, real_throttle, operator, description, status)
VALUES
(#{clusterId}, #{topicName}, #{reassignmentJson}, #{throttle}, #{operator}, #{description}, #{status})
]]>
</insert>
<select id="getById" parameterType="java.lang.Long" resultMap="MigrationTaskDOMap">
SELECT * FROM migration_task WHERE id=#{id}
</select>
<!-- All tasks, newest first. -->
<select id="listAll" resultMap="MigrationTaskDOMap">
SELECT * FROM migration_task ORDER BY gmt_create DESC
</select>
<select id="getByStatus" parameterType="java.lang.Integer" resultMap="MigrationTaskDOMap">
SELECT * FROM migration_task WHERE status=#{status}
</select>
<!-- Map parameter must supply throttle, status and id; only those two
     columns are ever updated after creation. -->
<update id="updateById" parameterType="java.util.Map">
UPDATE migration_task
SET
real_throttle=#{throttle},
status=#{status}
WHERE id=#{id}
</update>
</mapper>

View File

@@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- SQL mapping for the operation_history audit table (append-only).
     gmt_create is not set here; presumably the table defaults it - confirm. -->
<mapper namespace="OperationHistoryDao">
<resultMap id="OperationHistoryMap" type="OperationHistoryDO">
<id column="id" property="id" />
<result column="cluster_id" property="clusterId" />
<result column="topic_name" property="topicName" />
<result column="operator" property="operator" />
<result column="operation" property="operation" />
<result column="gmt_create" property="gmtCreate" />
</resultMap>
<insert id="insert" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.OperationHistoryDO">
INSERT INTO operation_history (
cluster_id, topic_name, operator, operation)
VALUES (
#{clusterId}, #{topicName}, #{operator}, #{operation}
)
</insert>
</mapper>

View File

@@ -0,0 +1,56 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- SQL mapping for the order_partition table: partition-expansion requests
     moving through an approval workflow (order_status/approver/opinion). -->
<mapper namespace="OrderPartitionDao">
<resultMap id="OrderPartitionMap" type="OrderPartitionDO">
<id column="id" property="id" />
<!-- NOTE(review): status declared as <id>; probably meant to be <result>. -->
<id column="status" property="status" />
<result column="gmt_create" property="gmtCreate" />
<result column="gmt_modify" property="gmtModify" />
<result column="cluster_id" property="clusterId" />
<result column="cluster_name" property="clusterName" />
<result column="topic_name" property="topicName" />
<result column="applicant" property="applicant" />
<result column="peak_bytes_in" property="peakBytesIn" />
<result column="description" property="description" />
<result column="order_status" property="orderStatus" />
<result column="approver" property="approver" />
<result column="opinion" property="opinion" />
</resultMap>
<!-- approver/opinion are set later, during approval (see updateById). -->
<insert id="insert" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.OrderPartitionDO">
INSERT INTO order_partition (
cluster_id, cluster_name, topic_name, applicant,
peak_bytes_in, description, order_status)
VALUES (
#{clusterId}, #{clusterName}, #{topicName}, #{applicant},
#{peakBytesIn}, #{description}, #{orderStatus}
)
</insert>
<delete id="deleteById" parameterType="java.lang.Long">
DELETE FROM order_partition WHERE id=#{id}
</delete>
<update id="updateById" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.OrderPartitionDO">
UPDATE order_partition SET
cluster_id=#{clusterId},
cluster_name=#{clusterName},
topic_name=#{topicName},
applicant=#{applicant},
peak_bytes_in=#{peakBytesIn},
description=#{description},
order_status=#{orderStatus},
approver=#{approver},
opinion=#{opinion}
WHERE id=#{id}
</update>
<select id="getById" parameterType="java.lang.Long" resultMap="OrderPartitionMap">
SELECT * FROM order_partition WHERE id=#{id}
</select>
<!-- All orders, newest first. -->
<select id="list" resultMap="OrderPartitionMap">
SELECT * FROM order_partition ORDER BY gmt_create DESC
</select>
</mapper>

View File

@@ -0,0 +1,75 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- SQL mapping for the order_topic table: topic-creation requests moving
     through an approval workflow (order_status/approver/opinion). -->
<mapper namespace="OrderTopicDao">
<resultMap id="OrderTopicMap" type="OrderTopicDO">
<id column="id" property="id" />
<!-- NOTE(review): status declared as <id>; probably meant to be <result>. -->
<id column="status" property="status" />
<result column="gmt_create" property="gmtCreate" />
<result column="gmt_modify" property="gmtModify" />
<result column="cluster_id" property="clusterId" />
<result column="cluster_name" property="clusterName" />
<result column="topic_name" property="topicName" />
<result column="retention_time" property="retentionTime" />
<result column="partition_num" property="partitionNum" />
<result column="replica_num" property="replicaNum" />
<result column="regions" property="regions" />
<result column="brokers" property="brokers" />
<result column="applicant" property="applicant" />
<result column="principals" property="principals" />
<result column="peak_bytes_in" property="peakBytesIn" />
<result column="description" property="description" />
<result column="order_status" property="orderStatus" />
<result column="approver" property="approver" />
<result column="opinion" property="opinion" />
</resultMap>
<!-- regions/brokers and approval fields are filled in later via updateById. -->
<insert id="insert" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.OrderTopicDO">
INSERT INTO order_topic (
cluster_id, cluster_name, topic_name,
retention_time, partition_num, replica_num,
applicant, peak_bytes_in, description, principals
)
VALUES (
#{clusterId}, #{clusterName}, #{topicName},
#{retentionTime}, #{partitionNum}, #{replicaNum},
#{applicant}, #{peakBytesIn}, #{description}, #{principals}
)
</insert>
<delete id="deleteById" parameterType="java.lang.Long">
DELETE FROM order_topic WHERE id=#{id}
</delete>
<update id="updateById" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.OrderTopicDO">
UPDATE order_topic SET
cluster_id=#{clusterId},
cluster_name=#{clusterName},
topic_name=#{topicName},
retention_time=#{retentionTime},
partition_num=#{partitionNum},
replica_num=#{replicaNum},
regions=#{regions},
brokers=#{brokers},
applicant=#{applicant},
principals=#{principals},
peak_bytes_in=#{peakBytesIn},
description=#{description},
order_status=#{orderStatus},
approver=#{approver},
opinion=#{opinion}
WHERE id=#{id}
</update>
<select id="getById" parameterType="java.lang.Long" resultMap="OrderTopicMap">
SELECT * FROM order_topic WHERE id=#{id}
</select>
<!-- All orders, newest first. -->
<select id="list" resultMap="OrderTopicMap">
SELECT * FROM order_topic ORDER BY gmt_create DESC
</select>
<!-- Orders filed by one user, newest first. -->
<select id="getByUsername" parameterType="java.lang.String" resultMap="OrderTopicMap">
SELECT * FROM order_topic WHERE applicant=#{username} ORDER BY gmt_create DESC
</select>
</mapper>

View File

@@ -0,0 +1,51 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- SQL mapping for the region table: named broker groupings per cluster. -->
<mapper namespace="RegionDao">
<resultMap id="RegionMap" type="RegionDO">
<id column="id" jdbcType="BIGINT" property="id" />
<result column="status" jdbcType="INTEGER" property="status" />
<result column="gmt_create" jdbcType="TIMESTAMP" property="gmtCreate" />
<result column="gmt_modify" jdbcType="TIMESTAMP" property="gmtModify" />
<result column="cluster_id" jdbcType="BIGINT" property="clusterId" />
<result column="region_name" jdbcType="VARCHAR" property="regionName" />
<result column="broker_list" jdbcType="VARCHAR" property="brokerList" />
<result column="description" jdbcType="VARCHAR" property="description" />
<result column="operator" jdbcType="VARCHAR" property="operator" />
</resultMap>
<!-- REPLACE acts as an upsert on the region's unique key; on conflict the
     row is deleted and re-inserted (gmt_create is reset). -->
<insert id="insert" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.RegionDO">
REPLACE region
(region_name, cluster_id, broker_list, description, operator)
VALUES
(#{regionName}, #{clusterId}, #{brokerList}, #{description}, #{operator})
</insert>
<delete id="deleteById" parameterType="java.lang.Long">
DELETE FROM region WHERE id = #{id}
</delete>
<!-- NOTE(review): `level` is updated here but appears in no other statement
     and has no resultMap mapping - confirm the column and the corresponding
     RegionDO property actually exist. -->
<update id="updateById" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.RegionDO">
UPDATE region SET
region_name=#{regionName},
cluster_id=#{clusterId},
broker_list=#{brokerList},
description=#{description},
operator=#{operator},
status=#{status},
level=#{level}
WHERE id=#{id}
</update>
<select id="getById" parameterType="java.lang.Long" resultMap="RegionMap">
SELECT * FROM region WHERE id=#{id}
</select>
<select id="getByClusterId" parameterType="java.lang.Long" resultMap="RegionMap">
SELECT * FROM region WHERE cluster_id=#{clusterId}
</select>
<select id="listAll" resultMap="RegionMap">
SELECT * FROM region
</select>
</mapper>

View File

@@ -0,0 +1,41 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- SQL mapping for the topic table: ownership/description metadata for
     Kafka topics, keyed by (cluster_id, topic_name). -->
<mapper namespace="TopicDao">
<resultMap id="TopicMap" type="TopicDO">
<id column="id" jdbcType="BIGINT" property="id" />
<result column="cluster_id" jdbcType="BIGINT" property="clusterId" />
<result column="topic_name" jdbcType="VARCHAR" property="topicName" />
<result column="principals" jdbcType="VARCHAR" property="principals" />
<result column="description" jdbcType="VARCHAR" property="description" />
<result column="status" jdbcType="INTEGER" property="status" />
<result column="gmt_create" jdbcType="TIMESTAMP" property="gmtCreate" />
<result column="gmt_modify" jdbcType="TIMESTAMP" property="gmtModify" />
</resultMap>
<!-- REPLACE acts as an upsert on the unique key; on conflict the old row is
     deleted and re-inserted (gmt_create is reset). -->
<insert id="replace" parameterType="com.xiaojukeji.kafka.manager.common.entity.po.TopicDO">
REPLACE topic
(cluster_id, topic_name, principals, description, status)
VALUES
(#{clusterId}, #{topicName}, #{principals}, #{description}, #{status})
</insert>
<delete id="deleteById" parameterType="java.lang.Long">
DELETE FROM topic WHERE id = #{id}
</delete>
<!-- Map parameter must supply clusterId and topicName. -->
<delete id="deleteByName" parameterType="java.util.Map">
DELETE FROM topic WHERE cluster_id = #{clusterId} and topic_name = #{topicName}
</delete>
<select id="getByTopicName" parameterType="java.util.Map" resultMap="TopicMap">
SELECT * FROM topic WHERE cluster_id = #{clusterId} AND topic_name = #{topicName}
</select>
<select id="getByClusterId" parameterType="java.lang.Long" resultMap="TopicMap">
SELECT * FROM topic WHERE cluster_id = #{clusterId}
</select>
<select id="list" resultMap="TopicMap">
SELECT * FROM topic
</select>
</mapper>

View File

@@ -0,0 +1,35 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- SQL mapping for the topic_favorite table: per-user topic bookmarks. -->
<mapper namespace="TopicFavoriteDao">
<resultMap id="TopicFavoriteMap" type="TopicFavoriteDO">
<id column="id" jdbcType="BIGINT" property="id" />
<result column="status" jdbcType="INTEGER" property="status" />
<result column="gmt_modify" jdbcType="TIMESTAMP" property="gmtModify" />
<result column="gmt_create" jdbcType="TIMESTAMP" property="gmtCreate" />
<result column="cluster_id" jdbcType="BIGINT" property="clusterId" />
<result column="topic_name" jdbcType="VARCHAR" property="topicName" />
<result column="username" jdbcType="VARCHAR" property="username" />
</resultMap>
<!-- Multi-row upsert; REPLACE makes re-favoriting idempotent, presumably
     keyed on a unique (cluster_id, topic_name, username) index - confirm. -->
<insert id="batchAdd" parameterType="java.util.List">
REPLACE topic_favorite (cluster_id, topic_name, username)
VALUES
<foreach item="TopicFavoriteDO" index="index" collection="list" separator=",">
(#{TopicFavoriteDO.clusterId}, #{TopicFavoriteDO.topicName}, #{TopicFavoriteDO.username})
</foreach>
</insert>
<delete id="deleteById" parameterType="java.lang.Long">
DELETE FROM topic_favorite WHERE id=#{id}
</delete>
<select id="getByUserName" parameterType="java.lang.String" resultMap="TopicFavoriteMap">
SELECT * FROM topic_favorite WHERE username=#{username}
</select>
<select id="getByUserNameAndClusterId" parameterType="java.util.Map" resultMap="TopicFavoriteMap">
SELECT * FROM topic_favorite WHERE username=#{username} AND cluster_id=#{clusterId}
</select>
</mapper>

View File

@@ -0,0 +1,37 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<!-- SQL mapping for the topic_metrics time-series table; rows are appended
     in batches and pruned by deleteBeforeTime. -->
<mapper namespace="TopicMetricsDao">
<resultMap id="TopicMetricsMap" type="TopicMetrics">
<id column="id" jdbcType="BIGINT" property="id" />
<result column="cluster_id" jdbcType="BIGINT" property="clusterId" />
<result column="topic_name" jdbcType="VARCHAR" property="topicName" />
<result column="bytes_in" jdbcType="DOUBLE" property="bytesInPerSec" />
<result column="bytes_out" jdbcType="DOUBLE" property="bytesOutPerSec" />
<result column="messages_in" jdbcType="DOUBLE" property="messagesInPerSec" />
<result column="bytes_rejected" jdbcType="DOUBLE" property="bytesRejectedPerSec" />
<result column="total_produce_requests" jdbcType="DOUBLE" property="totalProduceRequestsPerSec" />
<result column="gmt_create" jdbcType="TIMESTAMP" property="gmtCreate" />
</resultMap>
<!-- Multi-row insert; gmt_create is stamped with now() server-side. -->
<insert id="batchAdd" parameterType="java.util.List">
INSERT INTO topic_metrics
(cluster_id, topic_name, messages_in, bytes_in, bytes_out, bytes_rejected, total_produce_requests, gmt_create)
values
<foreach item="TopicMetrics" index="index" collection="list" separator=",">
(#{TopicMetrics.clusterId}, #{TopicMetrics.topicName}, #{TopicMetrics.messagesInPerSec}, #{TopicMetrics.bytesInPerSec}, #{TopicMetrics.bytesOutPerSec}, #{TopicMetrics.bytesRejectedPerSec}, #{TopicMetrics.totalProduceRequestsPerSec}, now())
</foreach>
</insert>
<!-- Time-window query for one topic. NOTE(review): unlike the broker/cluster
     metric queries this has no ORDER BY - confirm callers sort or tolerate
     arbitrary order. -->
<select id="getTopicMetricsByInterval" parameterType="java.util.Map" resultMap="TopicMetricsMap">
<![CDATA[
SELECT * FROM topic_metrics WHERE cluster_id = #{clusterId} AND topic_name = #{topicName} AND gmt_create BETWEEN #{startTime} AND #{endTime}
]]>
</select>
<!-- LIMIT bounds each delete batch; caller presumably loops - TODO confirm. -->
<delete id="deleteBeforeTime" parameterType="java.util.Date">
<![CDATA[
DELETE FROM topic_metrics WHERE gmt_create < #{endTime} LIMIT 2000
]]>
</delete>
</mapper>

View File

@@ -0,0 +1,18 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE configuration
PUBLIC "-//mybatis.org//DTD Config 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-config.dtd">
<!-- Global MyBatis settings shared by every mapper in this module. -->
<configuration>
<settings>
<!-- Auto-map snake_case columns onto camelCase DO properties. -->
<setting name="mapUnderscoreToCamelCase" value="true" />
<!-- Custom VFS (presumably so classpath scanning of type aliases works
     inside a Spring Boot fat jar - confirm). -->
<setting name="vfsImpl" value="com.xiaojukeji.kafka.manager.vfs.SpringBootVFS" />
<setting name="lazyLoadingEnabled" value="true" />
<!-- Load each lazy property on demand rather than all at first touch. -->
<setting name="aggressiveLazyLoading" value="false" />
</settings>
<typeAliases>
<!-- Lets mappers use short names like ClusterDO / BrokerMetrics. -->
<package name="com.xiaojukeji.kafka.manager.common.entity"/>
</typeAliases>
</configuration>

View File

@@ -0,0 +1,95 @@
<!--<?xml version="1.0" encoding="UTF-8"?>-->
<!--<beans xmlns="http://www.springframework.org/schema/beans"-->
<!--xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"-->
<!--xmlns:context="http://www.springframework.org/schema/context"-->
<!--xmlns:tx="http://www.springframework.org/schema/tx"-->
<!--xsi:schemaLocation="http://www.springframework.org/schema/beans-->
<!--http://www.springframework.org/schema/beans/spring-beans-3.0.xsd-->
<!--http://www.springframework.org/schema/context-->
<!--http://www.springframework.org/schema/context/spring-context-3.0.xsd-->
<!--http://www.springframework.org/schema/tx-->
<!--http://www.springframework.org/schema/tx/spring-tx-3.0.xsd-->
<!--http://www.springframework.org/schema/aop-->
<!--http://www.springframework.org/schema/aop/spring-aop-3.0.xsd-->
<!--">-->
<!--<context:component-scan base-package="com.xiaojukeji.kafka.manager.dao.*" />-->
<!--&lt;!&ndash; 第一个数据源 &ndash;&gt;-->
<!--<bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">-->
<!--&lt;!&ndash; 基本属性 url、user、password &ndash;&gt;-->
<!--<property name="url" value="${spring.datasource.kafka-manager.url}" />-->
<!--<property name="username" value="${spring.datasource.kafka-manager.username}" />-->
<!--<property name="password" value="${spring.datasource.kafka-manager.password}" />-->
<!--&lt;!&ndash; 配置初始化大小、最小、最大 &ndash;&gt;-->
<!--<property name="initialSize" value="2" />-->
<!--<property name="minIdle" value="2" />-->
<!--<property name="maxActive" value="10" />-->
<!--&lt;!&ndash; 配置获取连接等待超时的时间 &ndash;&gt;-->
<!--<property name="maxWait" value="60000" />-->
<!--&lt;!&ndash; 配置间隔多久才进行一次检测,检测需要关闭的空闲连接,单位是毫秒 &ndash;&gt;-->
<!--<property name="timeBetweenEvictionRunsMillis" value="60000" />-->
<!--&lt;!&ndash; 配置一个连接在池中最小生存的时间,单位是毫秒 &ndash;&gt;-->
<!--<property name="minEvictableIdleTimeMillis" value="120000" />-->
<!--<property name="validationQuery" value="SELECT 1" />-->
<!--<property name="testWhileIdle" value="true" />-->
<!--<property name="testOnBorrow" value="false" />-->
<!--<property name="testOnReturn" value="false" />-->
<!--&lt;!&ndash; 打开PSCache并且指定每个连接上PSCache的大小 &ndash;&gt;-->
<!--<property name="poolPreparedStatements" value="false" />-->
<!--<property name="maxPoolPreparedStatementPerConnectionSize" value="20" />-->
<!--&lt;!&ndash; 配置监控统计拦截的filters &ndash;&gt;-->
<!--<property name="filters" value="stat" />-->
<!--<property name="proxyFilters">-->
<!--<list>-->
<!--<ref bean="log-filter"/>-->
<!--</list>-->
<!--</property>-->
<!--</bean>-->
<!--<bean id="log-filter" class="com.alibaba.druid.filter.logging.Log4j2Filter">-->
<!--&lt;!&ndash; 所有连接相关的日志 &ndash;&gt;-->
<!--<property name="connectionLogEnabled" value="false"/>-->
<!--&lt;!&ndash; 所有Statement相关的日志 &ndash;&gt;-->
<!--<property name="statementLogEnabled" value="false"/>-->
<!--&lt;!&ndash; 是否显示结果集 &ndash;&gt;-->
<!--<property name="resultSetLogEnabled" value="true"/>-->
<!--&lt;!&ndash; 是否显示SQL语句 &ndash;&gt;-->
<!--<property name="statementExecutableSqlLogEnable" value="true"/>-->
<!--</bean>-->
<!--&lt;!&ndash; Sql会话工厂 &ndash;&gt;-->
<!--<bean id="sqlSessionFactory" class="org.mybatis.spring.SqlSessionFactoryBean">-->
<!--<property name="dataSource" ref="dataSource" />-->
<!--<property name="mapperLocations" value="classpath:mapper/*.xml" />-->
<!--<property name="configLocation" value="classpath:mybatis-config.xml" />-->
<!--</bean>-->
<!--&lt;!&ndash; Sql会话对象 &ndash;&gt;-->
<!--<bean id="sqlSession" class="org.mybatis.spring.SqlSessionTemplate">-->
<!--<constructor-arg index="0" ref="sqlSessionFactory" />-->
<!--</bean>-->
<!--<bean id="transactionManager"-->
<!--class="org.springframework.jdbc.datasource.DataSourceTransactionManager">-->
<!--<property name="dataSource" ref="dataSource" />-->
<!--</bean>-->
<!--<bean id="transactionTemplate"-->
<!--class="org.springframework.transaction.support.TransactionTemplate">-->
<!--<property name="transactionManager" ref="transactionManager" />-->
<!--</bean>-->
<!--<tx:annotation-driven transaction-manager="transactionManager" />-->
<!--</beans>-->