Unit tests for the monitor, openapi, and account modules

This commit is contained in:
didi
2022-01-07 11:43:31 +08:00
parent 61672637dc
commit ec19c3b4dd
21 changed files with 2276 additions and 20 deletions

View File: ThirdPartServiceTest.java

@@ -0,0 +1,148 @@
package com.xiaojukeji.kafka.manager.openapi;

import com.xiaojukeji.kafka.manager.common.bizenum.ConsumeHealthEnum;
import com.xiaojukeji.kafka.manager.common.entity.Result;
import com.xiaojukeji.kafka.manager.common.entity.ResultStatus;
import com.xiaojukeji.kafka.manager.common.entity.ao.PartitionOffsetDTO;
import com.xiaojukeji.kafka.manager.common.entity.pojo.ClusterDO;
import com.xiaojukeji.kafka.manager.openapi.common.dto.OffsetResetDTO;
import com.xiaojukeji.kafka.manager.openapi.config.BaseTest;
import com.xiaojukeji.kafka.manager.service.service.ConsumerService;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.springframework.beans.factory.annotation.Autowired;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* @author wyc
* @date 2022/1/6
*/
public class ThirdPartServiceTest extends BaseTest {
    private final static Long REAL_CLUSTER_ID_IN_MYSQL = 1L;
    private final static String REAL_TOPIC_IN_ZK = "topic_a";
    private final static String REAL_PHYSICAL_CLUSTER_NAME = "cluster1";
    private final static String ZOOKEEPER = "10.190.12.242:2181,10.190.25.160:2181,10.190.25.41:2181/wyc";
    private final static String BOOTSTRAP_SERVERS = "10.190.12.242:9093,10.190.25.160:9093,10.190.25.41:9093";
    private final static String SECURITY_PROPERTIES = "{ \t\"security.protocol\": \"SASL_PLAINTEXT\", \t\"sasl.mechanism\": \"PLAIN\", \t\"sasl.jaas.config\": \"org.apache.kafka.common.security.plain.PlainLoginModule required username=\\\"dkm_admin\\\" password=\\\"km_kMl4N8as1Kp0CCY\\\";\" }";
    private final static String JMX_PROPERTIES = "{\n" + "\t\"maxConn\": 100000\n" + "}";
    private final static Integer STATUS = 1;
    private final static String REAL_APP_ID = "dkm_admin";

    // Requires that the consumers of topic_a belong to consumer group group.demo
    private final static String REAL_CONSUMER_GROUP_ID = "group.demo";

    @Autowired
    @InjectMocks
    private ThirdPartService thirdPartService;

    @Mock
    private ConsumerService consumerService;
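
    // MockitoAnnotations.initMocks(this) instantiates the @Mock fields and
    // injects them into the @InjectMocks target, so the Spring-autowired
    // thirdPartService has its real ConsumerService swapped for a Mockito
    // mock before every test method.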
    @BeforeMethod
    public void init() {
        MockitoAnnotations.initMocks(this);
    }
    private ClusterDO getClusterDO() {
        ClusterDO clusterDO = new ClusterDO();
        clusterDO.setId(REAL_CLUSTER_ID_IN_MYSQL);
        clusterDO.setClusterName(REAL_PHYSICAL_CLUSTER_NAME);
        clusterDO.setBootstrapServers(BOOTSTRAP_SERVERS);
        clusterDO.setJmxProperties(JMX_PROPERTIES);
        clusterDO.setSecurityProperties(SECURITY_PROPERTIES);
        clusterDO.setStatus(STATUS);
        clusterDO.setZookeeper(ZOOKEEPER);
        return clusterDO;
    }

    private PartitionOffsetDTO getPartitionOffsetDTO() {
        PartitionOffsetDTO dto = new PartitionOffsetDTO();
        dto.setPartitionId(0);
        dto.setTimestamp(0L);
        dto.setOffset(0L);
        return dto;
    }

    private OffsetResetDTO getOffsetResetDTO() {
        OffsetResetDTO dto = new OffsetResetDTO();
        dto.setAppId(REAL_APP_ID);
        dto.setClusterId(REAL_CLUSTER_ID_IN_MYSQL);
        dto.setConsumerGroup(REAL_CONSUMER_GROUP_ID);
        dto.setTopicName(REAL_TOPIC_IN_ZK);
        dto.setTimestamp(0L);
        dto.setPartitionOffsetDTOList(new ArrayList<>(Arrays.asList(getPartitionOffsetDTO())));
        dto.setLocation("broker");
        return dto;
    }
    @Test(description = "CLUSTER_NOT_EXIST")
    public void checkConsumeHealth2ClusterNotExistTest() {
        // maxDelayTime = 24h: the consumer is considered healthy if its current
        // offset is at or beyond the offset that corresponds to the timestamp 24 hours ago
        Assert.assertEquals(thirdPartService.checkConsumeHealth(-1L, REAL_TOPIC_IN_ZK, REAL_CONSUMER_GROUP_ID, 60 * 60 * 24 * 1000L).toString(), Result.buildFrom(ResultStatus.CLUSTER_NOT_EXIST).toString());
    }

    @Test(description = "TOPIC_NOT_EXIST")
    public void checkConsumeHealth2TopicNotExistTest() {
        Assert.assertEquals(thirdPartService.checkConsumeHealth(1L, "topic_not_exist", REAL_CONSUMER_GROUP_ID, 60 * 60 * 24 * 1000L).toString(), Result.buildFrom(ResultStatus.TOPIC_NOT_EXIST).toString());
    }

    @Test(description = "CONSUMER_GROUP_NOT_EXIST")
    public void checkConsumeHealth2ConsumerGroupNotExistTest() {
        Assert.assertEquals(thirdPartService.checkConsumeHealth(REAL_CLUSTER_ID_IN_MYSQL, REAL_TOPIC_IN_ZK, "group_not_exist", 60 * 60 * 24 * 1000L).toString(), Result.buildFrom(ResultStatus.CONSUMER_GROUP_NOT_EXIST).toString());
    }

    @Test(description = "HEALTH")
    public void checkConsumeHealth2HealthTest() {
        // Requires a producer that has sent messages to topic_a and a consumer with
        // group.id=group.demo that has consumed them and then been stopped;
        // only then does this test pass
        Assert.assertEquals(thirdPartService.checkConsumeHealth(REAL_CLUSTER_ID_IN_MYSQL, REAL_TOPIC_IN_ZK, REAL_CONSUMER_GROUP_ID, 60 * 60 * 24 * 1000L).toString(), new Result<>(ConsumeHealthEnum.HEALTH).toString());
    }
    @Test
    public void resetOffsetsTest() {
        ClusterDO clusterDO = getClusterDO();
        OffsetResetDTO offsetResetDTO = getOffsetResetDTO();
        Mockito.when(consumerService.checkConsumerGroupExist(Mockito.any(), Mockito.any(), Mockito.anyString(), Mockito.anyString())).thenReturn(true);
        List<Result> results = thirdPartService.resetOffsets(clusterDO, offsetResetDTO);
        Assert.assertNotNull(results);
    }
    @Test
    public void resetOffset2NullTest1() {
        ClusterDO clusterDO = getClusterDO();
        Assert.assertNull(thirdPartService.resetOffsets(clusterDO, null));
    }

    @Test
    public void resetOffsetSuccessTest() {
        // Requires the consumer group group.demo to exist
        Result expectedResult = Result.buildSuc();
        ClusterDO clusterDO = getClusterDO();
        OffsetResetDTO offsetResetDTO = getOffsetResetDTO();
        Mockito.when(consumerService.checkConsumerGroupExist(Mockito.any(), Mockito.any(), Mockito.anyString(), Mockito.anyString())).thenReturn(true);
        Mockito.when(consumerService.resetConsumerOffset(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any())).thenReturn(new ArrayList<>(Arrays.asList(Result.buildSuc())));
        List<Result> results = thirdPartService.resetOffsets(clusterDO, offsetResetDTO);
        Assert.assertTrue(!results.isEmpty() && results.stream().allMatch(result -> result.toString().equals(expectedResult.toString())));
    }
}
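
A hedged aside, not part of this commit: the three *_NOT_EXIST cases above differ only in their inputs and expected status, so they could be collapsed into one data-driven TestNG test. The sketch below assumes it lives inside ThirdPartServiceTest (same constants, same injected thirdPartService) plus an extra import of org.testng.annotations.DataProvider.

    @DataProvider(name = "notExistCases")
    public Object[][] notExistCases() {
        return new Object[][]{
                {-1L, REAL_TOPIC_IN_ZK, REAL_CONSUMER_GROUP_ID, ResultStatus.CLUSTER_NOT_EXIST},
                {REAL_CLUSTER_ID_IN_MYSQL, "topic_not_exist", REAL_CONSUMER_GROUP_ID, ResultStatus.TOPIC_NOT_EXIST},
                {REAL_CLUSTER_ID_IN_MYSQL, REAL_TOPIC_IN_ZK, "group_not_exist", ResultStatus.CONSUMER_GROUP_NOT_EXIST}
        };
    }

    // One parameterized test replaces the three hand-written *_NOT_EXIST tests
    @Test(dataProvider = "notExistCases")
    public void checkConsumeHealthNotExistTest(Long clusterId, String topic, String group, ResultStatus expected) {
        Assert.assertEquals(
                thirdPartService.checkConsumeHealth(clusterId, topic, group, 24 * 60 * 60 * 1000L).toString(),
                Result.buildFrom(expected).toString());
    }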

View File: BaseTest.java

@@ -0,0 +1,11 @@
package com.xiaojukeji.kafka.manager.openapi.config;

import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.testng.AbstractTransactionalTestNGSpringContextTests;

@SpringBootTest(classes = CoreSpringBootStartUp.class, webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
@ContextConfiguration(classes = CoreSpringBootStartUp.class)
public class BaseTest extends AbstractTransactionalTestNGSpringContextTests {
}
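
Because BaseTest extends AbstractTransactionalTestNGSpringContextTests, every test method runs inside a Spring-managed transaction that is rolled back when the method returns, so database writes never leak between tests. A minimal sketch of that behavior, assuming a hypothetical table named topic with a topic_name column (the table is for illustration only):

    import org.testng.Assert;
    import org.testng.annotations.Test;

    public class RollbackBehaviourTest extends BaseTest {
        @Test
        public void insertIsRolledBackAfterTheTest() {
            // jdbcTemplate and countRowsInTable() are inherited from the Spring base class
            int before = countRowsInTable("topic");
            jdbcTemplate.update("INSERT INTO topic (topic_name) VALUES ('tmp_topic')");
            Assert.assertEquals(countRowsInTable("topic"), before + 1);
            // the surrounding transaction is rolled back once this method returns
        }
    }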

View File: CoreSpringBootStartUp.java

@@ -0,0 +1,21 @@
package com.xiaojukeji.kafka.manager.openapi.config;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.web.servlet.ServletComponentScan;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.annotation.EnableScheduling;

@EnableAsync
@EnableScheduling
@ServletComponentScan
@EnableAutoConfiguration
@SpringBootApplication(scanBasePackages = {"com.xiaojukeji.kafka.manager"})
public class CoreSpringBootStartUp {
    public static void main(String[] args) {
        SpringApplication sa = new SpringApplication(CoreSpringBootStartUp.class);
        sa.run(args);
    }
}

View File: DataSourceConfig.java

@@ -0,0 +1,51 @@
package com.xiaojukeji.kafka.manager.openapi.config;

import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.jdbc.DataSourceBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;

import javax.sql.DataSource;

/**
 * @author zengqiao
 * @date 20/3/17
 */
@Configuration
public class DataSourceConfig {
    @Bean(name = "dataSource")
    @ConfigurationProperties(prefix = "spring.datasource.kafka-manager")
    @Primary
    public DataSource dataSource() {
        return DataSourceBuilder.create().build();
    }

    @Bean(name = "sqlSessionFactory")
    @Primary
    public SqlSessionFactory sqlSessionFactory(@Qualifier("dataSource") DataSource dataSource) throws Exception {
        SqlSessionFactoryBean bean = new SqlSessionFactoryBean();
        bean.setDataSource(dataSource);
        bean.setMapperLocations(new PathMatchingResourcePatternResolver().getResources("classpath:mapper/*.xml"));
        bean.setConfigLocation(new PathMatchingResourcePatternResolver().getResource("classpath:mybatis-config.xml"));
        return bean.getObject();
    }

    @Bean(name = "transactionManager")
    @Primary
    public DataSourceTransactionManager transactionManager(@Qualifier("dataSource") DataSource dataSource) {
        return new DataSourceTransactionManager(dataSource);
    }

    @Bean(name = "sqlSession")
    @Primary
    public SqlSessionTemplate sqlSessionTemplate(@Qualifier("sqlSessionFactory") SqlSessionFactory sqlSessionFactory) throws Exception {
        return new SqlSessionTemplate(sqlSessionFactory);
    }
}
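
For context, a hedged sketch of how the sqlSession bean defined above is typically consumed from a DAO; TopicMapper.listAll is a hypothetical mapper statement id, not one shipped in this commit:

    import java.util.List;
    import java.util.Map;
    import org.mybatis.spring.SqlSessionTemplate;
    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.stereotype.Repository;

    @Repository
    public class TopicDaoSketch {
        @Autowired
        private SqlSessionTemplate sqlSession;

        // runs the mapped statement loaded from one of the classpath:mapper/*.xml files
        public List<Map<String, Object>> listTopics() {
            return sqlSession.selectList("TopicMapper.listAll");
        }
    }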

View File: application.yml

@@ -0,0 +1,98 @@
server:
  port: 8080
  tomcat:
    accept-count: 1000
    max-connections: 10000
    max-threads: 800
    min-spare-threads: 100

spring:
  application:
    name: kafkamanager
  profiles:
    active: dev
  datasource:
    kafka-manager:
      jdbc-url: jdbc:mysql://10.190.7.220:3306/user_test?characterEncoding=UTF-8&useSSL=false&serverTimezone=GMT%2B8
      username: root
      password: 2PHCnL6RRM
      driver-class-name: com.mysql.cj.jdbc.Driver
  main:
    allow-bean-definition-overriding: true
  servlet:
    multipart:
      max-file-size: 100MB
      max-request-size: 100MB

logging:
  config: classpath:logback-spring.xml

custom:
  idc: cn
  jmx:
    max-conn: 10 # as of version 2.3, this setting no longer takes effect here
  store-metrics-task:
    community:
      broker-metrics-enabled: true
      topic-metrics-enabled: true
    didi:
      app-topic-metrics-enabled: false
      topic-request-time-metrics-enabled: false
      topic-throttled-metrics: false
    save-days: 7

# Task-related switches
task:
  op:
    sync-topic-enabled: false # periodically sync not-yet-persisted topics into the DB
    order-auto-exec: # switches for the automatic order-approval threads
      topic-enabled: false # auto-approval of topic orders; false: off, true: on
      app-enabled: false # auto-approval of app orders; false: off, true: on

account:
  ldap:
    enabled: false
    url: ldap://127.0.0.1:389/
    basedn: dc=tsign,dc=cn
    factory: com.sun.jndi.ldap.LdapCtxFactory
    filter: sAMAccountName
    security:
      authentication: simple
      principal: cn=admin,dc=tsign,dc=cn
      credentials: admin
    auth-user-registration: true
    auth-user-registration-role: normal

kcm:
  enabled: false
  s3:
    endpoint: s3.didiyunapi.com
    access-key: 1234567890
    secret-key: 0987654321
    bucket: logi-kafka
  n9e:
    base-url: http://127.0.0.1:8004
    user-token: 12345678
    timeout: 300
    account: root
    script-file: kcm_script.sh

monitor:
  enabled: false
  n9e:
    nid: 2
    user-token: 1234567890
    mon:
      base-url: http://127.0.0.1:8000 # Nightingale (n9e) v4 unified its default ports to 8000
    sink:
      base-url: http://127.0.0.1:8000 # Nightingale (n9e) v4 unified its default ports to 8000
    rdb:
      base-url: http://127.0.0.1:8000 # Nightingale (n9e) v4 unified its default ports to 8000

notify:
  kafka:
    cluster-id: 95
    topic-name: didi-kafka-notify
  order:
    detail-url: http://127.0.0.1
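
As an aside, the keys under custom: are ordinary properties and can be injected directly; the class below is a hypothetical illustration, not part of this commit:

    import org.springframework.beans.factory.annotation.Value;
    import org.springframework.stereotype.Component;

    @Component
    public class CustomPropsSketch {
        @Value("${custom.idc}")
        private String idc; // "cn" with the configuration above

        @Value("${custom.jmx.max-conn:10}") // defaults to 10 if the key is absent
        private int jmxMaxConn;
    }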

View File: Maven assembly descriptor

@@ -0,0 +1,63 @@
<assembly>
    <id>assembly</id>
    <formats>
        <format>tar</format>
        <format>zip</format>
    </formats>
    <fileSets>
        <fileSet>
            <directory>src/main/resources/bin</directory>
            <outputDirectory>bin</outputDirectory>
            <includes>
                <include>control.sh</include>
                <include>start.bat</include>
            </includes>
            <fileMode>0755</fileMode>
        </fileSet>
        <fileSet>
            <directory>src/main/resources</directory>
            <outputDirectory>config</outputDirectory>
            <includes>
                <include>*.properties</include>
                <include>*.xml</include>
                <include>*.yml</include>
                <include>env/dev/*</include>
                <include>env/qa/*</include>
                <include>env/uat/*</include>
                <include>env/prod/*</include>
            </includes>
        </fileSet>
        <fileSet>
            <directory>target</directory>
            <outputDirectory>lib</outputDirectory>
            <includes>
                <!--
                <include>*release*.jar</include>
                -->
                <include>kafka-manager-web*.jar</include>
            </includes>
            <excludes>
                <exclude>*sources.jar</exclude>
            </excludes>
        </fileSet>
        <fileSet>
            <directory>src/main/resources</directory>
            <outputDirectory>logs</outputDirectory>
            <fileMode>0755</fileMode>
            <excludes>
                <exclude>**/*</exclude>
            </excludes>
        </fileSet>
        <!-- <fileSet>
            <directory>${project.build.directory}/asciidoc</directory>
            <outputDirectory>docs</outputDirectory>
            <includes>
                <include>md/*</include>
                <include>html/*</include>
                <include>pdf/*</include>
            </includes>
            <fileMode>0755</fileMode>
        </fileSet> -->
    </fileSets>
</assembly>

View File: logback-spring.xml

@@ -0,0 +1,215 @@
<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="10 seconds">
    <contextName>logback</contextName>
    <property name="log.path" value="./logs" />

    <!-- Colored console logging -->
    <!-- Converter classes that the colored output depends on -->
    <conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter" />
    <conversionRule conversionWord="wex" converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter" />
    <conversionRule conversionWord="wEx" converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter" />
    <!-- Colored log pattern -->
    <property name="CONSOLE_LOG_PATTERN" value="${CONSOLE_LOG_PATTERN:-%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>

    <!-- Console output -->
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>info</level>
        </filter>
        <encoder>
            <Pattern>${CONSOLE_LOG_PATTERN}</Pattern>
            <charset>UTF-8</charset>
        </encoder>
    </appender>

    <!-- File output -->
    <!-- Time- and size-based rolling appender for DEBUG-level logs -->
    <appender name="DEBUG_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${log.path}/log_debug.log</file>
        <!-- Log file output pattern -->
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
            <charset>UTF-8</charset> <!-- character set -->
        </encoder>
        <!-- Rolling policy: roll by date and by size -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- Archived log file naming -->
            <fileNamePattern>${log.path}/log_debug_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
            <!-- Days to keep log files -->
            <maxHistory>7</maxHistory>
        </rollingPolicy>
        <!-- This file records the DEBUG level only -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>debug</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <!-- Time- and size-based rolling appender for INFO-level logs -->
    <appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Path and name of the active log file -->
        <file>${log.path}/log_info.log</file>
        <!-- Log file output pattern -->
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <!-- Rolling policy: roll by date and by size -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- Daily archive path and naming pattern -->
            <fileNamePattern>${log.path}/log_info_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
            <!-- Days to keep log files -->
            <maxHistory>7</maxHistory>
        </rollingPolicy>
        <!-- This file records the INFO level only -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>info</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <!-- Time- and size-based rolling appender for WARN-level logs -->
    <appender name="WARN_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Path and name of the active log file -->
        <file>${log.path}/log_warn.log</file>
        <!-- Log file output pattern -->
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
            <charset>UTF-8</charset> <!-- character set -->
        </encoder>
        <!-- Rolling policy: roll by date and by size -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/log_warn_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
            <!-- Days to keep log files -->
            <maxHistory>7</maxHistory>
        </rollingPolicy>
        <!-- This file records the WARN level only -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>warn</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <!-- Time- and size-based rolling appender for ERROR-level logs -->
    <appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- Path and name of the active log file -->
        <file>${log.path}/log_error.log</file>
        <!-- Log file output pattern -->
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
            <charset>UTF-8</charset> <!-- character set -->
        </encoder>
        <!-- Rolling policy: roll by date and by size -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/log_error_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
            <!-- Days to keep log files -->
            <maxHistory>7</maxHistory>
        </rollingPolicy>
        <!-- This file records the ERROR level only -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

    <!-- Collector metrics log -->
    <appender name="COLLECTOR_METRICS_LOGGER" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${log.path}/metrics/collector_metrics.log</file>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/metrics/collector_metrics_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
            <maxHistory>3</maxHistory>
        </rollingPolicy>
    </appender>

    <!-- API metrics log -->
    <appender name="API_METRICS_LOGGER" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${log.path}/metrics/api_metrics.log</file>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/metrics/api_metrics_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
            <maxHistory>3</maxHistory>
        </rollingPolicy>
    </appender>

    <!-- Scheduled task log -->
    <appender name="SCHEDULED_TASK_LOGGER" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <file>${log.path}/metrics/scheduled_tasks.log</file>
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50} - %msg%n</pattern>
            <charset>UTF-8</charset>
        </encoder>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/metrics/scheduled_tasks_%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
            <maxHistory>5</maxHistory>
        </rollingPolicy>
    </appender>

    <logger name="COLLECTOR_METRICS_LOGGER" level="DEBUG" additivity="false">
        <appender-ref ref="COLLECTOR_METRICS_LOGGER"/>
    </logger>
    <logger name="API_METRICS_LOGGER" level="DEBUG" additivity="false">
        <appender-ref ref="API_METRICS_LOGGER"/>
    </logger>
    <logger name="SCHEDULED_TASK_LOGGER" level="DEBUG" additivity="false">
        <appender-ref ref="SCHEDULED_TASK_LOGGER"/>
    </logger>

    <logger name="org.apache.ibatis" level="INFO" additivity="false" />
    <logger name="org.mybatis.spring" level="INFO" additivity="false" />
    <logger name="com.github.miemiedev.mybatis.paginator" level="INFO" additivity="false" />

    <root level="info">
        <appender-ref ref="CONSOLE" />
        <appender-ref ref="DEBUG_FILE" />
        <appender-ref ref="INFO_FILE" />
        <appender-ref ref="WARN_FILE" />
        <appender-ref ref="ERROR_FILE" />
        <!--<appender-ref ref="METRICS_LOG" />-->
    </root>

    <!-- Production profile: output to files -->
    <!--<springProfile name="pro">-->
        <!--<root level="info">-->
            <!--<appender-ref ref="CONSOLE" />-->
            <!--<appender-ref ref="DEBUG_FILE" />-->
            <!--<appender-ref ref="INFO_FILE" />-->
            <!--<appender-ref ref="ERROR_FILE" />-->
            <!--<appender-ref ref="WARN_FILE" />-->
        <!--</root>-->
    <!--</springProfile>-->
</configuration>
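
The three metrics appenders above are bound to loggers by name with additivity="false", so their output goes only to the files under logs/metrics and never reaches the root appenders. A minimal sketch, assuming SLF4J on the classpath, of how code obtains such a logger:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class MetricsLoggingSketch {
        // resolved by name, matching <logger name="COLLECTOR_METRICS_LOGGER" ...>
        private static final Logger COLLECTOR_METRICS =
                LoggerFactory.getLogger("COLLECTOR_METRICS_LOGGER");

        public static void main(String[] args) {
            // written to logs/metrics/collector_metrics.log only
            COLLECTOR_METRICS.info("collected one batch of broker metrics");
        }
    }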