logback-spring.xml

This article details how to configure the Logback logging framework, covering the setup of different output channels, log-level control, and rolling policies, with a particular focus on using Kafka as a log collector.


<?xml version="1.0" encoding="UTF-8"?>
<configuration debug="false">

    <define name="hostname" class="com.test.core.log.property.CanonicalHostNamePropertyDefiner"/>
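    <!-- The hostname property above is resolved by a custom PropertyDefiner; a sketch of such a class appears after this configuration. -->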
    <conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter"/>
    <conversionRule conversionWord="wex"
                    converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter"/>
    <conversionRule conversionWord="wEx"
                    converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter"/>
    <property name="CONSOLE_LOG_PATTERN"
              value="${CONSOLE_LOG_PATTERN:-%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) [${hostname}]  %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %L %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>

    <property name="PROJECT_NAME" value="test-service"/>

    <!-- Define where log files are stored. Do not use relative paths in Logback configuration, e.g. /app/sv/logs -->
    <springProperty scope="context" name="LOG_HOME" source="log.home" defaultValue="/usr/src/${PROJECT_NAME}/log"/>
    <springProperty scope="context" name="LOG_SERVERS" source="log.kafka.servers" defaultValue=""/>
    <!--    <springProperty scope="context" name="APP_LEVEL" source="log.app.level" defaultValue="INFO"/>
        <springProperty scope="context" name="APP_APPENDER" source="log.app.appender" defaultValue="APPLICATION-APPENDER"/>-->
    <springProperty scope="context" name="ROOT_LEVEL" source="log.root.level" defaultValue="INFO"/>
    <springProperty scope="context" name="ROOT_APPENDER" source="log.root.appender" defaultValue="CONSOLE"/>
    <springProperty scope="context" name="ACCESS_LEVEL" source="log.access.level" defaultValue="INFO"/>
    <springProperty scope="context" name="ACCESS_APPENDER" source="log.access.appender" defaultValue="CONSOLE"/>
    <springProperty scope="context" name="PROFILE_ACTIVE" source="spring.profiles.active" defaultValue="default"/>
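    <!-- Illustrative application.properties values for the "source" keys above (example values, not from the original post):
         log.home=/var/log/test-service
         log.kafka.servers=kafka-1:9092,kafka-2:9092
         log.root.level=INFO
         log.root.appender=STDOUT
         log.access.level=INFO
         log.access.appender=AsyncKafkaAccessAppender
         spring.profiles.active=prod
    -->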

    <!-- Console output -->
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <!-- Formatted output: %d is the date, %thread the thread name, %-5level pads the level to 5 characters, %msg the log message, %n a newline -->
            <pattern>${CONSOLE_LOG_PATTERN}</pattern>
            <charset>utf-8</charset>
        </encoder>
    </appender>

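    <!-- Despite its name, STDOUT is a rolling file appender that writes under ${LOG_HOME}, not to the console. -->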
    <appender name="STDOUT"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <!-- Log file name pattern -->
            <FileNamePattern>${LOG_HOME}/stdout.%d{yyyy-MM-dd}-%i.log</FileNamePattern>
            <!-- Days of rolled-over log files to keep -->
            <MaxHistory>30</MaxHistory>
            <maxFileSize>500MB</maxFileSize>
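            <!-- Not set here: <totalSizeCap> could additionally bound the total size of all archived files. -->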
        </rollingPolicy>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <ImmediateFlush>true</ImmediateFlush>
            <!-- Formatted output: %d is the date, %thread the thread name, %-5level pads the level to 5 characters, %msg the log message, %n a newline -->
            <!--<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %-5level [%thread] %logger{50} - %msg%n</pattern> -->
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %-5level [${hostname}] [%thread] %logger{50} %L %X -%msg%n</pattern>
        </encoder>
    </appender>

    <!-- APPLICATION-APPENDER: log file for the current application -->
<!--    <appender name="APPLICATION-APPENDER"
              class="ch.qos.logback.core.rolling.RollingFileAppender">
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <FileNamePattern>${LOG_HOME}/application.%d{yyyy-MM-dd}-%i.log</FileNamePattern>
            <MaxHistory>30</MaxHistory>
            <maxFileSize>500MB</maxFileSize>
        </rollingPolicy>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <ImmediateFlush>true</ImmediateFlush>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %-5level [%thread] %logger{50} %L %X -%msg%n</pattern>
        </encoder>
    </appender>-->

    <appender name="KAFKA-APPENDER" class="com.github.danielwegener.logback.kafka.KafkaAppender">
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %-5level [${hostname}] [%thread] %logger{50} %L %X -%msg%n</pattern>
        </encoder>
        <topic>${PROJECT_NAME}.log_${PROFILE_ACTIVE}</topic>
        <!-- we don't care how the log messages will be partitioned  -->
        <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.HostNameKeyingStrategy" />

        <!-- use async delivery. the application threads are not blocked by logging -->
        <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />

        <!-- each <producerConfig> translates to regular kafka-client config (format: key=value) -->
        <!-- producer configs are documented here: https://kafka.apache.org/documentation.html#newproducerconfigs -->
        <!-- bootstrap.servers is the only mandatory producerConfig -->
        <producerConfig>bootstrap.servers=${LOG_SERVERS}</producerConfig>
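        <!-- Note: LOG_SERVERS defaults to an empty string, so nothing can be delivered unless log.kafka.servers is configured. -->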
        <!-- don't wait for a broker to ack the reception of a batch.  -->
        <producerConfig>acks=0</producerConfig>
        <!-- wait up to 1000ms and collect log messages before sending them as a batch -->
        <producerConfig>linger.ms=1000</producerConfig>
        <!-- even if the producer buffer runs full, do not block the application but start to drop messages -->
        <producerConfig>max.block.ms=0</producerConfig>
        <!-- define a client-id that you use to identify yourself against the kafka broker -->
        <!--<producerConfig>client.id=${HOSTNAME}-${CONTEXT_NAME}-logback-relaxed</producerConfig>-->
    </appender>

    <appender name="AsyncKafkaAppender" class="ch.qos.logback.classic.AsyncAppender">
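        <!-- neverBlock=true drops events instead of blocking application threads when the queue is full;
             discardingThreshold=20 also discards TRACE/DEBUG/INFO events once fewer than 20 of the 2000 queue slots remain. -->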
        <discardingThreshold>20</discardingThreshold>
        <queueSize>2000</queueSize>
        <includeCallerData>false</includeCallerData>
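        <!-- With includeCallerData=false, location data is not captured, so %L in the Kafka patterns prints "?". -->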
        <neverBlock>true</neverBlock>
        <appender-ref ref="KAFKA-APPENDER"/>
    </appender>

    <!-- The access module records the container's access logs -->
    <appender name="KAFKA-ACCESS-APPENDER" class="com.github.danielwegener.logback.kafka.KafkaAppender">
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %-5level [${hostname}] [%thread] %logger{50} %L %X -%msg%n</pattern>
        </encoder>
        <topic>${PROJECT_NAME}.access_${PROFILE_ACTIVE}</topic>
        <!-- we don't care how the log messages will be partitioned  -->
        <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.HostNameKeyingStrategy" />

        <!-- use async delivery. the application threads are not blocked by logging -->
        <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />

        <!-- each <producerConfig> translates to regular kafka-client config (format: key=value) -->
        <!-- producer configs are documented here: https://kafka.apache.org/documentation.html#newproducerconfigs -->
        <!-- bootstrap.servers is the only mandatory producerConfig -->
        <producerConfig>bootstrap.servers=${LOG_SERVERS}</producerConfig>
        <!-- don't wait for a broker to ack the reception of a batch.  -->
        <producerConfig>acks=0</producerConfig>
        <!-- wait up to 1000ms and collect log messages before sending them as a batch -->
        <producerConfig>linger.ms=1000</producerConfig>
        <!-- even if the producer buffer runs full, do not block the application but start to drop messages -->
        <producerConfig>max.block.ms=0</producerConfig>
        <!-- define a client-id that you use to identify yourself against the kafka broker -->
        <!--<producerConfig>client.id=${HOSTNAME}-${CONTEXT_NAME}-logback-relaxed</producerConfig>-->
    </appender>

    <appender name="AsyncKafkaAccessAppender" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>20</discardingThreshold>
        <queueSize>2000</queueSize>
        <includeCallerData>false</includeCallerData>
        <neverBlock>true</neverBlock>
        <appender-ref ref="KAFKA-ACCESS-APPENDER"/>
    </appender>

    <!-- Logger for the com.test package; additivity="false" means its output is not also propagated to the root logger -->
    <logger name="com.test" level="${ACCESS_LEVEL}" additivity="false">
        <appender-ref ref="${ACCESS_APPENDER}"/>
    </logger>

    <!-- Root log level -->
    <root level="${ROOT_LEVEL}">
        <appender-ref ref="CONSOLE"/>
        <appender-ref ref="${ROOT_APPENDER}"/>
    </root>
</configuration>
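
The <define> element at the top of the file points at com.test.core.log.property.CanonicalHostNamePropertyDefiner, a custom class the original post does not show. Here is a minimal sketch of what such a PropertyDefiner might look like, assuming it simply resolves the machine's canonical host name (illustrative, not the author's actual implementation):

package com.test.core.log.property;

import java.net.InetAddress;
import java.net.UnknownHostException;

import ch.qos.logback.core.PropertyDefinerBase;

// Supplies the value of ${hostname} used in the log patterns above.
// Extending PropertyDefinerBase only requires implementing getPropertyValue().
public class CanonicalHostNamePropertyDefiner extends PropertyDefinerBase {

    @Override
    public String getPropertyValue() {
        try {
            return InetAddress.getLocalHost().getCanonicalHostName();
        } catch (UnknownHostException e) {
            // Fall back to a placeholder rather than breaking logger initialization.
            return "unknown-host";
        }
    }
}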
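
The KafkaAppender used above comes from the third-party logback-kafka-appender project (com.github.danielwegener), not from logback itself, so the build needs that dependency. Assuming a Maven build, the declaration would look like this (the version shown is an assumption; check Maven Central for the current one):

<dependency>
    <groupId>com.github.danielwegener</groupId>
    <artifactId>logback-kafka-appender</artifactId>
    <version>0.2.0-RC2</version>
</dependency>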
