
logback-spring: sending log output to Kafka

晏晨朗
2023-12-01

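The configuration below relies on the logback-kafka-appender library (com.github.danielwegener) for the KafkaAppender, on Janino for logback's <if> conditions, and on the legacy Raven client for the Sentry appender. A minimal Maven dependency sketch — the version numbers here are illustrative, adjust them to your build:

<dependency>
    <groupId>com.github.danielwegener</groupId>
    <artifactId>logback-kafka-appender</artifactId>
    <version>0.2.0-RC2</version>
</dependency>
<!-- Janino enables logback's <if>/<then> conditional processing -->
<dependency>
    <groupId>org.codehaus.janino</groupId>
    <artifactId>janino</artifactId>
    <version>3.1.9</version>
</dependency>
<!-- Legacy Raven appender matching com.getsentry.raven.logback.SentryAppender -->
<dependency>
    <groupId>com.getsentry.raven</groupId>
    <artifactId>raven-logback</artifactId>
    <version>8.0.3</version>
</dependency>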
logback-spring.xml

<?xml version="1.0" encoding="UTF-8"?>
<!-- 从高到地低 OFF 、 FATAL 、 ERROR 、 WARN 、 INFO 、 DEBUG 、 TRACE 、 ALL -->
<!-- 日志输出规则 根据当前ROOT 级别,日志输出时,级别高于root默认的级别时 会输出 -->
<!-- 以下 每个配置的 filter 是过滤掉输出文件里面,会出现高级别文件,依然出现低级别的日志信息,通过filter 过滤只记录本级别的日志 -->
<!-- 属性描述 scan:性设置为true时,配置文件如果发生改变,将会被重新加载,默认值为true scanPeriod:设置监测配置文件是否有修改的时间间隔,如果没有给出时间单位,默认单位是毫秒。当scan为true时,此属性生效。默认的时间间隔为1分钟。
	debug:当此属性设置为true时,将打印出logback内部日志信息,实时查看logback运行状态。默认值为false。 -->
<configuration scan="true" scanPeriod="60 seconds" debug="false">
    <!-- 定义日志文件 输入位置 -->
    <!-- 日志最大的历史 15天 -->
    <property name="maxHistory" value="30"/>
    <springProperty scope="context" name="log.path" source="logging.path" defaultValue="./logs"/>
    <springProperty scope="context" name="maxFileSize" source="logging.file.max-size" defaultValue="50MB"/>
    <springProperty scope="context" name="sentryEnable" source="sentry.enable" defaultValue="false"/>
    <springProperty scope="context" name="sentryDsn" source="sentry.dsn" defaultValue="" />
    <springProperty scope="context" name="applicationName" source="spring.application.name"/>
    <springProperty scope="context" name="serverIp" source="spring.cloud.client.ip-address"/>
    <springProperty scope="context" name="env" source="spring.profiles.active" defaultValue="local"/>
    <springProperty scope="context" name="elkEnable" source="elk.enable" defaultValue="false" />
    <springProperty scope="context" name="elkServers" source="elk.kafka.servers" defaultValue="localhost:9092" />
    <!--    <springProperty scope="context" name="elkHost" source="elk.kafka.host" defaultValue="localhost" />-->
    <!--    <springProperty scope="context" name="elkPort" source="elk.kafka.port" defaultValue="19092" />-->
    <springProperty scope="context" name="elkTopic" source="elk.kafka.topic" defaultValue="logback" />
    <contextName>${applicationName}</contextName>
    <property name="pattern"
              value='%d{yyyy-MM-dd HH:mm:ss.SSS} [${env}] [${serverIp}] [%thread] {"SERVICE":"${applicationName}","X-B3-SpanId":"%X{X-B3-SpanId}","X-B3-TraceId":"%X{X-B3-TraceId}","X-B3-ParentSpanId":"%X{X-B3-ParentSpanId}"} [%level] %logger{30}.%method:%line - %msg%n'/>

    <!-- ConsoleAppender 控制台输出日志 -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <!-- 对日志进行格式化 -->
        <encoder>
            <pattern>${pattern}</pattern>
        </encoder>
    </appender>

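    <!-- NOTE: the <if> conditions below are evaluated with Janino; the janino dependency must be on the classpath -->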
    <if condition='property("sentryEnable").contains("true")'>
        <then>
            <appender name="Sentry" class="com.getsentry.raven.logback.SentryAppender">
                <dsn>${sentryDsn}</dsn>
                <!-- 过滤器,只记录ERROR级别以上的日志 -->
                <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
                    <level>WARN</level>
                </filter>
            </appender>
        </then>
    </if>

    <if condition='property("elkEnable").contains("true")'>
        <then>
            <appender name="kafkaAppender" class="com.github.danielwegener.logback.kafka.KafkaAppender">
                <encoder>
                    <pattern>${pattern}</pattern>
                </encoder>
                <topic>${elkTopic}</topic>


                <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy" />
                <!--
                <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.ContextNameKeyingStrategy"/>
                -->
                <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />

                <!-- Optional parameter to use a fixed partition -->
                <!-- <partition>0</partition> -->

                <!-- Optional parameter to include log timestamps into the kafka message -->
                <!-- <appendTimestamp>true</appendTimestamp> -->

                <!-- each <producerConfig> translates to regular kafka-client config (format: key=value) -->
                <!-- producer configs are documented here: https://kafka.apache.org/documentation.html#newproducerconfigs -->
                <!-- bootstrap.servers is the only mandatory producerConfig -->
                <!--                <producerConfig>bootstrap.servers=${elkHost}:${elkPort}</producerConfig>-->
                <producerConfig>bootstrap.servers=${elkServers}</producerConfig>
                <!-- this is the fallback appender if kafka is not available. -->
                <appender-ref ref="STDOUT" />
            </appender>

            <appender name="kafkaAsync" class="ch.qos.logback.classic.AsyncAppender">
                <!-- 不丢失日志.默认的,如果队列的80%已满,则会丢弃TRACT、DEBUG、INFO级别的日志 -->
                <discardingThreshold>0</discardingThreshold>
                <!-- 更改默认的队列的深度,该值会影响性能.默认值为256 -->
                <queueSize>65536</queueSize>
                <!-- 新增这行为了打印栈堆信息 -->
                <includeCallerData>true</includeCallerData>
                <neverBlock>true</neverBlock>
                <appender-ref ref="kafkaAppender" />
            </appender>
        </then>
    </if>

    <logger name="com" level="INFO" additivity="false">
        <if condition='property("sentryEnable").contains("true")'>
            <then>
                <appender-ref ref="Sentry"/>
            </then>
        </if>
        <if condition='property("elkEnable").contains("true")'>
            <then>
                <appender-ref ref="kafkaAsync" />
            </then>
            <!--            <then>-->
            <!--                <appender-ref ref="kafkaAppender"/>-->
            <!--            </then>-->
        </if>
        <appender-ref ref="STDOUT"/>
    </logger>


    <logger name="org" level="INFO" additivity="false">
        <!--<appender-ref ref="ASYNC_INFO"/>
        <appender-ref ref="ERROR"/>-->
        <if condition='property("sentryEnable").contains("true")'>
            <then>
                <appender-ref ref="Sentry"/>
            </then>
        </if>
        <if condition='property("elkEnable").contains("true")'>
            <then>
                <appender-ref ref="kafkaAsync" />
            </then>
            <!--            <then>-->
            <!--                <appender-ref ref="kafkaAppender"/>-->
            <!--            </then>-->
        </if>
        <appender-ref ref="STDOUT"/>
    </logger>

    <logger name="MONITOR-LOGGER" level="ERROR" additivity="false">
        <!--<appender-ref ref="MONITOR-APPENDER"/>-->
        <if condition='property("sentryEnable").contains("true")'>
            <then>
                <appender-ref ref="Sentry"/>
            </then>
        </if>
        <if condition='property("elkEnable").contains("true")'>
            <then>
                <appender-ref ref="kafkaAsync" />
            </then>
            <!--            <then>-->
            <!--                <appender-ref ref="kafkaAppender"/>-->
            <!--            </then>-->
        </if>
    </logger>


    <!-- 统一日志输出 -->

    <appender name="InfoFile" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <append>true</append>
        <file>${log.path}/${applicationName}-info.log</file>

        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/%d{yyyy-MM-dd}/${applicationName}-info.%d{yyyy-MM-dd.HH}-%i.log.gz</fileNamePattern>
            <maxHistory>30</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${maxFileSize}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder>
            <pattern>${pattern}</pattern>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>DENY</onMatch>
            <onMismatch>ACCEPT</onMismatch>
        </filter>
    </appender>

    <appender name="ErrorFile" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <append>true</append>
        <file>${log.path}/${applicationName}-error.log</file>
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/%d{yyyy-MM-dd}/${applicationName}-error.%d{yyyy-MM-dd.HH}-%i.log.gz</fileNamePattern>
            <maxHistory>30</maxHistory>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>${maxFileSize}</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
        </rollingPolicy>
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>${pattern}</pattern>
        </encoder>
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>ERROR</level>
        </filter>
    </appender>

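    <!-- Async wrappers so that file I/O does not block application threads -->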
    <appender name="InfoFileAsync" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <queueSize>10000</queueSize>
        <appender-ref ref="InfoFile" />
    </appender>


    <appender name="ErrorFileAsync" class="ch.qos.logback.classic.AsyncAppender">
        <discardingThreshold>0</discardingThreshold>
        <queueSize>10000</queueSize>
        <appender-ref ref="ErrorFile" />
    </appender>



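    <!-- Attach the file appenders. These names repeat the <logger> declarations above; logback merges repeated
         declarations, so the level set here overrides the earlier one and these appender-refs are added to it. -->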
    <logger name="com" level="WARN">
        <appender-ref ref="InfoFileAsync"/>
        <appender-ref ref="ErrorFileAsync"/>
    </logger>
    <logger name="org" level="WARN">
        <appender-ref ref="InfoFileAsync"/>
        <appender-ref ref="ErrorFileAsync"/>
    </logger>
    <logger name="com.kunchi" level="INFO">
        <appender-ref ref="InfoFileAsync"/>
        <appender-ref ref="ErrorFileAsync"/>
    </logger>

    <!-- root级别 DEBUG -->
    <root level="DEBUG">
        <appender-ref ref="STDOUT"/>
        <if condition='property("sentryEnable").contains("true")'>
            <then>
                <appender-ref ref="Sentry"/>
            </then>
        </if>
        <if condition='property("elkEnable").contains("true")'>
            <then>
                <appender-ref ref="kafkaAsync" />
                <!--<appender-ref ref="kafkaAppender"/>-->
            </then>
        </if>
    </root>
</configuration>