
Distributed Log Monitoring Deployment for Spring Cloud

狄赞
2023-12-01

ZooKeeper Cluster

  • Environment variables
vim /etc/profile
JAVA_HOME=/usr/java/jdk1.8.0_161
JRE_HOME=/usr/java/jdk1.8.0_161/jre
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib
ZOOKEEPER_HOME=/data/elk/zookeeper-3.4.14
# KAFKA_HOME must point at the Kafka install directory, not the Kibana one;
# the version suffix below is an assumption -- adjust it to your download
KAFKA_HOME=/data/elk/kafka_2.12-2.3.0
PATH=$PATH:$ZOOKEEPER_HOME/bin:$JAVA_HOME/bin:$KAFKA_HOME/bin
export PATH JAVA_HOME ZOOKEEPER_HOME CLASSPATH KAFKA_HOME
source /etc/profile
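  • A quick sanity check that the variables took effect (paths as set above):
java -version                  # should report 1.8.0_161
echo $ZOOKEEPER_HOME $KAFKA_HOME
which zkServer.sh              # resolves through the updated PATH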
  • Configure /etc/hosts (identical on all three nodes)
vim /etc/hosts
10.20.74.78 server-1
10.20.74.77 server-2
10.20.74.76 server-3
  • Edit zoo.cfg in the ZooKeeper config directory
    /data/elk/zookeeper-3.4.14/conf (copy zoo_sample.cfg to zoo.cfg if it does not exist yet)
# heartbeat and quorum timing (the zoo_sample.cfg defaults)
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/data/elk/zookeeperData
dataLogDir=/data/elk/zookeeperLogData
clientPort=2181
# server.N=host:peerPort:electionPort -- N must match each node's myid
server.1=server-1:2888:3888
server.2=server-2:2888:3888
server.3=server-3:2888:3888
  • Create the dataDir folder under /data/elk
mkdir zookeeperData
  • In the zookeeperData directory, create a myid file whose content is this node's unique id, matching its server.N line
echo 1 > myid                  # use 2 on server-2 and 3 on server-3
  • Create the ZooKeeper log output folder
mkdir zookeeperLogData
  • Start ZooKeeper (on every node)
zkServer.sh start
  • If startup fails, the nodes may be unable to reach each other; check the firewall
 systemctl stop firewalld.service
 systemctl disable firewalld.service
 systemctl status firewalld.service
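  • Once all three nodes are up, verify the ensemble actually formed; a minimal check (the srvr four-letter command is enabled by default on 3.4.x):
zkServer.sh status             # one node reports Mode: leader, the others Mode: follower
echo srvr | nc server-1 2181   # dumps server stats over the client port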

Kafka Configuration

  • Create the kafkaLog directory under /data/elk
  • Edit config/server.properties
  • Set host.name to this machine's IP; broker.id must match the id in this node's myid file
# unique per broker: use 2 on server-2 and 3 on server-3, matching each node's myid
broker.id=1
host.name=10.20.74.78
log.dirs=/data/elk/kafkaLog
zookeeper.connect=server-1:2181,server-2:2181,server-3:2181
  • From Kafka's bin directory, start the broker as a daemon
./kafka-server-start.sh -daemon ../config/server.properties
  • Test that the Kafka install works (a list/describe check follows this list)
  • Create a topic
./kafka-topics.sh --create --zookeeper server-1:2181 --replication-factor 1 --partitions 1 --topic kafkademo
  • Produce a message
./kafka-console-producer.sh --broker-list server-2:9092 --topic kafkademo
  • Consume messages
./kafka-console-consumer.sh --bootstrap-server server-3:9092 --topic kafkademo --from-beginning
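  • If the produce/consume test fails, describing the topic shows partition leaders and replica placement; a quick check using the demo topic created above:
./kafka-topics.sh --list --zookeeper server-1:2181
./kafka-topics.sh --describe --zookeeper server-1:2181 --topic kafkademo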

Elasticsearch Configuration

  • Adjust system limits
vim /etc/security/limits.conf
  • Append the following (Elasticsearch 7 requires the soft nofile and nproc limits to be at least this high):
* soft nofile 65536
* hard nofile 65536
* soft nproc 4096
* hard nproc 4096
* soft memlock unlimited
* hard memlock unlimited
vim /etc/sysctl.conf
  • Append the following:
vm.max_map_count=655360
fs.file-max=655360
  • Run sysctl -p to apply the changes
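  • After logging in again, the new limits can be confirmed against the values configured above:
ulimit -n                      # expect 65536
ulimit -u                      # expect 4096
sysctl vm.max_map_count        # expect 655360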
  • Create the esdata and eslog directories to hold Elasticsearch data and logs
  • Create the el user (Elasticsearch will not run as root) and give it ownership
useradd el
passwd el
chown -R el:el /data/elk/esdata
chown -R el:el /data/elk/eslog
chown -R el:el /data/elk/elasticsearch-7.4.2/
  • Edit elasticsearch.yml under config
cluster.name: es-cluster

# use node-2 / node-3 on the other servers
node.name: node-1
node.master: true
node.data: true

path.data: /data/elk/esdata
path.logs: /data/elk/eslog

network.host: 0.0.0.0
http.port: 9200
transport.tcp.port: 9300

discovery.seed_hosts: ["10.20.74.78", "10.20.74.77", "10.20.74.76"]
cluster.initial_master_nodes: ["node-1"]

http.cors.enabled: true
http.cors.allow-origin: "*"
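  • Start Elasticsearch as the el user and check cluster health; a minimal sketch, assuming the directory layout above:
su - el
cd /data/elk/elasticsearch-7.4.2
./bin/elasticsearch -d         # -d daemonizes the process
curl http://10.20.74.78:9200/_cluster/health?pretty
  • Add -u elastic:changeme to the curl call if X-Pack security is enabled, as the Logstash and Kibana configs below assume; expect "status" : "green" once all three nodes have joined.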

Logstash Cluster Configuration

  • Create logstash.conf in the Logstash directory
input {
##  tcp {
##    port => 5000
##  }

  kafka {
    id => "my_plugin_id"
    bootstrap_servers => "10.20.74.76:9092,10.20.74.77:9092,10.20.74.78:9092"
    topics => ["cmaelk"]
    group_id => "cmaelk"
    auto_offset_reset => "latest"
  }
}

filter {
  grok {
    match => { "message" => "%{TIMESTAMP_ISO8601:logTime} %{GREEDYDATA:serviceName} %{GREEDYDATA:logThread} %{LOGLEVEL:logLevel} %{GREEDYDATA:loggerClass} - %{GREEDYDATA:logContent}" }
  }
}

output {
  elasticsearch {
    index => "cma-info-log"
    hosts => ["10.20.74.78:9200", "10.20.74.77:9200", "10.20.74.76:9200"]
    user => "elastic"
    password => "changeme"
  }
}
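  • The grok pattern above assumes log lines shaped by the Logback pattern defined in the application section below; an illustrative line it parses (field values are made up):
2023-12-01 10:15:30.123 web [http-nio-8080-exec-1] INFO  c.e.demo.UserController - user login ok
  • Here logTime captures the leading timestamp, serviceName is web, logThread is the bracketed thread name, and everything after the dash lands in logContent.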
  • Start Logstash in the background
 nohup ./bin/logstash -f logstash.conf &
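  • Once an application publishes to the cmaelk topic, the index should appear; a quick check using the credentials from the output block above:
curl -u elastic:changeme 'http://10.20.74.78:9200/_cat/indices/cma-info-log?v'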

Kibana Configuration

  • Edit kibana.yml under config
server.name: kibana
server.host: "0"
elasticsearch.hosts: [ "http://10.20.74.78:9200","http://10.20.74.77:9200","http://10.20.74.76:9200" ]
xpack.monitoring.ui.container.elasticsearch.enabled: true

## X-Pack security credentials
elasticsearch.username: elastic
elasticsearch.password: changeme
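  • Start Kibana in the background; a minimal sketch, assuming the install directory from the layout above:
cd /data/elk/kibana-7.4.2-linux-x86_64
nohup ./bin/kibana &           # run as a non-root user; Kibana serves on port 5601 by default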

Application Configuration

  • Ship application log output to Kafka
  • pom.xml
<dependency>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-web</artifactId>
    <exclusions>
        <exclusion>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-logging</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-api</artifactId>
    <version>1.7.7</version>
</dependency>
<dependency>
    <groupId>net.logstash.logback</groupId>
    <artifactId>logstash-logback-encoder</artifactId>
    <version>5.2</version>
</dependency>
<!-- logback -->
<dependency>
    <groupId>ch.qos.logback</groupId>
    <artifactId>logback-core</artifactId>
    <version>1.2.3</version>
</dependency>
<dependency>
    <groupId>ch.qos.logback</groupId>
    <artifactId>logback-classic</artifactId>
    <version>1.2.3</version>
</dependency>
<dependency>
    <groupId>ch.qos.logback</groupId>
    <artifactId>logback-access</artifactId>
    <version>1.2.3</version>
</dependency>
<!-- https://mvnrepository.com/artifact/commons-logging/commons-logging -->
<dependency>
    <groupId>commons-logging</groupId>
    <artifactId>commons-logging</artifactId>
    <version>1.2</version>
</dependency>
<dependency>
    <groupId>com.github.danielwegener</groupId>
    <artifactId>logback-kafka-appender</artifactId>
    <version>0.2.0-RC2</version>
</dependency>
  • logback-spring.xml

<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <property name="LOG_PREFIX" value="web" />

    <!-- console appender referenced by the root logger below -->
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} ${LOG_PREFIX} [%thread] %-5level %logger{50} - %msg%n</pattern>
        </encoder>
    </appender>

    <appender name="KAFKA" class="com.github.danielwegener.logback.kafka.KafkaAppender">
        <encoder>
            <!-- %d = date, %thread = thread name, %-5level = level padded to width 5,
                 %msg = log message, %n = newline; this is the layout the grok pattern expects -->
            <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} ${LOG_PREFIX} [%thread] %-5level %logger{50} - %msg%n</pattern>
        </encoder>
        <topic>cmaelk</topic>
        <keyingStrategy class="com.github.danielwegener.logback.kafka.keying.NoKeyKeyingStrategy" />
        <deliveryStrategy class="com.github.danielwegener.logback.kafka.delivery.AsynchronousDeliveryStrategy" />
        <producerConfig>bootstrap.servers=10.20.74.78:9092</producerConfig>
    </appender>

    <springProfile name="local">
        <logger name="org.springframework.web" level="ERROR"/>
        <root level="INFO">
            <appender-ref ref="CONSOLE"/>
            <appender-ref ref="KAFKA"/>
        </root>
    </springProfile>
</configuration>
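  • With the appender in place, the pipeline can be verified end to end by watching the topic while the application logs (consumer invocation as in the Kafka section; topic name from the appender above):
./kafka-console-consumer.sh --bootstrap-server server-1:9092 --topic cmaelk
  • Once lines flow through, the cma-info-log index becomes visible in Kibana under Index Patterns.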