
Setting up ELK + ElastAlert + Filebeat

1. Preparation

Before setting up ELK, there is some preparation to do.

As the official documentation explains (https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html), Elasticsearch uses mmapfs by default to store its indices. The operating system's default mmap count is too low and can cause out-of-memory exceptions, so raise the limit with the commands below. Note that the startup error "max file descriptors [65535] for elasticsearch process is too low, increase to at least [65536]" comes from a separate file-descriptor limit; see the note after the commands.
vi /etc/sysctl.conf  
vm.max_map_count=655360
sysctl -p
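
The file-descriptor error is addressed separately from vm.max_map_count. A minimal sketch of one way to fix it in this Docker-based setup (this block is not part of the original compose file, so treat it as an assumption): add a ulimits entry to the elasticsearch service in the docker-compose.yml created below.

ulimits:
  # Raise the open-file limit for the ES container (assumed addition, not in the original file)
  nofile:
    soft: 65536
    hard: 65536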

2. Create the directories

Create the directories:

mkdir -p /app/elk/elasticsearch/data/ /app/elk/kibana/ /app/elk/logstash/pipeline/ /app/elk/elastalert/rules/ /app/elk/elastalert/elastalert_modules/ /app/elk/filebeat/log

Grant permissions:

chmod 777 /app/elk/elasticsearch/data

Create the configuration files:

touch /app/elk/docker-compose.yml /app/elk/elasticsearch/elasticsearch.yml /app/elk/kibana/kibana.yml /app/elk/logstash/pipeline/logstash.conf /app/elk/logstash/logstash.yml /app/elk/elastalert/rules/sample_rule.yaml /app/elk/filebeat/filebeat.yml

3. Configuration files

elasticsearch.yml

#elasticsearch.yml
cluster.name: "docker-cluster"
network.host: 0.0.0.0
#xpack.license.self_generated.type: trial
#xpack.security.enabled: true
#xpack.monitoring.collection.enabled: true
# Allow cross-origin (remote) access
http.cors.enabled: true
http.cors.allow-origin: "*"
http.cors.allow-headers: Authorization,X-Requested-With,Content-Type,Content-Length

kibana.yml

#kibana.yml
## Default Kibana configuration from Kibana base image.
### https://github.com/elastic/kibana/blob/master/src/dev/build/tasks/os_packages/docker_generator/templates/kibana_yml.template.js
server.name: kibana
server.host: 0.0.0.0
## Use the Chinese locale for the Kibana UI
i18n.locale: "zh-CN"
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
xpack.monitoring.ui.container.elasticsearch.enabled: true
#
### X-Pack security credentials
##
#elasticsearch.username: elastic
#elasticsearch.password: changeme

logstash.conf

input {
  tcp {
    type => "springboot"
    mode => "server"
    host => "0.0.0.0"
    port => 5000
    codec => json_lines
  }
  beats {
    type => "filebeat"
    port => 5044
  }
}
output {
  if [type] == "springboot" {
    elasticsearch {
      hosts => "elasticsearch:9200"
      index => "log-springboot-%{+YYYY.MM.dd}"
    }
  }
  else if [type] == "filebeat" {
    elasticsearch {
      hosts => "elasticsearch:9200"
      index => "log-filebeat-%{+YYYY.MM.dd}"
    }
  }
}
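
The tcp input above accepts newline-delimited JSON (the json_lines codec), so you can verify it without a Spring Boot app. A minimal sketch in Python, assuming Logstash is reachable on the host IP 192.168.100.3 used elsewhere in this guide:

import json
import socket

# One JSON object per line, matching the json_lines codec of the tcp input.
event = {"logLevel": "ERROR", "serviceName": "system-user", "message": "manual test event"}

with socket.create_connection(("192.168.100.3", 5000)) as sock:
    sock.sendall((json.dumps(event) + "\n").encode("utf-8"))

If everything is wired up, the event lands in the log-springboot-* index.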

logstash.yml

### https://github.com/elastic/logstash/blob/master/docker/data/logstash/config/logstash-full.yml
http.host: "0.0.0.0"
# Use the IP of your elasticsearch host here
xpack.monitoring.elasticsearch.hosts: [ "http://192.168.100.3:9200" ]
## X-Pack security credentials
xpack.monitoring.enabled: true
#xpack.monitoring.elasticsearch.username: elastic
#xpack.monitoring.elasticsearch.password: changeme

sample_rule.yaml

# A unique name for this rule; ElastAlert will not start if two rules share the same name.
name: "frequency-test"

# The rule type. ElastAlert ships with eleven rule types; Frequency is used here as an example.
type: frequency

# The Elasticsearch index to query; wildcards are supported.
index: log-*

# Trigger an alert when at least 1 matching event occurs...
num_events: 1

# ...within the 1-hour window below.
timeframe:
  hours: 1

# Do not re-send the same alert within 5 minutes.
#realert:
#  minutes: 5

# List of Elasticsearch filters: match documents whose logLevel is ERROR
# or whose message contains "ERROR".
filter:
# - term:
#     response_code.keyword: 200
# - query:
#     "match": {
#       "message.keyword": {
#         "query": "decrypted data: null"
#       }
#     }
# Standard ES query DSL: use query_string for exact matching, wildcard for fuzzy
# matching; multiple clauses can be combined.
  - query:
      "bool": {
        "should": [
          {
            "query_string": {
              "default_field": "logLevel",
              "query": "ERROR"
            }
          },
          {
            "wildcard": {
              "message.keyword": "*ERROR*"
            }
          }
        ]
      }

# Alert methods
alert:
# - "email"
# - post
  - command

command: ["python", "send.py", "%(message)s"]

 
# HTTP POST alert configuration: on a match, a request is sent with parameter
# channel set to "test" and content set to the matched document's message.

#http_post_url: "http://192.168.10.101/elk/test"
#http_post_static_payload:
#  channel: test
#http_post_payload:
#  content: message
#http_post_static_payload:
#  rule_name: "elastalert test"
#  rule_message: "test"
# Fields to include in the alert; if omitted, all fields are returned.
#http_post_payload:
#  cluster: cluster
#  user: test
#  index: _index

# Email alert configuration
#smtp_host: smtp.qq.com
#smtp_port: 587
#smtp_auth_file: "/opt/config/smtp_auth_file.yaml"
#email_reply_to: "**********@qq.com"
#from_addr: "**********@qq.com"
# List of recipient addresses
#email:
# - "***@qq.com"

filebeat.yml

filebeat.inputs:
- type: log
  enabled: true
  paths:
    # Path inside the container; no need to change it, just mount your logs to this path
    - /var/log/*.log

  fields:
    # Extra fields added to every event
    project-name: test

  # Lines to exclude or include (regular expressions)
  #exclude_lines: ['INFO']
  #include_lines: ['ERROR']

  # Fold multi-line output such as stack traces into a single log event; adjust the pattern to your log format
  multiline.pattern: '^\['
  multiline.negate: true
  multiline.match: after

  ignore_older: 168h

  tail_files: true

output.logstash:
  hosts: ["192.168.100.3:5044"]

docker-compose.yml

version: "3"
services:
  ### Elasticsearch
  elasticsearch:
    image: elasticsearch:6.8.15
    container_name: elasticsearch 
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      discovery.type: single-node
      ## ES password (only used if X-Pack security is enabled)
      ELASTIC_PASSWORD: changeme
      # Cap the JVM heap; this is important, without it ES may fail to start
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
      TZ: "Asia/Shanghai"
    volumes:
       # Note: to persist ES data on the host, /app/elk/elasticsearch/data must have 777 permissions
      - /app/elk/elasticsearch/data:/usr/share/elasticsearch/data
      - /app/elk/elasticsearch/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    #network_mode: host
    # Without this, the container may lack permission to read mounted files; runs as root
    privileged: true
  ### Logstash
  logstash:
    image: logstash:6.8.15
    container_name: logstash
    ports:
      - "5000:5000/tcp"
      - "5000:5000/udp"
      - "9600:9600"
      - "5044:5044"
    #network_mode: host
    environment:
      # LS_JAVA_OPTS is the JVM options variable recognized by the Logstash image
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
      TZ: "Asia/Shanghai"
    volumes:
    ### Map the host directory /app/elk/logstash/pipeline into the container
      - /app/elk/logstash/pipeline:/usr/share/logstash/pipeline
      - /app/elk/logstash/logstash.yml:/usr/share/logstash/config/logstash.yml
      
    depends_on:
      - elasticsearch  
    privileged: true     
  ### Kibana (memory use is about 1.4 GB on 64-bit systems, about 0.7 GB on 32-bit)
  kibana:
    image: kibana:6.8.15
    container_name: kibana
    ports:
      - "5601:5601"
    volumes:
      ### Map the host file /app/elk/kibana/kibana.yml into the container
      - /app/elk/kibana/kibana.yml:/usr/share/kibana/config/kibana.yml
    environment:
      - ELASTICSEARCH_URL=http://elasticsearch:9200
      - TZ=Asia/Shanghai
    #network_mode: host
    depends_on:
      - elasticsearch
    privileged: true
  elastalert:
    image: anjia0532/elastalert-docker
    container_name: elastalert
    volumes:
      ### Map the ElastAlert rules, modules, and alert script into the container
      - /app/elk/elastalert/rules:/opt/elastalert/rules
      - /app/elk/elastalert/elastalert_modules:/opt/elastalert/elastalert_modules
      - /app/elk/elastalert/bin/send.py:/opt/elastalert/send.py
    environment:
      - ELASTICSEARCH_HOST=192.168.100.3
      - ELASTICSEARCH_PORT=9200 
      - CONTAINER_TIMEZONE=Asia/Shanghai  
      - SET_CONTAINER_TIMEZONE=True
      - TZ=Asia/Shanghai 
      - ELASTALERT_BUFFER_TIME=10  
      - ELASTALERT_RUN_EVERY=1  
      - ELASTICSEARCH_USER=elastic
      - ELASTICSEARCH_PASSWORD=changeme
    #network_mode: host
    depends_on:
      - elasticsearch
    privileged: true

  filebeat:
    image: elastic/filebeat:6.8.15
    container_name: filebeat
    volumes:
      ### Map the host log directory and filebeat.yml into the container
      - /app/elk/filebeat/log:/var/log/:ro
      - /app/elk/filebeat/filebeat.yml:/usr/share/filebeat/filebeat.yml
    depends_on:
      - elasticsearch
    environment:
      - TZ=Asia/Shanghai
    privileged: true

4. Startup

cd /app/elk/
docker-compose up -d
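
Once the containers are up, you can confirm that Elasticsearch is answering before moving on. A minimal sketch in Python, assuming it runs on the Docker host with X-Pack security left disabled as in the configs above:

import json
import urllib.request

# Query the cluster health endpoint exposed on host port 9200.
with urllib.request.urlopen("http://localhost:9200/_cluster/health") as resp:
    health = json.load(resp)

# "yellow" is normal for a single-node cluster; "red" means something is wrong.
print(health["status"])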

5. Sending logs from Spring Boot

Add the Maven dependency:

<dependency>
	<groupId>net.logstash.logback</groupId>
	<artifactId>logstash-logback-encoder</artifactId>
	<version>6.4</version>
</dependency>

Configure logback.xml:

<?xml version="1.0" encoding="UTF-8"?>
<configuration>
    <include resource="org/springframework/boot/logging/logback/base.xml" />
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <encoder>
            <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
        </encoder>
    </appender>
    <appender name="LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <!-- Logstash server address -->
        <destination>192.168.100.3:5000</destination>
        <!-- Log output encoding -->
        <encoder charset="UTF-8"
                 class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <timestamp>
                    <timeZone>UTC</timeZone>
                </timestamp>
                <pattern>
                    <pattern>
                        {
                        "logLevel": "%level",
                        "serviceName": "system-user",
                        "pid": "${PID:-}",
                        "thread": "%thread",
                        "class": "%logger{40}",
                        "message": "%message"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
    </appender>

    <root level="INFO">
        <appender-ref ref="LOGSTASH" />
        <appender-ref ref="CONSOLE" />
    </root>
</configuration>

Sending test logs

Spring Boot mode

Use the @Slf4j annotation on the class, then log an error:
log.error("test exception!!!");

Filebeat mode

Append log lines to /app/elk/filebeat/log/test-log.log.
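
A minimal sketch of appending a test event in Python, assuming the multiline settings from filebeat.yml above (every event starts with a bracketed timestamp so that continuation lines are folded into it):

from datetime import datetime

# The first line matches multiline.pattern '^\['; the indented line is treated as a continuation.
entry = (
    f"[{datetime.now():%Y-%m-%d %H:%M:%S}] ERROR test exception\n"
    "    at com.example.Demo.run(Demo.java:42)\n"
)

with open("/app/elk/filebeat/log/test-log.log", "a", encoding="utf-8") as f:
    f.write(entry)

com.example.Demo is a made-up class name purely for illustration.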

6. Accessing the UI

Kibana URL

Use the IP of the host where you deployed:
http://192.168.100.3:5601/

Credentials (only needed if X-Pack security is enabled)

elastic / changeme

7. Kibana operations

Add an index pattern. (screenshot omitted)

View the logs. (screenshot omitted)

8. ElastAlert usage notes

When a document in ES matches a rule's filter, the configured alert fires: with the HTTP POST settings it calls the endpoint in http_post_url, and with the command alert it runs send.py. (screenshot omitted)

Attachment: Aliyun Drive (soft)