Logstash: watch multiple input files, route each to a different output based on its type, and filter the text with the grok plugin
# Logstash configuration: multiple file inputs -> grok filter -> per-type outputs
# (RabbitMQ for types A and B, an HTTP endpoint for type C, plus stdout for debugging).
input {
  file {
    path => ["d:/2/*.txt"]
    start_position => "beginning"
    type => "A"            # type is used by the conditionals in the output section
  }
  file {
    path => ["d:/3/*.txt"]
    start_position => "beginning"
    type => "B"
  }
  file {
    path => ["d:/1/*/*.txt"]
    start_position => "beginning"
    type => "C"
  }
}
filter {
  grok {
    # extract the IP address from each line into the "client" field
    match => { "message" => "%{IP:client}" }
  }
}
output {
  # type "A" events go to RabbitMQ
  if [type] == "A" {
    rabbitmq {
      id => "response_id"
      exchange => "a-scan-exchange"
      key => "a-scan-file"
      exchange_type => "direct"
      host => "localhost"
      port => 5672
      user => "user"
      password => "123456"
      message_properties => {
        "content_type" => "application/json"
        "priority" => 1
      }
    }
  }
  # type "B" events go to a second RabbitMQ exchange
  if [type] == "B" {
    rabbitmq {
      id => "nmap_id"
      exchange => "b-scan-exchange"
      key => "b-scan-file"
      exchange_type => "direct"
      host => "localhost"
      port => 5672
      user => "user"
      password => "123456"
      message_properties => {
        "content_type" => "application/json"
        "priority" => 1
      }
    }
  }
  # type "C" events are posted to an HTTP endpoint
  if [type] == "C" {
    http {
      http_method => "post"
      url => "http://localhost:8765/scan/file"
      format => "json_batch"
      mapping => {
        "message" => "%{message}"
      }
    }
  }
  # print every event to the console for debugging
  stdout {
    codec => rubydebug { }
  }
}
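
Note that a line containing no IP address is not discarded by grok; the event is only tagged with "_grokparsefailure" and still reaches the outputs. If only lines with an IP are of interest, a minimal sketch of a filter variant that drops the rest (an optional change, not part of the config above):

filter {
  grok {
    match => { "message" => "%{IP:client}" }
  }
  # events whose message contained no IP carry the default failure tag
  if "_grokparsefailure" in [tags] {
    drop { }
  }
}

The failure tag itself can be customized with grok's tag_on_failure option if a different marker is preferred.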
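Also keep in mind that the file input records its read position in a sincedb file, so start_position => "beginning" only applies to files Logstash has never seen before. When testing on Windows it can help to pin the sincedb location explicitly; a sketch for the first input, where the sincedb path is only an example:

input {
  file {
    path => ["d:/2/*.txt"]
    start_position => "beginning"
    sincedb_path => "d:/logstash/sincedb-a"   # example location, adjust as needed
    type => "A"
  }
}

Before starting the pipeline, the configuration can be syntax-checked with bin/logstash -f <config file> --config.test_and_exit.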