We have an ELK stack set up on an EC2 instance that stopped working a month ago and I just got it working again (the problem was credentials with the CloudWatch Logstash plugin).
What is strange is that it seems to be ingesting logs from over two years ago. I'm not very familiar with the ELK stack (I just inherited this old app) — is this normal behavior? It will take days to catch up to the present time at this rate. None of the old data is being output to Elasticsearch, which I suppose makes sense, but I don't understand HOW.
Here is the conf file for the logstash cloudwatch plugin:
input {
  # Pull log events from the listed CloudWatch Logs groups.
  cloudwatch_logs {
    # Values must be quoted strings — bare words (access_here) are a
    # configuration parse error in Logstash.
    access_key_id => "access_here"
    secret_access_key => "secret_here"
    log_group => [ "xwingui-Prod", "xwingui-Dev", "xwingui-Exp", "xwingui-Staging", "xwingui-Test", "xwingui-Jawn" ]
    region => "us-east-2"
    # Persists the last-read position per log stream so restarts resume
    # rather than re-read. NOTE(review): the plugin's start_position option
    # defaults to "beginning", so with a fresh/empty sincedb it ingests the
    # entire retained CloudWatch history (this is why two-year-old events
    # appear). Set start_position => "end" before first run — or seed the
    # sincedb — if only new events are wanted; confirm against the plugin docs.
    sincedb_path => "/var/lib/.sincedb"
  }
}
filter {
  # Only messages emitted by the app's monitoring components are structured;
  # everything else passes through unchanged.
  if "Monitoring - " in [message] {
    # Each branch extracts the JSON payload that follows the
    # "<Type> Monitoring - " prefix and records which component emitted it.
    # Branch order matters: the substring checks are unanchored, so a message
    # containing e.g. both "API" and "Database" is classified by the first hit.
    if "API" in [message] {
      grok {
        match => { "message" => "API Monitoring - %{GREEDYDATA:json}" }
      }
      mutate {
        add_field => { "monitorType" => "API" }
      }
    } else if "RUM" in [message] {
      grok {
        match => { "message" => "RUM Monitoring - %{GREEDYDATA:json}" }
      }
      mutate {
        add_field => { "monitorType" => "RUM" }
      }
    } else if "PikaWorker" in [message] {
      grok {
        match => { "message" => "PikaWorker Monitoring - %{GREEDYDATA:json}" }
      }
      mutate {
        add_field => { "monitorType" => "PikaWorker" }
      }
    } else if "DataAgent" in [message] {
      grok {
        match => { "message" => "DataAgent Monitoring - %{GREEDYDATA:json}" }
      }
      mutate {
        add_field => { "monitorType" => "DataAgent" }
      }
    } else if "Database" in [message] {
      grok {
        match => { "message" => "Database Monitoring - %{GREEDYDATA:json}" }
      }
      mutate {
        add_field => { "monitorType" => "Database" }
      }
    }
    # Parse the extracted payload into top-level event fields and drop the
    # raw message. If no branch above matched, the "json" field is absent and
    # this filter is skipped (with a warning in the Logstash log).
    json {
      source => "json"
      remove_field => "message"
    }
    mutate {
      # BUGFIX: the bare word `True` is not valid Logstash config syntax —
      # add_field values must be quoted strings.
      add_field => { "isMonitor" => "true" }
    }
  }
}
output {
  # Ship events to the local Elasticsearch node. No index option is set, so
  # the plugin writes to its default daily "logstash-*" index pattern.
  elasticsearch {
    hosts => [ "localhost:9200" ]
    # Values must be quoted strings — bare words (user_here) are a
    # configuration parse error in Logstash.
    user => "user_here"
    password => "pwd_here"
  }
  # Mirror every event to stdout as JSON for debugging; remove once the
  # pipeline is verified, as it doubles the serialization work per event.
  stdout {
    codec => json
  }
}