Mirror of https://github.com/Lanakod-Networks/rsyslog_docker.git
Commit 375312784b: first commit
.env (new file, 41 lines)
@@ -0,0 +1,41 @@
# Project namespace (defaults to the current folder name if not set)
#COMPOSE_PROJECT_NAME=myproject

# Password for the 'elastic' user (at least 6 characters)
ELASTIC_PASSWORD=changeme

# Password for the 'kibana_system' user (at least 6 characters)
KIBANA_PASSWORD=changeme

# Version of Elastic products
STACK_VERSION=8.7.1

# Set the cluster name
CLUSTER_NAME=rsyslog-cluster

# Set to 'basic' or 'trial' to automatically start the 30-day trial
LICENSE=basic
#LICENSE=trial

# Port to expose Elasticsearch HTTP API to the host
ES_PORT=9200

# Port to expose Kibana to the host
KIBANA_PORT=5601

# Increase or decrease based on the available host memory (in bytes)
ES_MEM_LIMIT=1073741824
KB_MEM_LIMIT=1073741824
LS_MEM_LIMIT=1073741824

# SAMPLE Predefined Key only to be used in POC environments
ENCRYPTION_KEY=01J7X8AH4HFW7AX79B1WA311Y3172638
.gitignore (new file, vendored, 74 lines)
@@ -0,0 +1,74 @@
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839

# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf

# Generated files
.idea/**/contentModel.xml

# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml

# Gradle
.idea/**/gradle.xml
.idea/**/libraries

# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr

# CMake
cmake-build-*/

# Mongo Explorer plugin
.idea/**/mongoSettings.xml

# File-based project format
*.iws

# IntelliJ
out/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Cursive Clojure plugin
.idea/replstate.xml

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties

# Editor-based Rest Client
.idea/httpRequests

# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser

# Project logs
logs/
README.md (new file, 278 lines)
@@ -0,0 +1,278 @@
# Recipe for rsyslog, Kafka, and ELK

## Intro.

The stack is popular, but I have not found any suitable articles about connecting Kafka to it. You can find something about Kafka here and something about Logstash there, but not all together.

In this article, we will make a docker-compose file that launches the entire system, and we will build an image that simulates an application producing logs. We will also look at how you can check each component separately.

I don't want to give a detailed description of each application. This is just a starting point for you to learn rsyslog and ELK.

Don't forget: when adding a new service, you need to rebuild the docker-compose images:

```docker-compose build```

GitHub with the full project: https://github.com/ArtemMe/rsyslog_kafka_elk

I split the project into releases (tags). Every release adds a new service such as rsyslog or Kibana.

## Rsyslog (tag 0.1)

We will need two configs: `/etc/rsyslog.conf` with the basic settings, and a second, optional one, `/etc/rsyslog.d/kafka-sender.conf`, with the settings for our needs.

We will not delve into the rsyslog settings because you can dig into them for a long time. Just remember the basic constructs: module, template, action.

Let's take a look at an example of the file `/etc/rsyslog.d/kafka-sender.conf`:

```
# load the module used for sending messages to Kafka
module(load="omkafka")

# Declare a template for the log with the name "json_lines":
template(name="json_lines" type="list" option.json="on") {
  constant(value="{")
  constant(value="\"timestamp\":\"") property(name="timereported" dateFormat="rfc3339")
  constant(value="\",\"message\":\"") property(name="msg")
  constant(value="\",\"host\":\"") property(name="hostname")
  constant(value="\",\"severity\":\"") property(name="syslogseverity-text")
  constant(value="\",\"facility\":\"") property(name="syslogfacility-text")
  constant(value="\",\"syslog-tag\":\"") property(name="syslogtag")
  constant(value="\"}")
}

# Declare an action that sends messages to the Kafka broker, into test_topic_1.
# Note how we use the json_lines template and the omkafka module.
action(
  broker=["host.docker.internal:9092"]
  type="omkafka"
  template="json_lines"
  topic="test_topic_1"
  action.resumeRetryCount="-1"
  action.reportsuspension="on"
)
```
Remember the topic name: test_topic_1

You can find the full list of property names here: https://www.rsyslog.com/doc/master/configuration/properties.html
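
The template is easy to extend with more of those properties. For example, a sketch of adding the program name as an extra JSON field (this mirrors the `json-template` that ships later in this repo):

```
# hypothetical extra field inside the json_lines template
constant(value="\",\"programname\":\"") property(name="programname")
```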

Also note that the main file `/etc/rsyslog.conf` contains a line like `$IncludeConfig /etc/rsyslog.d/*.conf`.
This is a directive that tells rsyslog where else to read settings from. It is useful for separating common settings from specific ones.
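
On current rsyslog versions the same include can also be written in the modern config syntax; a minimal sketch (equivalent to the legacy directive, not a line from this repo):

```
# modern-style include, available since rsyslog 8.33
include(file="/etc/rsyslog.d/*.conf" mode="optional")
```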

### Create an image for generating logs

The image will essentially just start rsyslog. Later, we will be able to enter this container and generate logs.

You can find the Dockerfile in the `/rsyslog` folder. Let's look at the chunk of that file where the first two lines copy our configs, and the last line declares a volume for the logs that will be generated:

```
COPY rsyslog.conf /etc/
COPY rsyslog.d/*.conf /etc/rsyslog.d/

VOLUME ["/var/log"]
```

Build it:
```
docker build . -t rsyslog_kafka
```
Launch the container to check the image:
```
docker run rsyslog_kafka
```

To check that we are writing logs, generate a test message with the `logger` utility:
```
docker run --rm --network=rsyslog_kafka_elk_elk rsyslog_kafka bash -c 'logger -p daemon.debug "This is a test."'
```

Look at the `/logs` folder:
You should find a string like `This is a test.` there.
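
You can also generate a message from inside an already running container; a sketch assuming you started it with `--name rsyslog_kafka` (the log file names depend on your rsyslog rules):

```
docker exec rsyslog_kafka logger -p daemon.debug "Another test message"
docker exec rsyslog_kafka tail -n 5 /var/log/messages
```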

Congratulations! You have configured rsyslog in your docker container!

## A bit about networking in docker containers.

Let's create our own network in the docker-compose.yml file. In the future, each service can be moved to a different machine; this is no problem.
```
networks:
  elk:
    driver: bridge
```
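
To see which name the network actually got and which containers are attached to it, the standard Docker commands are enough:

```
docker network ls | grep elk
docker network inspect rsyslog_kafka_elk_elk
```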

## Kafka (tag 0.2)

I took this repository as a basis: `https://github.com/wurstmeister/kafka-docker`

The resulting service is:
```
zookeeper:
  image: wurstmeister/zookeeper:latest
  ports:
    - "2181:2181"
  container_name: zookeeper
  networks:
    - elk

kafka:
  image: wurstmeister/kafka:0.11.0.1
  ports:
    - "9092:9092"
  environment:
    # The below only works for a macOS environment if you installed Docker for
    # Mac. If your Docker engine is using another platform/OS, please refer to
    # the relevant documentation with regards to finding the Host IP address
    # for your platform.
    KAFKA_ADVERTISED_HOST_NAME: docker.for.mac.localhost
    KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
    KAFKA_CREATE_TOPICS: "logstash_logs:1:1"
  links:
    - zookeeper
  depends_on:
    - zookeeper
  container_name: kafka
  networks:
    - elk
```

We will be able to see what is in the Kafka topic once we launch our containers. First, you need to download Kafka. There is a good tutorial at `https://kafka.apache.org/quickstart`, but in short: download it from `https://www.apache.org/dyn/closer.cgi?path=/kafka/2.7.0/kafka_2.13-2.7.0.tgz` and unpack it into the `/app` folder.
We need the scripts in its `/bin` folder.
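
A sketch of that download step (the archive.apache.org mirror is an assumption; any Kafka 2.7.0 mirror works):

```
curl -LO https://archive.apache.org/dist/kafka/2.7.0/kafka_2.13-2.7.0.tgz
mkdir -p /app
tar -xzf kafka_2.13-2.7.0.tgz -C /app
ls /app/kafka_2.13-2.7.0/bin   # the scripts we need
```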

Now we can run a container and execute a script to see if there are any entries inside the topic `test_topic_1`:
```
docker run --rm --network=rsyslog_kafka_elk_elk -v /app/kafka_2.13-2.7.0:/kafka wurstmeister/kafka:0.11.0.1 bash -c "/kafka/bin/kafka-console-consumer.sh --topic test_topic_1 --from-beginning --bootstrap-server 172.23.0.4:9092"
```

About the command itself: we connect to the rsyslog_kafka_elk_elk network (rsyslog_kafka_elk is the name of the folder where the docker-compose.yml file is located, and elk is the network that we specified). With the -v flag, we mount the Kafka scripts into our container.

The result of the command should be something like this:
```
{"timestamp":"2021-02-27T17:43:38.828970+00:00","message":" action 'action-1-omkafka' resumed (module 'omkafka') [v8.1901.0 try https://www.rsyslog.com/e/2359 ]","host":"c0dcee95ffd0","severity":"info","facility":"syslog","syslog-tag":"rsyslogd:"}
```
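
You can also push a test message into the topic by hand with the console producer from the same scripts; a sketch (inside the compose network the broker is reachable as `kafka:9092`, so the hard-coded container IP is not needed):

```
docker run --rm -it --network=rsyslog_kafka_elk_elk -v /app/kafka_2.13-2.7.0:/kafka wurstmeister/kafka:0.11.0.1 bash -c "/kafka/bin/kafka-console-producer.sh --topic test_topic_1 --broker-list kafka:9092"
```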

### Logstash (tag 0.3)

Configs are located in the `/logstash` folder. In `logstash.yml` we specify the parameters for connecting to Elasticsearch.

The pipeline config declares Kafka as an incoming stream and Elasticsearch as an outgoing stream:
```
input {
  beats {
    port => 5044
  }

  tcp {
    port => 5000
  }

  kafka {
    bootstrap_servers => "kafka:9092"
    topics => "test_topic_1"
  }
}

## Add your filters / logstash plugins configuration here

output {
  elasticsearch {
    hosts => "elasticsearch:9200"
    user => "elastic"
    password => "changeme"
    ecs_compatibility => disabled
  }

  file {
    path => "/var/logstash/logs/test.log"
    codec => line { format => "custom format: %{message}" }
  }
}
```
To monitor what goes into Elasticsearch and check that Logstash is working properly, I created a file output stream, so logs are also written to a test.log file. The main thing is not to forget to add the volume to docker-compose.yml:
```
volumes:
  - type: bind
    source: ./logstash/config/logstash.yml
    target: /usr/share/logstash/config/logstash.yml
    read_only: true
  - type: bind
    source: ./logstash/pipeline
    target: /usr/share/logstash/pipeline
    read_only: true
  - ./logs:/var/logstash/logs
```

Check the test.log file in your project. You should find logs from Kafka.
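
A quick way to watch the messages flowing through; the path follows the volume mapping above, and the Logstash service name is an assumption:

```
tail -f ./logs/test.log              # file output on the host
docker-compose logs -f logstash      # Logstash's own log
```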

### Elasticsearch (tag 0.3)

This is the simplest configuration. We will launch the trial version, but you can turn on the open-source one if you wish. Configs are, as usual, in `/elasticsearch/config/`:
```
## Default Elasticsearch configuration from Elasticsearch base image.
## https://github.com/elastic/elasticsearch/blob/master/distribution/docker/src/docker/config/elasticsearch.yml
#
cluster.name: "docker-cluster"
network.host: 0.0.0.0

## X-Pack settings
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html
#
xpack.license.self_generated.type: trial
xpack.security.enabled: true
xpack.monitoring.collection.enabled: true
```

Let's check the Elasticsearch indices. We take the handy `praqma/network-multitool` image as a basis, plus curl:
```
docker run --rm --network=rsyslog_kafka_elk_elk praqma/network-multitool bash -c "curl elasticsearch:9200/_cat/indices?s=store.size:desc -u elastic:changeme"
```
The result of the command:
```
The directory /usr/share/nginx/html is not mounted.
Over-writing the default index.html file with some useful information.
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
green  open .monitoring-es-7-2021.02.28       QP1RL9ezRwmCFLe38dnlTg 1 0 1337 442   1.4mb   1.4mb
green  open .monitoring-es-7-2021.03.07       z0f-K-g7RhqDEbqnupfzPA 1 0  576 428   1.2mb   1.2mb
green  open .monitoring-logstash-7-2021.03.07 rKMYIZE9Q6mSR6_8SG5kUw 1 0  382   0 340.4kb 340.4kb
green  open .watches                          nthHo2KlRhe0HC-8MuT6rA 1 0    6  36 257.1kb 257.1kb
green  open .monitoring-logstash-7-2021.02.28 x98c3c14ToSqmBSOX8gmSg 1 0  363   0 230.1kb 230.1kb
green  open .monitoring-alerts-7              nbdSRkOSSGuLTGYv0z2L1Q 1 0    3   5  62.4kb  62.4kb
yellow open logstash-2021.03.07-000001        22YB7SzYR2a-BAgDEBY0bg 1 1   18   0  10.6kb  10.6kb
green  open .triggered_watches                sp7csXheQIiH7TGmY-EiIw 1 0    0  12   6.9kb   6.9kb
100   784  100   784    0     0  14254      0 --:--:-- --:--:-- --:--:-- 14254
```

We can see that the indices are being created and our Elasticsearch is alive.
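
Besides the index list, the standard `_cluster/health` endpoint gives a one-line summary of the cluster state; same image, same network:

```
docker run --rm --network=rsyslog_kafka_elk_elk praqma/network-multitool bash -c "curl -s 'elasticsearch:9200/_cluster/health?pretty' -u elastic:changeme"
```

Let's connect Kibana now.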

### Kibana (tag 0.4)

This is what the service looks like:

```
kibana:
  build:
    context: kibana/
    args:
      ELK_VERSION: $ELK_VERSION
  volumes:
    - type: bind
      source: ./kibana/config/kibana.yml
      target: /usr/share/kibana/config/kibana.yml
      read_only: true
  ports:
    - "5601:5601"
  networks:
    - elk
  depends_on:
    - elasticsearch
```
In the `/kibana` folder we have a Dockerfile to build the image and also the settings for Kibana:

```
server.name: kibana
server.host: 0.0.0.0
elasticsearch.hosts: [ "http://elasticsearch:9200" ]
monitoring.ui.container.elasticsearch.enabled: true

## X-Pack security credentials
#
elasticsearch.username: elastic
elasticsearch.password: changeme
```

To enter the Kibana UI, open `localhost:5601` in your browser (login/password: elastic/changeme).
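
If the page does not open, Kibana's status API is a quick sanity check from the host (a sketch, same credentials as above):

```
curl -s -u elastic:changeme http://localhost:5601/api/status | head
```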

In the left menu, find Discover, click on it and create an index pattern. I suggest `logstash-*`.

<img width="1253" alt="Create index pattern" src="https://user-images.githubusercontent.com/12798761/113514744-191f5f80-9579-11eb-8fb1-3fc9d22236b2.png">
docker-compose.yml (new file, 200 lines)
@@ -0,0 +1,200 @@
version: '3.2'

services:
  # Rsyslog
  syslog:
    build:
      context: rsyslog/
    depends_on:
      - logstash01
    ports:
      # - 514:514/tcp
      - target: 514
        published: 514
        mode: host
        protocol: tcp
      - target: 514
        published: 514
        mode: host
        protocol: udp
      # - 514:514/udp
    environment:
      - TZ=Europe/Moscow
    volumes:
      - ./logs:/var/log
      - ./rsyslog/rsyslog.d:/etc/rsyslog.d
      - ./rsyslog/conf/rsyslog.conf:/etc/rsyslog.conf
    networks:
      - elk
  # ELK Setup
  setup:
    image: elasticsearch:${STACK_VERSION}
    volumes:
      - certs:/usr/share/elasticsearch/config/certs
    user: "0"
    command: >
      bash -c '
        if [ x${ELASTIC_PASSWORD} == x ]; then
          echo "Set the ELASTIC_PASSWORD environment variable in the .env file";
          exit 1;
        elif [ x${KIBANA_PASSWORD} == x ]; then
          echo "Set the KIBANA_PASSWORD environment variable in the .env file";
          exit 1;
        fi;
        if [ ! -f config/certs/ca.zip ]; then
          echo "Creating CA";
          bin/elasticsearch-certutil ca --silent --pem -out config/certs/ca.zip;
          unzip config/certs/ca.zip -d config/certs;
        fi;
        if [ ! -f config/certs/certs.zip ]; then
          echo "Creating certs";
          echo -ne \
          "instances:\n"\
          "  - name: es01\n"\
          "    dns:\n"\
          "      - es01\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          "  - name: kibana\n"\
          "    dns:\n"\
          "      - kibana\n"\
          "      - localhost\n"\
          "    ip:\n"\
          "      - 127.0.0.1\n"\
          > config/certs/instances.yml;
          bin/elasticsearch-certutil cert --silent --pem -out config/certs/certs.zip --in config/certs/instances.yml --ca-cert config/certs/ca/ca.crt --ca-key config/certs/ca/ca.key;
          unzip config/certs/certs.zip -d config/certs;
        fi;
        echo "Setting file permissions"
        chown -R root:root config/certs;
        find . -type d -exec chmod 750 \{\} \;;
        find . -type f -exec chmod 640 \{\} \;;
        echo "Waiting for Elasticsearch availability";
        until curl -s --cacert config/certs/ca/ca.crt https://es01:9200 | grep -q "missing authentication credentials"; do sleep 30; done;
        echo "Setting kibana_system password";
        until curl -s -X POST --cacert config/certs/ca/ca.crt -u "elastic:${ELASTIC_PASSWORD}" -H "Content-Type: application/json" https://es01:9200/_security/user/kibana_system/_password -d "{\"password\":\"${KIBANA_PASSWORD}\"}" | grep -q "^{}"; do sleep 10; done;
        echo "All done!";
      '
    healthcheck:
      test: ["CMD-SHELL", "[ -f config/certs/es01/es01.crt ]"]
      interval: 1s
      timeout: 5s
      retries: 120
    networks:
      - elk
  # Elastic
  es01:
    depends_on:
      setup:
        condition: service_healthy
    image: elasticsearch:${STACK_VERSION}
    labels:
      co.elastic.logs/module: elasticsearch
    volumes:
      - esdata01:/usr/share/elasticsearch/data
      - certs:/usr/share/elasticsearch/config/certs
    ports:
      - ${ES_PORT}:9200
    environment:
      - node.name=es01
      - cluster.name=${CLUSTER_NAME}
      - discovery.type=single-node
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
      - bootstrap.memory_lock=true
      - xpack.security.enabled=true
      - xpack.security.http.ssl.enabled=true
      - xpack.security.http.ssl.key=certs/es01/es01.key
      - xpack.security.http.ssl.certificate=certs/es01/es01.crt
      - xpack.security.http.ssl.certificate_authorities=certs/ca/ca.crt
      - xpack.security.transport.ssl.enabled=true
      - xpack.security.transport.ssl.key=certs/es01/es01.key
      - xpack.security.transport.ssl.certificate=certs/es01/es01.crt
      - xpack.security.transport.ssl.certificate_authorities=certs/ca/ca.crt
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.license.self_generated.type=${LICENSE}
      - TZ=Europe/Moscow
    mem_limit: ${ES_MEM_LIMIT}
    ulimits:
      memlock:
        soft: -1
        hard: -1
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "curl -s --cacert config/certs/ca/ca.crt https://localhost:9200 | grep -q 'missing authentication credentials'",
        ]
      interval: 10s
      timeout: 10s
      retries: 120
    networks:
      - elk
  # Kibana
  kibana:
    depends_on:
      es01:
        condition: service_healthy
    image: kibana:${STACK_VERSION}
    labels:
      co.elastic.logs/module: kibana
    volumes:
      - certs:/usr/share/kibana/config/certs
      - kibanadata:/usr/share/kibana/data
    ports:
      - 22:22
      - ${KIBANA_PORT}:5601
    environment:
      - SERVERNAME=kibana
      - ELASTICSEARCH_HOSTS=https://es01:9200
      - ELASTICSEARCH_USERNAME=kibana_system
      - ELASTICSEARCH_PASSWORD=${KIBANA_PASSWORD}
      - ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES=config/certs/ca/ca.crt
      - XPACK_SECURITY_ENCRYPTIONKEY=${ENCRYPTION_KEY}
      - XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY=${ENCRYPTION_KEY}
      - XPACK_REPORTING_ENCRYPTIONKEY=${ENCRYPTION_KEY}
      - TZ=Europe/Moscow
    mem_limit: ${KB_MEM_LIMIT}
    healthcheck:
      test: ["CMD-SHELL", "curl -s -I http://localhost:5601 | grep -q 'HTTP/1.1 302 Found'"]
      interval: 10s
      timeout: 10s
      retries: 120
    networks:
      - elk
  # Logstash
  logstash01:
    depends_on:
      es01:
        condition: service_healthy
      kibana:
        condition: service_healthy
    image: logstash:${STACK_VERSION}
    labels:
      co.elastic.logs/module: logstash
    user: root
    volumes:
      # Binds
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro
      - ./logs:/var/logstash/logs
      # Volumes
      - certs:/usr/share/logstash/certs
      - logstashdata01:/usr/share/logstash/data
    environment:
      - xpack.monitoring.enabled=false
      - ELASTIC_USER=elastic
      - ELASTIC_PASSWORD=${ELASTIC_PASSWORD}
      - ELASTIC_HOSTS=https://es01:9200
      - TZ=Europe/Moscow
    networks:
      - elk

networks:
  elk:
    driver: bridge

volumes:
  certs: {}
  esdata01: {}
  kibanadata: {}
  logstashdata01: {}
logs/messages (new file, 152981 lines)
(diff suppressed because the file is too large)
logs/syslog-2024-09-16.log (new file, 1 line)
@@ -0,0 +1 @@
custom format: action 'action-1-builtin:omfwd' resumed (module 'builtin:omfwd') [v8.2302.0 try https://www.rsyslog.com/e/2359 ]
logs/user-2024-09-16.log (new file, 2 lines)
@@ -0,0 +1,2 @@
custom format: Performance summary for Storport Device (Port = 1, Path = 0, Target = 0, Lun = 0) whose Corresponding Class Disk Device Guid is {08d5f9f7-d48b-14d5-4441-9b8de5d151cd}: Total IO:8443 For latency buckets of 256us, 1ms, 4ms, 16ms, 64ms, 128ms, 256ms, 2000ms, 6000ms, 10000ms, 20000ms, 20000+ms, The IO success counts are 8343, 83, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0. The IO failed counts are 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0. The IO total latency (in 100ns) are 3530685, 406718, 298484, 0, 0, 0, 0, 0, 0, 0, 0, 0. Total Bytes Read:6139392 Total Bytes Written:92268032
custom format: The Windows resource exhaustion detection program has been stopped.
logstash/pipeline/logstash.conf (new file, 36 lines)
@@ -0,0 +1,36 @@
input {
  beats {
    port => 5044
  }

  tcp {
    port => 5000
  }

  udp {
    port => 10514
    codec => "json_lines"
    type => "rsyslog"
  }
}

# filter {
#   json {
#     source => "message"
#     skip_on_invalid_json => true
#   }
# }

output {
  elasticsearch {
    index => "rsyslog-%{+YYYY.MM.dd}"
    hosts => "${ELASTIC_HOSTS}"
    user => "${ELASTIC_USER}"
    password => "${ELASTIC_PASSWORD}"
    cacert => "certs/ca/ca.crt"
  }
  # file {
  #   path => "/var/logstash/logs/%{facility}-%{+YYYY-MM-dd}.log"
  #   codec => line { format => "custom format: %{message}" }
  # }
}
rsyslog/Dockerfile (new file, 29 lines)
@@ -0,0 +1,29 @@
FROM debian:stable

RUN ( \
        export DEBIAN_FRONTEND=noninteractive; \
        export BUILD_DEPS=""; \
        export APP_DEPS="rsyslog rsyslog-elasticsearch rsyslog-gnutls rsyslog-kafka"; \
        \
        set -e -u -x; \
        \
        apt-get update; \
        apt-get install -y --no-install-recommends ${APP_DEPS} ${BUILD_DEPS}; \
        \
        #apt-get remove -y $BUILD_DEPS; \
        apt-get clean autoclean; \
        apt-get autoremove --yes; \
        rm -rf /var/lib/apt /var/lib/dpkg /var/lib/cache /var/lib/log; \
    )

COPY conf/rsyslog.conf /etc/
COPY rsyslog.d/*.conf /etc/rsyslog.d/

VOLUME ["/var/log"]

# UDP listen port
EXPOSE 514/udp

CMD ["/usr/sbin/rsyslogd", "-n"]

# vim: tabstop=4 shiftwidth=4 expandtab:
rsyslog/conf/rsyslog.conf (new file, 62 lines)
@@ -0,0 +1,62 @@
# /etc/rsyslog.conf    Configuration file for rsyslog.
#

#################
#### MODULES ####
#################

module(load="imuxsock"               # needs to be done just once
       SysSock.FlowControl="off"     # no blocking when queues fill up
       SysSock.RateLimit.Interval="0" # turn off rate limiting
       SysSock.Unlink="on")          # unlink when done
# create and read log messages from /var/run/rsyslog/dev/log
input(type="imuxsock" Socket="/var/run/rsyslog/dev/log" CreatePath="on")

$DebugFile debug.log
$DebugLevel 2

#$ModLoad imklog   # provides kernel logging support
#$ModLoad immark   # provides --MARK-- message capability

# provides UDP syslog reception
$ModLoad imudp
$UDPServerRun 514

# provides TCP syslog reception
$ModLoad imtcp
$InputTCPServerRun 514


###########################
#### GLOBAL DIRECTIVES ####
###########################

# Use traditional timestamp format.
# To enable high precision timestamps, comment out the following line.
$ActionFileDefaultTemplate RSYSLOG_FileFormat

# Set the default permissions for all log files.
$FileOwner root
$FileGroup adm
$FileCreateMode 0640
$DirCreateMode 0755
$Umask 0022

# Do not reduce repeated messages
$RepeatedMsgReduction off

# Escape control characters on receive
$EscapeControlCharactersOnReceive on

# This decides the maximum size of each message; beyond that it is truncated
$MaxMessageSize 4k

# Where to place spool and state files
$WorkDirectory /var/spool/rsyslog

# Include all config files in /etc/rsyslog.d/
$IncludeConfig /etc/rsyslog.d/*.conf

# Custom additions

$PreserveFQDN on
rsyslog/rsyslog.d/01-json-template.conf (new file, 47 lines)
@@ -0,0 +1,47 @@
template(name="json-template" type="list") {
  constant(value="{")
  constant(value="\"@timestamp\":\"") property(name="timereported" dateFormat="rfc3339")
  constant(value="\",\"@version\":\"1")
  constant(value="\",\"message\":\"") property(name="msg" format="json")
  constant(value="\",\"sysloghost\":\"") property(name="hostname")
  constant(value="\",\"fromhost-ip\":\"") property(name="fromhost-ip")
  constant(value="\",\"severity\":\"") property(name="syslogseverity-text")
  constant(value="\",\"facility\":\"") property(name="syslogfacility-text")
  constant(value="\",\"programname\":\"") property(name="programname")
  constant(value="\",\"procid\":\"") property(name="procid")
  constant(value="\",\"syslog-tag\":\"") property(name="syslogtag")
  constant(value="\"}\n")
}

# if you experience problems, check:
# http://www.rsyslog.com/troubleshoot

# ### MODULES ####

# module(load="omkafka")

# template(name="json_lines" type="list" option.json="on") {
#   constant(value="{")
#   constant(value="\"timestamp\":\"") property(name="timereported" dateFormat="rfc3339")
#   constant(value="\",\"message\":\"") property(name="msg")
#   constant(value="\",\"host\":\"") property(name="hostname")
#   constant(value="\",\"severity\":\"") property(name="syslogseverity-text")
#   constant(value="\",\"facility\":\"") property(name="syslogfacility-text")
#   constant(value="\",\"syslog-tag\":\"") property(name="syslogtag")
#   constant(value="\"}")
# }

# main_queue(
#   queue.workerthreads="1"    # threads to work on the queue
#   queue.dequeueBatchSize="1" # max number of messages to process at once
#   queue.size="1000"          # max queue size
# )
# action(
#   broker=["kafka:9092"]
#   type="omkafka"
#   template="json_lines"
#   topic="test_topic_1"
#   action.resumeRetryCount="-1"
#   action.reportsuspension="on"
# )
rsyslog/rsyslog.d/50-default.conf (new file, 1 line)
@@ -0,0 +1 @@
*.* @logstash01:10514;json-template
rsyslog/rsyslog.d/60-output.conf (new file, 1 line)
@@ -0,0 +1 @@
*.* @logstash01:10514;json-template
rsyslog_kafka_elk.iml (new file, 11 lines)
@@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="JAVA_MODULE" version="4">
  <component name="NewModuleRootManager" inherit-compiler-output="true">
    <exclude-output />
    <content url="file://$MODULE_DIR$">
      <sourceFolder url="file://$MODULE_DIR$/src" isTestSource="false" />
    </content>
    <orderEntry type="jdk" jdkName="11" jdkType="JavaSDK" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
</module>