Commit 6b5adb6f authored by Markus Scheidgen's avatar Markus Scheidgen

Replaced old ELK container with new es 7.9.x based docker-compose. [skip-ci]

parent 83572d05
Pipeline #81395 skipped
@@ -14,21 +14,6 @@ A custom version of [jboss/keycloak](https://hub.docker.com/r/jboss/keycloak/)
- change config to allow reverse proxy under custom prefix
### ELK (optional)
This image is based on the popular elk-stack docker image:
[github](https://github.com/spujadas/elk-docker),
[readthedocs](http://elk-docker.readthedocs.io/).
Changes:
- disabled ssl for beats communication to logstash server
- added tcp input
- simplified Elasticsearch output (don't know how to set metric and other vars yet :-()
- added kibana.yml::server.basePath="/nomad/kibana"
The file `elk/kibana_objects.json` contains an export of NOMAD-specific searches,
visualizations, and dashboards.
### CI runner (optional)
This is the image that this project uses for its gitlab-ci runner. To build an
@@ -39,4 +24,4 @@ docker build -t gitlab-registry.mpcdf.mpg.de/nomad-lab/nomad-fair/ci-runner .
docker push gitlab-registry.mpcdf.mpg.de/nomad-lab/nomad-fair/ci-runner
```
This image provides bash, git, docker, docker-compose, k8s, and helm3.
input {
  beats {
    port => 5044
  }
  tcp {
    port => 5000
    codec => json
  }
}
filter {
  if [message] =~ /^.*kube-probe.*$/ {
    drop { }
  }
}

output {
  if [type] == "Logstash" {
    elasticsearch {
      hosts => ["localhost"]
    }
  } else {
    elasticsearch {
      hosts => ["localhost"]
      manage_template => false
      index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
      document_type => "%{[@metadata][type]}"
    }
  }
}
FROM sebp/elk
# RUN apt-get update -qq \
# && apt-get install -qqy elasticsearch-curator
RUN apt-get update -qq && apt-get install -qqy python3-pip
RUN pip3 install --upgrade pip
RUN pip3 install elasticsearch-curator
ENV LOGSTASH_PATH_CONF /etc/logstash
ADD ./rotate_indices.curator.yml /etc/curator/rotate_indices.yml
ADD ./curator.config.yml /etc/curator/config.yml
ADD ./curator.cron.sh /etc/cron.daily/curator
RUN chmod a+x /etc/cron.daily/curator
ADD ./02-beats-input.conf ${LOGSTASH_PATH_CONF}/conf.d/02-beats-input.conf
ADD ./30-output.conf ${LOGSTASH_PATH_CONF}/conf.d/30-output.conf
ADD ./kibana.yml ${KIBANA_HOME}/config/kibana.yml
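For reference, the old all-in-one image could be built and run roughly like this (a sketch; 5601, 9200, and 5044 are the ports exposed by the sebp/elk base image, 5000 comes from the added tcp input, and the `nomad/elk` tag is hypothetical):

```
docker build -t nomad/elk .
docker run -d -p 5601:5601 -p 9200:9200 -p 5044:5044 -p 5000:5000 nomad/elk
```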
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
client:
  hosts:
    - 127.0.0.1
  port: 9200
  url_prefix:
  use_ssl: False
  certificate:
  client_cert:
  client_key:
  ssl_no_validate: False
  http_auth:
  timeout: 30
  master_only: False

logging:
  loglevel: INFO
  logfile:
  logformat: default
  blacklist: ['elasticsearch', 'urllib3']
#!/bin/sh
curator --config /etc/curator/config.yml /etc/curator/rotate_indices.yml
# Default Kibana 5 file from https://github.com/elastic/kibana/blob/master/config/kibana.yml
#
# Kibana is served by a back end server. This setting specifies the port to use.
#server.port: 5601
# Specifies the address to which the Kibana server will bind. IP addresses and host names are both valid values.
# The default is 'localhost', which usually means remote machines will not be able to connect.
# To allow connections from remote users, set this parameter to a non-loopback address.
server.host: "0.0.0.0"
# Enables you to specify a path to mount Kibana at if you are running behind a proxy. This only affects
# the URLs generated by Kibana, your proxy is expected to remove the basePath value before forwarding requests
# to Kibana. This setting cannot end in a slash.
server.basePath: "/fairdi/kibana"
# The maximum payload size in bytes for incoming server requests.
#server.maxPayloadBytes: 1048576
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete indices older than 10 days (based on index name), for logstash-
      prefixed indices. Ignore the error if the filter does not result in an
      actionable list of indices (ignore_empty_list) and exit cleanly.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: prefix
      value: logstash-
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: 10
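Before relying on the daily cron job installed by the Dockerfile above, the action file can be exercised with curator's dry-run mode, which only logs what would be deleted without touching any indices:

```
curator --dry-run --config /etc/curator/config.yml /etc/curator/rotate_indices.yml
```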
This is NOMAD's docker-compose based Elastic Stack setup. Its main purpose is to capture structured logs from NOMAD backend components. It is a basic three-node Elasticsearch cluster plus Kibana and Logstash. The created services and respective docker container names are prefixed with `nomad_elk_1`. The default database volumes live under `/scratch/fairdi/db/nomad_elk_1/...`.
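To bring the stack up and verify that the three nodes formed a cluster, something along these lines should work (a minimal sketch; host port 9201 follows from the compose file below):

```
docker-compose up -d
# node 1 publishes Elasticsearch on host port 9201; expect status green or yellow
curl 'http://localhost:9201/_cluster/health?pretty'
```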
version: '2.2'
services:
  nomad_elk_1_node_1:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.9.0
    container_name: nomad_elk_1_node_1
    environment:
      - node.name=nomad_elk_1_node_1
      - cluster.name=nomad_elk_1
      - discovery.seed_hosts=nomad_elk_1_node_2,nomad_elk_1_node_3
      - cluster.initial_master_nodes=nomad_elk_1_node_1,nomad_elk_1_node_2,nomad_elk_1_node_3
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms2048m -Xmx2048m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /scratch/fairdi/db/nomad_elk_1/node_1:/usr/share/elasticsearch/data
    ports:
      - 9201:9200
    networks:
      nomad_elk_1:
        aliases:
          - elasticsearch
  nomad_elk_1_node_2:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.9.0
    container_name: nomad_elk_1_node_2
    environment:
      - node.name=nomad_elk_1_node_2
      - cluster.name=nomad_elk_1
      - discovery.seed_hosts=nomad_elk_1_node_1,nomad_elk_1_node_3
      - cluster.initial_master_nodes=nomad_elk_1_node_1,nomad_elk_1_node_2,nomad_elk_1_node_3
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms2048m -Xmx2048m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /scratch/fairdi/db/nomad_elk_1/node_2:/usr/share/elasticsearch/data
    networks:
      - nomad_elk_1
  nomad_elk_1_node_3:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.9.0
    container_name: nomad_elk_1_node_3
    environment:
      - node.name=nomad_elk_1_node_3
      - cluster.name=nomad_elk_1
      - discovery.seed_hosts=nomad_elk_1_node_1,nomad_elk_1_node_2
      - cluster.initial_master_nodes=nomad_elk_1_node_1,nomad_elk_1_node_2,nomad_elk_1_node_3
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms2048m -Xmx2048m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /scratch/fairdi/db/nomad_elk_1/node_3:/usr/share/elasticsearch/data
    networks:
      - nomad_elk_1
  nomad_elk_1_kibana:
    image: docker.elastic.co/kibana/kibana:7.9.0
    container_name: nomad_elk_1_kibana
    ports:
      - 5601:5601
    environment:
      ELASTICSEARCH_URL: http://nomad_elk_1_node_1:9200
      ELASTICSEARCH_HOSTS: http://nomad_elk_1_node_1:9200
    volumes:
      - ./kibana.yaml:/usr/share/kibana/config/kibana.yml
    networks:
      - nomad_elk_1
  nomad_elk_1_logstash:
    image: docker.elastic.co/logstash/logstash:7.9.0
    container_name: nomad_elk_1_logstash
    expose:
      - 5000
    ports:
      - 5000:5000
    volumes:
      - ./pipeline/:/usr/share/logstash/pipeline/
      - ./logstash.yaml:/usr/share/logstash/config/logstash.yml
    networks:
      - nomad_elk_1
networks:
  nomad_elk_1:
    driver: bridge
server.host: "0.0.0.0"
server.basePath: "/fairdi/kibana"
monitoring.elasticsearch.hosts: 'http://elasticsearch:9200'
monitoring.enabled: true
input {
  tcp {
    port => 5000
    codec => json
  }
}

filter {
  # drop requests from kubernetes liveness/readiness probes (kube-probe user agent)
  if [gunicorn.agent] =~ /kube-probe/ {
    drop { }
  }
  # old v0.7.x config
  if [message] =~ /kube-probe/ {
    drop { }
  }
  # surface the request line of gunicorn access logs as the event message
  if [logger_name] == "gunicorn.access" {
    mutate {
      add_field => { "message" => "%{gunicorn.request}" }
    }
  }
}

output {
  stdout {
    codec => rubydebug
  }
  elasticsearch {
    hosts => ["elasticsearch:9200"]
  }
}
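Since the tcp input uses the json codec and the compose file publishes port 5000, the pipeline can be smoke-tested by piping a JSON line at it. The field values below are made-up examples shaped to match the filter above, and nc flags differ between netcat variants:

```
# hypothetical test event; -q 1 closes the connection after sending (GNU/OpenBSD nc)
echo '{"logger_name": "gunicorn.access", "gunicorn.request": "GET /api/info"}' \
  | nc -q 1 localhost 5000
```

The event should appear on Logstash's stdout via the rubydebug codec before being indexed into the default logstash-* index.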