# vi: ft=conf
[SERVICE]
    # Flush
    # =====
    # set an interval of seconds before to flush records to a destination
    flush 1

    # Daemon
    # ======
    # instruct Fluent Bit to run in foreground or background mode.
    daemon Off

    # Log_Level
    # =========
    # Set the verbosity level of the service, values can be:
    #
    # - error
    # - warning
    # - info
    # - debug
    # - trace
    #
    # by default 'info' is set, that means it includes 'error' and 'warning'.
    log_level info

    # Parsers File
    # ============
    # specify an optional 'Parsers' configuration file
    parsers_file parsers.conf

    # Plugins File
    # ============
    # specify an optional 'Plugins' configuration file to load external plugins.
    plugins_file plugins.conf

    # HTTP Server
    # ===========
    # Enable/Disable the built-in HTTP Server for metrics
    http_server Off
    http_listen 0.0.0.0
    http_port 2020

    # Storage
    # =======
    # Fluent Bit can use memory and filesystem buffering based mechanisms
    #
    # - https://docs.fluentbit.io/manual/administration/buffering-and-storage
    #
    # storage metrics
    # ---------------
    # publish storage pipeline metrics in '/api/v1/storage'. The metrics are
    # exported only if the 'http_server' option is enabled.
    #
    storage.metrics on

    # storage.path
    # ------------
    # absolute file system path to store filesystem data buffers (chunks).
    #
    # storage.path /tmp/storage

    # storage.sync
    # ------------
    # configure the synchronization mode used to store the data into the
    # filesystem. It can take the values normal or full.
    #
    # storage.sync normal

    # storage.checksum
    # ----------------
    # enable the data integrity check when writing and reading data from the
    # filesystem. The storage layer uses the CRC32 algorithm.
    #
    # storage.checksum off

    # storage.backlog.mem_limit
    # -------------------------
    # if storage.path is set, Fluent Bit will look for data chunks that were
    # not delivered and are still in the storage layer, these are called
    # backlog data. This option configure a hint of maximum value of memory
    # to use when processing these records.
    #
    # storage.backlog.mem_limit 5M

{% if 'swarm' in group_names %}
[INPUT]
    Name forward
    Listen 0.0.0.0
    Port 22222
{% endif %}

[INPUT]
    Name cpu
    Tag cpu

    # Read interval (sec) Default: 1
    Interval_Sec 1

[INPUT]
    Name exec
    Tag memory
    Command free -m | tail -2 | tr '\n' ' '
    Interval_Sec 1

[INPUT]
    Name exec
    Tag diskfree
    Command df -lt ext4 | tail -n +2
    Interval_Sec 600

[OUTPUT]
    Name forward
    Match **
    Host maestro.dmz
    Port {{ fluent_forward_port }}

[FILTER]
    Name parser
    Match memory
    Key_Name exec
    Parser free

[FILTER]
    Name parser
    Match diskfree
    Key_Name exec
    Parser diskfree

{% if 'swarm' in group_names %}
[FILTER]
    Name parser
    Match docker.traefik_traefik.**
    Key_Name log
    Parser traefiklog

[FILTER]
    Name rewrite_tag
    Match docker.traefik_traefik.**
    Rule $host .* access.traefik false
    Emitter_Name traefik_access_emitter
{% endif %}

[FILTER]
    Name record_modifier
    Match **
    Record hostname ${HOSTNAME}

[FILTER]
    Name record_modifier
    Match cpu
    Allowlist_key hostname
    Allowlist_key cpu_p