# fluent-bit.conf.j2 — Fluent Bit agent configuration, rendered by the
# Ansible "fluentbit" role on hypervisors (classic-mode config format).
# vi: ft=conf
[SERVICE]
    # Seconds between flushes of buffered records to the outputs.
    flush 1

    # Run in the foreground; process supervision is handled externally.
    daemon Off

    # Verbosity: error, warning, info, debug or trace.
    # 'info' (the default) also includes 'error' and 'warning'.
    log_level info

    # Optional parser definitions (the 'free' parser referenced by the
    # memory filter is presumably defined there — verify on deploy).
    parsers_file parsers.conf

    # Optional external plugin definitions.
    plugins_file plugins.conf

    # Built-in HTTP server exposing metrics; disabled here.
    http_server Off
    http_listen 0.0.0.0
    http_port 2020

    # Buffering and storage, see:
    # https://docs.fluentbit.io/manual/administration/buffering-and-storage
    # Storage pipeline metrics are published under '/api/v1/storage',
    # but only when 'http_server' is enabled.
    storage.metrics on

    # Filesystem buffering is left disabled; uncomment to enable.
    #
    # Absolute path for filesystem data chunks:
    # storage.path /tmp/storage
    #
    # Synchronization mode when persisting chunks ('normal' or 'full'):
    # storage.sync normal
    #
    # CRC32 integrity check on chunk read/write:
    # storage.checksum off
    #
    # Memory hint for replaying undelivered backlog chunks
    # (only relevant when storage.path is set):
    # storage.backlog.mem_limit 5M
[INPUT]
    # Sample overall CPU usage; records are tagged 'cpu' (later trimmed
    # to hostname + cpu_p by a record_modifier filter).
    Name cpu
    Tag cpu
    # Read interval in seconds (default: 1).
    Interval_Sec 1
[INPUT]
    # Sample memory usage once a second by shelling out to free(1);
    # the Mem and Swap lines are joined onto one line and tagged
    # 'memory' for the parser filter to split apart.
    Name exec
    Tag memory
    Command free -m | tail -2 | tr '\n' ' '
    Interval_Sec 1
[OUTPUT]
    # Forward every record to the central collector; the port is
    # templated by Ansible from 'fluent_forward_port'.
    Name forward
    Match *
    Host maestro.dmz
    Port {{ fluent_forward_port }}
[FILTER]
    # Structure memory records: run the raw 'exec' field through the
    # 'free' parser (expected in parsers.conf — confirm it is deployed).
    Name parser
    Match memory
    Key_Name exec
    Parser free
[FILTER]
    # Stamp every record with this host's name so the collector can
    # tell the hypervisors apart.
    Name record_modifier
    Match *
    Record hostname ${HOSTNAME}
[FILTER]
    # On cpu records keep only the hostname and the total CPU usage
    # percentage (cpu_p); every other field from the cpu input is dropped.
    Name record_modifier
    Match cpu
    Allowlist_key hostname
    Allowlist_key cpu_p