enable remote writes to prometheus

fix prometheus volume
This commit is contained in:
Pim Kunis 2023-07-23 15:27:31 +02:00
parent 882578e838
commit 8532135054
3 changed files with 87 additions and 13 deletions

View file

@@ -18,12 +18,57 @@ services:
 image: quay.io/prometheus/prometheus
 networks:
   - traefik
-# volumes:
-#   - type: volume
-#     source: data
-#     target: /prometheus
-#     volume:
-#       nocopy: true
+volumes:
+  - type: volume
+    source: data
+    target: /prometheus
+    volume:
+      nocopy: true
command:
- '--alertmanager.notification-queue-capacity=10000'
- '--alertmanager.timeout='
- '--config.file=/etc/prometheus/prometheus.yml'
- '--enable-feature='
- '--log.format=logfmt'
- '--log.level=info'
- '--query.lookback-delta=5m'
- '--query.max-concurrency=20'
- '--query.max-samples=50000000'
- '--query.timeout=2m'
- '--rules.alert.for-grace-period=10m'
- '--rules.alert.for-outage-tolerance=1h'
- '--rules.alert.resend-delay=1m'
- '--scrape.adjust-timestamps'
- '--scrape.discovery-reload-interval=5s'
- '--scrape.timestamp-tolerance=2ms'
- '--storage.remote.flush-deadline=1m'
- '--storage.remote.read-concurrent-limit=10'
- '--storage.remote.read-max-bytes-in-frame=1048576'
- '--storage.remote.read-sample-limit=50000000'
- '--storage.tsdb.allow-overlapping-blocks'
- '--storage.tsdb.head-chunks-write-queue-size=0'
- '--storage.tsdb.max-block-chunk-segment-size=0B'
- '--storage.tsdb.max-block-duration=1d12h'
- '--storage.tsdb.min-block-duration=2h'
- '--storage.tsdb.path=/prometheus'
- '--storage.tsdb.retention=0s'
- '--storage.tsdb.retention.size=0B'
- '--storage.tsdb.retention.time=0s'
- '--storage.tsdb.samples-per-chunk=120'
- '--storage.tsdb.wal-compression'
- '--storage.tsdb.wal-segment-size=0B'
- '--web.config.file='
- '--web.console.libraries=/usr/share/prometheus/console_libraries'
- '--web.console.templates=/usr/share/prometheus/consoles'
- '--web.cors.origin=.*'
- '--web.enable-remote-write-receiver'
- '--web.external-url='
- '--web.listen-address=0.0.0.0:9090'
- '--web.max-connections=512'
- '--web.page-title=Prometheus Time Series Collection and Processing Server'
- '--web.read-timeout=5m'
- '--web.route-prefix=/'
- '--web.user-assets='
 deploy:
   labels:
     - traefik.enable=true

View file

@@ -0,0 +1,29 @@
# my global config
global:
scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
# scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
# - alertmanager:9093
# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: "prometheus"
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
static_configs:
- targets: ["localhost:9090"]

View file

@@ -1,10 +1,10 @@
-#- name: Create prometheus config
-#  docker_config:
-#    name: prometheus_config
-#    data: "{{ lookup('file', '{{ role_path }}/prometheus.yml') }}"
-#    use_ssh_client: true
-#    rolling_versions: true
-#  register: config
+- name: Create prometheus config
+  docker_config:
+    name: prometheus_config
+    data: "{{ lookup('file', '{{ role_path }}/prometheus.yml') }}"
+    use_ssh_client: true
+    rolling_versions: true
+  register: config
 - name: Deploy Docker stack
   docker_stack: