From ddd067fa5a5c3d1427592060e3edc98816b78356 Mon Sep 17 00:00:00 2001
From: Enrico Stahn
Date: Sun, 18 Feb 2018 22:09:37 +1100
Subject: [PATCH] Add docker-compose.yml and prometheus.yml for testing

---
 docker-compose.yml | 17 +++++++++++++++++
 prometheus.yml     | 37 +++++++++++++++++++++++++++++++++++++
 2 files changed, 54 insertions(+)
 create mode 100644 docker-compose.yml
 create mode 100644 prometheus.yml

diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..6b783ee
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,17 @@
+version: "3"
+
+services:
+
+  prometheus:
+    image: quay.io/prometheus/prometheus:latest
+    ports:
+      - 9090:9090
+    volumes:
+      - "./prometheus.yml:/etc/prometheus/prometheus.yml"
+
+  phpfpm:
+    image: docker-php
+    ports:
+      - "9000:9000"
+    environment:
+      PHP_FPM_PM_STATUS_PATH: "/status"
diff --git a/prometheus.yml b/prometheus.yml
new file mode 100644
index 0000000..bb438c8
--- /dev/null
+++ b/prometheus.yml
@@ -0,0 +1,37 @@
+# my global config
+global:
+  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
+  # scrape_timeout is set to the global default (10s).
+
+# Alertmanager configuration
+alerting:
+  alertmanagers:
+  - static_configs:
+    - targets:
+      # - alertmanager:9093
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+  # - "first_rules.yml"
+  # - "second_rules.yml"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+  - job_name: 'prometheus'
+
+    # metrics_path defaults to '/metrics'
+    # scheme defaults to 'http'.
+
+    static_configs:
+    - targets: ['localhost:9090']
+
+  - job_name: 'phpfpm'
+
+    scrape_interval: 2s
+    scrape_timeout: 1s
+
+    static_configs:
+    - targets: ['10.27.5.6:8080']