Deploying Grafana + Prometheus with Docker: configuration files
docker-compose-monitor.yml
version: '2'

networks:
  monitor:
    driver: bridge

services:
  influxdb:
    image: influxdb:latest
    container_name: tig-influxdb
    ports:
      - "18083:8083"
      - "18086:8086"
      - "18090:8090"
    env_file:
      - 'env.influxdb'
    volumes:
      # Data persistence
      # sudo mkdir -p ./influxdb/data
      - ./influxdb/data:/var/lib/influxdb
      # Set the container's time to the UTC+8 (China Standard Time) zone
      - ./timezone:/etc/timezone:ro
      - ./localtime:/etc/localtime:ro
    restart: unless-stopped  # restart automatically unless stopped manually

  telegraf:
    image: telegraf:latest
    container_name: tig-telegraf
    links:
      - influxdb
    volumes:
      - ./telegraf.conf:/etc/telegraf/telegraf.conf:ro
      - ./timezone:/etc/timezone:ro
      - ./localtime:/etc/localtime:ro
    restart: unless-stopped

  prometheus:
    image: prom/prometheus
    container_name: prometheus
    hostname: prometheus
    restart: always
    volumes:
      - /home/qa/docker/grafana/prometheus.yml:/etc/prometheus/prometheus.yml
      - /home/qa/docker/grafana/node_down.yml:/etc/prometheus/node_down.yml
    ports:
      - '9090:9090'
    networks:
      - monitor

  alertmanager:
    image: prom/alertmanager
    container_name: alertmanager
    hostname: alertmanager
    restart: always
    volumes:
      - /home/qa/docker/grafana/alertmanager.yml:/etc/alertmanager/alertmanager.yml
    ports:
      - '9093:9093'
    networks:
      - monitor

  grafana:
    image: grafana/grafana:6.7.4
    container_name: grafana
    hostname: grafana
    restart: always
    ports:
      - '13000:3000'
    networks:
      - monitor

  node-exporter:
    image: quay.io/prometheus/node-exporter
    container_name: node-exporter
    hostname: node-exporter
    restart: always
    ports:
      - '9100:9100'
    networks:
      - monitor

  cadvisor:
    image: google/cadvisor:latest
    container_name: cadvisor
    hostname: cadvisor
    restart: always
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
    ports:
      - '18080:8080'
    networks:
      - monitor
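With the compose file saved as docker-compose-monitor.yml, the stack can be brought up roughly as sketched below (this assumes the mounted host paths, such as /home/qa/docker/grafana/prometheus.yml, already exist):

    # create the InfluxDB data directory referenced in the volumes section
    sudo mkdir -p ./influxdb/data
    # start all services in the background
    docker-compose -f docker-compose-monitor.yml up -d
    # confirm that every container is running
    docker-compose -f docker-compose-monitor.yml ps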
alertmanager.yml
global:
  resolve_timeout: 5m
  smtp_from: '<sender email>'
  smtp_smarthost: 'smtp.exmail.qq.com:25'
  smtp_auth_username: '<sender email>'
  smtp_auth_password: '<password>'
  smtp_require_tls: false
  smtp_hello: 'qq.com'

route:
  group_by: ['alertname']
  group_wait: 5s
  group_interval: 5s
  repeat_interval: 5m
  receiver: 'email'

receivers:
  - name: 'email'
    email_configs:
      - to: '<recipient email>'
        send_resolved: true

inhibit_rules:
  - source_match:
      severity: 'critical'
    target_match:
      severity: 'warning'
    equal: ['alertname', 'dev', 'instance']
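The syntax of this file can be checked before deployment; one way, assuming the prom/alertmanager image is available locally, is to run the bundled amtool against it:

    # validate alertmanager.yml with amtool from the Alertmanager image
    docker run --rm -v "$(pwd)/alertmanager.yml:/etc/alertmanager/alertmanager.yml" \
      --entrypoint amtool prom/alertmanager check-config /etc/alertmanager/alertmanager.yml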
prometheus.yml
global:
  scrape_interval: 15s     # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets: ['192.168.32.117:9093']
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - "node_down.yml"
  # - "node-exporter-alert-rules.yml"
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # IO storage node group
  - job_name: 'io'
    scrape_interval: 8s
    static_configs:
      # The port is the one node-exporter listens on
      - targets: ['192.168.32.117:9100']
      - targets: ['192.168.32.196:9100']
      - targets: ['192.168.32.136:9100']
      - targets: ['192.168.32.193:9100']
      - targets: ['192.168.32.153:9100']
      - targets: ['192.168.32.185:9100']
      - targets: ['192.168.32.190:19100']
      - targets: ['192.168.32.192:9100']

  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'cadvisor'
    static_configs:
      # The port is the one cAdvisor listens on
      - targets: ['192.168.32.117:18080']
      - targets: ['192.168.32.193:8080']
      - targets: ['192.168.32.153:8080']
      - targets: ['192.168.32.185:8080']
      - targets: ['192.168.32.190:18080']
      - targets: ['192.168.32.192:18080']
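Prometheus ships promtool for validating configuration. A sketch, again using the container image (both files are mounted because prometheus.yml references node_down.yml via rule_files):

    # validate prometheus.yml, including the rule files it references
    docker run --rm \
      -v "$(pwd)/prometheus.yml:/etc/prometheus/prometheus.yml" \
      -v "$(pwd)/node_down.yml:/etc/prometheus/node_down.yml" \
      --entrypoint promtool prom/prometheus check config /etc/prometheus/prometheus.yml

Once the stack is running, the scrape targets and their health can be inspected at http://<prometheus-host>:9090/targets.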
node_down.yml
# Note: Prometheus alert names must be valid metric names ([a-zA-Z_:][a-zA-Z0-9_:]*),
# so ASCII names are used here instead of Chinese ones.
groups:
  - name: node_down
    rules:
      - alert: InstanceDown
        expr: up == 0
        for: 1m
        labels:
          user: test
        annotations:
          summary: 'Instance {{ $labels.instance }} down'
          description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 1 minute.'

      # Less than 10% memory left
      - alert: HostOutOfMemory
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host out of memory (instance {{ $labels.instance }})
          description: "Node memory is filling up (< 10% left)\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"

      # Less than 10% disk space left
      - alert: HostOutOfDiskSpace
        expr: (node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host out of disk space (instance {{ $labels.instance }})
          description: "Disk is almost full (< 10% left)\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"

      # CPU load > 80%
      - alert: HostHighCpuLoad
        expr: 100 - (avg by(instance) (rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100) > 80
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Host high CPU load (instance {{ $labels.instance }})
          description: "CPU load is > 80%\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"
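The rule file can likewise be validated with promtool before Prometheus loads it:

    # check the alerting rules for syntax and validation errors
    docker run --rm -v "$(pwd)/node_down.yml:/etc/prometheus/node_down.yml" \
      --entrypoint promtool prom/prometheus check rules /etc/prometheus/node_down.yml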
Alert rule reference: https://awesome-prometheus-alerts.grep.to/rules#prometheus-self-monitoring
Official Grafana dashboard gallery: https://grafana.com/grafana/dashboards/