Compare commits
11 Commits
0abdb8b0a5
...
feature/ad
| Author | SHA1 | Date | |
|---|---|---|---|
| 0d85bd53aa | |||
| 3392c84c65 | |||
| aa3e0c8f54 | |||
| 0dca30868b | |||
| d22bbd3dba | |||
| d70c2813de | |||
| 14d945e7b5 | |||
| ad387df05d | |||
| b1fc6bcf3c | |||
| 5fa246356a | |||
| 17dd0fddff |
@ -24,3 +24,6 @@ ansible_ssh_private_key_file=~/.ssh/id_ansible
|
||||
[all_except_ansible:children]
|
||||
infrastructure
|
||||
applications
|
||||
|
||||
[grafana]
|
||||
192.168.0.106 # pvestandt1-grafana
|
||||
|
||||
104
playbooks/add-blackbox-carefully.yml
Normal file
104
playbooks/add-blackbox-carefully.yml
Normal file
@ -0,0 +1,104 @@
|
||||
---
|
||||
- name: Add Blackbox Exporter job to existing Prometheus config
|
||||
hosts: 192.168.0.105
|
||||
become: yes
|
||||
|
||||
tasks:
|
||||
- name: Backup current config
|
||||
copy:
|
||||
src: /etc/prometheus/prometheus.yml
|
||||
dest: /etc/prometheus/prometheus.yml.backup-blackbox-{{ ansible_date_time.epoch }}
|
||||
remote_src: yes
|
||||
tags: prometheus
|
||||
|
||||
- name: Check if blackbox job already exists
|
||||
shell: |
|
||||
grep -q 'job_name:.*blackbox' /etc/prometheus/prometheus.yml && echo "exists" || echo "not exists"
|
||||
register: blackbox_exists
|
||||
changed_when: false
|
||||
tags: prometheus
|
||||
|
||||
- name: Add blackbox job to scrape_configs (if not exists)
|
||||
blockinfile:
|
||||
path: /etc/prometheus/prometheus.yml
|
||||
insertbefore: '^remote_write:'
|
||||
block: |
|
||||
- job_name: blackbox
|
||||
honor_timestamps: true
|
||||
track_timestamps_staleness: false
|
||||
scrape_interval: 15s
|
||||
scrape_timeout: 10s
|
||||
metrics_path: /probe
|
||||
params:
|
||||
module: [http_2xx]
|
||||
scheme: http
|
||||
follow_redirects: true
|
||||
enable_http2: true
|
||||
static_configs:
|
||||
- targets:
|
||||
# Внутренние сервисы стенда
|
||||
- "http://192.168.0.110/"
|
||||
- "http://192.168.0.111:9187/metrics"
|
||||
- "http://192.168.0.112:8080/get"
|
||||
- "http://192.168.0.100:3000/"
|
||||
- "http://192.168.0.101:9100/metrics"
|
||||
- "http://192.168.0.103:8200/ui/"
|
||||
- "http://192.168.0.104:8428/metrics"
|
||||
- "http://192.168.0.105:9090/metrics"
|
||||
- "http://192.168.0.106:3000"
|
||||
# Внешние домены
|
||||
- "http://forgejo.pvenode.ru/"
|
||||
- "http://grafana.pvenode.ru/"
|
||||
- "http://prometheus.pvenode.ru/"
|
||||
- "http://app1.pvenode.ru/"
|
||||
- "http://wiki.pvenode.ru/"
|
||||
relabel_configs:
|
||||
- source_labels: [__address__]
|
||||
target_label: __param_target
|
||||
- source_labels: [__param_target]
|
||||
target_label: instance
|
||||
- target_label: __address__
|
||||
replacement: 192.168.0.112:8083
|
||||
metric_relabel_configs:
|
||||
- source_labels: [__address__]
|
||||
separator: ;
|
||||
regex: (.*)
|
||||
target_label: instance
|
||||
replacement: $1
|
||||
action: replace
|
||||
- source_labels: [__address__]
|
||||
separator: ;
|
||||
regex: ([^:]+):\d+
|
||||
target_label: host
|
||||
replacement: ${1}
|
||||
action: replace
|
||||
marker: "# {mark} ANSIBLE MANAGED BLOCK - blackbox"
|
||||
when: blackbox_exists.stdout == "not exists"
|
||||
tags: prometheus
|
||||
|
||||
- name: Check Prometheus configuration
|
||||
command: promtool check config /etc/prometheus/prometheus.yml
|
||||
register: promtool_check
|
||||
failed_when: promtool_check.rc != 0
|
||||
changed_when: false
|
||||
tags: prometheus
|
||||
|
||||
- name: Show config check result
|
||||
debug:
|
||||
msg: "{{ promtool_check.stdout_lines }}"
|
||||
when: promtool_check.rc == 0
|
||||
tags: prometheus
|
||||
|
||||
- name: Reload Prometheus if config is valid
|
||||
systemd:
|
||||
name: prometheus
|
||||
state: reloaded
|
||||
when: promtool_check.rc == 0
|
||||
tags: prometheus
|
||||
|
||||
- name: Show status
|
||||
debug:
|
||||
msg: |
|
||||
Blackbox job {{ "added successfully" if promtool_check.rc == 0 else "failed to add" }}
|
||||
Backup created: /etc/prometheus/prometheus.yml.backup-blackbox-{{ ansible_date_time.epoch }}
|
||||
tags: prometheus
|
||||
151
playbooks/add-blackbox-correct.yml
Normal file
151
playbooks/add-blackbox-correct.yml
Normal file
@ -0,0 +1,151 @@
|
||||
---
|
||||
- name: Add correct Blackbox Exporter job
|
||||
hosts: 192.168.0.105
|
||||
become: yes
|
||||
|
||||
tasks:
|
||||
- name: Backup current config
|
||||
copy:
|
||||
src: /etc/prometheus/prometheus.yml
|
||||
dest: /etc/prometheus/prometheus.yml.backup-pre-blackbox-{{ ansible_date_time.epoch }}
|
||||
remote_src: yes
|
||||
tags: prometheus
|
||||
|
||||
- name: Check current line numbers
|
||||
shell: |
|
||||
echo "Last scrape_config job ends at line:"
|
||||
grep -n "job_name: postgres" /etc/prometheus/prometheus.yml
|
||||
echo ""
|
||||
echo "Remote_write starts at line:"
|
||||
grep -n "^remote_write:" /etc/prometheus/prometheus.yml
|
||||
register: line_info
|
||||
changed_when: false
|
||||
tags: prometheus
|
||||
|
||||
- name: Create correct blackbox job config
|
||||
copy:
|
||||
dest: /tmp/blackbox-job.yml
|
||||
content: |
|
||||
- job_name: blackbox
|
||||
honor_timestamps: true
|
||||
track_timestamps_staleness: false
|
||||
scrape_interval: 15s
|
||||
scrape_timeout: 10s
|
||||
metrics_path: /probe
|
||||
params:
|
||||
module: [http_2xx]
|
||||
scheme: http
|
||||
follow_redirects: true
|
||||
enable_http2: true
|
||||
static_configs:
|
||||
- targets:
|
||||
# Internal services
|
||||
- "http://192.168.0.110/"
|
||||
- "http://192.168.0.111:9187/metrics"
|
||||
- "http://192.168.0.112:8080/get"
|
||||
- "http://192.168.0.100:3000/"
|
||||
- "http://192.168.0.101:9100/metrics"
|
||||
- "http://192.168.0.103:8200/ui/"
|
||||
- "http://192.168.0.104:8428/metrics"
|
||||
- "http://192.168.0.105:9090/metrics"
|
||||
- "http://192.168.0.106:3000"
|
||||
# External domains
|
||||
- "http://forgejo.pvenode.ru/"
|
||||
- "http://grafana.pvenode.ru/"
|
||||
- "http://prometheus.pvenode.ru/"
|
||||
- "http://app1.pvenode.ru/"
|
||||
- "http://wiki.pvenode.ru/"
|
||||
relabel_configs:
|
||||
- source_labels: [__address__]
|
||||
target_label: __param_target
|
||||
- source_labels: [__param_target]
|
||||
target_label: instance
|
||||
- target_label: __address__
|
||||
replacement: 192.168.0.112:8083
|
||||
metric_relabel_configs:
|
||||
- source_labels: [__address__]
|
||||
separator: ;
|
||||
regex: (.*)
|
||||
target_label: instance
|
||||
replacement: $1
|
||||
action: replace
|
||||
- source_labels: [__address__]
|
||||
separator: ;
|
||||
regex: ([^:]+):\d+
|
||||
target_label: host
|
||||
replacement: ${1}
|
||||
action: replace
|
||||
tags: prometheus
|
||||
|
||||
- name: Insert blackbox job before remote_write
|
||||
shell: |
|
||||
# Находим строку с remote_write
|
||||
remote_line=$(grep -n "^remote_write:" /etc/prometheus/prometheus.yml | cut -d: -f1)
|
||||
|
||||
if [ -z "$remote_line" ]; then
|
||||
echo "ERROR: remote_write not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Создаем новый файл
|
||||
cp /etc/prometheus/prometheus.yml /etc/prometheus/prometheus.yml.tmp
|
||||
|
||||
# Вставляем blackbox перед remote_write
|
||||
head -n $((remote_line - 1)) /etc/prometheus/prometheus.yml > /etc/prometheus/prometheus.yml.new
|
||||
cat /tmp/blackbox-job.yml >> /etc/prometheus/prometheus.yml.new
|
||||
tail -n +$remote_line /etc/prometheus/prometheus.yml >> /etc/prometheus/prometheus.yml.new
|
||||
|
||||
# Заменяем старый файл
|
||||
mv /etc/prometheus/prometheus.yml.new /etc/prometheus/prometheus.yml
|
||||
rm -f /etc/prometheus/prometheus.yml.tmp
|
||||
|
||||
echo "Inserted at line $((remote_line - 1))"
|
||||
args:
|
||||
executable: /bin/bash
|
||||
tags: prometheus
|
||||
|
||||
- name: Check Prometheus configuration
|
||||
command: promtool check config /etc/prometheus/prometheus.yml
|
||||
register: promtool_check
|
||||
failed_when: promtool_check.rc != 0
|
||||
changed_when: false
|
||||
tags: prometheus
|
||||
|
||||
- name: Show config check result
|
||||
debug:
|
||||
msg: "{{ promtool_check.stdout_lines }}"
|
||||
when: promtool_check.rc == 0
|
||||
tags: prometheus
|
||||
|
||||
- name: Reload Prometheus
|
||||
systemd:
|
||||
name: prometheus
|
||||
state: reloaded
|
||||
when: promtool_check.rc == 0
|
||||
tags: prometheus
|
||||
|
||||
- name: Verify blackbox job added
|
||||
shell: |
|
||||
sleep 2
|
||||
echo "=== Checking if blackbox job exists ==="
|
||||
if grep -q "job_name: blackbox" /etc/prometheus/prometheus.yml; then
|
||||
echo "✓ Blackbox job found in config"
|
||||
echo ""
|
||||
echo "=== Checking Prometheus targets ==="
|
||||
curl -s "http://localhost:9090/api/v1/targets" | python3 -c "
|
||||
import json, sys
|
||||
data = json.load(sys.stdin)
|
||||
for target in data['data']['activeTargets']:
|
||||
job = target['discoveredLabels'].get('job', 'N/A')
|
||||
if 'blackbox' in job.lower():
|
||||
print(f'✓ Blackbox target: {target[\"health\"]}')
|
||||
print(f' URL: {target[\"scrapeUrl\"]}')
|
||||
exit(0)
|
||||
print('✗ Blackbox not in targets yet (may need 15s scrape interval)')
|
||||
"
|
||||
else
|
||||
echo "✗ Blackbox job not found in config"
|
||||
fi
|
||||
args:
|
||||
executable: /bin/bash
|
||||
tags: prometheus
|
||||
105
playbooks/add-blackbox-final.yml
Normal file
105
playbooks/add-blackbox-final.yml
Normal file
@ -0,0 +1,105 @@
|
||||
---
|
||||
- name: Add Blackbox job to Prometheus config
|
||||
hosts: 192.168.0.105
|
||||
become: yes
|
||||
|
||||
tasks:
|
||||
- name: Backup config
|
||||
copy:
|
||||
src: /etc/prometheus/prometheus.yml
|
||||
dest: /etc/prometheus/prometheus.yml.backup-blackbox
|
||||
remote_src: yes
|
||||
|
||||
- name: Create blackbox config file
|
||||
copy:
|
||||
dest: /tmp/blackbox-config.yml
|
||||
content: |
|
||||
# Blackbox Exporter monitoring
|
||||
- job_name: blackbox
|
||||
honor_timestamps: true
|
||||
track_timestamps_staleness: false
|
||||
scrape_interval: 15s
|
||||
scrape_timeout: 10s
|
||||
metrics_path: /probe
|
||||
params:
|
||||
module: [http_2xx]
|
||||
scheme: http
|
||||
follow_redirects: true
|
||||
enable_http2: true
|
||||
static_configs:
|
||||
- targets:
|
||||
- "http://192.168.0.110/"
|
||||
- "http://192.168.0.111:9187/metrics"
|
||||
- "http://192.168.0.112:8080/get"
|
||||
- "http://192.168.0.100:3000/"
|
||||
- "http://192.168.0.101:9100/metrics"
|
||||
- "http://192.168.0.103:8200/ui/"
|
||||
- "http://192.168.0.104:8428/metrics"
|
||||
- "http://192.168.0.105:9090/metrics"
|
||||
- "http://192.168.0.106:3000"
|
||||
- "http://forgejo.pvenode.ru/"
|
||||
- "http://grafana.pvenode.ru/"
|
||||
- "http://prometheus.pvenode.ru/"
|
||||
- "http://app1.pvenode.ru/"
|
||||
- "http://wiki.pvenode.ru/"
|
||||
relabel_configs:
|
||||
- source_labels: [__address__]
|
||||
target_label: __param_target
|
||||
- source_labels: [__param_target]
|
||||
target_label: instance
|
||||
- target_label: __address__
|
||||
replacement: 192.168.0.112:8083
|
||||
metric_relabel_configs:
|
||||
- source_labels: [__address__]
|
||||
separator: ;
|
||||
regex: (.*)
|
||||
target_label: instance
|
||||
replacement: $1
|
||||
action: replace
|
||||
- source_labels: [__address__]
|
||||
separator: ;
|
||||
regex: ([^:]+):\d+
|
||||
target_label: host
|
||||
replacement: ${1}
|
||||
action: replace
|
||||
|
||||
- name: Insert blackbox config before remote_write
|
||||
shell: |
|
||||
# Find remote_write line
|
||||
remote_line=$(grep -n "^remote_write:" /etc/prometheus/prometheus.yml | head -1 | cut -d: -f1)
|
||||
|
||||
if [ -z "$remote_line" ]; then
|
||||
echo "ERROR: remote_write not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Insert blackbox config
|
||||
head -n $((remote_line - 1)) /etc/prometheus/prometheus.yml > /tmp/prometheus-new.yml
|
||||
cat /tmp/blackbox-config.yml >> /tmp/prometheus-new.yml
|
||||
tail -n +$remote_line /etc/prometheus/prometheus.yml >> /tmp/prometheus-new.yml
|
||||
|
||||
# Replace original
|
||||
mv /tmp/prometheus-new.yml /etc/prometheus/prometheus.yml
|
||||
|
||||
echo "Inserted at line $((remote_line - 1))"
|
||||
|
||||
- name: Validate config
|
||||
command: promtool check config /etc/prometheus/prometheus.yml
|
||||
register: config_check
|
||||
changed_when: false
|
||||
|
||||
- name: Show validation result
|
||||
debug:
|
||||
msg: "{{ config_check.stdout_lines }}"
|
||||
|
||||
- name: Reload Prometheus
|
||||
systemd:
|
||||
name: prometheus
|
||||
state: reloaded
|
||||
when: config_check.rc == 0
|
||||
|
||||
- name: Check result
|
||||
debug:
|
||||
msg: |
|
||||
Blackbox job {{ "successfully added" if config_check.rc == 0 else "failed to add" }}
|
||||
Backup: /etc/prometheus/prometheus.yml.backup-blackbox
|
||||
103
playbooks/add-blackbox-simple.yml
Normal file
103
playbooks/add-blackbox-simple.yml
Normal file
@ -0,0 +1,103 @@
|
||||
---
|
||||
- name: Add Blackbox Exporter to Prometheus
|
||||
hosts: 192.168.0.105
|
||||
become: yes
|
||||
|
||||
tasks:
|
||||
- name: Backup current config
|
||||
copy:
|
||||
src: /etc/prometheus/prometheus.yml
|
||||
dest: /etc/prometheus/prometheus.yml.backup-{{ ansible_date_time.epoch }}
|
||||
remote_src: yes
|
||||
|
||||
- name: Get line number where remote_write starts
|
||||
shell: grep -n "^remote_write:" /etc/prometheus/prometheus.yml | cut -d: -f1
|
||||
register: remote_line
|
||||
changed_when: false
|
||||
|
||||
- name: Create blackbox job config file
|
||||
copy:
|
||||
dest: /tmp/blackbox-job.yml
|
||||
content: |
|
||||
- job_name: blackbox
|
||||
honor_timestamps: true
|
||||
track_timestamps_staleness: false
|
||||
scrape_interval: 15s
|
||||
scrape_timeout: 10s
|
||||
metrics_path: /probe
|
||||
params:
|
||||
module: [http_2xx]
|
||||
scheme: http
|
||||
follow_redirects: true
|
||||
enable_http2: true
|
||||
static_configs:
|
||||
- targets:
|
||||
- "http://192.168.0.110/"
|
||||
- "http://192.168.0.111:9187/metrics"
|
||||
- "http://192.168.0.112:8080/get"
|
||||
- "http://192.168.0.100:3000/"
|
||||
- "http://192.168.0.101:9100/metrics"
|
||||
- "http://192.168.0.103:8200/ui/"
|
||||
- "http://192.168.0.104:8428/metrics"
|
||||
- "http://192.168.0.105:9090/metrics"
|
||||
- "http://192.168.0.106:3000"
|
||||
- "http://forgejo.pvenode.ru/"
|
||||
- "http://grafana.pvenode.ru/"
|
||||
- "http://prometheus.pvenode.ru/"
|
||||
- "http://app1.pvenode.ru/"
|
||||
- "http://wiki.pvenode.ru/"
|
||||
relabel_configs:
|
||||
- source_labels: [__address__]
|
||||
target_label: __param_target
|
||||
- source_labels: [__param_target]
|
||||
target_label: instance
|
||||
- target_label: __address__
|
||||
replacement: 192.168.0.112:8083
|
||||
metric_relabel_configs:
|
||||
- source_labels: [__address__]
|
||||
separator: ;
|
||||
regex: (.*)
|
||||
target_label: instance
|
||||
replacement: $1
|
||||
action: replace
|
||||
- source_labels: [__address__]
|
||||
separator: ;
|
||||
regex: ([^:]+):\d+
|
||||
target_label: host
|
||||
replacement: ${1}
|
||||
action: replace
|
||||
|
||||
- name: Insert blackbox job before remote_write
|
||||
shell: |
|
||||
# Вставляем blackbox job перед remote_write
|
||||
head -n $(({{ remote_line.stdout }} - 1)) /etc/prometheus/prometheus.yml > /tmp/prometheus-new.yml
|
||||
cat /tmp/blackbox-job.yml >> /tmp/prometheus-new.yml
|
||||
tail -n +{{ remote_line.stdout }} /etc/prometheus/prometheus.yml >> /tmp/prometheus-new.yml
|
||||
mv /tmp/prometheus-new.yml /etc/prometheus/prometheus.yml
|
||||
args:
|
||||
executable: /bin/bash
|
||||
|
||||
- name: Check Prometheus configuration
|
||||
command: promtool check config /etc/prometheus/prometheus.yml
|
||||
register: promtool_check
|
||||
changed_when: false
|
||||
|
||||
- name: Show config status
|
||||
debug:
|
||||
msg: "{{ promtool_check.stdout_lines }}"
|
||||
|
||||
- name: Reload Prometheus if config valid
|
||||
systemd:
|
||||
name: prometheus
|
||||
state: reloaded
|
||||
when: promtool_check.rc == 0
|
||||
|
||||
- name: Verify blackbox job
|
||||
shell: |
|
||||
echo "Checking if blackbox job was added..."
|
||||
if grep -q "job_name: blackbox" /etc/prometheus/prometheus.yml; then
|
||||
echo "SUCCESS: Blackbox job found in config"
|
||||
else
|
||||
echo "ERROR: Blackbox job not found"
|
||||
fi
|
||||
changed_when: false
|
||||
35
playbooks/add-postgres-to-prometheus.yml
Normal file
35
playbooks/add-postgres-to-prometheus.yml
Normal file
@ -0,0 +1,35 @@
|
||||
---
|
||||
- name: Add PostgreSQL exporter to Prometheus
|
||||
hosts: 192.168.0.105
|
||||
become: yes
|
||||
|
||||
tasks:
|
||||
- name: Add postgres_exporter scrape config
|
||||
blockinfile:
|
||||
path: /etc/prometheus/prometheus.yml
|
||||
insertafter: ' # Nginx metrics via nginx-prometheus-exporter'
|
||||
block: |2
|
||||
# PostgreSQL metrics via postgres_exporter
|
||||
- job_name: 'postgres-app2'
|
||||
scrape_interval: 15s
|
||||
scrape_timeout: 10s
|
||||
static_configs:
|
||||
- targets: ['192.168.0.111:9187']
|
||||
labels:
|
||||
instance: 'app2'
|
||||
service: 'postgresql'
|
||||
job: 'postgres'
|
||||
metric_relabel_configs:
|
||||
- source_labels: [__address__]
|
||||
target_label: instance
|
||||
- source_labels: [__address__]
|
||||
regex: '([^:]+):\\d+'
|
||||
replacement: '${1}'
|
||||
target_label: host
|
||||
marker: "# {mark} ANSIBLE MANAGED BLOCK - postgres_exporter"
|
||||
backup: yes
|
||||
|
||||
- name: Reload Prometheus
|
||||
systemd:
|
||||
name: prometheus
|
||||
state: reloaded
|
||||
76
playbooks/configure-blackbox-monitoring-fixed.yml
Normal file
76
playbooks/configure-blackbox-monitoring-fixed.yml
Normal file
@ -0,0 +1,76 @@
|
||||
---
|
||||
- name: Configure Prometheus for Blackbox monitoring
|
||||
hosts: 192.168.0.105
|
||||
become: yes
|
||||
|
||||
vars:
|
||||
blackbox_targets:
|
||||
# Основные сервисы стенда (из ИП)
|
||||
- "http://192.168.0.110/"
|
||||
- "http://192.168.0.111:9187/metrics" # postgres_exporter
|
||||
- "http://192.168.0.112:8080/get" # httpbin
|
||||
- "http://192.168.0.112:8081/metrics" # cadvisor
|
||||
- "http://192.168.0.100:3000/" # forgejo
|
||||
- "http://192.168.0.101:9100/metrics" # ansible node_exporter
|
||||
- "http://192.168.0.103:8200/ui/" # vault
|
||||
- "http://192.168.0.104:8428/metrics" # victoriametrics
|
||||
- "http://192.168.0.105:9090/metrics" # prometheus
|
||||
- "http://192.168.0.106:3000" # grafana
|
||||
|
||||
# Основные домены (первые для теста)
|
||||
- "http://forgejo.pvenode.ru/"
|
||||
- "http://grafana.pvenode.ru/"
|
||||
- "http://prometheus.pvenode.ru/"
|
||||
- "http://app1.pvenode.ru/"
|
||||
- "http://wiki.pvenode.ru/"
|
||||
|
||||
tasks:
|
||||
- name: Backup original Prometheus config
|
||||
copy:
|
||||
src: /etc/prometheus/prometheus.yml
|
||||
dest: /etc/prometheus/prometheus.yml.backup-{{ ansible_date_time.epoch }}
|
||||
remote_src: yes
|
||||
tags: prometheus
|
||||
|
||||
- name: Add blackbox exporter to Prometheus
|
||||
blockinfile:
|
||||
path: /etc/prometheus/prometheus.yml
|
||||
insertafter: ' # cAdvisor container metrics'
|
||||
block: |
|
||||
# Blackbox Exporter probes
|
||||
- job_name: 'blackbox'
|
||||
metrics_path: /probe
|
||||
params:
|
||||
module: [http_2xx]
|
||||
static_configs:
|
||||
- targets:
|
||||
{% for target in blackbox_targets %}
|
||||
- "{{ target }}"
|
||||
{% endfor %}
|
||||
relabel_configs:
|
||||
- source_labels: [__address__]
|
||||
target_label: __param_target
|
||||
- source_labels: [__param_target]
|
||||
target_label: instance
|
||||
- target_label: __address__
|
||||
replacement: 192.168.0.112:8083 # blackbox-exporter
|
||||
marker: "# {mark} ANSIBLE MANAGED BLOCK - blackbox"
|
||||
tags: prometheus
|
||||
|
||||
- name: Check Prometheus configuration
|
||||
command: promtool check config /etc/prometheus/prometheus.yml
|
||||
register: promtool_check
|
||||
failed_when: promtool_check.rc != 0
|
||||
tags: prometheus
|
||||
|
||||
- name: Reload Prometheus
|
||||
systemd:
|
||||
name: prometheus
|
||||
state: reloaded
|
||||
when: promtool_check.rc == 0
|
||||
tags: prometheus
|
||||
|
||||
- name: Show configured targets
|
||||
debug:
|
||||
msg: "Added {{ blackbox_targets|length }} targets to blackbox monitoring"
|
||||
tags: prometheus
|
||||
43
playbooks/configure-blackbox-monitoring.yml
Normal file
43
playbooks/configure-blackbox-monitoring.yml
Normal file
@ -0,0 +1,43 @@
|
||||
---
|
||||
- name: Configure Prometheus for Blackbox monitoring
|
||||
hosts: 192.168.0.105
|
||||
become: yes
|
||||
|
||||
vars:
|
||||
blackbox_targets: "{{ hostvars['192.168.0.112']['blackbox_targets'] }}"
|
||||
|
||||
tasks:
|
||||
- name: Add blackbox exporter to Prometheus
|
||||
blockinfile:
|
||||
path: /etc/prometheus/prometheus.yml
|
||||
insertafter: ' # cAdvisor container metrics'
|
||||
block: |
|
||||
# Blackbox Exporter probes
|
||||
- job_name: 'blackbox'
|
||||
metrics_path: /probe
|
||||
params:
|
||||
module: [http_2xx]
|
||||
static_configs:
|
||||
- targets:
|
||||
{% for target in blackbox_targets %}
|
||||
- {{ target.url }}
|
||||
{% endfor %}
|
||||
relabel_configs:
|
||||
- source_labels: [__address__]
|
||||
target_label: __param_target
|
||||
- source_labels: [__param_target]
|
||||
target_label: instance
|
||||
- target_label: __address__
|
||||
replacement: 192.168.0.112:8083 # blackbox-exporter
|
||||
marker: "# {mark} ANSIBLE MANAGED BLOCK - blackbox"
|
||||
|
||||
- name: Check Prometheus configuration
|
||||
command: promtool check config /etc/prometheus/prometheus.yml
|
||||
register: promtool_check
|
||||
failed_when: promtool_check.rc != 0
|
||||
|
||||
- name: Reload Prometheus
|
||||
systemd:
|
||||
name: prometheus
|
||||
state: reloaded
|
||||
when: promtool_check.rc == 0
|
||||
102
playbooks/configure-nginx-monitoring.yml
Normal file
102
playbooks/configure-nginx-monitoring.yml
Normal file
@ -0,0 +1,102 @@
|
||||
---
|
||||
- name: Configure Nginx monitoring in Prometheus
|
||||
hosts: 192.168.0.105
|
||||
become: yes
|
||||
|
||||
tasks:
|
||||
- name: Check if nginx config already exists
|
||||
stat:
|
||||
path: /etc/prometheus/nginx-app1.yml
|
||||
register: nginx_config
|
||||
|
||||
- name: Create Nginx scrape config
|
||||
copy:
|
||||
content: |
|
||||
# Nginx stub_status metrics
|
||||
- job_name: 'nginx-app1'
|
||||
scrape_interval: 15s
|
||||
scrape_timeout: 10s
|
||||
metrics_path: /status
|
||||
static_configs:
|
||||
- targets: ['192.168.0.110:80']
|
||||
labels:
|
||||
instance: 'app1'
|
||||
service: 'nginx'
|
||||
job: 'nginx'
|
||||
metric_relabel_configs:
|
||||
- source_labels: [__address__]
|
||||
target_label: instance
|
||||
- source_labels: [__address__]
|
||||
regex: '([^:]+):\d+'
|
||||
replacement: '${1}'
|
||||
target_label: host
|
||||
dest: /etc/prometheus/nginx-app1.yml
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
when: not nginx_config.stat.exists
|
||||
register: config_created
|
||||
|
||||
- name: Include nginx config in prometheus.yml
|
||||
lineinfile:
|
||||
path: /etc/prometheus/prometheus.yml
|
||||
line: ' - "nginx-app1.yml"'
|
||||
insertafter: '^rule_files:'
|
||||
state: present
|
||||
register: config_modified
|
||||
|
||||
- name: Test Prometheus configuration
|
||||
command: promtool check config /etc/prometheus/prometheus.yml
|
||||
register: prometheus_test
|
||||
changed_when: false
|
||||
|
||||
- name: Show test result
|
||||
debug:
|
||||
msg: "{{ prometheus_test.stdout_lines }}"
|
||||
|
||||
- name: Reload Prometheus if config changed
|
||||
systemd:
|
||||
name: prometheus
|
||||
state: reloaded
|
||||
when: (config_created.changed or config_modified.changed) and prometheus_test.rc == 0
|
||||
|
||||
- name: Wait for Prometheus to reload
|
||||
pause:
|
||||
seconds: 3
|
||||
|
||||
- name: Verify Nginx target in Prometheus
|
||||
shell: |
|
||||
curl -s "http://localhost:9090/api/v1/targets" | python3 -c "
|
||||
import json, sys
|
||||
data = json.load(sys.stdin)
|
||||
for target in data['data']['activeTargets']:
|
||||
if 'nginx' in target['labels'].get('job', ''):
|
||||
print(f\"Found nginx target: {target['labels']} - Health: {target['health']}\")
|
||||
"
|
||||
register: target_check
|
||||
changed_when: false
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Show target check result
|
||||
debug:
|
||||
msg: "{{ target_check.stdout_lines if target_check.stdout_lines else 'Nginx target not found' }}"
|
||||
|
||||
- name: Check if nginx metrics are available
|
||||
shell: |
|
||||
# Wait a bit for metrics to appear
|
||||
sleep 2
|
||||
curl -s "http://localhost:9090/api/v1/query?query=up{job='nginx-app1'}" | python3 -c "
|
||||
import json, sys
|
||||
data = json.load(sys.stdin)
|
||||
if data['status'] == 'success' and data['data']['result']:
|
||||
for result in data['data']['result']:
|
||||
print(f\"Nginx metrics UP: {result['metric']}\")
|
||||
else:
|
||||
print(\"Nginx metrics not available yet\")
|
||||
"
|
||||
register: metrics_check
|
||||
changed_when: false
|
||||
|
||||
- name: Show metrics check result
|
||||
debug:
|
||||
msg: "{{ metrics_check.stdout_lines }}"
|
||||
49
playbooks/deploy-app1-nginx.yml
Normal file
49
playbooks/deploy-app1-nginx.yml
Normal file
@ -0,0 +1,49 @@
|
||||
---
|
||||
- name: Deploy Nginx with load testing features on App1
|
||||
hosts: 192.168.0.110
|
||||
become: yes
|
||||
gather_facts: yes
|
||||
|
||||
roles:
|
||||
- role: nginx
|
||||
|
||||
tasks:
|
||||
- name: Verify Nginx installation
|
||||
uri:
|
||||
url: "http://{{ ansible_default_ipv4.address }}"
|
||||
status_code: 200
|
||||
timeout: 10
|
||||
register: nginx_check
|
||||
until: nginx_check.status == 200
|
||||
retries: 5
|
||||
delay: 5
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Test API endpoints
|
||||
uri:
|
||||
url: "http://{{ ansible_default_ipv4.address }}{{ item }}"
|
||||
status_code: 200
|
||||
timeout: 5
|
||||
loop:
|
||||
- /api/test
|
||||
- /api/metrics
|
||||
register: api_test
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Display deployment result
|
||||
debug:
|
||||
msg: |
|
||||
✅ Nginx with load testing deployed on {{ inventory_hostname }}!
|
||||
🌐 Main page: http://{{ ansible_default_ipv4.address }}
|
||||
📊 Status page: http://{{ ansible_default_ipv4.address }}/status
|
||||
🔧 Test APIs:
|
||||
- http://{{ ansible_default_ipv4.address }}/api/test
|
||||
- http://{{ ansible_default_ipv4.address }}/api/slow
|
||||
- http://{{ ansible_default_ipv4.address }}/api/error
|
||||
- http://{{ ansible_default_ipv4.address }}/api/metrics
|
||||
📈 Monitoring:
|
||||
- Node metrics: http://{{ ansible_default_ipv4.address }}:9100/metrics
|
||||
- Prometheus: http://192.168.0.105:9090
|
||||
- Grafana: http://192.168.0.106:3000
|
||||
|
||||
🎯 Load testing interface ready with JavaScript controls!
|
||||
16
playbooks/deploy-app3-blackbox.yml
Normal file
16
playbooks/deploy-app3-blackbox.yml
Normal file
@ -0,0 +1,16 @@
|
||||
---
|
||||
- name: Deploy Blackbox Exporter on App3
|
||||
hosts: 192.168.0.112
|
||||
become: yes
|
||||
gather_facts: yes
|
||||
|
||||
pre_tasks:
|
||||
- name: Ensure Docker is installed
|
||||
include_role:
|
||||
name: docker
|
||||
apply:
|
||||
tags: docker
|
||||
|
||||
roles:
|
||||
- role: blackbox_exporter
|
||||
tags: blackbox
|
||||
16
playbooks/deploy-app3-cadvisor.yml
Normal file
16
playbooks/deploy-app3-cadvisor.yml
Normal file
@ -0,0 +1,16 @@
|
||||
---
|
||||
- name: Deploy cAdvisor on App3
|
||||
hosts: 192.168.0.112
|
||||
become: yes
|
||||
gather_facts: yes
|
||||
|
||||
pre_tasks:
|
||||
- name: Ensure Docker is installed
|
||||
include_role:
|
||||
name: docker
|
||||
apply:
|
||||
tags: docker
|
||||
|
||||
roles:
|
||||
- role: cadvisor
|
||||
tags: cadvisor
|
||||
9
playbooks/deploy-app3-docker.yml
Normal file
9
playbooks/deploy-app3-docker.yml
Normal file
@ -0,0 +1,9 @@
|
||||
---
|
||||
- name: Deploy Docker on App3
|
||||
hosts: 192.168.0.112
|
||||
become: yes
|
||||
gather_facts: yes
|
||||
|
||||
roles:
|
||||
- role: docker
|
||||
tags: docker
|
||||
16
playbooks/deploy-app3-httpbin.yml
Normal file
16
playbooks/deploy-app3-httpbin.yml
Normal file
@ -0,0 +1,16 @@
|
||||
---
|
||||
- name: Deploy httpbin on App3
|
||||
hosts: 192.168.0.112
|
||||
become: yes
|
||||
gather_facts: yes
|
||||
|
||||
pre_tasks:
|
||||
- name: Ensure Docker is installed
|
||||
include_role:
|
||||
name: docker
|
||||
apply:
|
||||
tags: docker
|
||||
|
||||
roles:
|
||||
- role: httpbin
|
||||
tags: httpbin
|
||||
6
playbooks/deploy-cadvisor.yml
Normal file
6
playbooks/deploy-cadvisor.yml
Normal file
@ -0,0 +1,6 @@
|
||||
---
|
||||
- name: Deploy cAdvisor on App3
|
||||
hosts: 192.168.0.112 # Указываем конкретный хост
|
||||
become: true
|
||||
roles:
|
||||
- cadvisor
|
||||
12
playbooks/deploy-postgres-app2.yml
Normal file
12
playbooks/deploy-postgres-app2.yml
Normal file
@ -0,0 +1,12 @@
|
||||
---
|
||||
- name: Deploy PostgreSQL and Postgres Exporter on App2
|
||||
hosts: 192.168.0.111
|
||||
become: yes
|
||||
gather_facts: yes
|
||||
|
||||
roles:
|
||||
- role: postgresql
|
||||
tags: postgresql
|
||||
|
||||
- role: postgres_exporter
|
||||
tags: postgres_exporter
|
||||
61
playbooks/final-app1-fix.yml
Normal file
61
playbooks/final-app1-fix.yml
Normal file
@ -0,0 +1,61 @@
|
||||
---
|
||||
- name: Final fix for App1 Nginx configuration
|
||||
hosts: 192.168.0.110
|
||||
become: yes
|
||||
|
||||
tasks:
|
||||
- name: Deploy final Nginx configuration
|
||||
template:
|
||||
src: "/root/projects/ansible-config/roles/nginx/templates/app1.conf.j2"
|
||||
dest: /etc/nginx/conf.d/app1.conf
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
|
||||
- name: Test Nginx configuration
|
||||
command: nginx -t
|
||||
register: nginx_test
|
||||
changed_when: false
|
||||
|
||||
- name: Show test result
|
||||
debug:
|
||||
msg: "{{ nginx_test.stdout_lines }}"
|
||||
|
||||
- name: Reload Nginx
|
||||
systemd:
|
||||
name: nginx
|
||||
state: reloaded
|
||||
when: nginx_test.rc == 0
|
||||
|
||||
- name: Test all endpoints
|
||||
shell: |
|
||||
echo "=== Testing endpoints ==="
|
||||
for endpoint in /api/test /health /status /nginx_status; do
|
||||
echo -n "$endpoint: "
|
||||
curl -s -o /dev/null -w "%{http_code}" http://localhost$endpoint && echo " OK" || echo " FAILED"
|
||||
done
|
||||
register: endpoint_test
|
||||
changed_when: false
|
||||
|
||||
- name: Show endpoint test results
|
||||
debug:
|
||||
msg: "{{ endpoint_test.stdout_lines }}"
|
||||
|
||||
- name: Verify endpoints return JSON
|
||||
shell: |
|
||||
echo "=== Checking JSON responses ==="
|
||||
curl -s http://localhost/api/test | python3 -m json.tool && echo "API Test: OK" || echo "API Test: FAILED"
|
||||
curl -s http://localhost/health | python3 -m json.tool && echo "Health: OK" || echo "Health: FAILED"
|
||||
register: json_test
|
||||
changed_when: false
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Final status check
|
||||
debug:
|
||||
msg: |
|
||||
✅ App1 Nginx configuration fixed!
|
||||
🌐 Access: http://192.168.0.110
|
||||
📊 Status: http://192.168.0.110/status
|
||||
🔧 API Test: http://192.168.0.110/api/test
|
||||
💚 Health: http://192.168.0.110/health
|
||||
📈 Node metrics: http://192.168.0.110:9100/metrics
|
||||
65
playbooks/install-nginx-exporter.yml
Normal file
65
playbooks/install-nginx-exporter.yml
Normal file
@ -0,0 +1,65 @@
|
||||
---
|
||||
- name: Install nginx-prometheus-exporter on App1
|
||||
hosts: 192.168.0.110
|
||||
become: yes
|
||||
|
||||
tasks:
|
||||
- name: Download nginx-prometheus-exporter
|
||||
get_url:
|
||||
url: https://github.com/nginxinc/nginx-prometheus-exporter/releases/download/v0.11.0/nginx-prometheus-exporter_0.11.0_linux_amd64.tar.gz
|
||||
dest: /tmp/nginx-exporter.tar.gz
|
||||
|
||||
- name: Extract nginx-exporter
|
||||
unarchive:
|
||||
src: /tmp/nginx-exporter.tar.gz
|
||||
dest: /usr/local/bin/
|
||||
remote_src: yes
|
||||
creates: /usr/local/bin/nginx-prometheus-exporter
|
||||
|
||||
- name: Create systemd service
|
||||
copy:
|
||||
content: |
|
||||
[Unit]
|
||||
Description=NGINX Prometheus Exporter
|
||||
After=network.target nginx.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=root
|
||||
ExecStart=/usr/local/bin/nginx-prometheus-exporter -nginx.scrape-uri=http://localhost:80/status
|
||||
Restart=on-failure
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
dest: /etc/systemd/system/nginx-exporter.service
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
|
||||
- name: Enable and start nginx-exporter
|
||||
systemd:
|
||||
name: nginx-exporter
|
||||
state: started
|
||||
enabled: yes
|
||||
daemon_reload: yes
|
||||
|
||||
- name: Test nginx-exporter
|
||||
uri:
|
||||
url: http://localhost:9113/metrics
|
||||
status_code: 200
|
||||
register: exporter_test
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Show test result
|
||||
debug:
|
||||
msg: "Nginx exporter test: {{ 'SUCCESS' if exporter_test.status == 200 else 'FAILED' }}"
|
||||
|
||||
- name: Verify metrics
|
||||
shell: |
|
||||
curl -s http://localhost:9113/metrics | grep -i nginx | head -5
|
||||
register: metrics_check
|
||||
changed_when: false
|
||||
|
||||
- name: Show metrics check
|
||||
debug:
|
||||
msg: "{{ metrics_check.stdout_lines }}"
|
||||
47
playbooks/monitoring/check_grafana.yml
Normal file
47
playbooks/monitoring/check_grafana.yml
Normal file
@ -0,0 +1,47 @@
|
||||
---
|
||||
- name: Check Grafana installation status
|
||||
hosts: grafana
|
||||
become: yes
|
||||
tasks:
|
||||
- name: Check Grafana service
|
||||
systemd:
|
||||
name: grafana
|
||||
register: service_status
|
||||
|
||||
- name: Check if Grafana is listening on port
|
||||
wait_for:
|
||||
port: 3000
|
||||
host: 127.0.0.1
|
||||
timeout: 10
|
||||
state: started
|
||||
register: port_status
|
||||
|
||||
- name: Check Grafana API health
|
||||
uri:
|
||||
url: "http://localhost:3000/api/health"
|
||||
method: GET
|
||||
status_code: 200
|
||||
timeout: 10
|
||||
register: api_status
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Get Grafana version
|
||||
command: /usr/local/bin/grafana-server --version
|
||||
register: version_info
|
||||
changed_when: false
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Display Grafana status report
|
||||
debug:
|
||||
msg: |
|
||||
📊 Статус Grafana на {{ inventory_hostname }}:
|
||||
|
||||
Служба: {{ "✅ Запущена" if service_status.status.ActiveState == "active" else "❌ Остановлена" }}
|
||||
Порт 3000: {{ "✅ Открыт" if port_status.state == "started" else "❌ Закрыт" }}
|
||||
API: {{ "✅ Доступен (HTTP " ~ api_status.status ~ ")" if api_status.status == 200 else "❌ Недоступен" }}
|
||||
|
||||
{% if version_info is succeeded %}
|
||||
Версия: {{ version_info.stdout_lines[-1] | regex_search('Version ([0-9.]+)') | default('Неизвестна') }}
|
||||
{% else %}
|
||||
Версия: Не удалось определить
|
||||
{% endif %}
|
||||
55
playbooks/monitoring/cleanup_grafana.yml
Normal file
55
playbooks/monitoring/cleanup_grafana.yml
Normal file
@ -0,0 +1,55 @@
|
||||
---
|
||||
- name: Clean up Grafana completely
|
||||
hosts: grafana
|
||||
become: yes
|
||||
tasks:
|
||||
- name: Stop and disable Grafana service
|
||||
systemd:
|
||||
name: grafana
|
||||
state: stopped
|
||||
enabled: no
|
||||
daemon_reload: yes
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Remove systemd service file
|
||||
file:
|
||||
path: /etc/systemd/system/grafana.service
|
||||
state: absent
|
||||
|
||||
- name: Remove symlinks
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: absent
|
||||
loop:
|
||||
- /usr/local/bin/grafana-server
|
||||
- /usr/local/bin/grafana-cli
|
||||
|
||||
- name: Remove Grafana directories
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: absent
|
||||
loop:
|
||||
- /usr/share/grafana
|
||||
- /usr/share/grafana-*
|
||||
- /var/lib/grafana
|
||||
- /var/log/grafana
|
||||
- /etc/grafana
|
||||
|
||||
- name: Remove temporary files
|
||||
file:
|
||||
path: /tmp/grafana-*.tar.gz
|
||||
state: absent
|
||||
|
||||
- name: Remove Grafana user and group
|
||||
user:
|
||||
name: grafana
|
||||
state: absent
|
||||
remove: yes
|
||||
|
||||
- name: Reload systemd
|
||||
systemd:
|
||||
daemon_reload: yes
|
||||
|
||||
- name: Verify cleanup
|
||||
debug:
|
||||
msg: "✅ Grafana полностью удалена с хоста {{ inventory_hostname }}"
|
||||
9
playbooks/monitoring/install_grafana.yml
Normal file
9
playbooks/monitoring/install_grafana.yml
Normal file
@ -0,0 +1,9 @@
|
||||
---
|
||||
- name: Install and configure Grafana
|
||||
hosts: grafana
|
||||
become: yes
|
||||
vars:
|
||||
grafana_version: "12.3.2"
|
||||
grafana_admin_password: "admin"
|
||||
roles:
|
||||
- grafana
|
||||
47
playbooks/monitoring/install_grafana_final.yml
Normal file
47
playbooks/monitoring/install_grafana_final.yml
Normal file
@ -0,0 +1,47 @@
|
||||
---
|
||||
- name: Install and configure Grafana (with health checks)
|
||||
hosts: grafana
|
||||
become: yes
|
||||
vars:
|
||||
grafana_version: "12.3.2"
|
||||
grafana_admin_password: "admin"
|
||||
|
||||
tasks:
|
||||
- name: Include Grafana role
|
||||
include_role:
|
||||
name: grafana
|
||||
|
||||
- name: Final verification from control node
|
||||
delegate_to: localhost
|
||||
run_once: yes
|
||||
block:
|
||||
- name: Wait for Grafana to be fully ready
|
||||
pause:
|
||||
seconds: 30
|
||||
prompt: "Waiting for Grafana to complete initialization..."
|
||||
|
||||
- name: Test Grafana access from control node
|
||||
uri:
|
||||
url: "http://{{ hostvars[groups['grafana'][0]]['ansible_default_ipv4']['address'] | default(groups['grafana'][0]) }}:3000/api/health"
|
||||
method: GET
|
||||
status_code: 200
|
||||
timeout: 30
|
||||
register: final_check
|
||||
until: final_check.status == 200
|
||||
retries: 12 # 12 попыток * 5 секунд = 60 секунд
|
||||
delay: 5
|
||||
|
||||
- name: Display final success message
|
||||
debug:
|
||||
msg: |
|
||||
🎉 Grafana успешно установлена и готова к работе!
|
||||
|
||||
Доступ по адресу: http://{{ hostvars[groups['grafana'][0]]['ansible_default_ipv4']['address'] | default(groups['grafana'][0]) }}:3000
|
||||
Логин: admin
|
||||
Пароль: {{ grafana_admin_password }}
|
||||
|
||||
Для проверки выполните команду:
|
||||
curl http://{{ hostvars[groups['grafana'][0]]['ansible_default_ipv4']['address'] | default(groups['grafana'][0]) }}:3000/api/health
|
||||
|
||||
Или откройте в браузере:
|
||||
http://{{ hostvars[groups['grafana'][0]]['ansible_default_ipv4']['address'] | default(groups['grafana'][0]) }}:3000
|
||||
126
roles/blackbox_exporter/defaults/main.yml
Normal file
126
roles/blackbox_exporter/defaults/main.yml
Normal file
@ -0,0 +1,126 @@
|
||||
---
|
||||
# Blackbox Exporter configuration
|
||||
blackbox_version: "latest"
|
||||
blackbox_port: 8083
|
||||
blackbox_image: "prom/blackbox-exporter:{{ blackbox_version }}"
|
||||
blackbox_container_name: "blackbox-exporter"
|
||||
|
||||
# Все цели для мониторинга из ИП и твоего списка
|
||||
blackbox_targets:
|
||||
# Основные сервисы стенда (из ИП)
|
||||
- name: "app1-nginx"
|
||||
url: "http://192.168.0.110/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "app2-postgresql"
|
||||
url: "http://192.168.0.111:9187/metrics" # postgres_exporter
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "app3-httpbin"
|
||||
url: "http://192.168.0.112:8080/get"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "app3-cadvisor"
|
||||
url: "http://192.168.0.112:8081/metrics"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "git-forgejo"
|
||||
url: "http://192.168.0.100:3000/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "ansible"
|
||||
url: "http://192.168.0.101:9100/metrics" # node_exporter
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "vault"
|
||||
url: "http://192.168.0.103:8200/ui/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "victoriametrics"
|
||||
url: "http://192.168.0.104:8428/metrics"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "prometheus"
|
||||
url: "http://192.168.0.105:9090/metrics"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "grafana"
|
||||
url: "http://192.168.0.106:3000"
|
||||
module: "http_2xx"
|
||||
|
||||
# Домены из твоего списка
|
||||
- name: "wiki-pvenode"
|
||||
url: "http://wiki.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "victoria-pvenode"
|
||||
url: "http://victoria.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "vault-pvenode"
|
||||
url: "http://vault.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "tasks-pvenode"
|
||||
url: "http://tasks.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "python-pvenode"
|
||||
url: "http://python.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "pvenode-main"
|
||||
url: "http://pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "proxmox-pvenode"
|
||||
url: "http://proxmox.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "prometheus-pvenode"
|
||||
url: "http://prometheus.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "postgre-pvenode"
|
||||
url: "http://postgre.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "ovpn-pvenode"
|
||||
url: "http://ovpn.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "nginxpm-pvenode"
|
||||
url: "http://nginxpm.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "nextcloud-pvenode"
|
||||
url: "http://nextcloud.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "money-pvenode"
|
||||
url: "http://money.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "grafana-pvenode"
|
||||
url: "http://grafana.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "gitlab-pvenode"
|
||||
url: "http://gitlab.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "forgejo-pvenode"
|
||||
url: "http://forgejo.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "bitwarden-pvenode"
|
||||
url: "http://bitwarden.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "app1-pvenode"
|
||||
url: "http://app1.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
|
||||
- name: "ansible-pvenode"
|
||||
url: "http://ansimble.pvenode.ru/"
|
||||
module: "http_2xx"
|
||||
42
roles/blackbox_exporter/files/blackbox.yml
Normal file
42
roles/blackbox_exporter/files/blackbox.yml
Normal file
@ -0,0 +1,42 @@
|
||||
modules:
|
||||
# HTTP проверка (2xx статус)
|
||||
http_2xx:
|
||||
prober: http
|
||||
timeout: 10s
|
||||
http:
|
||||
valid_status_codes: [200, 301, 302, 403]
|
||||
method: GET
|
||||
preferred_ip_protocol: "ip4"
|
||||
follow_redirects: true
|
||||
fail_if_ssl: false
|
||||
fail_if_not_ssl: false
|
||||
tls_config:
|
||||
insecure_skip_verify: true # для тестового стенда
|
||||
|
||||
# HTTP POST проверка
|
||||
http_post_2xx:
|
||||
prober: http
|
||||
http:
|
||||
method: POST
|
||||
preferred_ip_protocol: "ip4"
|
||||
|
||||
# TCP подключение
|
||||
tcp_connect:
|
||||
prober: tcp
|
||||
timeout: 5s
|
||||
|
||||
# SSL проверки (можно добавить позже)
|
||||
ssl_check:
|
||||
prober: http
|
||||
http:
|
||||
fail_if_not_ssl: true
|
||||
tls_config:
|
||||
insecure_skip_verify: false
|
||||
preferred_ip_protocol: "ip4"
|
||||
|
||||
# ICMP (ping)
|
||||
icmp_check:
|
||||
prober: icmp
|
||||
timeout: 5s
|
||||
icmp:
|
||||
preferred_ip_protocol: "ip4"
|
||||
58
roles/blackbox_exporter/tasks/main.yml
Normal file
58
roles/blackbox_exporter/tasks/main.yml
Normal file
@ -0,0 +1,58 @@
|
||||
---
|
||||
- name: Create directory for blackbox config
|
||||
file:
|
||||
path: /etc/blackbox_exporter
|
||||
state: directory
|
||||
mode: '0755'
|
||||
tags: blackbox
|
||||
|
||||
- name: Copy blackbox configuration
|
||||
copy:
|
||||
src: files/blackbox.yml
|
||||
dest: /etc/blackbox_exporter/config.yml
|
||||
mode: '0644'
|
||||
tags: blackbox
|
||||
|
||||
- name: Ensure blackbox-exporter container is running
|
||||
community.docker.docker_container:
|
||||
name: "{{ blackbox_container_name }}"
|
||||
image: "{{ blackbox_image }}"
|
||||
state: started
|
||||
restart_policy: unless-stopped
|
||||
ports:
|
||||
- "{{ blackbox_port }}:9115"
|
||||
volumes:
|
||||
- "/etc/blackbox_exporter/config.yml:/etc/blackbox_exporter/config.yml"
|
||||
command:
|
||||
- "--config.file=/etc/blackbox_exporter/config.yml"
|
||||
- "--web.listen-address=:9115"
|
||||
tags: blackbox
|
||||
|
||||
- name: Configure UFW for blackbox-exporter
|
||||
ufw:
|
||||
rule: allow
|
||||
port: "{{ blackbox_port }}"
|
||||
proto: tcp
|
||||
comment: "Blackbox Exporter"
|
||||
tags: blackbox
|
||||
|
||||
- name: Wait for blackbox-exporter to be ready
|
||||
wait_for:
|
||||
port: "{{ blackbox_port }}"
|
||||
host: "{{ ansible_host }}"
|
||||
delay: 2
|
||||
timeout: 60
|
||||
tags: blackbox
|
||||
|
||||
- name: Test blackbox-exporter with local target
|
||||
uri:
|
||||
url: "http://{{ ansible_host }}:{{ blackbox_port }}/probe?target=http://192.168.0.112:8080/get&module=http_2xx"
|
||||
return_content: true
|
||||
status_code: 200
|
||||
register: blackbox_test
|
||||
tags: blackbox
|
||||
|
||||
- name: Show blackbox-exporter status
|
||||
debug:
|
||||
msg: "Blackbox Exporter deployed at http://{{ ansible_host }}:{{ blackbox_port }}/"
|
||||
tags: blackbox
|
||||
9
roles/cadvisor/defaults/main.yml
Normal file
9
roles/cadvisor/defaults/main.yml
Normal file
@ -0,0 +1,9 @@
|
||||
---
|
||||
# Default port for cAdvisor
|
||||
cadvisor_port: 8080
|
||||
|
||||
# Network configuration
|
||||
cadvisor_network_mode: "host" # Альтернатива: использовать host network для избежания конфликтов портов
|
||||
|
||||
# Alternative: use different port if default is busy
|
||||
cadvisor_fallback_ports: [8081, 8082, 8083, 8084]
|
||||
43
roles/cadvisor/tasks/main.yml
Normal file
43
roles/cadvisor/tasks/main.yml
Normal file
@ -0,0 +1,43 @@
|
||||
---
|
||||
- name: Check for available port for cAdvisor
|
||||
shell: |
|
||||
for port in 8080 8081 8082 8083 8084 8085; do
|
||||
if ! ss -tulpn | grep -q ":${port} "; then
|
||||
echo "${port}"
|
||||
break
|
||||
fi
|
||||
done
|
||||
args:
|
||||
executable: /bin/bash
|
||||
register: available_port
|
||||
changed_when: false
|
||||
tags: cadvisor
|
||||
|
||||
- name: Ensure Docker container for cAdvisor is running
|
||||
docker_container:
|
||||
name: cadvisor
|
||||
image: gcr.io/cadvisor/cadvisor:latest
|
||||
state: started
|
||||
restart_policy: always
|
||||
ports:
|
||||
- "{{ available_port.stdout | default('8084') }}:8080"
|
||||
volumes:
|
||||
- "/:/rootfs:ro"
|
||||
- "/var/run:/var/run:ro"
|
||||
- "/sys:/sys:ro"
|
||||
- "/var/lib/docker/:/var/lib/docker:ro"
|
||||
- "/dev/disk/:/dev/disk:ro"
|
||||
- "/var/run/docker.sock:/var/run/docker.sock:ro"
|
||||
privileged: true
|
||||
devices:
|
||||
- "/dev/kmsg:/dev/kmsg"
|
||||
cgroup_parent: "docker.slice"
|
||||
tags: cadvisor
|
||||
|
||||
- name: Display cAdvisor access info
|
||||
debug:
|
||||
msg: |
|
||||
cAdvisor is available at:
|
||||
- Web UI: http://{{ inventory_hostname }}:{{ available_port.stdout | default('8084') }}
|
||||
- Metrics: http://{{ inventory_hostname }}:{{ available_port.stdout | default('8084') }}/metrics
|
||||
tags: cadvisor
|
||||
14
roles/docker/defaults/main.yml
Normal file
14
roles/docker/defaults/main.yml
Normal file
@ -0,0 +1,14 @@
|
||||
---
|
||||
# Docker configuration
|
||||
docker_compose_version: "v2.27.0"
|
||||
docker_compose_install_path: "/usr/local/bin/docker-compose"
|
||||
|
||||
# Ports for App3 services (для информации, будут использоваться в других ролях)
|
||||
app3_service_ports:
|
||||
httpbin: 8080
|
||||
cadvisor: 8081
|
||||
alertmanager: 8082
|
||||
blackbox_exporter: 8083
|
||||
loki: 8084
|
||||
wordpress: 8085
|
||||
mysql: 3306 # internal port
|
||||
62
roles/docker/tasks/main.yml
Normal file
62
roles/docker/tasks/main.yml
Normal file
@ -0,0 +1,62 @@
|
||||
---
|
||||
- name: Install prerequisites for Docker
|
||||
apt:
|
||||
name:
|
||||
- curl
|
||||
- gnupg
|
||||
- ca-certificates
|
||||
- lsb-release
|
||||
state: present
|
||||
update_cache: yes
|
||||
tags: docker
|
||||
|
||||
- name: Install Docker using official script
|
||||
shell: |
|
||||
curl -fsSL https://get.docker.com -o /tmp/get-docker.sh
|
||||
sh /tmp/get-docker.sh
|
||||
rm /tmp/get-docker.sh
|
||||
args:
|
||||
creates: /usr/bin/docker
|
||||
tags: docker
|
||||
|
||||
- name: Install Docker Compose
|
||||
get_url:
|
||||
url: "https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-linux-x86_64"
|
||||
dest: "{{ docker_compose_install_path }}"
|
||||
mode: '0755'
|
||||
timeout: 30
|
||||
tags: docker
|
||||
|
||||
- name: Start and enable Docker service
|
||||
systemd:
|
||||
name: docker
|
||||
state: started
|
||||
enabled: yes
|
||||
daemon_reload: yes
|
||||
tags: docker
|
||||
|
||||
- name: Add admin user to docker group
|
||||
user:
|
||||
name: admin
|
||||
groups: docker
|
||||
append: yes
|
||||
tags: docker
|
||||
|
||||
- name: Verify Docker installation
|
||||
command: docker --version
|
||||
register: docker_version
|
||||
changed_when: false
|
||||
tags: docker
|
||||
|
||||
- name: Verify Docker Compose installation
|
||||
command: docker-compose --version
|
||||
register: docker_compose_version
|
||||
changed_when: false
|
||||
tags: docker
|
||||
|
||||
- name: Show installation results
|
||||
debug:
|
||||
msg:
|
||||
- "Docker: {{ docker_version.stdout }}"
|
||||
- "Docker Compose: {{ docker_compose_version.stdout }}"
|
||||
tags: docker
|
||||
4
roles/grafana.backup/defaults/main.yml
Normal file
4
roles/grafana.backup/defaults/main.yml
Normal file
@ -0,0 +1,4 @@
|
||||
---
|
||||
grafana_admin_password: "admin"
|
||||
grafana_version: "12.3.2"
|
||||
grafana_archive_type: "tar.gz" # или "zip"
|
||||
6
roles/grafana.backup/handlers/main.yml
Normal file
6
roles/grafana.backup/handlers/main.yml
Normal file
@ -0,0 +1,6 @@
|
||||
---
|
||||
- name: restart grafana
|
||||
systemd:
|
||||
name: grafana
|
||||
state: restarted
|
||||
daemon_reload: yes
|
||||
142
roles/grafana.backup/tasks/main.yml
Normal file
142
roles/grafana.backup/tasks/main.yml
Normal file
@ -0,0 +1,142 @@
|
||||
---
|
||||
- name: Debug - Show Grafana version
|
||||
debug:
|
||||
msg: "Устанавливаем Grafana версии {{ grafana_version }}"
|
||||
tags: grafana
|
||||
|
||||
- name: Install minimal dependencies
|
||||
apt:
|
||||
name:
|
||||
- curl
|
||||
- adduser
|
||||
- libfontconfig1
|
||||
- tar
|
||||
- gzip
|
||||
- procps
|
||||
state: present
|
||||
update_cache: yes
|
||||
tags: grafana
|
||||
|
||||
- name: Create Grafana user and group
|
||||
user:
|
||||
name: grafana
|
||||
system: yes
|
||||
shell: /bin/false
|
||||
home: /usr/share/grafana
|
||||
comment: "Grafana Server"
|
||||
tags: grafana
|
||||
|
||||
- name: Create Grafana data/log/config directories
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
owner: grafana
|
||||
group: grafana
|
||||
mode: '0755'
|
||||
loop:
|
||||
- /var/lib/grafana
|
||||
- /var/log/grafana
|
||||
- /etc/grafana
|
||||
tags: grafana
|
||||
|
||||
- name: Download Grafana from official site
|
||||
get_url:
|
||||
url: "https://dl.grafana.com/oss/release/grafana-{{ grafana_version }}.linux-amd64.tar.gz"
|
||||
dest: "/tmp/grafana-{{ grafana_version }}.linux-amd64.tar.gz"
|
||||
timeout: 300
|
||||
validate_certs: no
|
||||
tags: grafana
|
||||
|
||||
- name: Show download info
|
||||
debug:
|
||||
msg: "Grafana скачан: /tmp/grafana-{{ grafana_version }}.linux-amd64.tar.gz"
|
||||
tags: grafana
|
||||
|
||||
- name: Extract Grafana archive
|
||||
unarchive:
|
||||
src: "/tmp/grafana-{{ grafana_version }}.linux-amd64.tar.gz"
|
||||
dest: "/usr/share/"
|
||||
remote_src: yes
|
||||
owner: grafana
|
||||
group: grafana
|
||||
creates: "/usr/share/grafana-{{ grafana_version }}"
|
||||
tags: grafana
|
||||
|
||||
- name: Remove existing /usr/share/grafana if it exists (cleanup)
|
||||
file:
|
||||
path: /usr/share/grafana
|
||||
state: absent
|
||||
tags: grafana
|
||||
|
||||
- name: Create symlink from extracted version
|
||||
file:
|
||||
src: "/usr/share/grafana-{{ grafana_version }}"
|
||||
dest: "/usr/share/grafana"
|
||||
state: link
|
||||
owner: grafana
|
||||
group: grafana
|
||||
tags: grafana
|
||||
|
||||
- name: Create binary symlinks
|
||||
file:
|
||||
src: "/usr/share/grafana/bin/{{ item }}"
|
||||
dest: "/usr/local/bin/{{ item }}"
|
||||
state: link
|
||||
owner: root
|
||||
group: root
|
||||
loop:
|
||||
- grafana-server
|
||||
- grafana-cli
|
||||
tags: grafana
|
||||
|
||||
- name: Create Grafana configuration directory
|
||||
file:
|
||||
path: /etc/grafana
|
||||
state: directory
|
||||
owner: grafana
|
||||
group: grafana
|
||||
mode: '0755'
|
||||
tags: grafana
|
||||
|
||||
- name: Configure Grafana
|
||||
template:
|
||||
src: grafana.ini.j2
|
||||
dest: /etc/grafana/grafana.ini
|
||||
owner: grafana
|
||||
group: grafana
|
||||
mode: '0644'
|
||||
notify: restart grafana
|
||||
tags: grafana
|
||||
|
||||
- name: Install systemd service
|
||||
template:
|
||||
src: grafana.service.j2
|
||||
dest: /etc/systemd/system/grafana.service
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
notify: restart grafana
|
||||
tags: grafana
|
||||
|
||||
- name: Reload systemd
|
||||
systemd:
|
||||
daemon_reload: yes
|
||||
tags: grafana
|
||||
|
||||
- name: Enable and start Grafana service
|
||||
systemd:
|
||||
name: grafana
|
||||
enabled: yes
|
||||
state: started
|
||||
daemon_reload: yes
|
||||
tags: grafana
|
||||
|
||||
- name: Wait and verify Grafana is fully operational
|
||||
include_tasks: wait_and_verify.yml
|
||||
tags: grafana
|
||||
|
||||
- name: Clean up temporary files
|
||||
file:
|
||||
path: "/tmp/grafana-{{ grafana_version }}.linux-amd64.tar.gz"
|
||||
state: absent
|
||||
tags: grafana
|
||||
68
roles/grafana.backup/tasks/wait_and_verify.yml
Normal file
68
roles/grafana.backup/tasks/wait_and_verify.yml
Normal file
@ -0,0 +1,68 @@
|
||||
---
|
||||
- name: Wait for Grafana to start (initial wait)
|
||||
wait_for:
|
||||
timeout: 30
|
||||
tags: grafana
|
||||
|
||||
- name: Check if Grafana is listening on port 3000 (with retries)
|
||||
wait_for:
|
||||
port: 3000
|
||||
host: 127.0.0.1
|
||||
delay: 10
|
||||
timeout: 300 # 5 минут максимум
|
||||
state: started
|
||||
register: grafana_port_check
|
||||
tags: grafana
|
||||
|
||||
- name: Debug port check result
|
||||
debug:
|
||||
msg: "Grafana port check: {{ grafana_port_check.state }} after {{ grafana_port_check.elapsed }} seconds"
|
||||
tags: grafana
|
||||
|
||||
- name: Wait for Grafana API to be ready
|
||||
uri:
|
||||
url: "http://localhost:3000/api/health"
|
||||
method: GET
|
||||
status_code: 200
|
||||
timeout: 30
|
||||
register: grafana_api_check
|
||||
until: grafana_api_check.status == 200
|
||||
retries: 30 # 30 попыток * 5 секунд = 150 секунд
|
||||
delay: 5
|
||||
tags: grafana
|
||||
|
||||
- name: Debug API check result
|
||||
debug:
|
||||
msg: "Grafana API responded with HTTP {{ grafana_api_check.status }} after {{ grafana_api_check.attempts }} attempts"
|
||||
tags: grafana
|
||||
|
||||
- name: Verify Grafana installation (final check)
|
||||
block:
|
||||
- name: Check Grafana service status
|
||||
systemd:
|
||||
name: grafana
|
||||
register: grafana_service_status
|
||||
tags: grafana
|
||||
|
||||
- name: Check Grafana version
|
||||
command: /usr/local/bin/grafana-server --version
|
||||
register: grafana_version_check
|
||||
changed_when: false
|
||||
tags: grafana
|
||||
|
||||
- name: Show installation summary
|
||||
debug:
|
||||
msg: |
|
||||
✅ Grafana успешно установлена!
|
||||
|
||||
Версия: {{ grafana_version_check.stdout_lines[-1] | regex_search('Version ([0-9.]+)') | default('12.3.2') }}
|
||||
Служба: {{ grafana_service_status.status.ActiveState }}
|
||||
Порт 3000: {{ 'открыт' if grafana_port_check.state == 'started' else 'закрыт' }}
|
||||
API: {{ 'доступен' if grafana_api_check.status == 200 else 'недоступен' }}
|
||||
Время установки: {{ grafana_port_check.elapsed | default(0) | round(2) }} секунд
|
||||
|
||||
Доступ по адресу: http://{{ inventory_hostname }}:3000
|
||||
Логин: admin
|
||||
Пароль: {{ grafana_admin_password | default('admin') }}
|
||||
tags: grafana
|
||||
tags: grafana
|
||||
27
roles/grafana.backup/templates/grafana.ini.j2
Normal file
27
roles/grafana.backup/templates/grafana.ini.j2
Normal file
@ -0,0 +1,27 @@
|
||||
[server]
|
||||
http_port = 3000
|
||||
domain = 0.0.0.0
|
||||
root_url = http://%s:3000
|
||||
router_logging = true
|
||||
enable_gzip = false
|
||||
|
||||
[security]
|
||||
admin_user = admin
|
||||
admin_password = {{ grafana_admin_password | default('admin') }}
|
||||
|
||||
[database]
|
||||
type = sqlite3
|
||||
path = /var/lib/grafana/grafana.db
|
||||
|
||||
[session]
|
||||
provider = file
|
||||
|
||||
[analytics]
|
||||
reporting_enabled = false
|
||||
check_for_updates = false
|
||||
|
||||
[paths]
|
||||
data = /var/lib/grafana
|
||||
logs = /var/log/grafana
|
||||
plugins = /var/lib/grafana/plugins
|
||||
provisioning = /etc/grafana/provisioning
|
||||
24
roles/grafana.backup/templates/grafana.service.j2
Normal file
24
roles/grafana.backup/templates/grafana.service.j2
Normal file
@ -0,0 +1,24 @@
|
||||
[Unit]
|
||||
Description=Grafana Server
|
||||
Documentation=https://grafana.com/docs
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=grafana
|
||||
Group=grafana
|
||||
ExecStart=/usr/share/grafana/bin/grafana-server \
|
||||
--config=/etc/grafana/grafana.ini \
|
||||
--homepath=/usr/share/grafana \
|
||||
--packaging=tar
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
LimitNOFILE=10000
|
||||
Environment="GF_PATHS_HOME=/usr/share/grafana"
|
||||
Environment="GF_PATHS_CONFIG=/etc/grafana/grafana.ini"
|
||||
Environment="GF_PATHS_DATA=/var/lib/grafana"
|
||||
Environment="GF_PATHS_LOGS=/var/log/grafana"
|
||||
Environment="GF_PATHS_PLUGINS=/var/lib/grafana/plugins"
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
4
roles/grafana/defaults/main.yml
Normal file
4
roles/grafana/defaults/main.yml
Normal file
@ -0,0 +1,4 @@
|
||||
---
|
||||
grafana_admin_password: "admin"
|
||||
grafana_version: "12.3.2"
|
||||
grafana_archive_type: "tar.gz" # или "zip"
|
||||
6
roles/grafana/handlers/main.yml
Normal file
6
roles/grafana/handlers/main.yml
Normal file
@ -0,0 +1,6 @@
|
||||
---
|
||||
- name: restart grafana
|
||||
systemd:
|
||||
name: grafana
|
||||
state: restarted
|
||||
daemon_reload: yes
|
||||
142
roles/grafana/tasks/main.yml
Normal file
142
roles/grafana/tasks/main.yml
Normal file
@ -0,0 +1,142 @@
|
||||
---
|
||||
- name: Debug - Show Grafana version
|
||||
debug:
|
||||
msg: "Устанавливаем Grafana версии {{ grafana_version }}"
|
||||
tags: grafana
|
||||
|
||||
- name: Install minimal dependencies
|
||||
apt:
|
||||
name:
|
||||
- curl
|
||||
- adduser
|
||||
- libfontconfig1
|
||||
- tar
|
||||
- gzip
|
||||
- procps
|
||||
state: present
|
||||
update_cache: yes
|
||||
tags: grafana
|
||||
|
||||
- name: Create Grafana user and group
|
||||
user:
|
||||
name: grafana
|
||||
system: yes
|
||||
shell: /bin/false
|
||||
home: /usr/share/grafana
|
||||
comment: "Grafana Server"
|
||||
tags: grafana
|
||||
|
||||
- name: Create Grafana data/log/config directories
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
owner: grafana
|
||||
group: grafana
|
||||
mode: '0755'
|
||||
loop:
|
||||
- /var/lib/grafana
|
||||
- /var/log/grafana
|
||||
- /etc/grafana
|
||||
tags: grafana
|
||||
|
||||
- name: Download Grafana from official site
|
||||
get_url:
|
||||
url: "https://dl.grafana.com/oss/release/grafana-{{ grafana_version }}.linux-amd64.tar.gz"
|
||||
dest: "/tmp/grafana-{{ grafana_version }}.linux-amd64.tar.gz"
|
||||
timeout: 300
|
||||
validate_certs: no
|
||||
tags: grafana
|
||||
|
||||
- name: Show download info
|
||||
debug:
|
||||
msg: "Grafana скачан: /tmp/grafana-{{ grafana_version }}.linux-amd64.tar.gz"
|
||||
tags: grafana
|
||||
|
||||
- name: Extract Grafana archive
|
||||
unarchive:
|
||||
src: "/tmp/grafana-{{ grafana_version }}.linux-amd64.tar.gz"
|
||||
dest: "/usr/share/"
|
||||
remote_src: yes
|
||||
owner: grafana
|
||||
group: grafana
|
||||
creates: "/usr/share/grafana-{{ grafana_version }}"
|
||||
tags: grafana
|
||||
|
||||
- name: Remove existing /usr/share/grafana if it exists (cleanup)
|
||||
file:
|
||||
path: /usr/share/grafana
|
||||
state: absent
|
||||
tags: grafana
|
||||
|
||||
- name: Create symlink from extracted version
|
||||
file:
|
||||
src: "/usr/share/grafana-{{ grafana_version }}"
|
||||
dest: "/usr/share/grafana"
|
||||
state: link
|
||||
owner: grafana
|
||||
group: grafana
|
||||
tags: grafana
|
||||
|
||||
- name: Create binary symlinks
|
||||
file:
|
||||
src: "/usr/share/grafana/bin/{{ item }}"
|
||||
dest: "/usr/local/bin/{{ item }}"
|
||||
state: link
|
||||
owner: root
|
||||
group: root
|
||||
loop:
|
||||
- grafana-server
|
||||
- grafana-cli
|
||||
tags: grafana
|
||||
|
||||
- name: Create Grafana configuration directory
|
||||
file:
|
||||
path: /etc/grafana
|
||||
state: directory
|
||||
owner: grafana
|
||||
group: grafana
|
||||
mode: '0755'
|
||||
tags: grafana
|
||||
|
||||
- name: Configure Grafana
|
||||
template:
|
||||
src: grafana.ini.j2
|
||||
dest: /etc/grafana/grafana.ini
|
||||
owner: grafana
|
||||
group: grafana
|
||||
mode: '0644'
|
||||
notify: restart grafana
|
||||
tags: grafana
|
||||
|
||||
- name: Install systemd service
|
||||
template:
|
||||
src: grafana.service.j2
|
||||
dest: /etc/systemd/system/grafana.service
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
notify: restart grafana
|
||||
tags: grafana
|
||||
|
||||
- name: Reload systemd
|
||||
systemd:
|
||||
daemon_reload: yes
|
||||
tags: grafana
|
||||
|
||||
- name: Enable and start Grafana service
|
||||
systemd:
|
||||
name: grafana
|
||||
enabled: yes
|
||||
state: started
|
||||
daemon_reload: yes
|
||||
tags: grafana
|
||||
|
||||
- name: Wait and verify Grafana is fully operational
|
||||
include_tasks: wait_and_verify.yml
|
||||
tags: grafana
|
||||
|
||||
- name: Clean up temporary files
|
||||
file:
|
||||
path: "/tmp/grafana-{{ grafana_version }}.linux-amd64.tar.gz"
|
||||
state: absent
|
||||
tags: grafana
|
||||
110
roles/grafana/tasks/wait_and_verify.yml
Normal file
110
roles/grafana/tasks/wait_and_verify.yml
Normal file
@ -0,0 +1,110 @@
|
||||
---
|
||||
- name: Phase 1: Initial wait for Grafana to start
|
||||
pause:
|
||||
seconds: 60
|
||||
prompt: "Phase 1/5: Initial wait for Grafana startup (60 seconds)..."
|
||||
tags: grafana
|
||||
|
||||
- name: Check if Grafana service is active (with retries)
|
||||
shell: |
|
||||
systemctl is-active grafana
|
||||
register: grafana_active
|
||||
until: grafana_active.stdout == "active"
|
||||
retries: 60 # 60 * 5 = 300 секунд (5 минут)
|
||||
delay: 5
|
||||
tags: grafana
|
||||
|
||||
- name: Phase 2: Wait for database migrations (wave 1)
|
||||
pause:
|
||||
seconds: 180
|
||||
prompt: "Phase 2/5: Waiting for database migrations (180 seconds)..."
|
||||
tags: grafana
|
||||
|
||||
- name: Phase 3: Wait for plugins installation (wave 2)
|
||||
pause:
|
||||
seconds: 180
|
||||
prompt: "Phase 3/5: Waiting for plugins installation (180 seconds)..."
|
||||
tags: grafana
|
||||
|
||||
- name: Phase 4: Wait for HTTP server startup (wave 3)
|
||||
pause:
|
||||
seconds: 180
|
||||
prompt: "Phase 4/5: Waiting for HTTP server startup (180 seconds)..."
|
||||
tags: grafana
|
||||
|
||||
- name: Check if port 3000 is listening (with very long timeout)
|
||||
wait_for:
|
||||
port: 3000
|
||||
host: 127.0.0.1
|
||||
timeout: 600 # 10 минут
|
||||
state: started
|
||||
register: port_check
|
||||
tags: grafana
|
||||
|
||||
- name: Phase 5: Final verification (wave 4)
|
||||
pause:
|
||||
seconds: 120
|
||||
prompt: "Phase 5/5: Final verification (120 seconds)..."
|
||||
tags: grafana
|
||||
|
||||
- name: Check Grafana API health (with many retries)
|
||||
uri:
|
||||
url: "http://localhost:3000/api/health"
|
||||
method: GET
|
||||
status_code: 200
|
||||
timeout: 10
|
||||
register: api_check
|
||||
until: api_check.status == 200
|
||||
retries: 60 # 60 * 5 = 300 секунд (5 минут)
|
||||
delay: 5
|
||||
tags: grafana
|
||||
|
||||
- name: Calculate total wait time
|
||||
set_fact:
|
||||
total_wait_time: "{{ 60 + 180 + 180 + 180 + 120 }}"
|
||||
tags: grafana
|
||||
|
||||
- name: Show installation success with detailed info
|
||||
debug:
|
||||
msg: |
|
||||
🎉 Grafana успешно установлена и готова к работе!
|
||||
|
||||
⏱️ Общее время установки: {{ total_wait_time }} секунд
|
||||
📊 Статус компонентов:
|
||||
• Служба: ✅ {{ grafana_active.stdout }}
|
||||
• Порт 3000: {{ '✅ открыт' if port_check is defined and port_check.state == 'started' else '❌ закрыт' }}
|
||||
• API: {{ '✅ доступен (HTTP ' ~ api_check.status ~ ')' if api_check is defined and api_check.status == 200 else '❌ недоступен' }}
|
||||
|
||||
🔗 Доступ:
|
||||
• URL: http://{{ inventory_hostname }}:3000
|
||||
• Логин: admin
|
||||
• Пароль: {{ grafana_admin_password | default('admin') }}
|
||||
|
||||
📋 Для проверки выполните:
|
||||
curl http://{{ inventory_hostname }}:3000/api/health
|
||||
|
||||
💡 Примечание: Первый запуск Grafana занимает время из-за:
|
||||
1. Миграций базы данных
|
||||
2. Установки плагинов по умолчанию
|
||||
3. Инициализации сервиса
|
||||
|
||||
Последующие запуски будут значительно быстрее.
|
||||
tags: grafana
|
||||
|
||||
- name: Final check from control node (optional)
|
||||
delegate_to: localhost
|
||||
run_once: yes
|
||||
when: false # Отключено по умолчанию, можно включить
|
||||
tags: grafana
|
||||
block:
|
||||
- name: Test external access
|
||||
uri:
|
||||
url: "http://{{ hostvars[groups['grafana'][0]]['ansible_default_ipv4']['address'] | default(groups['grafana'][0]) }}:3000/api/health"
|
||||
method: GET
|
||||
status_code: 200
|
||||
timeout: 30
|
||||
register: external_check
|
||||
|
||||
- name: Show external access result
|
||||
debug:
|
||||
msg: "External access: {{ '✅ успешно' if external_check.status == 200 else '❌ недоступно' }}"
|
||||
27
roles/grafana/templates/grafana.ini.j2
Normal file
27
roles/grafana/templates/grafana.ini.j2
Normal file
@ -0,0 +1,27 @@
|
||||
[server]
|
||||
http_port = 3000
|
||||
domain = 0.0.0.0
|
||||
root_url = http://localhost:3000
|
||||
router_logging = true
|
||||
enable_gzip = false
|
||||
|
||||
[security]
|
||||
admin_user = admin
|
||||
admin_password = {{ grafana_admin_password | default('admin') }}
|
||||
|
||||
[database]
|
||||
type = sqlite3
|
||||
path = /var/lib/grafana/grafana.db
|
||||
|
||||
[session]
|
||||
provider = file
|
||||
|
||||
[analytics]
|
||||
reporting_enabled = false
|
||||
check_for_updates = false
|
||||
|
||||
[paths]
|
||||
data = /var/lib/grafana
|
||||
logs = /var/log/grafana
|
||||
plugins = /var/lib/grafana/plugins
|
||||
provisioning = /etc/grafana/provisioning
|
||||
24
roles/grafana/templates/grafana.service.j2
Normal file
24
roles/grafana/templates/grafana.service.j2
Normal file
@ -0,0 +1,24 @@
|
||||
[Unit]
|
||||
Description=Grafana Server
|
||||
Documentation=https://grafana.com/docs
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=grafana
|
||||
Group=grafana
|
||||
ExecStart=/usr/share/grafana/bin/grafana-server \
|
||||
--config=/etc/grafana/grafana.ini \
|
||||
--homepath=/usr/share/grafana \
|
||||
--packaging=tar
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
LimitNOFILE=10000
|
||||
Environment="GF_PATHS_HOME=/usr/share/grafana"
|
||||
Environment="GF_PATHS_CONFIG=/etc/grafana/grafana.ini"
|
||||
Environment="GF_PATHS_DATA=/var/lib/grafana"
|
||||
Environment="GF_PATHS_LOGS=/var/log/grafana"
|
||||
Environment="GF_PATHS_PLUGINS=/var/lib/grafana/plugins"
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
5
roles/httpbin/defaults/main.yml
Normal file
5
roles/httpbin/defaults/main.yml
Normal file
@ -0,0 +1,5 @@
|
||||
---
|
||||
# httpbin configuration
|
||||
httpbin_port: 8080
|
||||
httpbin_image: "kennethreitz/httpbin"
|
||||
httpbin_container_name: "httpbin"
|
||||
42
roles/httpbin/tasks/main.yml
Normal file
42
roles/httpbin/tasks/main.yml
Normal file
@ -0,0 +1,42 @@
|
||||
---
|
||||
- name: Ensure httpbin container is running
|
||||
community.docker.docker_container:
|
||||
name: "{{ httpbin_container_name }}"
|
||||
image: "{{ httpbin_image }}"
|
||||
state: started
|
||||
restart_policy: unless-stopped
|
||||
ports:
|
||||
- "{{ httpbin_port }}:80"
|
||||
tags: httpbin
|
||||
|
||||
- name: Configure UFW for httpbin
|
||||
ufw:
|
||||
rule: allow
|
||||
port: "{{ httpbin_port }}"
|
||||
proto: tcp
|
||||
comment: "httpbin API"
|
||||
tags: httpbin
|
||||
|
||||
- name: Wait for httpbin to be ready
|
||||
wait_for:
|
||||
port: "{{ httpbin_port }}"
|
||||
host: "{{ ansible_host }}"
|
||||
delay: 2
|
||||
timeout: 60
|
||||
tags: httpbin
|
||||
|
||||
- name: Verify httpbin is accessible
|
||||
uri:
|
||||
url: "http://{{ ansible_host }}:{{ httpbin_port }}/get"
|
||||
return_content: true
|
||||
status_code: 200
|
||||
register: httpbin_check
|
||||
until: httpbin_check.status == 200
|
||||
retries: 5
|
||||
delay: 3
|
||||
tags: httpbin
|
||||
|
||||
- name: Show httpbin status
|
||||
debug:
|
||||
msg: "httpbin successfully deployed at http://{{ ansible_host }}:{{ httpbin_port }}/"
|
||||
tags: httpbin
|
||||
13
roles/nginx/handlers/main.yml
Normal file
13
roles/nginx/handlers/main.yml
Normal file
@ -0,0 +1,13 @@
|
||||
---
|
||||
# Handlers for Nginx role
|
||||
- name: reload nginx
|
||||
systemd:
|
||||
name: nginx
|
||||
state: reloaded
|
||||
daemon_reload: yes
|
||||
|
||||
- name: restart nginx
|
||||
systemd:
|
||||
name: nginx
|
||||
state: restarted
|
||||
daemon_reload: yes
|
||||
113
roles/nginx/tasks/main.yml
Normal file
113
roles/nginx/tasks/main.yml
Normal file
@ -0,0 +1,113 @@
|
||||
---
|
||||
# Установка и настройка Nginx - финальная версия (без echo модуля)
|
||||
- name: Install prerequisites
|
||||
apt:
|
||||
name:
|
||||
- curl
|
||||
- wget
|
||||
- software-properties-common
|
||||
- ca-certificates
|
||||
- gnupg2
|
||||
state: present
|
||||
update_cache: yes
|
||||
tags: nginx
|
||||
|
||||
- name: Create keyrings directory
|
||||
file:
|
||||
path: /etc/apt/keyrings
|
||||
state: directory
|
||||
mode: '0755'
|
||||
tags: nginx
|
||||
|
||||
- name: Download and add Nginx GPG key
|
||||
shell: |
|
||||
curl -fsSL https://nginx.org/keys/nginx_signing.key | gpg --dearmor -o /etc/apt/keyrings/nginx.gpg
|
||||
chmod 644 /etc/apt/keyrings/nginx.gpg
|
||||
args:
|
||||
creates: /etc/apt/keyrings/nginx.gpg
|
||||
tags: nginx
|
||||
|
||||
- name: Add Nginx repository
|
||||
apt_repository:
|
||||
repo: "deb [signed-by=/etc/apt/keyrings/nginx.gpg] http://nginx.org/packages/ubuntu {{ ansible_distribution_release }} nginx"
|
||||
state: present
|
||||
filename: nginx-official
|
||||
update_cache: yes
|
||||
tags: nginx
|
||||
|
||||
- name: Install Nginx
|
||||
apt:
|
||||
name: nginx
|
||||
state: latest
|
||||
update_cache: yes
|
||||
tags: nginx
|
||||
|
||||
- name: Create custom web directory
|
||||
file:
|
||||
path: /var/www/app1
|
||||
state: directory
|
||||
owner: www-data
|
||||
group: www-data
|
||||
mode: '0755'
|
||||
tags: nginx
|
||||
|
||||
- name: Deploy test index.html
|
||||
template:
|
||||
src: index.html.j2
|
||||
dest: /var/www/app1/index.html
|
||||
owner: www-data
|
||||
group: www-data
|
||||
mode: '0644'
|
||||
tags: nginx
|
||||
|
||||
- name: Remove default Nginx configurations
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: absent
|
||||
loop:
|
||||
- /etc/nginx/conf.d/default.conf
|
||||
- /etc/nginx/conf.d/default.conf.backup
|
||||
- /etc/nginx/sites-enabled/default
|
||||
tags: nginx
|
||||
notify: reload nginx
|
||||
|
||||
- name: Deploy Nginx configuration for app1 in conf.d
|
||||
template:
|
||||
src: app1.conf.j2
|
||||
dest: /etc/nginx/conf.d/app1.conf
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
tags: nginx
|
||||
notify: reload nginx
|
||||
|
||||
- name: Remove old sites-available config if exists
|
||||
file:
|
||||
path: /etc/nginx/sites-available/app1
|
||||
state: absent
|
||||
tags: nginx
|
||||
|
||||
- name: Remove old sites-enabled symlink if exists
|
||||
file:
|
||||
path: /etc/nginx/sites-enabled/app1
|
||||
state: absent
|
||||
tags: nginx
|
||||
|
||||
- name: Test Nginx configuration
|
||||
command: nginx -t
|
||||
register: nginx_test
|
||||
changed_when: false
|
||||
tags: nginx
|
||||
|
||||
- name: Display Nginx test result
|
||||
debug:
|
||||
msg: "{{ nginx_test.stdout_lines }}"
|
||||
tags: nginx
|
||||
|
||||
- name: Enable and start Nginx service
|
||||
systemd:
|
||||
name: nginx
|
||||
state: started
|
||||
enabled: yes
|
||||
daemon_reload: yes
|
||||
tags: nginx
|
||||
72
roles/nginx/templates/app1.conf.j2
Normal file
72
roles/nginx/templates/app1.conf.j2
Normal file
@ -0,0 +1,72 @@
|
||||
# App1 Nginx configuration
|
||||
server {
|
||||
listen 80;
|
||||
server_name _;
|
||||
|
||||
root /var/www/app1;
|
||||
index index.html;
|
||||
|
||||
# Main page
|
||||
location / {
|
||||
try_files $uri $uri/ =404;
|
||||
}
|
||||
|
||||
# Nginx status for monitoring
|
||||
location /status {
|
||||
stub_status on;
|
||||
access_log off;
|
||||
allow 127.0.0.1;
|
||||
allow 192.168.0.0/24;
|
||||
deny all;
|
||||
}
|
||||
|
||||
# Alternative status endpoint
|
||||
location /nginx_status {
|
||||
stub_status on;
|
||||
access_log off;
|
||||
allow 127.0.0.1;
|
||||
allow 192.168.0.0/24;
|
||||
deny all;
|
||||
}
|
||||
|
||||
# Test endpoints for load generation
|
||||
location /api/test {
|
||||
add_header Content-Type application/json;
|
||||
add_header Cache-Control "no-cache";
|
||||
return 200 '{"status": "ok", "timestamp": "$time_iso8601", "server": "app1", "request_id": "$request_id", "method": "$request_method"}';
|
||||
}
|
||||
|
||||
location /api/slow {
|
||||
add_header Content-Type application/json;
|
||||
add_header Cache-Control "no-cache";
|
||||
# Simple JSON response
|
||||
return 200 '{"status": "slow_endpoint", "message": "Endpoint for testing", "timestamp": "$time_iso8601"}';
|
||||
}
|
||||
|
||||
location /api/error {
|
||||
add_header Content-Type application/json;
|
||||
add_header Cache-Control "no-cache";
|
||||
# Simple error endpoint
|
||||
return 500 '{"status": "error", "code": 500, "message": "Test error response", "request_id": "$request_id"}';
|
||||
}
|
||||
|
||||
location /api/random {
|
||||
add_header Content-Type application/json;
|
||||
add_header Cache-Control "no-cache";
|
||||
# Random response - always success but different messages
|
||||
return 200 '{"status": "random", "timestamp": "$time_iso8601", "value": "$msec", "message": "Random endpoint response"}';
|
||||
}
|
||||
|
||||
location /api/metrics {
|
||||
add_header Content-Type application/json;
|
||||
add_header Cache-Control "no-cache";
|
||||
return 200 '{"server": "app1", "timestamp": "$time_iso8601", "available_endpoints": ["/api/test", "/api/slow", "/api/error", "/api/random", "/api/metrics", "/health", "/status"]}';
|
||||
}
|
||||
|
||||
# Health check endpoint
|
||||
location /health {
|
||||
add_header Content-Type application/json;
|
||||
access_log off;
|
||||
return 200 '{"status": "healthy", "service": "nginx", "timestamp": "$time_iso8601", "version": "$nginx_version"}';
|
||||
}
|
||||
}
|
||||
307
roles/nginx/templates/index.html.j2
Normal file
307
roles/nginx/templates/index.html.j2
Normal file
@ -0,0 +1,307 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Test App1 - Nginx Load Test</title>
|
||||
<meta charset="UTF-8">
|
||||
<style>
|
||||
body {
|
||||
font-family: Arial, sans-serif;
|
||||
margin: 40px;
|
||||
background-color: #f4f4f9;
|
||||
color: #333;
|
||||
}
|
||||
.container {
|
||||
max-width: 1000px;
|
||||
margin: 0 auto;
|
||||
padding: 20px;
|
||||
background-color: white;
|
||||
border-radius: 10px;
|
||||
box-shadow: 0 0 10px rgba(0,0,0,0.1);
|
||||
}
|
||||
h1 { color: #2c3e50; border-left: 5px solid #3498db; padding-left: 15px; }
|
||||
.panel {
|
||||
background-color: #e8f4fc;
|
||||
padding: 20px;
|
||||
border-radius: 8px;
|
||||
margin: 20px 0;
|
||||
border-left: 4px solid #3498db;
|
||||
}
|
||||
.metrics-panel {
|
||||
background-color: #e8f8ef;
|
||||
padding: 20px;
|
||||
border-radius: 8px;
|
||||
margin: 20px 0;
|
||||
border-left: 4px solid #2ecc71;
|
||||
}
|
||||
.load-panel {
|
||||
background-color: #fff3cd;
|
||||
padding: 20px;
|
||||
border-radius: 8px;
|
||||
margin: 20px 0;
|
||||
border-left: 4px solid #f39c12;
|
||||
}
|
||||
button {
|
||||
padding: 10px 20px;
|
||||
margin: 5px;
|
||||
border: none;
|
||||
border-radius: 5px;
|
||||
cursor: pointer;
|
||||
font-size: 14px;
|
||||
font-weight: bold;
|
||||
}
|
||||
.btn-primary { background-color: #3498db; color: white; }
|
||||
.btn-success { background-color: #2ecc71; color: white; }
|
||||
.btn-warning { background-color: #f39c12; color: white; }
|
||||
.btn-danger { background-color: #e74c3c; color: white; }
|
||||
.stats { display: flex; gap: 20px; margin-top: 20px; }
|
||||
.stat-box {
|
||||
flex: 1;
|
||||
padding: 15px;
|
||||
background: white;
|
||||
border-radius: 5px;
|
||||
text-align: center;
|
||||
box-shadow: 0 2px 5px rgba(0,0,0,0.1);
|
||||
}
|
||||
#log {
|
||||
background: #f8f9fa;
|
||||
padding: 15px;
|
||||
border-radius: 5px;
|
||||
height: 200px;
|
||||
overflow-y: auto;
|
||||
font-family: monospace;
|
||||
font-size: 12px;
|
||||
border: 1px solid #ddd;
|
||||
}
|
||||
.status-icon::before {
|
||||
content: "✓";
|
||||
color: green;
|
||||
font-weight: bold;
|
||||
margin-right: 5px;
|
||||
}
|
||||
.error-icon::before {
|
||||
content: "✗";
|
||||
color: red;
|
||||
font-weight: bold;
|
||||
margin-right: 5px;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="container">
|
||||
<h1>Test Application 1 - Nginx Load Testing</h1>
|
||||
|
||||
<div class="panel">
|
||||
<h3>Server Information</h3>
|
||||
<p><strong>Hostname:</strong> {{ ansible_hostname }}</p>
|
||||
<p><strong>IP Address:</strong> {{ ansible_default_ipv4.address }}</p>
|
||||
<p><strong>Role:</strong> Web Server (Nginx)</p>
|
||||
<p><strong>Deployed:</strong> {{ ansible_date_time.date }}</p>
|
||||
</div>
|
||||
|
||||
<div class="load-panel">
|
||||
<h3>Load Testing Controls</h3>
|
||||
<div>
|
||||
<button class="btn-primary" onclick="startLoadTest('light')">Light Load (1-5 req/sec)</button>
|
||||
<button class="btn-success" onclick="startLoadTest('medium')">Medium Load (10-20 req/sec)</button>
|
||||
<button class="btn-warning" onclick="startLoadTest('heavy')">Heavy Load (50-100 req/sec)</button>
|
||||
<button class="btn-danger" onclick="stopLoadTest()">Stop Load Test</button>
|
||||
</div>
|
||||
|
||||
<div style="margin-top: 20px;">
|
||||
<label>Custom Requests: </label>
|
||||
<input type="number" id="requestCount" value="100" min="1" max="10000" style="width: 80px;">
|
||||
<button class="btn-primary" onclick="sendCustomRequests()">Send Requests</button>
|
||||
<button class="btn-warning" onclick="generateTraffic()">Generate Random Traffic</button>
|
||||
</div>
|
||||
|
||||
<div class="stats">
|
||||
<div class="stat-box">
|
||||
<h4>Requests Sent</h4>
|
||||
<div id="requestsCount">0</div>
|
||||
</div>
|
||||
<div class="stat-box">
|
||||
<h4>Successful</h4>
|
||||
<div id="successCount">0</div>
|
||||
</div>
|
||||
<div class="stat-box">
|
||||
<h4>Failed</h4>
|
||||
<div id="failedCount">0</div>
|
||||
</div>
|
||||
<div class="stat-box">
|
||||
<h4>Active Tests</h4>
|
||||
<div id="activeTests">0</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="metrics-panel">
|
||||
<h3>Monitoring & Metrics</h3>
|
||||
<p>Node Exporter: <a href="http://{{ ansible_default_ipv4.address }}:9100/metrics" target="_blank">:9100/metrics</a></p>
|
||||
<p>Nginx Status: <a href="http://{{ ansible_default_ipv4.address }}/status" target="_blank">/status</a></p>
|
||||
<p>Nginx Metrics: <a href="http://{{ ansible_default_ipv4.address }}/nginx_status" target="_blank">/nginx_status</a></p>
|
||||
|
||||
<h4>API Endpoints:</h4>
|
||||
<div style="font-family: monospace; font-size: 12px; background: #f8f9fa; padding: 10px; border-radius: 5px;">
|
||||
<div><a href="/api/test" target="_blank">/api/test</a> - Test endpoint</div>
|
||||
<div><a href="/api/error" target="_blank">/api/error</a> - Error endpoint (500)</div>
|
||||
<div><a href="/api/random" target="_blank">/api/random</a> - Random response</div>
|
||||
<div><a href="/api/metrics" target="_blank">/api/metrics</a> - API info</div>
|
||||
<div><a href="/health" target="_blank">/health</a> - Health check</div>
|
||||
</div>
|
||||
|
||||
<h4>Quick Links:</h4>
|
||||
<div>
|
||||
<a href="http://192.168.0.106:3000" target="_blank">Grafana Dashboard</a> |
|
||||
<a href="http://192.168.0.105:9090" target="_blank">Prometheus UI</a> |
|
||||
<a href="http://192.168.0.100:3000" target="_blank">Git/Forgejo</a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div>
|
||||
<h3>Request Log</h3>
|
||||
<div id="log"></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
let loadTestInterval = null;
|
||||
let activeTests = 0;
|
||||
let requestsSent = 0;
|
||||
let successCount = 0;
|
||||
let failedCount = 0;
|
||||
|
||||
function logMessage(message, type = 'info') {
|
||||
const logDiv = document.getElementById('log');
|
||||
const timestamp = new Date().toLocaleTimeString();
|
||||
const icon = type === 'error' ? '<span class="error-icon"></span>' :
|
||||
type === 'success' ? '<span class="status-icon"></span>' : '>';
|
||||
const color = type === 'error' ? 'red' : type === 'success' ? 'green' : '#666';
|
||||
logDiv.innerHTML = `<div style="color: ${color}; margin: 2px 0;">${icon} [${timestamp}] ${message}</div>` + logDiv.innerHTML;
|
||||
}
|
||||
|
||||
function updateStats() {
|
||||
document.getElementById('requestsCount').textContent = requestsSent;
|
||||
document.getElementById('successCount').textContent = successCount;
|
||||
document.getElementById('failedCount').textContent = failedCount;
|
||||
document.getElementById('activeTests').textContent = activeTests;
|
||||
}
|
||||
|
||||
async function sendRequest(endpoint = '/') {
|
||||
requestsSent++;
|
||||
updateStats();
|
||||
|
||||
try {
|
||||
const startTime = Date.now();
|
||||
const response = await fetch(endpoint);
|
||||
const endTime = Date.now();
|
||||
const duration = endTime - startTime;
|
||||
|
||||
if (response.ok) {
|
||||
successCount++;
|
||||
logMessage(`Request ${requestsSent} to ${endpoint}: ${duration}ms (${response.status})`, 'success');
|
||||
} else {
|
||||
failedCount++;
|
||||
logMessage(`Request ${requestsSent} to ${endpoint}: Failed (${response.status})`, 'error');
|
||||
}
|
||||
} catch (error) {
|
||||
failedCount++;
|
||||
logMessage(`Request ${requestsSent} to ${endpoint}: Error - ${error.message}`, 'error');
|
||||
}
|
||||
}
|
||||
|
||||
function startLoadTest(intensity) {
|
||||
if (loadTestInterval) {
|
||||
clearInterval(loadTestInterval);
|
||||
}
|
||||
|
||||
let interval;
|
||||
let endpoints = ['/', '/api/test', '/api/random', '/health'];
|
||||
|
||||
switch(intensity) {
|
||||
case 'light':
|
||||
interval = 200; // 5 req/sec
|
||||
logMessage('Starting LIGHT load test (5 req/sec)');
|
||||
break;
|
||||
case 'medium':
|
||||
interval = 50; // 20 req/sec
|
||||
logMessage('Starting MEDIUM load test (20 req/sec)');
|
||||
break;
|
||||
case 'heavy':
|
||||
interval = 10; // 100 req/sec
|
||||
logMessage('Starting HEAVY load test (100 req/sec)');
|
||||
break;
|
||||
}
|
||||
|
||||
activeTests++;
|
||||
updateStats();
|
||||
|
||||
loadTestInterval = setInterval(() => {
|
||||
const endpoint = endpoints[Math.floor(Math.random() * endpoints.length)];
|
||||
sendRequest(endpoint);
|
||||
}, interval);
|
||||
}
|
||||
|
||||
function stopLoadTest() {
|
||||
if (loadTestInterval) {
|
||||
clearInterval(loadTestInterval);
|
||||
loadTestInterval = null;
|
||||
activeTests = Math.max(0, activeTests - 1);
|
||||
updateStats();
|
||||
logMessage('Load test stopped');
|
||||
}
|
||||
}
|
||||
|
||||
function sendCustomRequests() {
|
||||
const count = parseInt(document.getElementById('requestCount').value);
|
||||
const endpoints = ['/', '/api/test', '/api/error', '/api/random', '/health'];
|
||||
|
||||
logMessage(`Sending ${count} custom requests`);
|
||||
|
||||
activeTests++;
|
||||
updateStats();
|
||||
|
||||
let completed = 0;
|
||||
for (let i = 0; i < count; i++) {
|
||||
setTimeout(() => {
|
||||
const endpoint = endpoints[Math.floor(Math.random() * endpoints.length)];
|
||||
sendRequest(endpoint);
|
||||
completed++;
|
||||
if (completed === count) {
|
||||
activeTests--;
|
||||
updateStats();
|
||||
logMessage(`Completed ${count} custom requests`);
|
||||
}
|
||||
}, i * 10); // Stagger requests
|
||||
}
|
||||
}
|
||||
|
||||
function generateTraffic() {
|
||||
logMessage('Starting random traffic generation');
|
||||
activeTests++;
|
||||
updateStats();
|
||||
|
||||
const endpoints = ['/', '/api/test', '/api/error', '/api/random', '/health'];
|
||||
|
||||
const randomTrafficInterval = setInterval(() => {
|
||||
if (Math.random() > 0.7) { // 30% chance to send request
|
||||
const endpoint = endpoints[Math.floor(Math.random() * endpoints.length)];
|
||||
sendRequest(endpoint);
|
||||
}
|
||||
}, 100);
|
||||
|
||||
// Stop after 2 minutes
|
||||
setTimeout(() => {
|
||||
clearInterval(randomTrafficInterval);
|
||||
activeTests--;
|
||||
updateStats();
|
||||
logMessage('Random traffic generation stopped');
|
||||
}, 120000);
|
||||
}
|
||||
|
||||
// Initialize with some log messages
|
||||
logMessage('App1 Load Testing Interface Ready');
|
||||
logMessage(`Server: {{ ansible_hostname }} ({{ ansible_default_ipv4.address }})`);
|
||||
logMessage('Use buttons above to generate load for monitoring');
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
59
roles/nginx_exporter/tasks/main.yml
Normal file
59
roles/nginx_exporter/tasks/main.yml
Normal file
@ -0,0 +1,59 @@
|
||||
---
|
||||
# Install nginx-prometheus-exporter
|
||||
- name: Download nginx-prometheus-exporter
|
||||
get_url:
|
||||
url: https://github.com/nginxinc/nginx-prometheus-exporter/releases/download/v0.11.0/nginx-prometheus-exporter_0.11.0_linux_amd64.tar.gz
|
||||
dest: /tmp/nginx-exporter.tar.gz
|
||||
tags: nginx_exporter
|
||||
|
||||
- name: Extract nginx-exporter
|
||||
unarchive:
|
||||
src: /tmp/nginx-exporter.tar.gz
|
||||
dest: /tmp/
|
||||
remote_src: yes
|
||||
creates: /tmp/nginx-prometheus-exporter
|
||||
tags: nginx_exporter
|
||||
|
||||
- name: Install nginx-exporter binary
|
||||
copy:
|
||||
src: /tmp/nginx-prometheus-exporter
|
||||
dest: /usr/local/bin/nginx-prometheus-exporter
|
||||
remote_src: yes
|
||||
mode: '0755'
|
||||
tags: nginx_exporter
|
||||
|
||||
- name: Create systemd service for nginx-exporter
|
||||
copy:
|
||||
content: |
|
||||
[Unit]
|
||||
Description=NGINX Prometheus Exporter
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=root
|
||||
ExecStart=/usr/local/bin/nginx-prometheus-exporter -nginx.scrape-uri=http://localhost:80/status
|
||||
Restart=on-failure
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
dest: /etc/systemd/system/nginx-exporter.service
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
tags: nginx_exporter
|
||||
|
||||
- name: Enable and start nginx-exporter
|
||||
systemd:
|
||||
name: nginx-exporter
|
||||
state: started
|
||||
enabled: yes
|
||||
daemon_reload: yes
|
||||
tags: nginx_exporter
|
||||
|
||||
- name: Open firewall port for nginx-exporter
|
||||
ufw:
|
||||
rule: allow
|
||||
port: '9113'
|
||||
proto: tcp
|
||||
tags: nginx_exporter
|
||||
12
roles/postgres_exporter/defaults/main.yml
Normal file
12
roles/postgres_exporter/defaults/main.yml
Normal file
@ -0,0 +1,12 @@
|
||||
---
|
||||
# Postgres Exporter
|
||||
postgres_exporter_version: "0.15.0"
|
||||
postgres_exporter_port: 9187
|
||||
postgres_exporter_user: "postgres_exporter"
|
||||
postgres_exporter_password: "exporterpassword123"
|
||||
|
||||
# Connection settings
|
||||
postgres_exporter_data_source_name: "user={{ postgres_exporter_user }} password={{ postgres_exporter_password }} host=localhost port=5432 dbname=postgres sslmode=disable"
|
||||
|
||||
# Systemd service
|
||||
postgres_exporter_service_name: "postgres_exporter"
|
||||
94
roles/postgres_exporter/tasks/main.yml
Normal file
94
roles/postgres_exporter/tasks/main.yml
Normal file
@ -0,0 +1,94 @@
|
||||
---
|
||||
- name: Install required packages
|
||||
apt:
|
||||
name:
|
||||
- wget
|
||||
- tar
|
||||
state: present
|
||||
update_cache: yes
|
||||
tags: postgres_exporter
|
||||
|
||||
- name: Create postgres_exporter user
|
||||
user:
|
||||
name: postgres_exporter
|
||||
system: yes
|
||||
shell: /bin/false
|
||||
home: /nonexistent
|
||||
comment: "Postgres Exporter Service User"
|
||||
tags: postgres_exporter
|
||||
|
||||
- name: Download Postgres Exporter
|
||||
get_url:
|
||||
url: "https://github.com/prometheus-community/postgres_exporter/releases/download/v{{ postgres_exporter_version }}/postgres_exporter-{{ postgres_exporter_version }}.linux-amd64.tar.gz"
|
||||
dest: "/tmp/postgres_exporter-{{ postgres_exporter_version }}.tar.gz"
|
||||
timeout: 30
|
||||
validate_certs: no
|
||||
tags: postgres_exporter
|
||||
|
||||
- name: Extract Postgres Exporter
|
||||
unarchive:
|
||||
src: "/tmp/postgres_exporter-{{ postgres_exporter_version }}.tar.gz"
|
||||
dest: "/tmp/"
|
||||
remote_src: yes
|
||||
creates: "/tmp/postgres_exporter-{{ postgres_exporter_version }}.linux-amd64"
|
||||
tags: postgres_exporter
|
||||
|
||||
- name: Install Postgres Exporter binary
|
||||
copy:
|
||||
src: "/tmp/postgres_exporter-{{ postgres_exporter_version }}.linux-amd64/postgres_exporter"
|
||||
dest: "/usr/local/bin/postgres_exporter"
|
||||
owner: postgres_exporter
|
||||
group: postgres_exporter
|
||||
mode: '0755'
|
||||
remote_src: yes
|
||||
tags: postgres_exporter
|
||||
|
||||
- name: Create systemd service
|
||||
template:
|
||||
src: postgres_exporter.service.j2
|
||||
dest: /etc/systemd/system/{{ postgres_exporter_service_name }}.service
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
tags: postgres_exporter
|
||||
|
||||
- name: Clean up temp files
|
||||
file:
|
||||
path: "/tmp/postgres_exporter-{{ postgres_exporter_version }}.tar.gz"
|
||||
state: absent
|
||||
tags: postgres_exporter
|
||||
|
||||
- name: Clean up extracted directory
|
||||
file:
|
||||
path: "/tmp/postgres_exporter-{{ postgres_exporter_version }}.linux-amd64"
|
||||
state: absent
|
||||
tags: postgres_exporter
|
||||
|
||||
- name: Reload systemd
|
||||
systemd:
|
||||
daemon_reload: yes
|
||||
tags: postgres_exporter
|
||||
|
||||
- name: Enable and start Postgres Exporter
|
||||
systemd:
|
||||
name: "{{ postgres_exporter_service_name }}"
|
||||
enabled: yes
|
||||
state: started
|
||||
daemon_reload: yes
|
||||
tags: postgres_exporter
|
||||
|
||||
- name: Configure UFW for Postgres Exporter
|
||||
ufw:
|
||||
rule: allow
|
||||
port: "{{ postgres_exporter_port }}"
|
||||
proto: tcp
|
||||
comment: "Postgres Exporter metrics"
|
||||
tags: postgres_exporter
|
||||
|
||||
- name: Verify Postgres Exporter is running
|
||||
wait_for:
|
||||
port: "{{ postgres_exporter_port }}"
|
||||
host: "{{ ansible_host }}"
|
||||
delay: 3
|
||||
timeout: 60
|
||||
tags: postgres_exporter
|
||||
@ -0,0 +1,16 @@
|
||||
[Unit]
|
||||
Description=Postgres Exporter
|
||||
After=network.target postgresql.service
|
||||
Wants=postgresql.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=postgres_exporter
|
||||
Group=postgres_exporter
|
||||
Environment=DATA_SOURCE_NAME="{{ postgres_exporter_data_source_name }}"
|
||||
ExecStart=/usr/local/bin/postgres_exporter --web.listen-address=:{{ postgres_exporter_port }}
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
21
roles/postgresql/defaults/main.yml
Normal file
21
roles/postgresql/defaults/main.yml
Normal file
@ -0,0 +1,21 @@
|
||||
---
|
||||
# PostgreSQL
|
||||
postgresql_version: "17"
|
||||
postgresql_port: 5432
|
||||
postgresql_listen_addresses: "*"
|
||||
postgresql_data_dir: "/var/lib/postgresql/{{ postgresql_version }}/main"
|
||||
|
||||
# Database configuration
|
||||
postgresql_databases:
|
||||
- name: testdb
|
||||
owner: testuser
|
||||
|
||||
postgresql_users:
|
||||
- name: testuser
|
||||
password: "testpassword123"
|
||||
databases: [testdb]
|
||||
privileges: ["ALL"]
|
||||
|
||||
# Postgres exporter user (for metrics collection)
|
||||
postgres_exporter_user: "postgres_exporter"
|
||||
postgres_exporter_password: "exporterpassword123"
|
||||
121
roles/postgresql/tasks/main.yml
Normal file
121
roles/postgresql/tasks/main.yml
Normal file
@ -0,0 +1,121 @@
|
||||
---
# Install PostgreSQL from the PGDG repository, open it to the stand
# network, and create the configured users/databases plus the monitoring
# account for postgres_exporter.

- name: Install required packages for PostgreSQL installation
  apt:
    name:
      - ca-certificates
      - curl
      - gnupg
      - lsb-release
    state: present
    update_cache: yes
  tags: postgresql

- name: Create PostgreSQL repository keyring directory
  file:
    path: /etc/apt/keyrings
    state: directory
    mode: '0755'
  tags: postgresql

- name: Download and install PostgreSQL GPG key
  # "creates:" makes this idempotent: skipped once the keyring exists.
  shell: |
    curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /etc/apt/keyrings/postgresql.gpg
    chmod 644 /etc/apt/keyrings/postgresql.gpg
  args:
    creates: /etc/apt/keyrings/postgresql.gpg
  tags: postgresql

- name: Add PostgreSQL repository
  apt_repository:
    repo: "deb [signed-by=/etc/apt/keyrings/postgresql.gpg] http://apt.postgresql.org/pub/repos/apt {{ ansible_distribution_release }}-pgdg main"
    state: present
    update_cache: yes
  tags: postgresql

- name: Install PostgreSQL
  apt:
    name:
      - "postgresql-{{ postgresql_version }}"
      - "postgresql-contrib-{{ postgresql_version }}"
      - "postgresql-client-{{ postgresql_version }}"
    state: present
    update_cache: yes
  tags: postgresql

- name: Ensure PostgreSQL service is started and enabled
  service:
    # Derive the unit name from postgresql_version instead of hard-coding
    # "17", so it always matches the packages installed above.
    name: "postgresql@{{ postgresql_version }}-main"
    state: started
    enabled: yes
  tags: postgresql

- name: Configure PostgreSQL listen addresses
  lineinfile:
    path: "/etc/postgresql/{{ postgresql_version }}/main/postgresql.conf"
    regexp: "^listen_addresses[[:space:]]*="
    line: "listen_addresses = '{{ postgresql_listen_addresses }}'"
    backup: yes
  tags: postgresql

- name: Configure PostgreSQL authentication
  # Allow password auth from the stand subnet.
  lineinfile:
    path: "/etc/postgresql/{{ postgresql_version }}/main/pg_hba.conf"
    line: "host all all 192.168.0.0/24 md5"
    insertafter: "^# IPv4 local connections:"
    backup: yes
  tags: postgresql

- name: Reload PostgreSQL configuration
  service:
    # FIX: the original mapping declared "name:" twice (a YAML
    # duplicate-key error); keep a single, version-derived unit name.
    name: "postgresql@{{ postgresql_version }}-main"
    state: reloaded
  tags: postgresql

- name: Create PostgreSQL users
  become: yes
  become_user: postgres
  community.postgresql.postgresql_user:
    name: "{{ item.name }}"
    password: "{{ item.password }}"
    state: present
  loop: "{{ postgresql_users }}"
  # Passwords flow through this loop -- keep them out of the log.
  no_log: true
  tags: postgresql

- name: Create PostgreSQL databases
  become: yes
  become_user: postgres
  community.postgresql.postgresql_db:
    name: "{{ item.name }}"
    owner: "{{ item.owner }}"
    state: present
  loop: "{{ postgresql_databases }}"
  tags: postgresql

- name: Create postgres_exporter user for monitoring
  become: yes
  become_user: postgres
  community.postgresql.postgresql_user:
    name: "{{ postgres_exporter_user }}"
    password: "{{ postgres_exporter_password }}"
    state: present
  # Keep the exporter password out of the log as well.
  no_log: true
  tags: postgresql

- name: Grant permissions to postgres_exporter user
  become: yes
  become_user: postgres
  community.postgresql.postgresql_privs:
    database: postgres
    state: present
    privs: CONNECT
    type: database
    roles: "{{ postgres_exporter_user }}"
  tags: postgresql

- name: Configure UFW for PostgreSQL
  ufw:
    rule: allow
    port: "{{ postgresql_port }}"
    proto: tcp
    comment: "PostgreSQL"
  tags: postgresql
||||
36
roles/prometheus_nginx/tasks/main.yml
Normal file
36
roles/prometheus_nginx/tasks/main.yml
Normal file
@ -0,0 +1,36 @@
|
||||
---
# Configure Prometheus (on the monitoring host) to scrape the Nginx
# instance on app1 via a drop-in scrape-config file.

- name: Create Nginx scrape configuration
  copy:
    content: |
      # Nginx metrics from stub_status
      # NOTE(review): /status is assumed to return Prometheus-format
      # metrics (e.g. VTS module or an exporter); plain stub_status output
      # is NOT Prometheus exposition format -- confirm on the target host.
      - job_name: 'nginx'
        scrape_interval: 15s
        scrape_timeout: 10s
        metrics_path: /status
        static_configs:
          - targets: ['192.168.0.110:80']
            labels:
              instance: 'app1-nginx'
              service: 'web-server'
              environment: 'test'
        # NOTE(review): __address__ is dropped before metric relabeling
        # runs, so these rules are likely no-ops as written; if the intent
        # is to rewrite target labels, they belong under relabel_configs
        # (which would then override the static 'instance' label) -- verify
        # intent before moving them.
        metric_relabel_configs:
          - source_labels: [__address__]
            target_label: instance
          - source_labels: [__address__]
            regex: '([^:]+)(?::\d+)?'
            replacement: '${1}'
            target_label: hostname
    dest: /etc/prometheus/nginx.yml
    owner: root
    group: root
    mode: '0644'
  notify: reload prometheus

- name: Include Nginx scrape config in main prometheus.yml
  # FIX: the original inserted '- "nginx.yml"' under rule_files:, but the
  # file contains a scrape job, not alerting/recording rules -- Prometheus
  # rejects that at config validation. Include it via scrape_config_files
  # (supported since Prometheus 2.43) instead.
  blockinfile:
    path: /etc/prometheus/prometheus.yml
    marker: "# {mark} ANSIBLE MANAGED - nginx scrape include"
    block: |
      scrape_config_files:
        - "nginx.yml"
    state: present
  notify: reload prometheus
||||
Reference in New Issue
Block a user