Compare commits: d70c2813de...main — 11 commits

Commits (SHA1):
d49f6a0396
44519e842b
c7fcbfcfce
27e692c1ed
338e0b0f19
83178d9a0d
0d85bd53aa
3392c84c65
aa3e0c8f54
0dca30868b
d22bbd3dba
playbooks/add-blackbox-carefully.yml (new file, 104 lines)
@@ -0,0 +1,104 @@
---
- name: Add Blackbox Exporter job to existing Prometheus config
  hosts: 192.168.0.105
  become: yes

  tasks:
    - name: Backup current config
      copy:
        src: /etc/prometheus/prometheus.yml
        dest: /etc/prometheus/prometheus.yml.backup-blackbox-{{ ansible_date_time.epoch }}
        remote_src: yes
      tags: prometheus

    - name: Check if blackbox job already exists
      shell: |
        grep -q 'job_name:.*blackbox' /etc/prometheus/prometheus.yml && echo "exists" || echo "not exists"
      register: blackbox_exists
      changed_when: false
      tags: prometheus

    - name: Add blackbox job to scrape_configs (if not exists)
      blockinfile:
        path: /etc/prometheus/prometheus.yml
        insertbefore: '^remote_write:'
        block: |
          - job_name: blackbox
            honor_timestamps: true
            track_timestamps_staleness: false
            scrape_interval: 15s
            scrape_timeout: 10s
            metrics_path: /probe
            params:
              module: [http_2xx]
            scheme: http
            follow_redirects: true
            enable_http2: true
            static_configs:
              - targets:
                  # Internal stand services
                  - "http://192.168.0.110/"
                  - "http://192.168.0.111:9187/metrics"
                  - "http://192.168.0.112:8080/get"
                  - "http://192.168.0.100:3000/"
                  - "http://192.168.0.101:9100/metrics"
                  - "http://192.168.0.103:8200/ui/"
                  - "http://192.168.0.104:8428/metrics"
                  - "http://192.168.0.105:9090/metrics"
                  - "http://192.168.0.106:3000"
                  # External domains
                  - "http://forgejo.pvenode.ru/"
                  - "http://grafana.pvenode.ru/"
                  - "http://prometheus.pvenode.ru/"
                  - "http://app1.pvenode.ru/"
                  - "http://wiki.pvenode.ru/"
            relabel_configs:
              - source_labels: [__address__]
                target_label: __param_target
              - source_labels: [__param_target]
                target_label: instance
              - target_label: __address__
                replacement: 192.168.0.112:8083
            metric_relabel_configs:
              - source_labels: [__address__]
                separator: ;
                regex: (.*)
                target_label: instance
                replacement: $1
                action: replace
              - source_labels: [__address__]
                separator: ;
                regex: ([^:]+):\d+
                target_label: host
                replacement: ${1}
                action: replace
        marker: "# {mark} ANSIBLE MANAGED BLOCK - blackbox"
      when: blackbox_exists.stdout == "not exists"
      tags: prometheus

    - name: Check Prometheus configuration
      command: promtool check config /etc/prometheus/prometheus.yml
      register: promtool_check
      failed_when: promtool_check.rc != 0
      changed_when: false
      tags: prometheus

    - name: Show config check result
      debug:
        msg: "{{ promtool_check.stdout_lines }}"
      when: promtool_check.rc == 0
      tags: prometheus

    - name: Reload Prometheus if config is valid
      systemd:
        name: prometheus
        state: reloaded
      when: promtool_check.rc == 0
      tags: prometheus

    - name: Show status
      debug:
        msg: |
          Blackbox job {{ "added successfully" if promtool_check.rc == 0 else "failed to add" }}
          Backup created: /etc/prometheus/prometheus.yml.backup-blackbox-{{ ansible_date_time.epoch }}
      tags: prometheus

playbooks/add-blackbox-correct.yml (new file, 151 lines)
@@ -0,0 +1,151 @@
---
- name: Add correct Blackbox Exporter job
  hosts: 192.168.0.105
  become: yes

  tasks:
    - name: Backup current config
      copy:
        src: /etc/prometheus/prometheus.yml
        dest: /etc/prometheus/prometheus.yml.backup-pre-blackbox-{{ ansible_date_time.epoch }}
        remote_src: yes
      tags: prometheus

    - name: Check current line numbers
      shell: |
        echo "Last scrape_config job ends at line:"
        grep -n "job_name: postgres" /etc/prometheus/prometheus.yml
        echo ""
        echo "Remote_write starts at line:"
        grep -n "^remote_write:" /etc/prometheus/prometheus.yml
      register: line_info
      changed_when: false
      tags: prometheus

    - name: Create correct blackbox job config
      copy:
        dest: /tmp/blackbox-job.yml
        content: |
          - job_name: blackbox
            honor_timestamps: true
            track_timestamps_staleness: false
            scrape_interval: 15s
            scrape_timeout: 10s
            metrics_path: /probe
            params:
              module: [http_2xx]
            scheme: http
            follow_redirects: true
            enable_http2: true
            static_configs:
              - targets:
                  # Internal services
                  - "http://192.168.0.110/"
                  - "http://192.168.0.111:9187/metrics"
                  - "http://192.168.0.112:8080/get"
                  - "http://192.168.0.100:3000/"
                  - "http://192.168.0.101:9100/metrics"
                  - "http://192.168.0.103:8200/ui/"
                  - "http://192.168.0.104:8428/metrics"
                  - "http://192.168.0.105:9090/metrics"
                  - "http://192.168.0.106:3000"
                  # External domains
                  - "http://forgejo.pvenode.ru/"
                  - "http://grafana.pvenode.ru/"
                  - "http://prometheus.pvenode.ru/"
                  - "http://app1.pvenode.ru/"
                  - "http://wiki.pvenode.ru/"
            relabel_configs:
              - source_labels: [__address__]
                target_label: __param_target
              - source_labels: [__param_target]
                target_label: instance
              - target_label: __address__
                replacement: 192.168.0.112:8083
            metric_relabel_configs:
              - source_labels: [__address__]
                separator: ;
                regex: (.*)
                target_label: instance
                replacement: $1
                action: replace
              - source_labels: [__address__]
                separator: ;
                regex: ([^:]+):\d+
                target_label: host
                replacement: ${1}
                action: replace
      tags: prometheus

    - name: Insert blackbox job before remote_write
      shell: |
        # Find the remote_write line
        remote_line=$(grep -n "^remote_write:" /etc/prometheus/prometheus.yml | cut -d: -f1)

        if [ -z "$remote_line" ]; then
          echo "ERROR: remote_write not found"
          exit 1
        fi

        # Create a working copy
        cp /etc/prometheus/prometheus.yml /etc/prometheus/prometheus.yml.tmp

        # Insert blackbox before remote_write
        head -n $((remote_line - 1)) /etc/prometheus/prometheus.yml > /etc/prometheus/prometheus.yml.new
        cat /tmp/blackbox-job.yml >> /etc/prometheus/prometheus.yml.new
        tail -n +$remote_line /etc/prometheus/prometheus.yml >> /etc/prometheus/prometheus.yml.new

        # Replace the old file
        mv /etc/prometheus/prometheus.yml.new /etc/prometheus/prometheus.yml
        rm -f /etc/prometheus/prometheus.yml.tmp

        echo "Inserted at line $((remote_line - 1))"
      args:
        executable: /bin/bash
      tags: prometheus

    - name: Check Prometheus configuration
      command: promtool check config /etc/prometheus/prometheus.yml
      register: promtool_check
      failed_when: promtool_check.rc != 0
      changed_when: false
      tags: prometheus

    - name: Show config check result
      debug:
        msg: "{{ promtool_check.stdout_lines }}"
      when: promtool_check.rc == 0
      tags: prometheus

    - name: Reload Prometheus
      systemd:
        name: prometheus
        state: reloaded
      when: promtool_check.rc == 0
      tags: prometheus

    - name: Verify blackbox job added
      shell: |
        sleep 2
        echo "=== Checking if blackbox job exists ==="
        if grep -q "job_name: blackbox" /etc/prometheus/prometheus.yml; then
          echo "✓ Blackbox job found in config"
          echo ""
          echo "=== Checking Prometheus targets ==="
          curl -s "http://localhost:9090/api/v1/targets" | python3 -c "
        import json, sys
        data = json.load(sys.stdin)
        for target in data['data']['activeTargets']:
            job = target['discoveredLabels'].get('job', 'N/A')
            if 'blackbox' in job.lower():
                print(f'✓ Blackbox target: {target[\"health\"]}')
                print(f'  URL: {target[\"scrapeUrl\"]}')
                exit(0)
        print('✗ Blackbox not in targets yet (may need 15s scrape interval)')
        "
        else
          echo "✗ Blackbox job not found in config"
        fi
      args:
        executable: /bin/bash
      tags: prometheus

playbooks/add-blackbox-final.yml (new file, 105 lines)
@@ -0,0 +1,105 @@
---
- name: Add Blackbox job to Prometheus config
  hosts: 192.168.0.105
  become: yes

  tasks:
    - name: Backup config
      copy:
        src: /etc/prometheus/prometheus.yml
        dest: /etc/prometheus/prometheus.yml.backup-blackbox
        remote_src: yes

    - name: Create blackbox config file
      copy:
        dest: /tmp/blackbox-config.yml
        content: |
          # Blackbox Exporter monitoring
          - job_name: blackbox
            honor_timestamps: true
            track_timestamps_staleness: false
            scrape_interval: 15s
            scrape_timeout: 10s
            metrics_path: /probe
            params:
              module: [http_2xx]
            scheme: http
            follow_redirects: true
            enable_http2: true
            static_configs:
              - targets:
                  - "http://192.168.0.110/"
                  - "http://192.168.0.111:9187/metrics"
                  - "http://192.168.0.112:8080/get"
                  - "http://192.168.0.100:3000/"
                  - "http://192.168.0.101:9100/metrics"
                  - "http://192.168.0.103:8200/ui/"
                  - "http://192.168.0.104:8428/metrics"
                  - "http://192.168.0.105:9090/metrics"
                  - "http://192.168.0.106:3000"
                  - "http://forgejo.pvenode.ru/"
                  - "http://grafana.pvenode.ru/"
                  - "http://prometheus.pvenode.ru/"
                  - "http://app1.pvenode.ru/"
                  - "http://wiki.pvenode.ru/"
            relabel_configs:
              - source_labels: [__address__]
                target_label: __param_target
              - source_labels: [__param_target]
                target_label: instance
              - target_label: __address__
                replacement: 192.168.0.112:8083
            metric_relabel_configs:
              - source_labels: [__address__]
                separator: ;
                regex: (.*)
                target_label: instance
                replacement: $1
                action: replace
              - source_labels: [__address__]
                separator: ;
                regex: ([^:]+):\d+
                target_label: host
                replacement: ${1}
                action: replace

    - name: Insert blackbox config before remote_write
      shell: |
        # Find remote_write line
        remote_line=$(grep -n "^remote_write:" /etc/prometheus/prometheus.yml | head -1 | cut -d: -f1)

        if [ -z "$remote_line" ]; then
          echo "ERROR: remote_write not found"
          exit 1
        fi

        # Insert blackbox config
        head -n $((remote_line - 1)) /etc/prometheus/prometheus.yml > /tmp/prometheus-new.yml
        cat /tmp/blackbox-config.yml >> /tmp/prometheus-new.yml
        tail -n +$remote_line /etc/prometheus/prometheus.yml >> /tmp/prometheus-new.yml

        # Replace original
        mv /tmp/prometheus-new.yml /etc/prometheus/prometheus.yml

        echo "Inserted at line $((remote_line - 1))"

    - name: Validate config
      command: promtool check config /etc/prometheus/prometheus.yml
      register: config_check
      changed_when: false

    - name: Show validation result
      debug:
        msg: "{{ config_check.stdout_lines }}"

    - name: Reload Prometheus
      systemd:
        name: prometheus
        state: reloaded
      when: config_check.rc == 0

    - name: Check result
      debug:
        msg: |
          Blackbox job {{ "successfully added" if config_check.rc == 0 else "failed to add" }}
          Backup: /etc/prometheus/prometheus.yml.backup-blackbox

playbooks/add-blackbox-simple.yml (new file, 103 lines)
@@ -0,0 +1,103 @@
---
- name: Add Blackbox Exporter to Prometheus
  hosts: 192.168.0.105
  become: yes

  tasks:
    - name: Backup current config
      copy:
        src: /etc/prometheus/prometheus.yml
        dest: /etc/prometheus/prometheus.yml.backup-{{ ansible_date_time.epoch }}
        remote_src: yes

    - name: Get line number where remote_write starts
      shell: grep -n "^remote_write:" /etc/prometheus/prometheus.yml | cut -d: -f1
      register: remote_line
      changed_when: false

    - name: Create blackbox job config file
      copy:
        dest: /tmp/blackbox-job.yml
        content: |
          - job_name: blackbox
            honor_timestamps: true
            track_timestamps_staleness: false
            scrape_interval: 15s
            scrape_timeout: 10s
            metrics_path: /probe
            params:
              module: [http_2xx]
            scheme: http
            follow_redirects: true
            enable_http2: true
            static_configs:
              - targets:
                  - "http://192.168.0.110/"
                  - "http://192.168.0.111:9187/metrics"
                  - "http://192.168.0.112:8080/get"
                  - "http://192.168.0.100:3000/"
                  - "http://192.168.0.101:9100/metrics"
                  - "http://192.168.0.103:8200/ui/"
                  - "http://192.168.0.104:8428/metrics"
                  - "http://192.168.0.105:9090/metrics"
                  - "http://192.168.0.106:3000"
                  - "http://forgejo.pvenode.ru/"
                  - "http://grafana.pvenode.ru/"
                  - "http://prometheus.pvenode.ru/"
                  - "http://app1.pvenode.ru/"
                  - "http://wiki.pvenode.ru/"
            relabel_configs:
              - source_labels: [__address__]
                target_label: __param_target
              - source_labels: [__param_target]
                target_label: instance
              - target_label: __address__
                replacement: 192.168.0.112:8083
            metric_relabel_configs:
              - source_labels: [__address__]
                separator: ;
                regex: (.*)
                target_label: instance
                replacement: $1
                action: replace
              - source_labels: [__address__]
                separator: ;
                regex: ([^:]+):\d+
                target_label: host
                replacement: ${1}
                action: replace

    - name: Insert blackbox job before remote_write
      shell: |
        # Insert the blackbox job before remote_write
        head -n $(({{ remote_line.stdout }} - 1)) /etc/prometheus/prometheus.yml > /tmp/prometheus-new.yml
        cat /tmp/blackbox-job.yml >> /tmp/prometheus-new.yml
        tail -n +{{ remote_line.stdout }} /etc/prometheus/prometheus.yml >> /tmp/prometheus-new.yml
        mv /tmp/prometheus-new.yml /etc/prometheus/prometheus.yml
      args:
        executable: /bin/bash

    - name: Check Prometheus configuration
      command: promtool check config /etc/prometheus/prometheus.yml
      register: promtool_check
      changed_when: false

    - name: Show config status
      debug:
        msg: "{{ promtool_check.stdout_lines }}"

    - name: Reload Prometheus if config valid
      systemd:
        name: prometheus
        state: reloaded
      when: promtool_check.rc == 0

    - name: Verify blackbox job
      shell: |
        echo "Checking if blackbox job was added..."
        if grep -q "job_name: blackbox" /etc/prometheus/prometheus.yml; then
          echo "SUCCESS: Blackbox job found in config"
        else
          echo "ERROR: Blackbox job not found"
        fi
      changed_when: false

playbooks/add-postgres-to-prometheus.yml (new file, 35 lines)
@@ -0,0 +1,35 @@
---
- name: Add PostgreSQL exporter to Prometheus
  hosts: 192.168.0.105
  become: yes

  tasks:
    - name: Add postgres_exporter scrape config
      blockinfile:
        path: /etc/prometheus/prometheus.yml
        insertafter: ' # Nginx metrics via nginx-prometheus-exporter'
        block: |2
            # PostgreSQL metrics via postgres_exporter
            - job_name: 'postgres-app2'
              scrape_interval: 15s
              scrape_timeout: 10s
              static_configs:
                - targets: ['192.168.0.111:9187']
                  labels:
                    instance: 'app2'
                    service: 'postgresql'
                    job: 'postgres'
              metric_relabel_configs:
                - source_labels: [__address__]
                  target_label: instance
                - source_labels: [__address__]
                  regex: '([^:]+):\d+'
                  replacement: '${1}'
                  target_label: host
        marker: "# {mark} ANSIBLE MANAGED BLOCK - postgres_exporter"
        backup: yes

    - name: Reload Prometheus
      systemd:
        name: prometheus
        state: reloaded

playbooks/configure-blackbox-monitoring-fixed.yml (new file, 76 lines)
@@ -0,0 +1,76 @@
---
- name: Configure Prometheus for Blackbox monitoring
  hosts: 192.168.0.105
  become: yes

  vars:
    blackbox_targets:
      # Core stand services (from the ИП)
      - "http://192.168.0.110/"
      - "http://192.168.0.111:9187/metrics"   # postgres_exporter
      - "http://192.168.0.112:8080/get"       # httpbin
      - "http://192.168.0.112:8081/metrics"   # cadvisor
      - "http://192.168.0.100:3000/"          # forgejo
      - "http://192.168.0.101:9100/metrics"   # ansible node_exporter
      - "http://192.168.0.103:8200/ui/"       # vault
      - "http://192.168.0.104:8428/metrics"   # victoriametrics
      - "http://192.168.0.105:9090/metrics"   # prometheus
      - "http://192.168.0.106:3000"           # grafana

      # Main domains (first batch for testing)
      - "http://forgejo.pvenode.ru/"
      - "http://grafana.pvenode.ru/"
      - "http://prometheus.pvenode.ru/"
      - "http://app1.pvenode.ru/"
      - "http://wiki.pvenode.ru/"

  tasks:
    - name: Backup original Prometheus config
      copy:
        src: /etc/prometheus/prometheus.yml
        dest: /etc/prometheus/prometheus.yml.backup-{{ ansible_date_time.epoch }}
        remote_src: yes
      tags: prometheus

    - name: Add blackbox exporter to Prometheus
      blockinfile:
        path: /etc/prometheus/prometheus.yml
        insertafter: ' # cAdvisor container metrics'
        block: |
          # Blackbox Exporter probes
          - job_name: 'blackbox'
            metrics_path: /probe
            params:
              module: [http_2xx]
            static_configs:
              - targets:
          {% for target in blackbox_targets %}
                  - "{{ target }}"
          {% endfor %}
            relabel_configs:
              - source_labels: [__address__]
                target_label: __param_target
              - source_labels: [__param_target]
                target_label: instance
              - target_label: __address__
                replacement: 192.168.0.112:8083  # blackbox-exporter
        marker: "# {mark} ANSIBLE MANAGED BLOCK - blackbox"
      tags: prometheus

    - name: Check Prometheus configuration
      command: promtool check config /etc/prometheus/prometheus.yml
      register: promtool_check
      failed_when: promtool_check.rc != 0
      tags: prometheus

    - name: Reload Prometheus
      systemd:
        name: prometheus
        state: reloaded
      when: promtool_check.rc == 0
      tags: prometheus

    - name: Show configured targets
      debug:
        msg: "Added {{ blackbox_targets|length }} targets to blackbox monitoring"
      tags: prometheus

playbooks/configure-blackbox-monitoring.yml (new file, 43 lines)
@@ -0,0 +1,43 @@
---
- name: Configure Prometheus for Blackbox monitoring
  hosts: 192.168.0.105
  become: yes

  vars:
    blackbox_targets: "{{ hostvars['192.168.0.112']['blackbox_targets'] }}"

  tasks:
    - name: Add blackbox exporter to Prometheus
      blockinfile:
        path: /etc/prometheus/prometheus.yml
        insertafter: ' # cAdvisor container metrics'
        block: |
          # Blackbox Exporter probes
          - job_name: 'blackbox'
            metrics_path: /probe
            params:
              module: [http_2xx]
            static_configs:
              - targets:
          {% for target in blackbox_targets %}
                  - {{ target.url }}
          {% endfor %}
            relabel_configs:
              - source_labels: [__address__]
                target_label: __param_target
              - source_labels: [__param_target]
                target_label: instance
              - target_label: __address__
                replacement: 192.168.0.112:8083  # blackbox-exporter
        marker: "# {mark} ANSIBLE MANAGED BLOCK - blackbox"

    - name: Check Prometheus configuration
      command: promtool check config /etc/prometheus/prometheus.yml
      register: promtool_check
      failed_when: promtool_check.rc != 0

    - name: Reload Prometheus
      systemd:
        name: prometheus
        state: reloaded
      when: promtool_check.rc == 0

playbooks/deploy-alertmanager.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
- name: Deploy Alertmanager
  hosts: 192.168.0.112   # app3
  become: true
  roles:
    - alertmanager

playbooks/deploy-app3-blackbox.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
---
- name: Deploy Blackbox Exporter on App3
  hosts: 192.168.0.112
  become: yes
  gather_facts: yes

  pre_tasks:
    - name: Ensure Docker is installed
      include_role:
        name: docker
        apply:
          tags: docker

  roles:
    - role: blackbox_exporter
      tags: blackbox

playbooks/deploy-app3-cadvisor.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
---
- name: Deploy cAdvisor on App3
  hosts: 192.168.0.112
  become: yes
  gather_facts: yes

  pre_tasks:
    - name: Ensure Docker is installed
      include_role:
        name: docker
        apply:
          tags: docker

  roles:
    - role: cadvisor
      tags: cadvisor

playbooks/deploy-app3-docker.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
- name: Deploy Docker on App3
  hosts: 192.168.0.112
  become: yes
  gather_facts: yes

  roles:
    - role: docker
      tags: docker

playbooks/deploy-app3-httpbin.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
---
- name: Deploy httpbin on App3
  hosts: 192.168.0.112
  become: yes
  gather_facts: yes

  pre_tasks:
    - name: Ensure Docker is installed
      include_role:
        name: docker
        apply:
          tags: docker

  roles:
    - role: httpbin
      tags: httpbin

playbooks/deploy-cadvisor.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
- name: Deploy cAdvisor on App3
  hosts: 192.168.0.112   # target the specific host
  become: true
  roles:
    - cadvisor

playbooks/deploy-loki.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
- name: Deploy Loki
  hosts: 192.168.0.112   # app3
  become: true
  roles:
    - loki

playbooks/deploy-node-red.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
- name: Deploy Node-RED
  hosts: 192.168.0.112   # app3
  become: true
  roles:
    - node-red

playbooks/deploy-postgres-app2.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
---
- name: Deploy PostgreSQL and Postgres Exporter on App2
  hosts: 192.168.0.111
  become: yes
  gather_facts: yes

  roles:
    - role: postgresql
      tags: postgresql

    - role: postgres_exporter
      tags: postgres_exporter

playbooks/deploy-promtail.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
- name: Deploy Promtail on all nodes
  hosts: all   # install Promtail on every host to collect logs
  become: true
  roles:
    - promtail

roles/alertmanager/defaults/main.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
---
# Alertmanager settings
alertmanager_port: 9093
alertmanager_config_path: /etc/alertmanager

# Email notifications (fill in later)
smtp_host: localhost
smtp_from: alertmanager@example.com
smtp_to: admin@example.com

# Webhook for testing
webhook_url: "http://localhost:9099"

roles/alertmanager/tasks/main.yml (new file, 33 lines)
@@ -0,0 +1,33 @@
---
- name: Create Alertmanager directories
  file:
    path: "{{ item }}"
    state: directory
    owner: root
    group: root
    mode: '0755'
  loop:
    - "{{ alertmanager_config_path }}"
    - /var/lib/alertmanager

- name: Deploy Alertmanager configuration
  template:
    src: alertmanager.yml.j2
    dest: "{{ alertmanager_config_path }}/alertmanager.yml"
    owner: root
    group: root
    mode: '0644'

- name: Run Alertmanager container
  docker_container:
    name: alertmanager
    image: prom/alertmanager:latest
    state: started
    restart_policy: always
    ports:
      - "{{ alertmanager_port }}:9093"
    volumes:
      - "{{ alertmanager_config_path }}/alertmanager.yml:/etc/alertmanager/alertmanager.yml"
      - /var/lib/alertmanager:/alertmanager
    command: --config.file=/etc/alertmanager/alertmanager.yml --storage.path=/alertmanager
  tags: alertmanager

roles/alertmanager/templates/alertmanager.yml.j2 (new file, 52 lines)
@@ -0,0 +1,52 @@
global:
  # Notification settings (can be configured later)
  # smtp_smarthost: 'smtp.gmail.com:587'
  # smtp_from: 'alertmanager@example.com'
  # smtp_auth_username: 'user@gmail.com'
  # smtp_auth_password: 'password'
  # smtp_require_tls: true

route:
  # Default route - all alerts go to Node-RED
  receiver: 'node-red-webhook'
  group_by: ['alertname', 'severity']
  group_wait: 10s
  group_interval: 10s
  repeat_interval: 1h

  # Nested routes
  routes:
    - match:
        severity: critical
      receiver: 'node-red-critical'
      group_wait: 5s
      repeat_interval: 10m

    - match:
        severity: warning
      receiver: 'node-red-warning'
      group_wait: 30s
      repeat_interval: 2h

receivers:
  - name: 'node-red-webhook'
    webhook_configs:
      - url: 'http://node-red:1880/webhook/alertmanager'
        send_resolved: true

  - name: 'node-red-critical'
    webhook_configs:
      - url: 'http://node-red:1880/webhook/critical'
        send_resolved: true

  - name: 'node-red-warning'
    webhook_configs:
      - url: 'http://node-red:1880/webhook/warning'
        send_resolved: true

inhibit_rules:
  - source_match:
      severity: 'critical'
    target_match:
      severity: 'warning'
    equal: ['alertname', 'instance']

roles/blackbox_exporter/defaults/main.yml (new file, 126 lines)
@@ -0,0 +1,126 @@
---
# Blackbox Exporter configuration
blackbox_version: "latest"
blackbox_port: 8083
blackbox_image: "prom/blackbox-exporter:{{ blackbox_version }}"
blackbox_container_name: "blackbox-exporter"

# All monitoring targets from the ИП and from your list
blackbox_targets:
  # Core stand services (from the ИП)
  - name: "app1-nginx"
    url: "http://192.168.0.110/"
    module: "http_2xx"

  - name: "app2-postgresql"
    url: "http://192.168.0.111:9187/metrics"   # postgres_exporter
    module: "http_2xx"

  - name: "app3-httpbin"
    url: "http://192.168.0.112:8080/get"
    module: "http_2xx"

  - name: "app3-cadvisor"
    url: "http://192.168.0.112:8081/metrics"
    module: "http_2xx"

  - name: "git-forgejo"
    url: "http://192.168.0.100:3000/"
    module: "http_2xx"

  - name: "ansible"
    url: "http://192.168.0.101:9100/metrics"   # node_exporter
    module: "http_2xx"

  - name: "vault"
    url: "http://192.168.0.103:8200/ui/"
    module: "http_2xx"

  - name: "victoriametrics"
    url: "http://192.168.0.104:8428/metrics"
    module: "http_2xx"

  - name: "prometheus"
    url: "http://192.168.0.105:9090/metrics"
    module: "http_2xx"

  - name: "grafana"
    url: "http://192.168.0.106:3000"
    module: "http_2xx"

  # Domains from your list
  - name: "wiki-pvenode"
    url: "http://wiki.pvenode.ru/"
    module: "http_2xx"

  - name: "victoria-pvenode"
    url: "http://victoria.pvenode.ru/"
    module: "http_2xx"

  - name: "vault-pvenode"
    url: "http://vault.pvenode.ru/"
    module: "http_2xx"

  - name: "tasks-pvenode"
    url: "http://tasks.pvenode.ru/"
    module: "http_2xx"

  - name: "python-pvenode"
    url: "http://python.pvenode.ru/"
    module: "http_2xx"

  - name: "pvenode-main"
    url: "http://pvenode.ru/"
    module: "http_2xx"

  - name: "proxmox-pvenode"
    url: "http://proxmox.pvenode.ru/"
    module: "http_2xx"

  - name: "prometheus-pvenode"
    url: "http://prometheus.pvenode.ru/"
    module: "http_2xx"

  - name: "postgre-pvenode"
    url: "http://postgre.pvenode.ru/"
    module: "http_2xx"

  - name: "ovpn-pvenode"
    url: "http://ovpn.pvenode.ru/"
    module: "http_2xx"

  - name: "nginxpm-pvenode"
    url: "http://nginxpm.pvenode.ru/"
    module: "http_2xx"

  - name: "nextcloud-pvenode"
    url: "http://nextcloud.pvenode.ru/"
    module: "http_2xx"

  - name: "money-pvenode"
    url: "http://money.pvenode.ru/"
    module: "http_2xx"

  - name: "grafana-pvenode"
    url: "http://grafana.pvenode.ru/"
    module: "http_2xx"

  - name: "gitlab-pvenode"
    url: "http://gitlab.pvenode.ru/"
    module: "http_2xx"

  - name: "forgejo-pvenode"
    url: "http://forgejo.pvenode.ru/"
    module: "http_2xx"

  - name: "bitwarden-pvenode"
    url: "http://bitwarden.pvenode.ru/"
    module: "http_2xx"

  - name: "app1-pvenode"
    url: "http://app1.pvenode.ru/"
    module: "http_2xx"

  - name: "ansible-pvenode"
    url: "http://ansimble.pvenode.ru/"
    module: "http_2xx"

roles/blackbox_exporter/files/blackbox.yml (new file, 42 lines)
@@ -0,0 +1,42 @@
modules:
  # HTTP check (2xx status)
  http_2xx:
    prober: http
    timeout: 10s
    http:
      valid_status_codes: [200, 301, 302, 403]
      method: GET
      preferred_ip_protocol: "ip4"
      follow_redirects: true
      fail_if_ssl: false
      fail_if_not_ssl: false
      tls_config:
        insecure_skip_verify: true   # for the test stand

  # HTTP POST check
  http_post_2xx:
    prober: http
    http:
      method: POST
      preferred_ip_protocol: "ip4"

  # TCP connect
  tcp_connect:
    prober: tcp
    timeout: 5s

  # SSL checks (can be added later)
  ssl_check:
    prober: http
    http:
      fail_if_not_ssl: true
      tls_config:
        insecure_skip_verify: false
      preferred_ip_protocol: "ip4"

  # ICMP (ping)
  icmp_check:
    prober: icmp
    timeout: 5s
    icmp:
      preferred_ip_protocol: "ip4"

roles/blackbox_exporter/tasks/main.yml (new file, 58 lines)
@@ -0,0 +1,58 @@
---
- name: Create directory for blackbox config
  file:
    path: /etc/blackbox_exporter
    state: directory
    mode: '0755'
  tags: blackbox

- name: Copy blackbox configuration
  copy:
    src: files/blackbox.yml
    dest: /etc/blackbox_exporter/config.yml
    mode: '0644'
  tags: blackbox

- name: Ensure blackbox-exporter container is running
  community.docker.docker_container:
    name: "{{ blackbox_container_name }}"
    image: "{{ blackbox_image }}"
    state: started
    restart_policy: unless-stopped
    ports:
      - "{{ blackbox_port }}:9115"
    volumes:
      - "/etc/blackbox_exporter/config.yml:/etc/blackbox_exporter/config.yml"
    command:
      - "--config.file=/etc/blackbox_exporter/config.yml"
      - "--web.listen-address=:9115"
  tags: blackbox

- name: Configure UFW for blackbox-exporter
  ufw:
    rule: allow
    port: "{{ blackbox_port }}"
    proto: tcp
    comment: "Blackbox Exporter"
  tags: blackbox

- name: Wait for blackbox-exporter to be ready
  wait_for:
    port: "{{ blackbox_port }}"
    host: "{{ ansible_host }}"
    delay: 2
    timeout: 60
  tags: blackbox

- name: Test blackbox-exporter with local target
  uri:
    url: "http://{{ ansible_host }}:{{ blackbox_port }}/probe?target=http://192.168.0.112:8080/get&module=http_2xx"
    return_content: true
    status_code: 200
  register: blackbox_test
  tags: blackbox

- name: Show blackbox-exporter status
  debug:
    msg: "Blackbox Exporter deployed at http://{{ ansible_host }}:{{ blackbox_port }}/"
  tags: blackbox

roles/cadvisor/defaults/main.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
# Default port for cAdvisor
cadvisor_port: 8080

# Network configuration
cadvisor_network_mode: "host"   # Alternative: use the host network to avoid port conflicts

# Alternative: use a different port if the default is busy
cadvisor_fallback_ports: [8081, 8082, 8083, 8084]

roles/cadvisor/tasks/main.yml (new file, 43 lines)
@@ -0,0 +1,43 @@
---
- name: Check for available port for cAdvisor
  shell: |
    for port in 8080 8081 8082 8083 8084 8085; do
      if ! ss -tulpn | grep -q ":${port} "; then
        echo "${port}"
        break
      fi
    done
  args:
    executable: /bin/bash
  register: available_port
  changed_when: false
  tags: cadvisor

- name: Ensure Docker container for cAdvisor is running
  docker_container:
    name: cadvisor
    image: gcr.io/cadvisor/cadvisor:latest
    state: started
    restart_policy: always
    ports:
      - "{{ available_port.stdout | default('8084') }}:8080"
    volumes:
      - "/:/rootfs:ro"
      - "/var/run:/var/run:ro"
      - "/sys:/sys:ro"
      - "/var/lib/docker/:/var/lib/docker:ro"
      - "/dev/disk/:/dev/disk:ro"
      - "/var/run/docker.sock:/var/run/docker.sock:ro"
    privileged: true
    devices:
      - "/dev/kmsg:/dev/kmsg"
    cgroup_parent: "docker.slice"
  tags: cadvisor

- name: Display cAdvisor access info
  debug:
    msg: |
      cAdvisor is available at:
      - Web UI: http://{{ inventory_hostname }}:{{ available_port.stdout | default('8084') }}
      - Metrics: http://{{ inventory_hostname }}:{{ available_port.stdout | default('8084') }}/metrics
  tags: cadvisor

roles/docker/defaults/main.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
---
# Docker configuration
docker_compose_version: "v2.27.0"
docker_compose_install_path: "/usr/local/bin/docker-compose"

# Ports for App3 services (for reference; used by other roles)
app3_service_ports:
  httpbin: 8080
  cadvisor: 8081
  alertmanager: 8082
  blackbox_exporter: 8083
  loki: 8084
  wordpress: 8085
  mysql: 3306   # internal port

roles/docker/tasks/main.yml (new file, 62 lines)
@@ -0,0 +1,62 @@
---
- name: Install prerequisites for Docker
  apt:
    name:
      - curl
      - gnupg
      - ca-certificates
      - lsb-release
    state: present
    update_cache: yes
  tags: docker

- name: Install Docker using official script
  shell: |
    curl -fsSL https://get.docker.com -o /tmp/get-docker.sh
    sh /tmp/get-docker.sh
    rm /tmp/get-docker.sh
  args:
    creates: /usr/bin/docker
  tags: docker

- name: Install Docker Compose
  get_url:
    url: "https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-linux-x86_64"
    dest: "{{ docker_compose_install_path }}"
    mode: '0755'
    timeout: 30
  tags: docker

- name: Start and enable Docker service
  systemd:
    name: docker
    state: started
    enabled: yes
    daemon_reload: yes
  tags: docker

- name: Add admin user to docker group
  user:
    name: admin
    groups: docker
    append: yes
  tags: docker

- name: Verify Docker installation
  command: docker --version
  register: docker_version
  changed_when: false
  tags: docker

- name: Verify Docker Compose installation
  command: docker-compose --version
  register: docker_compose_version
  changed_when: false
  tags: docker

- name: Show installation results
  debug:
    msg:
      - "Docker: {{ docker_version.stdout }}"
      - "Docker Compose: {{ docker_compose_version.stdout }}"
  tags: docker

roles/httpbin/defaults/main.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
---
# httpbin configuration
httpbin_port: 8080
httpbin_image: "kennethreitz/httpbin"
httpbin_container_name: "httpbin"

roles/httpbin/tasks/main.yml (new file, 42 lines)
@@ -0,0 +1,42 @@
---
- name: Ensure httpbin container is running
  community.docker.docker_container:
    name: "{{ httpbin_container_name }}"
    image: "{{ httpbin_image }}"
    state: started
    restart_policy: unless-stopped
    ports:
      - "{{ httpbin_port }}:80"
  tags: httpbin

- name: Configure UFW for httpbin
  ufw:
    rule: allow
    port: "{{ httpbin_port }}"
    proto: tcp
    comment: "httpbin API"
  tags: httpbin

- name: Wait for httpbin to be ready
  wait_for:
    port: "{{ httpbin_port }}"
    host: "{{ ansible_host }}"
    delay: 2
    timeout: 60
  tags: httpbin

- name: Verify httpbin is accessible
  uri:
    url: "http://{{ ansible_host }}:{{ httpbin_port }}/get"
    return_content: true
    status_code: 200
  register: httpbin_check
  until: httpbin_check.status == 200
  retries: 5
  delay: 3
  tags: httpbin

- name: Show httpbin status
  debug:
    msg: "httpbin successfully deployed at http://{{ ansible_host }}:{{ httpbin_port }}/"
  tags: httpbin

roles/loki/defaults/main.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
# Default port for Loki
loki_port: 3100

# Storage configuration
loki_storage_path: /var/lib/loki

# Retention period
loki_retention_period: 720h   # 30 days

roles/loki/tasks/main.yml (new file, 33 lines)
@@ -0,0 +1,33 @@
---
- name: Create Loki directories
  file:
    path: "{{ item }}"
    state: directory
    owner: root
    group: root
    mode: '0755'
  loop:
    - /etc/loki
    - /var/lib/loki

- name: Deploy Loki configuration
  template:
    src: loki-config.yml.j2
    dest: /etc/loki/loki-config.yml
    owner: root
    group: root
    mode: '0644'

- name: Run Loki container
  docker_container:
    name: loki
    image: grafana/loki:latest
    state: started
    restart_policy: always
    ports:
      - "3100:3100"
    volumes:
      - /etc/loki/loki-config.yml:/etc/loki/loki-config.yml
      - /var/lib/loki:/loki
    command: -config.file=/etc/loki/loki-config.yml
  tags: loki

roles/loki/templates/loki-config.yml.j2 (new file, 33 lines)
@@ -0,0 +1,33 @@
auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9096

common:
  path_prefix: /tmp/loki   # path changed to /tmp for testing
  storage:
    filesystem:
      chunks_directory: /tmp/loki/chunks
      rules_directory: /tmp/loki/rules
  replication_factor: 1
  ring:
    instance_addr: 127.0.0.1
    kvstore:
      store: inmemory

limits_config:
  allow_structured_metadata: false

schema_config:
  configs:
    - from: 2020-10-24
      store: boltdb-shipper
      object_store: filesystem
      schema: v11
      index:
        prefix: index_
        period: 24h

ruler:
  alertmanager_url: http://alertmanager:9093

roles/node-red/defaults/main.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
# Node-RED settings
node_red_port: 1880
node_red_data_dir: /var/lib/node-red
node_red_image: nodered/node-red:latest

# Persistence settings
node_red_persist_flows: true
node_red_enable_projects: false

roles/node-red/tasks/main.yml (new file, 32 lines)
@@ -0,0 +1,32 @@
---
- name: Create Node-RED data directory with correct permissions
  file:
    path: "{{ node_red_data_dir }}"
    state: directory
    owner: 1000   # the Node-RED container runs as user 1000
    group: 1000
    mode: '0755'

- name: Run Node-RED container
  docker_container:
    name: node-red
    image: "{{ node_red_image }}"
    state: started
    restart_policy: always
    ports:
      - "{{ node_red_port }}:1880"
    volumes:
      - "{{ node_red_data_dir }}:/data"
    user: "1000:1000"   # run as the correct user
    env:
      NODE_RED_ENABLE_PROJECTS: "{{ 'true' if node_red_enable_projects else 'false' }}"
      TZ: "UTC"
  tags: node-red

- name: Display Node-RED access info
  debug:
    msg: |
      Node-RED is available at:
      - Web UI: http://{{ inventory_hostname }}:{{ node_red_port }}
      - API: http://{{ inventory_hostname }}:{{ node_red_port }}/red/api
  tags: node-red

roles/postgres_exporter/defaults/main.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
---
# Postgres Exporter
postgres_exporter_version: "0.15.0"
postgres_exporter_port: 9187
postgres_exporter_user: "postgres_exporter"
postgres_exporter_password: "exporterpassword123"

# Connection settings
postgres_exporter_data_source_name: "user={{ postgres_exporter_user }} password={{ postgres_exporter_password }} host=localhost port=5432 dbname=postgres sslmode=disable"

# Systemd service
postgres_exporter_service_name: "postgres_exporter"

roles/postgres_exporter/tasks/main.yml (new file, 94 lines)
@@ -0,0 +1,94 @@
---
- name: Install required packages
  apt:
    name:
      - wget
      - tar
    state: present
    update_cache: yes
  tags: postgres_exporter

- name: Create postgres_exporter user
  user:
    name: postgres_exporter
    system: yes
    shell: /bin/false
    home: /nonexistent
    comment: "Postgres Exporter Service User"
  tags: postgres_exporter

- name: Download Postgres Exporter
  get_url:
    url: "https://github.com/prometheus-community/postgres_exporter/releases/download/v{{ postgres_exporter_version }}/postgres_exporter-{{ postgres_exporter_version }}.linux-amd64.tar.gz"
    dest: "/tmp/postgres_exporter-{{ postgres_exporter_version }}.tar.gz"
    timeout: 30
    validate_certs: no
  tags: postgres_exporter

- name: Extract Postgres Exporter
  unarchive:
    src: "/tmp/postgres_exporter-{{ postgres_exporter_version }}.tar.gz"
    dest: "/tmp/"
    remote_src: yes
    creates: "/tmp/postgres_exporter-{{ postgres_exporter_version }}.linux-amd64"
  tags: postgres_exporter

- name: Install Postgres Exporter binary
  copy:
    src: "/tmp/postgres_exporter-{{ postgres_exporter_version }}.linux-amd64/postgres_exporter"
    dest: "/usr/local/bin/postgres_exporter"
    owner: postgres_exporter
    group: postgres_exporter
    mode: '0755'
    remote_src: yes
  tags: postgres_exporter

- name: Create systemd service
  template:
    src: postgres_exporter.service.j2
    dest: /etc/systemd/system/{{ postgres_exporter_service_name }}.service
    owner: root
    group: root
    mode: '0644'
  tags: postgres_exporter

- name: Clean up temp files
  file:
    path: "/tmp/postgres_exporter-{{ postgres_exporter_version }}.tar.gz"
    state: absent
  tags: postgres_exporter

- name: Clean up extracted directory
  file:
    path: "/tmp/postgres_exporter-{{ postgres_exporter_version }}.linux-amd64"
    state: absent
  tags: postgres_exporter

- name: Reload systemd
  systemd:
    daemon_reload: yes
  tags: postgres_exporter

- name: Enable and start Postgres Exporter
  systemd:
    name: "{{ postgres_exporter_service_name }}"
    enabled: yes
    state: started
    daemon_reload: yes
  tags: postgres_exporter

- name: Configure UFW for Postgres Exporter
  ufw:
    rule: allow
    port: "{{ postgres_exporter_port }}"
    proto: tcp
    comment: "Postgres Exporter metrics"
  tags: postgres_exporter

- name: Verify Postgres Exporter is running
  wait_for:
    port: "{{ postgres_exporter_port }}"
    host: "{{ ansible_host }}"
    delay: 3
    timeout: 60
  tags: postgres_exporter

roles/postgres_exporter/templates/postgres_exporter.service.j2 (new file, 16 lines)
@@ -0,0 +1,16 @@
[Unit]
Description=Postgres Exporter
After=network.target postgresql.service
Wants=postgresql.service

[Service]
Type=simple
User=postgres_exporter
Group=postgres_exporter
Environment=DATA_SOURCE_NAME="{{ postgres_exporter_data_source_name }}"
ExecStart=/usr/local/bin/postgres_exporter --web.listen-address=:{{ postgres_exporter_port }}
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target

roles/postgresql/defaults/main.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
---
# PostgreSQL
postgresql_version: "17"
postgresql_port: 5432
postgresql_listen_addresses: "*"
postgresql_data_dir: "/var/lib/postgresql/{{ postgresql_version }}/main"

# Database configuration
postgresql_databases:
  - name: testdb
    owner: testuser

postgresql_users:
  - name: testuser
    password: "testpassword123"
    databases: [testdb]
    privileges: ["ALL"]

# Postgres exporter user (for metrics collection)
postgres_exporter_user: "postgres_exporter"
postgres_exporter_password: "exporterpassword123"

roles/postgresql/tasks/main.yml (new file, 121 lines)
@@ -0,0 +1,121 @@
---
- name: Install required packages for PostgreSQL installation
  apt:
    name:
      - ca-certificates
      - curl
      - gnupg
      - lsb-release
    state: present
    update_cache: yes
  tags: postgresql

- name: Create PostgreSQL repository keyring directory
  file:
    path: /etc/apt/keyrings
    state: directory
    mode: '0755'
  tags: postgresql

- name: Download and install PostgreSQL GPG key
  shell: |
    curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /etc/apt/keyrings/postgresql.gpg
    chmod 644 /etc/apt/keyrings/postgresql.gpg
  args:
    creates: /etc/apt/keyrings/postgresql.gpg
  tags: postgresql

- name: Add PostgreSQL repository
  apt_repository:
    repo: "deb [signed-by=/etc/apt/keyrings/postgresql.gpg] http://apt.postgresql.org/pub/repos/apt {{ ansible_distribution_release }}-pgdg main"
    state: present
    update_cache: yes
  tags: postgresql

- name: Install PostgreSQL
  apt:
    name:
      - postgresql-{{ postgresql_version }}
      - postgresql-contrib-{{ postgresql_version }}
      - postgresql-client-{{ postgresql_version }}
    state: present
    update_cache: yes
  tags: postgresql

- name: Ensure PostgreSQL service is started and enabled
  service:
    name: postgresql@17-main
    state: started
    enabled: yes
  tags: postgresql

- name: Configure PostgreSQL listen addresses
  lineinfile:
    path: "/etc/postgresql/{{ postgresql_version }}/main/postgresql.conf"
    regexp: '^listen_addresses\s*='
    line: "listen_addresses = '{{ postgresql_listen_addresses }}'"
    backup: yes
  tags: postgresql

- name: Configure PostgreSQL authentication
  lineinfile:
    path: "/etc/postgresql/{{ postgresql_version }}/main/pg_hba.conf"
    line: "host all all 192.168.0.0/24 md5"
    insertafter: "^# IPv4 local connections:"
    backup: yes
  tags: postgresql

- name: Reload PostgreSQL configuration
  service:
    name: postgresql@17-main
    state: reloaded
  tags: postgresql

- name: Create PostgreSQL users and databases
  become: yes
  become_user: postgres
  community.postgresql.postgresql_user:
    name: "{{ item.name }}"
    password: "{{ item.password }}"
    state: present
  loop: "{{ postgresql_users }}"
  tags: postgresql

- name: Create PostgreSQL databases
  become: yes
  become_user: postgres
  community.postgresql.postgresql_db:
    name: "{{ item.name }}"
    owner: "{{ item.owner }}"
    state: present
  loop: "{{ postgresql_databases }}"
  tags: postgresql

- name: Create postgres_exporter user for monitoring
  become: yes
  become_user: postgres
  community.postgresql.postgresql_user:
    name: "{{ postgres_exporter_user }}"
    password: "{{ postgres_exporter_password }}"
    state: present
  tags: postgresql

- name: Grant permissions to postgres_exporter user
  become: yes
  become_user: postgres
  community.postgresql.postgresql_privs:
    database: postgres
    state: present
    privs: CONNECT
    type: database
    roles: "{{ postgres_exporter_user }}"
  tags: postgresql

- name: Configure UFW for PostgreSQL
  ufw:
    rule: allow
    port: "{{ postgresql_port }}"
    proto: tcp
    comment: "PostgreSQL"
  tags: postgresql

roles/promtail/defaults/main.yml (new file, 7 lines)
@@ -0,0 +1,7 @@
---
# Loki connection
loki_host: 192.168.0.112
loki_port: 3100

# Promtail settings
promtail_port: 9080

roles/promtail/tasks/main.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
---
- name: Create Promtail directories
  file:
    path: /etc/promtail
    state: directory
    owner: root
    group: root
    mode: '0755'

- name: Deploy Promtail configuration
  template:
    src: promtail-config.yml.j2
    dest: /etc/promtail/promtail-config.yml
    owner: root
    group: root
    mode: '0644'

- name: Run Promtail container (using host network)
  docker_container:
    name: promtail
    image: grafana/promtail:latest
    state: started
    restart_policy: always
    network_mode: host   # <-- the key change
    volumes:
      - /var/log:/var/log:ro
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
      - /etc/promtail/promtail-config.yml:/etc/promtail/config.yml
    command: -config.file=/etc/promtail/config.yml
    pid_mode: host
  tags: promtail

roles/promtail/templates/promtail-config.yml.j2 (new file, 28 lines)
@@ -0,0 +1,28 @@
server:
  http_listen_port: 9080
  grpc_listen_port: 0

positions:
  filename: /tmp/positions.yaml

clients:
  - url: http://localhost:3100/loki/api/v1/push   # localhost works now (host network)

scrape_configs:
  - job_name: system
    static_configs:
      - targets:
          - localhost
        labels:
          job: varlogs
          __path__: /var/log/*log
          host: "{{ inventory_hostname }}"

  - job_name: docker
    static_configs:
      - targets:
          - localhost
        labels:
          job: docker
          __path__: /var/lib/docker/containers/*/*log
          host: "{{ inventory_hostname }}"