10 Commits

Author SHA1 Message Date
d49f6a0396 Merge branch 'monitoring-stack-deployment' 2026-02-05 02:23:36 +00:00
44519e842b feat: add cAdvisor, Loki, Alertmanager and Node-RED support
- Добавлена роль cAdvisor для мониторинга Docker контейнеров
- Добавлены Loki и Promtail для сбора и хранения логов
- Добавлен Alertmanager с конфигурацией для Node-RED интеграции
- Добавлена роль Node-RED для автоматизации обработки алертов
- Настроена интеграция Prometheus → Alertmanager → Node-RED
- Все контейнеры запускаются на app3 (192.168.0.112)
2026-02-05 02:23:27 +00:00
c7fcbfcfce Merge pull request 'monitoring-stack-deployment' (#8) from monitoring-stack-deployment into main
Reviewed-on: #8
2026-02-04 13:25:28 +00:00
27e692c1ed Merge branch 'feature/add-cadvisor-support' into new-feature-branch 2026-02-04 13:21:43 +00:00
338e0b0f19 Add cadvisor support with monitoring stack: alertmanager, loki, node-red, promtail 2026-02-04 13:21:16 +00:00
83178d9a0d Merge pull request 'Enhance cAdvisor role and add deployment playbook' (#7) from feature/add-cadvisor-support into main
Reviewed-on: #7
2026-02-04 12:48:29 +00:00
0d85bd53aa Enhance cAdvisor role and add deployment playbook
- Updated cAdvisor default variables for better configuration
- Modified cAdvisor tasks to improve deployment process
- Added dedicated playbook for cAdvisor deployment and monitoring

This commit completes the cAdvisor monitoring integration.
2026-02-04 12:47:46 +00:00
3392c84c65 Merge pull request 'feat: add blackbox monitoring and app3 deployment' (#6) from ADDBlackboxapp3 into main
Reviewed-on: #6
2026-02-04 10:21:09 +00:00
aa3e0c8f54 feat: add blackbox monitoring and app3 deployment
- Add blackbox exporter role and playbooks
- Add cadvisor, docker, httpbin roles
- Add app3 deployment playbooks
- Configure blackbox monitoring
2026-02-04 10:20:24 +00:00
0dca30868b Merge pull request 'feat: add PostgreSQL infrastructure for App2' (#5) from feature/add-postgresql-support into main
Reviewed-on: #5
2026-02-04 07:29:07 +00:00
35 changed files with 1349 additions and 0 deletions

View File

@ -0,0 +1,104 @@
---
# Playbook: append a Blackbox Exporter scrape job to an existing Prometheus
# config with blockinfile; a grep guard keeps re-runs from duplicating it.
- name: Add Blackbox Exporter job to existing Prometheus config
  hosts: 192.168.0.105
  become: yes
  tasks:
    # Timestamped backup so the change can be rolled back by hand.
    - name: Backup current config
      copy:
        src: /etc/prometheus/prometheus.yml
        dest: /etc/prometheus/prometheus.yml.backup-blackbox-{{ ansible_date_time.epoch }}
        remote_src: yes
      tags: prometheus

    # Idempotence guard; the '|| echo' swallows grep's rc so this task never fails.
    - name: Check if blackbox job already exists
      shell: |
        grep -q 'job_name:.*blackbox' /etc/prometheus/prometheus.yml && echo "exists" || echo "not exists"
      register: blackbox_exists
      changed_when: false
      tags: prometheus

    # Inserts the job just before the top-level remote_write section;
    # assumes the config contains a '^remote_write:' line — TODO confirm.
    - name: Add blackbox job to scrape_configs (if not exists)
      blockinfile:
        path: /etc/prometheus/prometheus.yml
        insertbefore: '^remote_write:'
        block: |
          - job_name: blackbox
            honor_timestamps: true
            track_timestamps_staleness: false
            scrape_interval: 15s
            scrape_timeout: 10s
            metrics_path: /probe
            params:
              module: [http_2xx]
            scheme: http
            follow_redirects: true
            enable_http2: true
            static_configs:
              - targets:
                  # Внутренние сервисы стенда
                  - "http://192.168.0.110/"
                  - "http://192.168.0.111:9187/metrics"
                  - "http://192.168.0.112:8080/get"
                  - "http://192.168.0.100:3000/"
                  - "http://192.168.0.101:9100/metrics"
                  - "http://192.168.0.103:8200/ui/"
                  - "http://192.168.0.104:8428/metrics"
                  - "http://192.168.0.105:9090/metrics"
                  - "http://192.168.0.106:3000"
                  # Внешние домены
                  - "http://forgejo.pvenode.ru/"
                  - "http://grafana.pvenode.ru/"
                  - "http://prometheus.pvenode.ru/"
                  - "http://app1.pvenode.ru/"
                  - "http://wiki.pvenode.ru/"
            relabel_configs:
              - source_labels: [__address__]
                target_label: __param_target
              - source_labels: [__param_target]
                target_label: instance
              - target_label: __address__
                replacement: 192.168.0.112:8083
            metric_relabel_configs:
              - source_labels: [__address__]
                separator: ;
                regex: (.*)
                target_label: instance
                replacement: $1
                action: replace
              - source_labels: [__address__]
                separator: ;
                regex: ([^:]+):\d+
                target_label: host
                replacement: ${1}
                action: replace
        marker: "# {mark} ANSIBLE MANAGED BLOCK - blackbox"
      when: blackbox_exists.stdout == "not exists"
      tags: prometheus

    # 'command' already fails the play on non-zero rc; failed_when makes it explicit.
    - name: Check Prometheus configuration
      command: promtool check config /etc/prometheus/prometheus.yml
      register: promtool_check
      failed_when: promtool_check.rc != 0
      changed_when: false
      tags: prometheus

    - name: Show config check result
      debug:
        msg: "{{ promtool_check.stdout_lines }}"
      when: promtool_check.rc == 0
      tags: prometheus

    # Reload (not restart) picks up the new config without losing scrape state.
    - name: Reload Prometheus if config is valid
      systemd:
        name: prometheus
        state: reloaded
      when: promtool_check.rc == 0
      tags: prometheus

    - name: Show status
      debug:
        msg: |
          Blackbox job {{ "added successfully" if promtool_check.rc == 0 else "failed to add" }}
          Backup created: /etc/prometheus/prometheus.yml.backup-blackbox-{{ ansible_date_time.epoch }}
      tags: prometheus

View File

@ -0,0 +1,151 @@
---
# Playbook: splice a Blackbox Exporter scrape job into the Prometheus config
# using head/tail text surgery just before remote_write, then validate,
# reload, and verify via the Prometheus targets API.
- name: Add correct Blackbox Exporter job
  hosts: 192.168.0.105
  become: yes
  tasks:
    - name: Backup current config
      copy:
        src: /etc/prometheus/prometheus.yml
        dest: /etc/prometheus/prometheus.yml.backup-pre-blackbox-{{ ansible_date_time.epoch }}
        remote_src: yes
      tags: prometheus

    # Diagnostic only: records the line numbers the insert below works around.
    - name: Check current line numbers
      shell: |
        echo "Last scrape_config job ends at line:"
        grep -n "job_name: postgres" /etc/prometheus/prometheus.yml
        echo ""
        echo "Remote_write starts at line:"
        grep -n "^remote_write:" /etc/prometheus/prometheus.yml
      register: line_info
      changed_when: false
      tags: prometheus

    # Stages the job fragment on disk so the shell insert can cat it in.
    - name: Create correct blackbox job config
      copy:
        dest: /tmp/blackbox-job.yml
        content: |
          - job_name: blackbox
            honor_timestamps: true
            track_timestamps_staleness: false
            scrape_interval: 15s
            scrape_timeout: 10s
            metrics_path: /probe
            params:
              module: [http_2xx]
            scheme: http
            follow_redirects: true
            enable_http2: true
            static_configs:
              - targets:
                  # Internal services
                  - "http://192.168.0.110/"
                  - "http://192.168.0.111:9187/metrics"
                  - "http://192.168.0.112:8080/get"
                  - "http://192.168.0.100:3000/"
                  - "http://192.168.0.101:9100/metrics"
                  - "http://192.168.0.103:8200/ui/"
                  - "http://192.168.0.104:8428/metrics"
                  - "http://192.168.0.105:9090/metrics"
                  - "http://192.168.0.106:3000"
                  # External domains
                  - "http://forgejo.pvenode.ru/"
                  - "http://grafana.pvenode.ru/"
                  - "http://prometheus.pvenode.ru/"
                  - "http://app1.pvenode.ru/"
                  - "http://wiki.pvenode.ru/"
            relabel_configs:
              - source_labels: [__address__]
                target_label: __param_target
              - source_labels: [__param_target]
                target_label: instance
              - target_label: __address__
                replacement: 192.168.0.112:8083
            metric_relabel_configs:
              - source_labels: [__address__]
                separator: ;
                regex: (.*)
                target_label: instance
                replacement: $1
                action: replace
              - source_labels: [__address__]
                separator: ;
                regex: ([^:]+):\d+
                target_label: host
                replacement: ${1}
                action: replace
      tags: prometheus

    # NOTE(review): not idempotent — a second run splices the job in again.
    # Guard with a grep for 'job_name: blackbox' before relying on re-runs.
    - name: Insert blackbox job before remote_write
      shell: |
        # Находим строку с remote_write
        remote_line=$(grep -n "^remote_write:" /etc/prometheus/prometheus.yml | cut -d: -f1)
        if [ -z "$remote_line" ]; then
          echo "ERROR: remote_write not found"
          exit 1
        fi
        # Создаем новый файл
        cp /etc/prometheus/prometheus.yml /etc/prometheus/prometheus.yml.tmp
        # Вставляем blackbox перед remote_write
        head -n $((remote_line - 1)) /etc/prometheus/prometheus.yml > /etc/prometheus/prometheus.yml.new
        cat /tmp/blackbox-job.yml >> /etc/prometheus/prometheus.yml.new
        tail -n +$remote_line /etc/prometheus/prometheus.yml >> /etc/prometheus/prometheus.yml.new
        # Заменяем старый файл
        mv /etc/prometheus/prometheus.yml.new /etc/prometheus/prometheus.yml
        rm -f /etc/prometheus/prometheus.yml.tmp
        echo "Inserted at line $((remote_line - 1))"
      args:
        executable: /bin/bash
      tags: prometheus

    - name: Check Prometheus configuration
      command: promtool check config /etc/prometheus/prometheus.yml
      register: promtool_check
      failed_when: promtool_check.rc != 0
      changed_when: false
      tags: prometheus

    - name: Show config check result
      debug:
        msg: "{{ promtool_check.stdout_lines }}"
      when: promtool_check.rc == 0
      tags: prometheus

    - name: Reload Prometheus
      systemd:
        name: prometheus
        state: reloaded
      when: promtool_check.rc == 0
      tags: prometheus

    # Verification: greps the config, then asks the local Prometheus API whether
    # the blackbox target is active yet (may lag one scrape interval).
    - name: Verify blackbox job added
      shell: |
        sleep 2
        echo "=== Checking if blackbox job exists ==="
        if grep -q "job_name: blackbox" /etc/prometheus/prometheus.yml; then
          echo "✓ Blackbox job found in config"
          echo ""
          echo "=== Checking Prometheus targets ==="
          curl -s "http://localhost:9090/api/v1/targets" | python3 -c "
        import json, sys
        data = json.load(sys.stdin)
        for target in data['data']['activeTargets']:
            job = target['discoveredLabels'].get('job', 'N/A')
            if 'blackbox' in job.lower():
                print(f'✓ Blackbox target: {target[\"health\"]}')
                print(f' URL: {target[\"scrapeUrl\"]}')
                exit(0)
        print('✗ Blackbox not in targets yet (may need 15s scrape interval)')
        "
        else
          echo "✗ Blackbox job not found in config"
        fi
      args:
        executable: /bin/bash
      tags: prometheus

View File

@ -0,0 +1,105 @@
---
# Playbook: splice a Blackbox Exporter scrape job into the Prometheus config
# just before the top-level remote_write section, validate, then reload.
- name: Add Blackbox job to Prometheus config
  hosts: 192.168.0.105
  become: true
  tasks:
    - name: Backup config
      copy:
        src: /etc/prometheus/prometheus.yml
        dest: /etc/prometheus/prometheus.yml.backup-blackbox
        remote_src: true

    # Idempotence guard: without it every run splices in a duplicate scrape job.
    - name: Check if blackbox job already exists
      shell: grep -q "job_name: blackbox" /etc/prometheus/prometheus.yml && echo present || echo absent
      register: blackbox_present
      changed_when: false

    # Stages the job fragment on disk so the shell insert can cat it in.
    - name: Create blackbox config file
      copy:
        dest: /tmp/blackbox-config.yml
        content: |
          # Blackbox Exporter monitoring
          - job_name: blackbox
            honor_timestamps: true
            track_timestamps_staleness: false
            scrape_interval: 15s
            scrape_timeout: 10s
            metrics_path: /probe
            params:
              module: [http_2xx]
            scheme: http
            follow_redirects: true
            enable_http2: true
            static_configs:
              - targets:
                  - "http://192.168.0.110/"
                  - "http://192.168.0.111:9187/metrics"
                  - "http://192.168.0.112:8080/get"
                  - "http://192.168.0.100:3000/"
                  - "http://192.168.0.101:9100/metrics"
                  - "http://192.168.0.103:8200/ui/"
                  - "http://192.168.0.104:8428/metrics"
                  - "http://192.168.0.105:9090/metrics"
                  - "http://192.168.0.106:3000"
                  - "http://forgejo.pvenode.ru/"
                  - "http://grafana.pvenode.ru/"
                  - "http://prometheus.pvenode.ru/"
                  - "http://app1.pvenode.ru/"
                  - "http://wiki.pvenode.ru/"
            relabel_configs:
              - source_labels: [__address__]
                target_label: __param_target
              - source_labels: [__param_target]
                target_label: instance
              - target_label: __address__
                replacement: 192.168.0.112:8083
            metric_relabel_configs:
              - source_labels: [__address__]
                separator: ;
                regex: (.*)
                target_label: instance
                replacement: $1
                action: replace
              - source_labels: [__address__]
                separator: ;
                regex: ([^:]+):\d+
                target_label: host
                replacement: ${1}
                action: replace

    # Only insert when the job is not already present (see guard above).
    - name: Insert blackbox config before remote_write
      shell: |
        # Find remote_write line
        remote_line=$(grep -n "^remote_write:" /etc/prometheus/prometheus.yml | head -1 | cut -d: -f1)
        if [ -z "$remote_line" ]; then
          echo "ERROR: remote_write not found"
          exit 1
        fi
        # Insert blackbox config
        head -n $((remote_line - 1)) /etc/prometheus/prometheus.yml > /tmp/prometheus-new.yml
        cat /tmp/blackbox-config.yml >> /tmp/prometheus-new.yml
        tail -n +$remote_line /etc/prometheus/prometheus.yml >> /tmp/prometheus-new.yml
        # Replace original
        mv /tmp/prometheus-new.yml /etc/prometheus/prometheus.yml
        echo "Inserted at line $((remote_line - 1))"
      when: blackbox_present.stdout == "absent"

    # 'command' fails the play itself on a non-zero rc, so the reload below
    # only ever sees rc == 0 configs.
    - name: Validate config
      command: promtool check config /etc/prometheus/prometheus.yml
      register: config_check
      changed_when: false

    - name: Show validation result
      debug:
        msg: "{{ config_check.stdout_lines }}"

    - name: Reload Prometheus
      systemd:
        name: prometheus
        state: reloaded
      when: config_check.rc == 0

    - name: Check result
      debug:
        msg: |
          Blackbox job {{ "successfully added" if config_check.rc == 0 else "failed to add" }}
          Backup: /etc/prometheus/prometheus.yml.backup-blackbox

View File

@ -0,0 +1,103 @@
---
# Playbook: insert a Blackbox Exporter scrape job before remote_write using
# a line number computed on the host, then validate and reload Prometheus.
- name: Add Blackbox Exporter to Prometheus
  hosts: 192.168.0.105
  become: true
  tasks:
    - name: Backup current config
      copy:
        src: /etc/prometheus/prometheus.yml
        dest: /etc/prometheus/prometheus.yml.backup-{{ ansible_date_time.epoch }}
        remote_src: true

    # Idempotence guard: without it every run inserts a duplicate scrape job.
    - name: Check if blackbox job already exists
      shell: grep -q "job_name: blackbox" /etc/prometheus/prometheus.yml && echo present || echo absent
      register: blackbox_present
      changed_when: false

    # Fail loudly here instead of letting an empty stdout break the
    # arithmetic in the insert task below.
    - name: Get line number where remote_write starts
      shell: grep -n "^remote_write:" /etc/prometheus/prometheus.yml | head -1 | cut -d: -f1
      register: remote_line
      changed_when: false
      failed_when: remote_line.stdout | trim | length == 0
      when: blackbox_present.stdout == "absent"

    - name: Create blackbox job config file
      copy:
        dest: /tmp/blackbox-job.yml
        content: |
          - job_name: blackbox
            honor_timestamps: true
            track_timestamps_staleness: false
            scrape_interval: 15s
            scrape_timeout: 10s
            metrics_path: /probe
            params:
              module: [http_2xx]
            scheme: http
            follow_redirects: true
            enable_http2: true
            static_configs:
              - targets:
                  - "http://192.168.0.110/"
                  - "http://192.168.0.111:9187/metrics"
                  - "http://192.168.0.112:8080/get"
                  - "http://192.168.0.100:3000/"
                  - "http://192.168.0.101:9100/metrics"
                  - "http://192.168.0.103:8200/ui/"
                  - "http://192.168.0.104:8428/metrics"
                  - "http://192.168.0.105:9090/metrics"
                  - "http://192.168.0.106:3000"
                  - "http://forgejo.pvenode.ru/"
                  - "http://grafana.pvenode.ru/"
                  - "http://prometheus.pvenode.ru/"
                  - "http://app1.pvenode.ru/"
                  - "http://wiki.pvenode.ru/"
            relabel_configs:
              - source_labels: [__address__]
                target_label: __param_target
              - source_labels: [__param_target]
                target_label: instance
              - target_label: __address__
                replacement: 192.168.0.112:8083
            metric_relabel_configs:
              - source_labels: [__address__]
                separator: ;
                regex: (.*)
                target_label: instance
                replacement: $1
                action: replace
              - source_labels: [__address__]
                separator: ;
                regex: ([^:]+):\d+
                target_label: host
                replacement: ${1}
                action: replace

    # Splice the staged job in just above remote_write.
    - name: Insert blackbox job before remote_write
      shell: |
        head -n $(({{ remote_line.stdout }} - 1)) /etc/prometheus/prometheus.yml > /tmp/prometheus-new.yml
        cat /tmp/blackbox-job.yml >> /tmp/prometheus-new.yml
        tail -n +{{ remote_line.stdout }} /etc/prometheus/prometheus.yml >> /tmp/prometheus-new.yml
        mv /tmp/prometheus-new.yml /etc/prometheus/prometheus.yml
      args:
        executable: /bin/bash
      when: blackbox_present.stdout == "absent"

    - name: Check Prometheus configuration
      command: promtool check config /etc/prometheus/prometheus.yml
      register: promtool_check
      changed_when: false

    - name: Show config status
      debug:
        msg: "{{ promtool_check.stdout_lines }}"

    - name: Reload Prometheus if config valid
      systemd:
        name: prometheus
        state: reloaded
      when: promtool_check.rc == 0

    - name: Verify blackbox job
      shell: |
        echo "Checking if blackbox job was added..."
        if grep -q "job_name: blackbox" /etc/prometheus/prometheus.yml; then
          echo "SUCCESS: Blackbox job found in config"
        else
          echo "ERROR: Blackbox job not found"
        fi
      changed_when: false

View File

@ -0,0 +1,76 @@
---
# Playbook: add a Blackbox Exporter scrape job to Prometheus; the probe
# targets live in a play-level var and are rendered into the block via Jinja.
- name: Configure Prometheus for Blackbox monitoring
  hosts: 192.168.0.105
  become: yes
  vars:
    blackbox_targets:
      # Core stand services (from the infra plan)
      - "http://192.168.0.110/"
      - "http://192.168.0.111:9187/metrics"  # postgres_exporter
      - "http://192.168.0.112:8080/get"  # httpbin
      - "http://192.168.0.112:8081/metrics"  # cadvisor
      - "http://192.168.0.100:3000/"  # forgejo
      - "http://192.168.0.101:9100/metrics"  # ansible node_exporter
      - "http://192.168.0.103:8200/ui/"  # vault
      - "http://192.168.0.104:8428/metrics"  # victoriametrics
      - "http://192.168.0.105:9090/metrics"  # prometheus
      - "http://192.168.0.106:3000"  # grafana
      # Main domains (first batch for testing)
      - "http://forgejo.pvenode.ru/"
      - "http://grafana.pvenode.ru/"
      - "http://prometheus.pvenode.ru/"
      - "http://app1.pvenode.ru/"
      - "http://wiki.pvenode.ru/"
  tasks:
    - name: Backup original Prometheus config
      copy:
        src: /etc/prometheus/prometheus.yml
        dest: /etc/prometheus/prometheus.yml.backup-{{ ansible_date_time.epoch }}
        remote_src: yes
      tags: prometheus

    # blockinfile markers make this idempotent: re-runs rewrite the managed
    # block in place instead of appending a duplicate.
    # NOTE(review): insertafter anchors on a literal comment line that must
    # already exist in prometheus.yml — confirm it is present there.
    - name: Add blackbox exporter to Prometheus
      blockinfile:
        path: /etc/prometheus/prometheus.yml
        insertafter: ' # cAdvisor container metrics'
        block: |
          # Blackbox Exporter probes
          - job_name: 'blackbox'
            metrics_path: /probe
            params:
              module: [http_2xx]
            static_configs:
              - targets:
          {% for target in blackbox_targets %}
                  - "{{ target }}"
          {% endfor %}
            relabel_configs:
              - source_labels: [__address__]
                target_label: __param_target
              - source_labels: [__param_target]
                target_label: instance
              - target_label: __address__
                replacement: 192.168.0.112:8083 # blackbox-exporter
        marker: "# {mark} ANSIBLE MANAGED BLOCK - blackbox"
      tags: prometheus

    - name: Check Prometheus configuration
      command: promtool check config /etc/prometheus/prometheus.yml
      register: promtool_check
      failed_when: promtool_check.rc != 0
      tags: prometheus

    - name: Reload Prometheus
      systemd:
        name: prometheus
        state: reloaded
      when: promtool_check.rc == 0
      tags: prometheus

    - name: Show configured targets
      debug:
        msg: "Added {{ blackbox_targets|length }} targets to blackbox monitoring"
      tags: prometheus

View File

@ -0,0 +1,43 @@
---
# Playbook: same blackbox scrape-job setup, but the target list is pulled
# from the exporter host's own vars instead of being duplicated here.
- name: Configure Prometheus for Blackbox monitoring
  hosts: 192.168.0.105
  become: yes
  vars:
    # NOTE(review): requires 192.168.0.112 to be in the inventory with
    # blackbox_targets defined (a list of {name, url, module} dicts, per the
    # blackbox_exporter role defaults) — confirm the play can see those vars.
    blackbox_targets: "{{ hostvars['192.168.0.112']['blackbox_targets'] }}"
  tasks:
    # Idempotent: blockinfile rewrites the managed marker block in place.
    - name: Add blackbox exporter to Prometheus
      blockinfile:
        path: /etc/prometheus/prometheus.yml
        insertafter: ' # cAdvisor container metrics'
        block: |
          # Blackbox Exporter probes
          - job_name: 'blackbox'
            metrics_path: /probe
            params:
              module: [http_2xx]
            static_configs:
              - targets:
          {% for target in blackbox_targets %}
                  - {{ target.url }}
          {% endfor %}
            relabel_configs:
              - source_labels: [__address__]
                target_label: __param_target
              - source_labels: [__param_target]
                target_label: instance
              - target_label: __address__
                replacement: 192.168.0.112:8083 # blackbox-exporter
        marker: "# {mark} ANSIBLE MANAGED BLOCK - blackbox"

    - name: Check Prometheus configuration
      command: promtool check config /etc/prometheus/prometheus.yml
      register: promtool_check
      failed_when: promtool_check.rc != 0

    - name: Reload Prometheus
      systemd:
        name: prometheus
        state: reloaded
      when: promtool_check.rc == 0

View File

@ -0,0 +1,6 @@
---
# Playbook: deploy the Alertmanager role to app3.
- name: Deploy Alertmanager
  hosts: 192.168.0.112 # app3
  become: true
  roles:
    - alertmanager

View File

@ -0,0 +1,16 @@
---
# Playbook: deploy the Blackbox Exporter container to app3.
- name: Deploy Blackbox Exporter on App3
  hosts: 192.168.0.112
  become: yes
  gather_facts: yes
  pre_tasks:
    # Docker must be in place before any container role runs.
    - name: Ensure Docker is installed
      include_role:
        name: docker
      apply:
        tags: docker
  roles:
    - role: blackbox_exporter
      tags: blackbox

View File

@ -0,0 +1,16 @@
---
# Playbook: deploy the cAdvisor container to app3.
- name: Deploy cAdvisor on App3
  hosts: 192.168.0.112
  become: yes
  gather_facts: yes
  pre_tasks:
    # Docker must be in place before any container role runs.
    - name: Ensure Docker is installed
      include_role:
        name: docker
      apply:
        tags: docker
  roles:
    - role: cadvisor
      tags: cadvisor

View File

@ -0,0 +1,9 @@
---
- name: Deploy Docker on App3
hosts: 192.168.0.112
become: yes
gather_facts: yes
roles:
- role: docker
tags: docker

View File

@ -0,0 +1,16 @@
---
- name: Deploy httpbin on App3
hosts: 192.168.0.112
become: yes
gather_facts: yes
pre_tasks:
- name: Ensure Docker is installed
include_role:
name: docker
apply:
tags: docker
roles:
- role: httpbin
tags: httpbin

View File

@ -0,0 +1,6 @@
---
# Playbook: deploy the cAdvisor role to app3.
- name: Deploy cAdvisor on App3
  hosts: 192.168.0.112 # pin to the specific host
  become: true
  roles:
    - cadvisor

View File

@ -0,0 +1,6 @@
---
- name: Deploy Loki
hosts: 192.168.0.112 # app3
become: true
roles:
- loki

View File

@ -0,0 +1,6 @@
---
- name: Deploy Node-RED
hosts: 192.168.0.112 # app3
become: true
roles:
- node-red

View File

@ -0,0 +1,6 @@
---
- name: Deploy Promtail on all nodes
hosts: all # Установим Promtail на все хосты для сбора логов
become: true
roles:
- promtail

View File

@ -0,0 +1,12 @@
---
# Alertmanager settings
alertmanager_port: 9093
alertmanager_config_path: /etc/alertmanager
# Email notifications (заполнить позже)
smtp_host: localhost
smtp_from: alertmanager@example.com
smtp_to: admin@example.com
# Webhook для тестирования
webhook_url: "http://localhost:9099"

View File

@ -0,0 +1,33 @@
---
# Role tasks: run Alertmanager in Docker with its config bind-mounted.
- name: Create Alertmanager directories
  file:
    path: "{{ item }}"
    state: directory
    owner: root
    group: root
    mode: '0755'
  loop:
    - "{{ alertmanager_config_path }}"
    - /var/lib/alertmanager

# Registered so the container task below can restart on config changes.
- name: Deploy Alertmanager configuration
  template:
    src: alertmanager.yml.j2
    dest: "{{ alertmanager_config_path }}/alertmanager.yml"
    owner: root
    group: root
    mode: '0644'
  register: alertmanager_config

- name: Run Alertmanager container
  docker_container:
    name: alertmanager
    image: prom/alertmanager:latest
    state: started
    # Restart when the config file changed; 'state: started' alone would
    # leave an already-running container on the stale config.
    restart: "{{ alertmanager_config.changed | default(false) }}"
    restart_policy: always
    ports:
      - "{{ alertmanager_port }}:9093"
    volumes:
      - "{{ alertmanager_config_path }}/alertmanager.yml:/etc/alertmanager/alertmanager.yml"
      - /var/lib/alertmanager:/alertmanager
    command: --config.file=/etc/alertmanager/alertmanager.yml --storage.path=/alertmanager
  tags: alertmanager

View File

@ -0,0 +1,52 @@
global:
  # Notification settings (can be configured later)
  # smtp_smarthost: 'smtp.gmail.com:587'
  # smtp_from: 'alertmanager@example.com'
  # smtp_auth_username: 'user@gmail.com'
  # smtp_auth_password: 'password'
  # smtp_require_tls: true

route:
  # Default route — every alert goes to the Node-RED webhook
  receiver: 'node-red-webhook'
  group_by: ['alertname', 'severity']
  group_wait: 10s
  group_interval: 10s
  repeat_interval: 1h
  # Nested routes: severity-specific receivers with tighter/looser timings
  routes:
    - match:
        severity: critical
      receiver: 'node-red-critical'
      group_wait: 5s
      repeat_interval: 10m
    - match:
        severity: warning
      receiver: 'node-red-warning'
      group_wait: 30s
      repeat_interval: 2h

receivers:
  - name: 'node-red-webhook'
    webhook_configs:
      - url: 'http://node-red:1880/webhook/alertmanager'
        send_resolved: true
  - name: 'node-red-critical'
    webhook_configs:
      - url: 'http://node-red:1880/webhook/critical'
        send_resolved: true
  - name: 'node-red-warning'
    webhook_configs:
      - url: 'http://node-red:1880/webhook/warning'
        send_resolved: true

# Suppress warning-level alerts while a critical alert for the same
# alertname/instance pair is firing.
inhibit_rules:
  - source_match:
      severity: 'critical'
    target_match:
      severity: 'warning'
    equal: ['alertname', 'instance']

View File

@ -0,0 +1,126 @@
---
# Blackbox Exporter configuration
blackbox_version: "latest"
blackbox_port: 8083
blackbox_image: "prom/blackbox-exporter:{{ blackbox_version }}"
blackbox_container_name: "blackbox-exporter"
# All monitoring targets, from the infra plan plus the extra domain list.
blackbox_targets:
  # Core stand services (from the infra plan)
  - name: "app1-nginx"
    url: "http://192.168.0.110/"
    module: "http_2xx"
  - name: "app2-postgresql"
    url: "http://192.168.0.111:9187/metrics" # postgres_exporter
    module: "http_2xx"
  - name: "app3-httpbin"
    url: "http://192.168.0.112:8080/get"
    module: "http_2xx"
  - name: "app3-cadvisor"
    url: "http://192.168.0.112:8081/metrics"
    module: "http_2xx"
  - name: "git-forgejo"
    url: "http://192.168.0.100:3000/"
    module: "http_2xx"
  - name: "ansible"
    url: "http://192.168.0.101:9100/metrics" # node_exporter
    module: "http_2xx"
  - name: "vault"
    url: "http://192.168.0.103:8200/ui/"
    module: "http_2xx"
  - name: "victoriametrics"
    url: "http://192.168.0.104:8428/metrics"
    module: "http_2xx"
  - name: "prometheus"
    url: "http://192.168.0.105:9090/metrics"
    module: "http_2xx"
  - name: "grafana"
    url: "http://192.168.0.106:3000"
    module: "http_2xx"
  # Domains from the extra list
  - name: "wiki-pvenode"
    url: "http://wiki.pvenode.ru/"
    module: "http_2xx"
  - name: "victoria-pvenode"
    url: "http://victoria.pvenode.ru/"
    module: "http_2xx"
  - name: "vault-pvenode"
    url: "http://vault.pvenode.ru/"
    module: "http_2xx"
  - name: "tasks-pvenode"
    url: "http://tasks.pvenode.ru/"
    module: "http_2xx"
  - name: "python-pvenode"
    url: "http://python.pvenode.ru/"
    module: "http_2xx"
  - name: "pvenode-main"
    url: "http://pvenode.ru/"
    module: "http_2xx"
  - name: "proxmox-pvenode"
    url: "http://proxmox.pvenode.ru/"
    module: "http_2xx"
  - name: "prometheus-pvenode"
    url: "http://prometheus.pvenode.ru/"
    module: "http_2xx"
  - name: "postgre-pvenode"
    url: "http://postgre.pvenode.ru/"
    module: "http_2xx"
  - name: "ovpn-pvenode"
    url: "http://ovpn.pvenode.ru/"
    module: "http_2xx"
  - name: "nginxpm-pvenode"
    url: "http://nginxpm.pvenode.ru/"
    module: "http_2xx"
  - name: "nextcloud-pvenode"
    url: "http://nextcloud.pvenode.ru/"
    module: "http_2xx"
  - name: "money-pvenode"
    url: "http://money.pvenode.ru/"
    module: "http_2xx"
  - name: "grafana-pvenode"
    url: "http://grafana.pvenode.ru/"
    module: "http_2xx"
  - name: "gitlab-pvenode"
    url: "http://gitlab.pvenode.ru/"
    module: "http_2xx"
  - name: "forgejo-pvenode"
    url: "http://forgejo.pvenode.ru/"
    module: "http_2xx"
  - name: "bitwarden-pvenode"
    url: "http://bitwarden.pvenode.ru/"
    module: "http_2xx"
  - name: "app1-pvenode"
    # NOTE(review): "ansimble" looks like a typo for "ansible" — confirm the DNS name
    url: "http://ansimble.pvenode.ru/"
    module: "http_2xx"

View File

@ -0,0 +1,42 @@
modules:
  # HTTP probe (2xx-class status)
  http_2xx:
    prober: http
    timeout: 10s
    http:
      # 301/302/403 are accepted on top of 200 — deliberate, so redirecting
      # and auth-walled endpoints still count as "up".
      valid_status_codes: [200, 301, 302, 403]
      method: GET
      preferred_ip_protocol: "ip4"
      follow_redirects: true
      fail_if_ssl: false
      fail_if_not_ssl: false
      tls_config:
        insecure_skip_verify: true # for the test stand
  # HTTP POST probe
  http_post_2xx:
    prober: http
    http:
      method: POST
      preferred_ip_protocol: "ip4"
  # TCP connect probe
  tcp_connect:
    prober: tcp
    timeout: 5s
  # SSL checks (can be enabled later)
  ssl_check:
    prober: http
    http:
      fail_if_not_ssl: true
      tls_config:
        insecure_skip_verify: false
      preferred_ip_protocol: "ip4"
  # ICMP (ping)
  icmp_check:
    prober: icmp
    timeout: 5s
    icmp:
      preferred_ip_protocol: "ip4"

View File

@ -0,0 +1,58 @@
---
# Role tasks: run the Blackbox Exporter container with its probe-modules
# config bind-mounted, open the firewall, then smoke-test a probe.
- name: Create directory for blackbox config
  file:
    path: /etc/blackbox_exporter
    state: directory
    mode: '0755'
  tags: blackbox

# Registered so the container task below can restart on config changes.
- name: Copy blackbox configuration
  copy:
    src: files/blackbox.yml
    dest: /etc/blackbox_exporter/config.yml
    mode: '0644'
  register: blackbox_config
  tags: blackbox

- name: Ensure blackbox-exporter container is running
  community.docker.docker_container:
    name: "{{ blackbox_container_name }}"
    image: "{{ blackbox_image }}"
    state: started
    # Restart when the config file changed; 'state: started' alone would
    # leave an already-running container on the stale config.
    restart: "{{ blackbox_config.changed | default(false) }}"
    restart_policy: unless-stopped
    ports:
      - "{{ blackbox_port }}:9115"
    volumes:
      - "/etc/blackbox_exporter/config.yml:/etc/blackbox_exporter/config.yml"
    command:
      - "--config.file=/etc/blackbox_exporter/config.yml"
      - "--web.listen-address=:9115"
  tags: blackbox

- name: Configure UFW for blackbox-exporter
  ufw:
    rule: allow
    port: "{{ blackbox_port }}"
    proto: tcp
    comment: "Blackbox Exporter"
  tags: blackbox

- name: Wait for blackbox-exporter to be ready
  wait_for:
    port: "{{ blackbox_port }}"
    host: "{{ ansible_host }}"
    delay: 2
    timeout: 60
  tags: blackbox

# Smoke test: probe the local httpbin endpoint through the exporter.
- name: Test blackbox-exporter with local target
  uri:
    url: "http://{{ ansible_host }}:{{ blackbox_port }}/probe?target=http://192.168.0.112:8080/get&module=http_2xx"
    return_content: true
    status_code: 200
  register: blackbox_test
  tags: blackbox

- name: Show blackbox-exporter status
  debug:
    msg: "Blackbox Exporter deployed at http://{{ ansible_host }}:{{ blackbox_port }}/"
  tags: blackbox

View File

@ -0,0 +1,9 @@
---
# Default port for cAdvisor
cadvisor_port: 8080
# Network configuration
cadvisor_network_mode: "host" # alternative: host networking to avoid port conflicts
# Alternative: use different port if default is busy
cadvisor_fallback_ports: [8081, 8082, 8083, 8084]
# NOTE(review): the visible cadvisor tasks probe for a free port themselves
# and do not read any of these three vars — confirm before relying on them.

View File

@ -0,0 +1,43 @@
---
# Role tasks: run cAdvisor in Docker on the first free port in 8080-8085.
# NOTE(review): the port probe is not idempotent — once cAdvisor itself
# occupies a port, the next run sees it busy and may redeploy the container
# on a different port; confirm this is acceptable.
- name: Check for available port for cAdvisor
  shell: |
    for port in 8080 8081 8082 8083 8084 8085; do
      if ! ss -tulpn | grep -q ":${port} "; then
        echo "${port}"
        break
      fi
    done
  args:
    executable: /bin/bash
  register: available_port
  changed_when: false
  tags: cadvisor

- name: Ensure Docker container for cAdvisor is running
  docker_container:
    name: cadvisor
    image: gcr.io/cadvisor/cadvisor:latest
    state: started
    restart_policy: always
    ports:
      # Falls back to 8084 if the port probe produced no output.
      - "{{ available_port.stdout | default('8084') }}:8080"
    volumes:
      - "/:/rootfs:ro"
      - "/var/run:/var/run:ro"
      - "/sys:/sys:ro"
      - "/var/lib/docker/:/var/lib/docker:ro"
      - "/dev/disk/:/dev/disk:ro"
      - "/var/run/docker.sock:/var/run/docker.sock:ro"
    # Broad read-only host visibility plus kernel message access.
    privileged: true
    devices:
      - "/dev/kmsg:/dev/kmsg"
    # assumes the host places containers under docker.slice — TODO confirm
    cgroup_parent: "docker.slice"
  tags: cadvisor

- name: Display cAdvisor access info
  debug:
    msg: |
      cAdvisor is available at:
      - Web UI: http://{{ inventory_hostname }}:{{ available_port.stdout | default('8084') }}
      - Metrics: http://{{ inventory_hostname }}:{{ available_port.stdout | default('8084') }}/metrics
  tags: cadvisor

View File

@ -0,0 +1,14 @@
---
# Docker configuration
docker_compose_version: "v2.27.0"
docker_compose_install_path: "/usr/local/bin/docker-compose"
# Ports for App3 services (informational; intended for use by other roles)
app3_service_ports:
  httpbin: 8080
  cadvisor: 8081
  alertmanager: 8082
  blackbox_exporter: 8083
  loki: 8084
  wordpress: 8085
  mysql: 3306 # internal port

View File

@ -0,0 +1,62 @@
---
# Role tasks: install Docker Engine (via the official convenience script)
# and Docker Compose, enable the service, and verify both installs.
- name: Install prerequisites for Docker
  apt:
    name:
      - curl
      - gnupg
      - ca-certificates
      - lsb-release
    state: present
    update_cache: true
  tags: docker

# 'creates' makes this a no-op once /usr/bin/docker exists.
- name: Install Docker using official script
  shell: |
    curl -fsSL https://get.docker.com -o /tmp/get-docker.sh
    sh /tmp/get-docker.sh
    rm /tmp/get-docker.sh
  args:
    creates: /usr/bin/docker
  tags: docker

- name: Install Docker Compose
  get_url:
    url: "https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-linux-x86_64"
    dest: "{{ docker_compose_install_path }}"
    mode: '0755'
    timeout: 30
  tags: docker

- name: Start and enable Docker service
  systemd:
    name: docker
    state: started
    enabled: true
    daemon_reload: true
  tags: docker

- name: Add admin user to docker group
  user:
    name: admin
    groups: docker
    append: true
  tags: docker

- name: Verify Docker installation
  command: docker --version
  register: docker_version
  changed_when: false
  tags: docker

# Registered under a distinct name: the original 'register: docker_compose_version'
# clobbered the role default of the same name that feeds the download URL above.
- name: Verify Docker Compose installation
  command: docker-compose --version
  register: docker_compose_version_check
  changed_when: false
  tags: docker

- name: Show installation results
  debug:
    msg:
      - "Docker: {{ docker_version.stdout }}"
      - "Docker Compose: {{ docker_compose_version_check.stdout }}"
  tags: docker

View File

@ -0,0 +1,5 @@
---
# httpbin configuration
# Published host port, image, and container name for the httpbin role.
httpbin_port: 8080
httpbin_image: "kennethreitz/httpbin"
httpbin_container_name: "httpbin"

View File

@ -0,0 +1,42 @@
---
# Role tasks: run httpbin in Docker, open the firewall, and verify the
# /get endpoint responds before declaring success.
- name: Ensure httpbin container is running
  community.docker.docker_container:
    name: "{{ httpbin_container_name }}"
    image: "{{ httpbin_image }}"
    state: started
    restart_policy: unless-stopped
    ports:
      - "{{ httpbin_port }}:80"
  tags: httpbin

- name: Configure UFW for httpbin
  ufw:
    rule: allow
    port: "{{ httpbin_port }}"
    proto: tcp
    comment: "httpbin API"
  tags: httpbin

- name: Wait for httpbin to be ready
  wait_for:
    port: "{{ httpbin_port }}"
    host: "{{ ansible_host }}"
    delay: 2
    timeout: 60
  tags: httpbin

# Retries cover the gap between the port opening and the app answering.
- name: Verify httpbin is accessible
  uri:
    url: "http://{{ ansible_host }}:{{ httpbin_port }}/get"
    return_content: true
    status_code: 200
  register: httpbin_check
  until: httpbin_check.status == 200
  retries: 5
  delay: 3
  tags: httpbin

- name: Show httpbin status
  debug:
    msg: "httpbin successfully deployed at http://{{ ansible_host }}:{{ httpbin_port }}/"
  tags: httpbin

View File

@ -0,0 +1,9 @@
---
# Default port for Loki
loki_port: 3100
# Storage configuration
loki_storage_path: /var/lib/loki
# Retention period
loki_retention_period: 720h # 30 дней

33
roles/loki/tasks/main.yml Normal file
View File

@ -0,0 +1,33 @@
---
# Role tasks: run Loki in Docker with its config bind-mounted.
- name: Create Loki directories
  file:
    path: "{{ item }}"
    state: directory
    owner: root
    group: root
    mode: '0755'
  loop:
    - /etc/loki
    - "{{ loki_storage_path }}"

# Registered so the container task below can restart on config changes.
- name: Deploy Loki configuration
  template:
    src: loki-config.yml.j2
    dest: /etc/loki/loki-config.yml
    owner: root
    group: root
    mode: '0644'
  register: loki_config

- name: Run Loki container
  docker_container:
    name: loki
    image: grafana/loki:latest
    state: started
    # Restart when the config file changed; 'state: started' alone would
    # leave an already-running container on the stale config.
    restart: "{{ loki_config.changed | default(false) }}"
    restart_policy: always
    ports:
      # Was hardcoded to 3100; now honors the role default (same value).
      - "{{ loki_port }}:3100"
    volumes:
      - /etc/loki/loki-config.yml:/etc/loki/loki-config.yml
      # Was hardcoded to /var/lib/loki; now honors the role default.
      # NOTE(review): the shipped config stores data under /tmp/loki, so this
      # mount may be unused at runtime — confirm against the template.
      - "{{ loki_storage_path }}:/loki"
    command: -config.file=/etc/loki/loki-config.yml
  tags: loki

View File

@ -0,0 +1,33 @@
auth_enabled: false
server:
http_listen_port: 3100
grpc_listen_port: 9096
common:
path_prefix: /tmp/loki # Изменяем путь на /tmp для теста
storage:
filesystem:
chunks_directory: /tmp/loki/chunks
rules_directory: /tmp/loki/rules
replication_factor: 1
ring:
instance_addr: 127.0.0.1
kvstore:
store: inmemory
limits_config:
allow_structured_metadata: false
schema_config:
configs:
- from: 2020-10-24
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 24h
ruler:
alertmanager_url: http://alertmanager:9093

View File

@ -0,0 +1,9 @@
---
# Node-RED settings
node_red_port: 1880
node_red_data_dir: /var/lib/node-red
node_red_image: nodered/node-red:latest
# Persistence settings
node_red_persist_flows: true
node_red_enable_projects: false

View File

@ -0,0 +1,32 @@
---
- name: Create Node-RED data directory with correct permissions
file:
path: "{{ node_red_data_dir }}"
state: directory
owner: 1000 # Node-RED контейнер запускается от пользователя 1000
group: 1000
mode: '0755'
- name: Run Node-RED container
docker_container:
name: node-red
image: "{{ node_red_image }}"
state: started
restart_policy: always
ports:
- "{{ node_red_port }}:1880"
volumes:
- "{{ node_red_data_dir }}:/data"
user: "1000:1000" # Запускаем от правильного пользователя
env:
NODE_RED_ENABLE_PROJECTS: "{{ 'true' if node_red_enable_projects else 'false' }}"
TZ: "UTC"
tags: node-red
- name: Display Node-RED access info
debug:
msg: |
Node-RED is available at:
- Web UI: http://{{ inventory_hostname }}:{{ node_red_port }}
- API: http://{{ inventory_hostname }}:{{ node_red_port }}/red/api
tags: node-red

View File

@ -0,0 +1,7 @@
---
# Loki connection
loki_host: 192.168.0.112
loki_port: 3100
# Promtail settings
promtail_port: 9080

View File

@ -0,0 +1,31 @@
---
- name: Create Promtail directories
file:
path: /etc/promtail
state: directory
owner: root
group: root
mode: '0755'
- name: Deploy Promtail configuration
template:
src: promtail-config.yml.j2
dest: /etc/promtail/promtail-config.yml
owner: root
group: root
mode: '0644'
- name: Run Promtail container (using host network)
docker_container:
name: promtail
image: grafana/promtail:latest
state: started
restart_policy: always
network_mode: host # <-- КЛЮЧЕВОЕ ИЗМЕНЕНИЕ
volumes:
- /var/log:/var/log:ro
- /var/lib/docker/containers:/var/lib/docker/containers:ro
- /etc/promtail/promtail-config.yml:/etc/promtail/config.yml
command: -config.file=/etc/promtail/config.yml
pid_mode: host
tags: promtail

View File

@ -0,0 +1,28 @@
server:
http_listen_port: 9080
grpc_listen_port: 0
positions:
filename: /tmp/positions.yaml
clients:
- url: http://localhost:3100/loki/api/v1/push # Теперь localhost работает
scrape_configs:
- job_name: system
static_configs:
- targets:
- localhost
labels:
job: varlogs
__path__: /var/log/*log
host: "{{ inventory_hostname }}"
- job_name: docker
static_configs:
- targets:
- localhost
labels:
job: docker
__path__: /var/lib/docker/containers/*/*log
host: "{{ inventory_hostname }}"