diff --git a/playbooks/add-blackbox-carefully.yml b/playbooks/add-blackbox-carefully.yml new file mode 100644 index 0000000..f87142c --- /dev/null +++ b/playbooks/add-blackbox-carefully.yml @@ -0,0 +1,104 @@ +--- +- name: Add Blackbox Exporter job to existing Prometheus config + hosts: 192.168.0.105 + become: yes + + tasks: + - name: Backup current config + copy: + src: /etc/prometheus/prometheus.yml + dest: /etc/prometheus/prometheus.yml.backup-blackbox-{{ ansible_date_time.epoch }} + remote_src: yes + tags: prometheus + + - name: Check if blackbox job already exists + shell: | + grep -q 'job_name:.*blackbox' /etc/prometheus/prometheus.yml && echo "exists" || echo "not exists" + register: blackbox_exists + changed_when: false + tags: prometheus + + - name: Add blackbox job to scrape_configs (if not exists) + blockinfile: + path: /etc/prometheus/prometheus.yml + insertbefore: '^remote_write:' + block: | + - job_name: blackbox + honor_timestamps: true + track_timestamps_staleness: false + scrape_interval: 15s + scrape_timeout: 10s + metrics_path: /probe + params: + module: [http_2xx] + scheme: http + follow_redirects: true + enable_http2: true + static_configs: + - targets: + # Внутренние сервисы стенда + - "http://192.168.0.110/" + - "http://192.168.0.111:9187/metrics" + - "http://192.168.0.112:8080/get" + - "http://192.168.0.100:3000/" + - "http://192.168.0.101:9100/metrics" + - "http://192.168.0.103:8200/ui/" + - "http://192.168.0.104:8428/metrics" + - "http://192.168.0.105:9090/metrics" + - "http://192.168.0.106:3000" + # Внешние домены + - "http://forgejo.pvenode.ru/" + - "http://grafana.pvenode.ru/" + - "http://prometheus.pvenode.ru/" + - "http://app1.pvenode.ru/" + - "http://wiki.pvenode.ru/" + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + - target_label: __address__ + replacement: 192.168.0.112:8083 + metric_relabel_configs: + - source_labels: [__address__] + 
separator: ; + regex: (.*) + target_label: instance + replacement: $1 + action: replace + - source_labels: [__address__] + separator: ; + regex: ([^:]+):\d+ + target_label: host + replacement: ${1} + action: replace + marker: "# {mark} ANSIBLE MANAGED BLOCK - blackbox" + when: blackbox_exists.stdout == "not exists" + tags: prometheus + + - name: Check Prometheus configuration + command: promtool check config /etc/prometheus/prometheus.yml + register: promtool_check + failed_when: promtool_check.rc != 0 + changed_when: false + tags: prometheus + + - name: Show config check result + debug: + msg: "{{ promtool_check.stdout_lines }}" + when: promtool_check.rc == 0 + tags: prometheus + + - name: Reload Prometheus if config is valid + systemd: + name: prometheus + state: reloaded + when: promtool_check.rc == 0 + tags: prometheus + + - name: Show status + debug: + msg: | + Blackbox job {{ "added successfully" if promtool_check.rc == 0 else "failed to add" }} + Backup created: /etc/prometheus/prometheus.yml.backup-blackbox-{{ ansible_date_time.epoch }} + tags: prometheus diff --git a/playbooks/add-blackbox-correct.yml b/playbooks/add-blackbox-correct.yml new file mode 100644 index 0000000..1832955 --- /dev/null +++ b/playbooks/add-blackbox-correct.yml @@ -0,0 +1,151 @@ +--- +- name: Add correct Blackbox Exporter job + hosts: 192.168.0.105 + become: yes + + tasks: + - name: Backup current config + copy: + src: /etc/prometheus/prometheus.yml + dest: /etc/prometheus/prometheus.yml.backup-pre-blackbox-{{ ansible_date_time.epoch }} + remote_src: yes + tags: prometheus + + - name: Check current line numbers + shell: | + echo "Last scrape_config job ends at line:" + grep -n "job_name: postgres" /etc/prometheus/prometheus.yml + echo "" + echo "Remote_write starts at line:" + grep -n "^remote_write:" /etc/prometheus/prometheus.yml + register: line_info + changed_when: false + tags: prometheus + + - name: Create correct blackbox job config + copy: + dest: /tmp/blackbox-job.yml + 
content: | + - job_name: blackbox + honor_timestamps: true + track_timestamps_staleness: false + scrape_interval: 15s + scrape_timeout: 10s + metrics_path: /probe + params: + module: [http_2xx] + scheme: http + follow_redirects: true + enable_http2: true + static_configs: + - targets: + # Internal services + - "http://192.168.0.110/" + - "http://192.168.0.111:9187/metrics" + - "http://192.168.0.112:8080/get" + - "http://192.168.0.100:3000/" + - "http://192.168.0.101:9100/metrics" + - "http://192.168.0.103:8200/ui/" + - "http://192.168.0.104:8428/metrics" + - "http://192.168.0.105:9090/metrics" + - "http://192.168.0.106:3000" + # External domains + - "http://forgejo.pvenode.ru/" + - "http://grafana.pvenode.ru/" + - "http://prometheus.pvenode.ru/" + - "http://app1.pvenode.ru/" + - "http://wiki.pvenode.ru/" + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + - target_label: __address__ + replacement: 192.168.0.112:8083 + metric_relabel_configs: + - source_labels: [__address__] + separator: ; + regex: (.*) + target_label: instance + replacement: $1 + action: replace + - source_labels: [__address__] + separator: ; + regex: ([^:]+):\d+ + target_label: host + replacement: ${1} + action: replace + tags: prometheus + + - name: Insert blackbox job before remote_write + shell: | + # Находим строку с remote_write + remote_line=$(grep -n "^remote_write:" /etc/prometheus/prometheus.yml | cut -d: -f1) + + if [ -z "$remote_line" ]; then + echo "ERROR: remote_write not found" + exit 1 + fi + + # Создаем новый файл + cp /etc/prometheus/prometheus.yml /etc/prometheus/prometheus.yml.tmp + + # Вставляем blackbox перед remote_write + head -n $((remote_line - 1)) /etc/prometheus/prometheus.yml > /etc/prometheus/prometheus.yml.new + cat /tmp/blackbox-job.yml >> /etc/prometheus/prometheus.yml.new + tail -n +$remote_line /etc/prometheus/prometheus.yml >> /etc/prometheus/prometheus.yml.new + + # 
Заменяем старый файл + mv /etc/prometheus/prometheus.yml.new /etc/prometheus/prometheus.yml + rm -f /etc/prometheus/prometheus.yml.tmp + + echo "Inserted at line $((remote_line - 1))" + args: + executable: /bin/bash + tags: prometheus + + - name: Check Prometheus configuration + command: promtool check config /etc/prometheus/prometheus.yml + register: promtool_check + failed_when: promtool_check.rc != 0 + changed_when: false + tags: prometheus + + - name: Show config check result + debug: + msg: "{{ promtool_check.stdout_lines }}" + when: promtool_check.rc == 0 + tags: prometheus + + - name: Reload Prometheus + systemd: + name: prometheus + state: reloaded + when: promtool_check.rc == 0 + tags: prometheus + + - name: Verify blackbox job added + shell: | + sleep 2 + echo "=== Checking if blackbox job exists ===" + if grep -q "job_name: blackbox" /etc/prometheus/prometheus.yml; then + echo "✓ Blackbox job found in config" + echo "" + echo "=== Checking Prometheus targets ===" + curl -s "http://localhost:9090/api/v1/targets" | python3 -c " +import json, sys +data = json.load(sys.stdin) +for target in data['data']['activeTargets']: + job = target['discoveredLabels'].get('job', 'N/A') + if 'blackbox' in job.lower(): + print(f'✓ Blackbox target: {target[\"health\"]}') + print(f' URL: {target[\"scrapeUrl\"]}') + exit(0) +print('✗ Blackbox not in targets yet (may need 15s scrape interval)') +" + else + echo "✗ Blackbox job not found in config" + fi + args: + executable: /bin/bash + tags: prometheus diff --git a/playbooks/add-blackbox-final.yml b/playbooks/add-blackbox-final.yml new file mode 100644 index 0000000..c1ec57e --- /dev/null +++ b/playbooks/add-blackbox-final.yml @@ -0,0 +1,105 @@ +--- +- name: Add Blackbox job to Prometheus config + hosts: 192.168.0.105 + become: yes + + tasks: + - name: Backup config + copy: + src: /etc/prometheus/prometheus.yml + dest: /etc/prometheus/prometheus.yml.backup-blackbox + remote_src: yes + + - name: Create blackbox config file + 
copy: + dest: /tmp/blackbox-config.yml + content: | + # Blackbox Exporter monitoring + - job_name: blackbox + honor_timestamps: true + track_timestamps_staleness: false + scrape_interval: 15s + scrape_timeout: 10s + metrics_path: /probe + params: + module: [http_2xx] + scheme: http + follow_redirects: true + enable_http2: true + static_configs: + - targets: + - "http://192.168.0.110/" + - "http://192.168.0.111:9187/metrics" + - "http://192.168.0.112:8080/get" + - "http://192.168.0.100:3000/" + - "http://192.168.0.101:9100/metrics" + - "http://192.168.0.103:8200/ui/" + - "http://192.168.0.104:8428/metrics" + - "http://192.168.0.105:9090/metrics" + - "http://192.168.0.106:3000" + - "http://forgejo.pvenode.ru/" + - "http://grafana.pvenode.ru/" + - "http://prometheus.pvenode.ru/" + - "http://app1.pvenode.ru/" + - "http://wiki.pvenode.ru/" + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + - target_label: __address__ + replacement: 192.168.0.112:8083 + metric_relabel_configs: + - source_labels: [__address__] + separator: ; + regex: (.*) + target_label: instance + replacement: $1 + action: replace + - source_labels: [__address__] + separator: ; + regex: ([^:]+):\d+ + target_label: host + replacement: ${1} + action: replace + + - name: Insert blackbox config before remote_write + shell: | + # Find remote_write line + remote_line=$(grep -n "^remote_write:" /etc/prometheus/prometheus.yml | head -1 | cut -d: -f1) + + if [ -z "$remote_line" ]; then + echo "ERROR: remote_write not found" + exit 1 + fi + + # Insert blackbox config + head -n $((remote_line - 1)) /etc/prometheus/prometheus.yml > /tmp/prometheus-new.yml + cat /tmp/blackbox-config.yml >> /tmp/prometheus-new.yml + tail -n +$remote_line /etc/prometheus/prometheus.yml >> /tmp/prometheus-new.yml + + # Replace original + mv /tmp/prometheus-new.yml /etc/prometheus/prometheus.yml + + echo "Inserted at line $((remote_line - 1))" 
+ + - name: Validate config + command: promtool check config /etc/prometheus/prometheus.yml + register: config_check + changed_when: false + + - name: Show validation result + debug: + msg: "{{ config_check.stdout_lines }}" + + - name: Reload Prometheus + systemd: + name: prometheus + state: reloaded + when: config_check.rc == 0 + + - name: Check result + debug: + msg: | + Blackbox job {{ "successfully added" if config_check.rc == 0 else "failed to add" }} + Backup: /etc/prometheus/prometheus.yml.backup-blackbox diff --git a/playbooks/add-blackbox-simple.yml b/playbooks/add-blackbox-simple.yml new file mode 100644 index 0000000..1ceefb2 --- /dev/null +++ b/playbooks/add-blackbox-simple.yml @@ -0,0 +1,103 @@ +--- +- name: Add Blackbox Exporter to Prometheus + hosts: 192.168.0.105 + become: yes + + tasks: + - name: Backup current config + copy: + src: /etc/prometheus/prometheus.yml + dest: /etc/prometheus/prometheus.yml.backup-{{ ansible_date_time.epoch }} + remote_src: yes + + - name: Get line number where remote_write starts + shell: grep -n "^remote_write:" /etc/prometheus/prometheus.yml | cut -d: -f1 + register: remote_line + changed_when: false + + - name: Create blackbox job config file + copy: + dest: /tmp/blackbox-job.yml + content: | + - job_name: blackbox + honor_timestamps: true + track_timestamps_staleness: false + scrape_interval: 15s + scrape_timeout: 10s + metrics_path: /probe + params: + module: [http_2xx] + scheme: http + follow_redirects: true + enable_http2: true + static_configs: + - targets: + - "http://192.168.0.110/" + - "http://192.168.0.111:9187/metrics" + - "http://192.168.0.112:8080/get" + - "http://192.168.0.100:3000/" + - "http://192.168.0.101:9100/metrics" + - "http://192.168.0.103:8200/ui/" + - "http://192.168.0.104:8428/metrics" + - "http://192.168.0.105:9090/metrics" + - "http://192.168.0.106:3000" + - "http://forgejo.pvenode.ru/" + - "http://grafana.pvenode.ru/" + - "http://prometheus.pvenode.ru/" + - "http://app1.pvenode.ru/" + - 
"http://wiki.pvenode.ru/" + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + - target_label: __address__ + replacement: 192.168.0.112:8083 + metric_relabel_configs: + - source_labels: [__address__] + separator: ; + regex: (.*) + target_label: instance + replacement: $1 + action: replace + - source_labels: [__address__] + separator: ; + regex: ([^:]+):\d+ + target_label: host + replacement: ${1} + action: replace + + - name: Insert blackbox job before remote_write + shell: | + # Вставляем blackbox job перед remote_write + head -n $(({{ remote_line.stdout }} - 1)) /etc/prometheus/prometheus.yml > /tmp/prometheus-new.yml + cat /tmp/blackbox-job.yml >> /tmp/prometheus-new.yml + tail -n +{{ remote_line.stdout }} /etc/prometheus/prometheus.yml >> /tmp/prometheus-new.yml + mv /tmp/prometheus-new.yml /etc/prometheus/prometheus.yml + args: + executable: /bin/bash + + - name: Check Prometheus configuration + command: promtool check config /etc/prometheus/prometheus.yml + register: promtool_check + changed_when: false + + - name: Show config status + debug: + msg: "{{ promtool_check.stdout_lines }}" + + - name: Reload Prometheus if config valid + systemd: + name: prometheus + state: reloaded + when: promtool_check.rc == 0 + + - name: Verify blackbox job + shell: | + echo "Checking if blackbox job was added..." 
+ if grep -q "job_name: blackbox" /etc/prometheus/prometheus.yml; then + echo "SUCCESS: Blackbox job found in config" + else + echo "ERROR: Blackbox job not found" + fi + changed_when: false diff --git a/playbooks/configure-blackbox-monitoring-fixed.yml b/playbooks/configure-blackbox-monitoring-fixed.yml new file mode 100644 index 0000000..00bff68 --- /dev/null +++ b/playbooks/configure-blackbox-monitoring-fixed.yml @@ -0,0 +1,76 @@ +--- +- name: Configure Prometheus for Blackbox monitoring + hosts: 192.168.0.105 + become: yes + + vars: + blackbox_targets: + # Основные сервисы стенда (из ИП) + - "http://192.168.0.110/" + - "http://192.168.0.111:9187/metrics" # postgres_exporter + - "http://192.168.0.112:8080/get" # httpbin + - "http://192.168.0.112:8081/metrics" # cadvisor + - "http://192.168.0.100:3000/" # forgejo + - "http://192.168.0.101:9100/metrics" # ansible node_exporter + - "http://192.168.0.103:8200/ui/" # vault + - "http://192.168.0.104:8428/metrics" # victoriametrics + - "http://192.168.0.105:9090/metrics" # prometheus + - "http://192.168.0.106:3000" # grafana + + # Основные домены (первые для теста) + - "http://forgejo.pvenode.ru/" + - "http://grafana.pvenode.ru/" + - "http://prometheus.pvenode.ru/" + - "http://app1.pvenode.ru/" + - "http://wiki.pvenode.ru/" + + tasks: + - name: Backup original Prometheus config + copy: + src: /etc/prometheus/prometheus.yml + dest: /etc/prometheus/prometheus.yml.backup-{{ ansible_date_time.epoch }} + remote_src: yes + tags: prometheus + + - name: Add blackbox exporter to Prometheus + blockinfile: + path: /etc/prometheus/prometheus.yml + insertafter: ' # cAdvisor container metrics' + block: | + # Blackbox Exporter probes + - job_name: 'blackbox' + metrics_path: /probe + params: + module: [http_2xx] + static_configs: + - targets: + {% for target in blackbox_targets %} + - "{{ target }}" + {% endfor %} + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + 
target_label: instance + - target_label: __address__ + replacement: 192.168.0.112:8083 # blackbox-exporter + marker: "# {mark} ANSIBLE MANAGED BLOCK - blackbox" + tags: prometheus + + - name: Check Prometheus configuration + command: promtool check config /etc/prometheus/prometheus.yml + register: promtool_check + failed_when: promtool_check.rc != 0 + tags: prometheus + + - name: Reload Prometheus + systemd: + name: prometheus + state: reloaded + when: promtool_check.rc == 0 + tags: prometheus + + - name: Show configured targets + debug: + msg: "Added {{ blackbox_targets|length }} targets to blackbox monitoring" + tags: prometheus diff --git a/playbooks/configure-blackbox-monitoring.yml b/playbooks/configure-blackbox-monitoring.yml new file mode 100644 index 0000000..0cb18b5 --- /dev/null +++ b/playbooks/configure-blackbox-monitoring.yml @@ -0,0 +1,43 @@ +--- +- name: Configure Prometheus for Blackbox monitoring + hosts: 192.168.0.105 + become: yes + + vars: + blackbox_targets: "{{ hostvars['192.168.0.112']['blackbox_targets'] }}" + + tasks: + - name: Add blackbox exporter to Prometheus + blockinfile: + path: /etc/prometheus/prometheus.yml + insertafter: ' # cAdvisor container metrics' + block: | + # Blackbox Exporter probes + - job_name: 'blackbox' + metrics_path: /probe + params: + module: [http_2xx] + static_configs: + - targets: + {% for target in blackbox_targets %} + - {{ target.url }} + {% endfor %} + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + - target_label: __address__ + replacement: 192.168.0.112:8083 # blackbox-exporter + marker: "# {mark} ANSIBLE MANAGED BLOCK - blackbox" + + - name: Check Prometheus configuration + command: promtool check config /etc/prometheus/prometheus.yml + register: promtool_check + failed_when: promtool_check.rc != 0 + + - name: Reload Prometheus + systemd: + name: prometheus + state: reloaded + when: promtool_check.rc == 0 diff 
--git a/playbooks/deploy-app3-blackbox.yml b/playbooks/deploy-app3-blackbox.yml new file mode 100644 index 0000000..0f18243 --- /dev/null +++ b/playbooks/deploy-app3-blackbox.yml @@ -0,0 +1,16 @@ +--- +- name: Deploy Blackbox Exporter on App3 + hosts: 192.168.0.112 + become: yes + gather_facts: yes + + pre_tasks: + - name: Ensure Docker is installed + include_role: + name: docker + apply: + tags: docker + + roles: + - role: blackbox_exporter + tags: blackbox diff --git a/playbooks/deploy-app3-cadvisor.yml b/playbooks/deploy-app3-cadvisor.yml new file mode 100644 index 0000000..bd21d3c --- /dev/null +++ b/playbooks/deploy-app3-cadvisor.yml @@ -0,0 +1,16 @@ +--- +- name: Deploy cAdvisor on App3 + hosts: 192.168.0.112 + become: yes + gather_facts: yes + + pre_tasks: + - name: Ensure Docker is installed + include_role: + name: docker + apply: + tags: docker + + roles: + - role: cadvisor + tags: cadvisor diff --git a/playbooks/deploy-app3-docker.yml b/playbooks/deploy-app3-docker.yml new file mode 100644 index 0000000..ca08483 --- /dev/null +++ b/playbooks/deploy-app3-docker.yml @@ -0,0 +1,9 @@ +--- +- name: Deploy Docker on App3 + hosts: 192.168.0.112 + become: yes + gather_facts: yes + + roles: + - role: docker + tags: docker diff --git a/playbooks/deploy-app3-httpbin.yml b/playbooks/deploy-app3-httpbin.yml new file mode 100644 index 0000000..e440f5e --- /dev/null +++ b/playbooks/deploy-app3-httpbin.yml @@ -0,0 +1,16 @@ +--- +- name: Deploy httpbin on App3 + hosts: 192.168.0.112 + become: yes + gather_facts: yes + + pre_tasks: + - name: Ensure Docker is installed + include_role: + name: docker + apply: + tags: docker + + roles: + - role: httpbin + tags: httpbin diff --git a/roles/blackbox_exporter/defaults/main.yml b/roles/blackbox_exporter/defaults/main.yml new file mode 100644 index 0000000..700c480 --- /dev/null +++ b/roles/blackbox_exporter/defaults/main.yml @@ -0,0 +1,126 @@ +--- +# Blackbox Exporter configuration +blackbox_version: "latest" +blackbox_port: 8083 
+blackbox_image: "prom/blackbox-exporter:{{ blackbox_version }}" +blackbox_container_name: "blackbox-exporter" + +# Все цели для мониторинга из ИП и твоего списка +blackbox_targets: + # Основные сервисы стенда (из ИП) + - name: "app1-nginx" + url: "http://192.168.0.110/" + module: "http_2xx" + + - name: "app2-postgresql" + url: "http://192.168.0.111:9187/metrics" # postgres_exporter + module: "http_2xx" + + - name: "app3-httpbin" + url: "http://192.168.0.112:8080/get" + module: "http_2xx" + + - name: "app3-cadvisor" + url: "http://192.168.0.112:8081/metrics" + module: "http_2xx" + + - name: "git-forgejo" + url: "http://192.168.0.100:3000/" + module: "http_2xx" + + - name: "ansible" + url: "http://192.168.0.101:9100/metrics" # node_exporter + module: "http_2xx" + + - name: "vault" + url: "http://192.168.0.103:8200/ui/" + module: "http_2xx" + + - name: "victoriametrics" + url: "http://192.168.0.104:8428/metrics" + module: "http_2xx" + + - name: "prometheus" + url: "http://192.168.0.105:9090/metrics" + module: "http_2xx" + + - name: "grafana" + url: "http://192.168.0.106:3000" + module: "http_2xx" + + # Домены из твоего списка + - name: "wiki-pvenode" + url: "http://wiki.pvenode.ru/" + module: "http_2xx" + + - name: "victoria-pvenode" + url: "http://victoria.pvenode.ru/" + module: "http_2xx" + + - name: "vault-pvenode" + url: "http://vault.pvenode.ru/" + module: "http_2xx" + + - name: "tasks-pvenode" + url: "http://tasks.pvenode.ru/" + module: "http_2xx" + + - name: "python-pvenode" + url: "http://python.pvenode.ru/" + module: "http_2xx" + + - name: "pvenode-main" + url: "http://pvenode.ru/" + module: "http_2xx" + + - name: "proxmox-pvenode" + url: "http://proxmox.pvenode.ru/" + module: "http_2xx" + + - name: "prometheus-pvenode" + url: "http://prometheus.pvenode.ru/" + module: "http_2xx" + + - name: "postgre-pvenode" + url: "http://postgre.pvenode.ru/" + module: "http_2xx" + + - name: "ovpn-pvenode" + url: "http://ovpn.pvenode.ru/" + module: "http_2xx" + + - name: 
"nginxpm-pvenode" + url: "http://nginxpm.pvenode.ru/" + module: "http_2xx" + + - name: "nextcloud-pvenode" + url: "http://nextcloud.pvenode.ru/" + module: "http_2xx" + + - name: "money-pvenode" + url: "http://money.pvenode.ru/" + module: "http_2xx" + + - name: "grafana-pvenode" + url: "http://grafana.pvenode.ru/" + module: "http_2xx" + + - name: "gitlab-pvenode" + url: "http://gitlab.pvenode.ru/" + module: "http_2xx" + + - name: "forgejo-pvenode" + url: "http://forgejo.pvenode.ru/" + module: "http_2xx" + + - name: "bitwarden-pvenode" + url: "http://bitwarden.pvenode.ru/" + module: "http_2xx" + + - name: "app1-pvenode" + url: "http://app1.pvenode.ru/" + module: "http_2xx" + + - name: "ansible-pvenode" + url: "http://ansimble.pvenode.ru/" + module: "http_2xx" diff --git a/roles/blackbox_exporter/files/blackbox.yml b/roles/blackbox_exporter/files/blackbox.yml new file mode 100644 index 0000000..ad6c949 --- /dev/null +++ b/roles/blackbox_exporter/files/blackbox.yml @@ -0,0 +1,42 @@ +modules: + # HTTP проверка (2xx статус) + http_2xx: + prober: http + timeout: 10s + http: + valid_status_codes: [200, 301, 302, 403] + method: GET + preferred_ip_protocol: "ip4" + follow_redirects: true + fail_if_ssl: false + fail_if_not_ssl: false + tls_config: + insecure_skip_verify: true # для тестового стенда + + # HTTP POST проверка + http_post_2xx: + prober: http + http: + method: POST + preferred_ip_protocol: "ip4" + + # TCP подключение + tcp_connect: + prober: tcp + timeout: 5s + + # SSL проверки (можно добавить позже) + ssl_check: + prober: http + http: + fail_if_not_ssl: true + tls_config: + insecure_skip_verify: false + preferred_ip_protocol: "ip4" + + # ICMP (ping) + icmp_check: + prober: icmp + timeout: 5s + icmp: + preferred_ip_protocol: "ip4" diff --git a/roles/blackbox_exporter/tasks/main.yml b/roles/blackbox_exporter/tasks/main.yml new file mode 100644 index 0000000..e23f0c1 --- /dev/null +++ b/roles/blackbox_exporter/tasks/main.yml @@ -0,0 +1,58 @@ +--- +- name: Create 
directory for blackbox config + file: + path: /etc/blackbox_exporter + state: directory + mode: '0755' + tags: blackbox + +- name: Copy blackbox configuration + copy: + src: files/blackbox.yml + dest: /etc/blackbox_exporter/config.yml + mode: '0644' + tags: blackbox + +- name: Ensure blackbox-exporter container is running + community.docker.docker_container: + name: "{{ blackbox_container_name }}" + image: "{{ blackbox_image }}" + state: started + restart_policy: unless-stopped + ports: + - "{{ blackbox_port }}:9115" + volumes: + - "/etc/blackbox_exporter/config.yml:/etc/blackbox_exporter/config.yml" + command: + - "--config.file=/etc/blackbox_exporter/config.yml" + - "--web.listen-address=:9115" + tags: blackbox + +- name: Configure UFW for blackbox-exporter + ufw: + rule: allow + port: "{{ blackbox_port }}" + proto: tcp + comment: "Blackbox Exporter" + tags: blackbox + +- name: Wait for blackbox-exporter to be ready + wait_for: + port: "{{ blackbox_port }}" + host: "{{ ansible_host }}" + delay: 2 + timeout: 60 + tags: blackbox + +- name: Test blackbox-exporter with local target + uri: + url: "http://{{ ansible_host }}:{{ blackbox_port }}/probe?target=http://192.168.0.112:8080/get&module=http_2xx" + return_content: true + status_code: 200 + register: blackbox_test + tags: blackbox + +- name: Show blackbox-exporter status + debug: + msg: "Blackbox Exporter deployed at http://{{ ansible_host }}:{{ blackbox_port }}/" + tags: blackbox diff --git a/roles/cadvisor/defaults/main.yml b/roles/cadvisor/defaults/main.yml new file mode 100644 index 0000000..5b69442 --- /dev/null +++ b/roles/cadvisor/defaults/main.yml @@ -0,0 +1,6 @@ +--- +# cAdvisor configuration +cadvisor_version: "latest" +cadvisor_port: 8081 +cadvisor_image: "gcr.io/cadvisor/cadvisor:{{ cadvisor_version }}" +cadvisor_container_name: "cadvisor" diff --git a/roles/cadvisor/tasks/main.yml b/roles/cadvisor/tasks/main.yml new file mode 100644 index 0000000..6fa7bbe --- /dev/null +++ 
b/roles/cadvisor/tasks/main.yml @@ -0,0 +1,51 @@ +--- +- name: Ensure cAdvisor container is running + community.docker.docker_container: + name: "{{ cadvisor_container_name }}" + image: "{{ cadvisor_image }}" + state: started + restart_policy: unless-stopped + ports: + - "{{ cadvisor_port }}:8080" + volumes: + - "/:/rootfs:ro" + - "/var/run:/var/run:ro" + - "/sys:/sys:ro" + - "/var/lib/docker/:/var/lib/docker:ro" + - "/dev/disk/:/dev/disk:ro" + privileged: true + devices: + - "/dev/kmsg:/dev/kmsg" + tags: cadvisor + +- name: Configure UFW for cAdvisor + ufw: + rule: allow + port: "{{ cadvisor_port }}" + proto: tcp + comment: "cAdvisor metrics" + tags: cadvisor + +- name: Wait for cAdvisor to be ready + wait_for: + port: "{{ cadvisor_port }}" + host: "{{ ansible_host }}" + delay: 2 + timeout: 60 + tags: cadvisor + +- name: Verify cAdvisor is accessible + uri: + url: "http://{{ ansible_host }}:{{ cadvisor_port }}/metrics" + return_content: true + status_code: 200 + register: cadvisor_check + until: cadvisor_check.status == 200 + retries: 5 + delay: 3 + tags: cadvisor + +- name: Show cAdvisor status + debug: + msg: "cAdvisor successfully deployed at http://{{ ansible_host }}:{{ cadvisor_port }}/metrics" + tags: cadvisor diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml new file mode 100644 index 0000000..f638b55 --- /dev/null +++ b/roles/docker/defaults/main.yml @@ -0,0 +1,14 @@ +--- +# Docker configuration +docker_compose_version: "v2.27.0" +docker_compose_install_path: "/usr/local/bin/docker-compose" + +# Ports for App3 services (для информации, будут использоваться в других ролях) +app3_service_ports: + httpbin: 8080 + cadvisor: 8081 + alertmanager: 8082 + blackbox_exporter: 8083 + loki: 8084 + wordpress: 8085 + mysql: 3306 # internal port diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml new file mode 100644 index 0000000..f56c671 --- /dev/null +++ b/roles/docker/tasks/main.yml @@ -0,0 +1,62 @@ +--- +- name: Install 
prerequisites for Docker + apt: + name: + - curl + - gnupg + - ca-certificates + - lsb-release + state: present + update_cache: yes + tags: docker + +- name: Install Docker using official script + shell: | + curl -fsSL https://get.docker.com -o /tmp/get-docker.sh + sh /tmp/get-docker.sh + rm /tmp/get-docker.sh + args: + creates: /usr/bin/docker + tags: docker + +- name: Install Docker Compose + get_url: + url: "https://github.com/docker/compose/releases/download/{{ docker_compose_version }}/docker-compose-linux-x86_64" + dest: "{{ docker_compose_install_path }}" + mode: '0755' + timeout: 30 + tags: docker + +- name: Start and enable Docker service + systemd: + name: docker + state: started + enabled: yes + daemon_reload: yes + tags: docker + +- name: Add admin user to docker group + user: + name: admin + groups: docker + append: yes + tags: docker + +- name: Verify Docker installation + command: docker --version + register: docker_version + changed_when: false + tags: docker + +- name: Verify Docker Compose installation + command: docker-compose --version + register: docker_compose_check + changed_when: false + tags: docker + +- name: Show installation results + debug: + msg: + - "Docker: {{ docker_version.stdout }}" + - "Docker Compose: {{ docker_compose_check.stdout }}" + tags: docker diff --git a/roles/httpbin/defaults/main.yml b/roles/httpbin/defaults/main.yml new file mode 100644 index 0000000..c71eaa1 --- /dev/null +++ b/roles/httpbin/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# httpbin configuration +httpbin_port: 8080 +httpbin_image: "kennethreitz/httpbin" +httpbin_container_name: "httpbin" diff --git a/roles/httpbin/tasks/main.yml b/roles/httpbin/tasks/main.yml new file mode 100644 index 0000000..c3738c7 --- /dev/null +++ b/roles/httpbin/tasks/main.yml @@ -0,0 +1,42 @@ +--- +- name: Ensure httpbin container is running + community.docker.docker_container: + name: "{{ httpbin_container_name }}" + image: "{{ httpbin_image }}" + state: started + 
restart_policy: unless-stopped + ports: + - "{{ httpbin_port }}:80" + tags: httpbin + +- name: Configure UFW for httpbin + ufw: + rule: allow + port: "{{ httpbin_port }}" + proto: tcp + comment: "httpbin API" + tags: httpbin + +- name: Wait for httpbin to be ready + wait_for: + port: "{{ httpbin_port }}" + host: "{{ ansible_host }}" + delay: 2 + timeout: 60 + tags: httpbin + +- name: Verify httpbin is accessible + uri: + url: "http://{{ ansible_host }}:{{ httpbin_port }}/get" + return_content: true + status_code: 200 + register: httpbin_check + until: httpbin_check.status == 200 + retries: 5 + delay: 3 + tags: httpbin + +- name: Show httpbin status + debug: + msg: "httpbin successfully deployed at http://{{ ansible_host }}:{{ httpbin_port }}/" + tags: httpbin