# Backup scheduling: BACKUP_TIMES_UTC replaces the single BACKUP_HOUR/BACKUP_MINUTE
# slot with a comma-separated list of UTC times. The scheduler walks all slots and
# sleeps until the soonest future one, so four daily backups become a one-line env
# change: BACKUP_TIMES_UTC=02:30,08:30,14:30,20:30 (default).
# Legacy BACKUP_HOUR/BACKUP_MINUTE are still honored as a single slot for
# backwards compatibility with existing .env files.
services:
  # TimescaleDB (HA image) — primary datastore for all ingest services.
  timescale_db:
    image: timescale/timescaledb-ha:pg16-ts2.15
    restart: always
    environment:
      - POSTGRES_DB=${POSTGRES_DB}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      # HA image's PGDATA is /home/postgres/pgdata/data, not /var/lib/postgresql/data.
      # Mount the named volume there so data survives container rebuilds.
      - PGDATA=/home/postgres/pgdata/data
    ports:
      # Quoted so YAML never mis-parses the mapping as a sexagesimal number.
      - "5433:5432"
    volumes:
      - timescale-data:/home/postgres/pgdata
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Movement-data ingester: runs migrations first, then the ingest loop.
  ingest_movement:
    build:
      context: .
      dockerfile: Dockerfile
    command: sh -c "python run_migrations.py && python ingest_movement_rev.py"
    restart: always
    depends_on:
      timescale_db:
        condition: service_healthy
    env_file: .env

  # Event-data ingester: same pattern as ingest_movement.
  ingest_events:
    build:
      context: .
      dockerfile: Dockerfile
    command: sh -c "python run_migrations.py && python ingest_events_rev.py"
    restart: always
    depends_on:
      timescale_db:
        condition: service_healthy
    env_file: .env

  # FastAPI webhook endpoint served by uvicorn on port 8888.
  webhook_receiver:
    build:
      context: .
      dockerfile: Dockerfile
    command: sh -c "python run_migrations.py && uvicorn webhook_receiver_rev:app --host 0.0.0.0 --port 8888 --workers 2"
    restart: always
    depends_on:
      timescale_db:
        condition: service_healthy
    env_file: .env
    # No host port binding — Coolify's Traefik proxy routes traffic internally.
    # Set the webhook domain in Coolify UI pointing to this service on port 8888.
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8888/health"]
      interval: 30s
      timeout: 5s
      retries: 3

  # Grafana with provisioning baked into a custom image (see ./grafana).
  grafana:
    build:
      context: ./grafana
      dockerfile: Dockerfile
    restart: always
    depends_on:
      timescale_db:
        condition: service_healthy
    env_file: .env
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD}
      - GF_USERS_DEFAULT_THEME=dark
      - GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH=/etc/grafana/provisioning/dashboards-json/noc_fleet_dashboard.json
    volumes:
      - grafana-data:/var/lib/grafana
    # Provisioning is baked into the image via grafana/Dockerfile — no bind mount needed.
    # COOLIFY DOMAIN LOGIC:
    # You will set the actual URL in the Coolify UI,
    # but the service needs to expose port 3000 internally.

  # Scheduled pg_dump uploads to rustfs (S3-compatible) object storage.
  db_backup:
    build:
      context: ./backup
      dockerfile: Dockerfile
    restart: always
    depends_on:
      timescale_db:
        condition: service_healthy
    env_file: .env
    environment:
      # pg_dump → rustfs. Credentials from .env (RUSTFS_*).
      # BACKUP_TIMES_UTC: comma-separated HH:MM list. Default: 4×/day.
      - BACKUP_TIMES_UTC=${BACKUP_TIMES_UTC:-02:30,08:30,14:30,20:30}
      - BACKUP_KEEP_DAYS=${BACKUP_KEEP_DAYS:-30}
      - BACKUP_RUN_ON_START=${BACKUP_RUN_ON_START:-0}
      - RUSTFS_ENDPOINT=${RUSTFS_ENDPOINT}
      - RUSTFS_ACCESS_KEY=${RUSTFS_ACCESS_KEY}
      - RUSTFS_SECRET_KEY=${RUSTFS_SECRET_KEY}
      - RUSTFS_BUCKET=${RUSTFS_BUCKET:-fleet-db}

# Named volumes: explicit `name:` pins them so Coolify/compose project prefixes
# don't change the on-disk volume identity across redeploys.
volumes:
  timescale-data:
    name: timescale-data
  grafana-data:
    name: grafana-data