The timescale/timescaledb-ha image uses /home/postgres/pgdata/data as PGDATA, not /var/lib/postgresql/data. The previous mount pointed at an empty directory that Postgres never wrote to, so every Coolify redeploy discarded the real data along with the container's overlay filesystem. Pin PGDATA explicitly and mount the named timescale-data volume at /home/postgres/pgdata so the actual data directory is persisted.

Co-Authored-By: Claude Opus 4.7 <noreply@anthropic.com>
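A quick way to confirm the fix on a live deployment (a sketch; it uses the service and volume names from this file, and psql ships in the timescaledb-ha image):

    # Postgres should now report the pinned data directory (/home/postgres/pgdata/data):
    docker compose exec timescale_db sh -c 'psql -U "$POSTGRES_USER" -c "SHOW data_directory;"'
    # And the named volume backing it should exist on the host:
    docker volume inspect timescale-data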
services:
  timescale_db:
    image: timescale/timescaledb-ha:pg16-ts2.15
    restart: always
    environment:
      - POSTGRES_DB=${POSTGRES_DB}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      # HA image's PGDATA is /home/postgres/pgdata/data, not /var/lib/postgresql/data.
      # Mount the named volume there so data survives container rebuilds.
      - PGDATA=/home/postgres/pgdata/data
    ports:
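      # Host 5433 → container 5432 (presumably to avoid a Postgres already bound to the host's 5432).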
      - "5433:5432"
    volumes:
      - timescale-data:/home/postgres/pgdata
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
      interval: 10s
      timeout: 5s
      retries: 5

  ingest_movement:
    build:
      context: .
      dockerfile: Dockerfile
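    # run_migrations.py also runs on start in ingest_events and webhook_receiver;
    # assumed idempotent and safe to invoke concurrently.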
    command: sh -c "python run_migrations.py && python ingest_movement_rev.py"
    restart: always
    depends_on:
      timescale_db:
        condition: service_healthy
    env_file: .env

  ingest_events:
    build:
      context: .
      dockerfile: Dockerfile
    command: sh -c "python run_migrations.py && python ingest_events_rev.py"
    restart: always
    depends_on:
      timescale_db:
        condition: service_healthy
    env_file: .env

  webhook_receiver:
    build:
      context: .
      dockerfile: Dockerfile
    command: sh -c "python run_migrations.py && uvicorn webhook_receiver_rev:app --host 0.0.0.0 --port 8888 --workers 2"
    restart: always
    depends_on:
      timescale_db:
        condition: service_healthy
    env_file: .env
    # No host port binding; Coolify's Traefik proxy routes traffic internally.
    # Set the webhook domain in Coolify UI pointing to this service on port 8888.
    healthcheck:
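      # Assumes curl is installed in the image built from Dockerfile.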
      test: ["CMD", "curl", "-f", "http://localhost:8888/health"]
      interval: 30s
      timeout: 5s
      retries: 3

  grafana:
    build:
      context: ./grafana
      dockerfile: Dockerfile
    restart: always
    depends_on:
      timescale_db:
        condition: service_healthy
    env_file: .env
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD}
      - GF_USERS_DEFAULT_THEME=dark
      - GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH=/etc/grafana/provisioning/dashboards-json/noc_fleet_dashboard.json
    volumes:
      - grafana-data:/var/lib/grafana
    # Provisioning is baked into the image via grafana/Dockerfile; no bind mount needed.
    # COOLIFY DOMAIN LOGIC:
    # You will set the actual URL in the Coolify UI,
    # but the service needs to expose port 3000 internally.

  db_backup:
    build:
      context: ./backup
      dockerfile: Dockerfile
    restart: always
    depends_on:
      timescale_db:
        condition: service_healthy
    env_file: .env
    environment:
      # Nightly pg_dump → rustfs. Credentials from .env (RUSTFS_*).
      - BACKUP_HOUR=${BACKUP_HOUR:-2}
      - BACKUP_MINUTE=${BACKUP_MINUTE:-30}
      - BACKUP_KEEP_DAYS=${BACKUP_KEEP_DAYS:-30}
      - BACKUP_RUN_ON_START=${BACKUP_RUN_ON_START:-0}
      - RUSTFS_ENDPOINT=${RUSTFS_ENDPOINT}
      - RUSTFS_ACCESS_KEY=${RUSTFS_ACCESS_KEY}
      - RUSTFS_SECRET_KEY=${RUSTFS_SECRET_KEY}
      - RUSTFS_BUCKET=${RUSTFS_BUCKET:-fleet-db}

volumes:
  timescale-data:
    name: timescale-data
  grafana-data:
    name: grafana-data
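For reference, the variables this compose file expects from .env (a sketch with placeholder values only; the endpoint URL is made up, and the BACKUP_* lines can be omitted since compose falls back to the defaults shown):

    POSTGRES_DB=fleet
    POSTGRES_USER=fleet
    POSTGRES_PASSWORD=change-me
    GRAFANA_ADMIN_PASSWORD=change-me
    # Optional backup schedule; defaults match the compose fallbacks:
    BACKUP_HOUR=2
    BACKUP_MINUTE=30
    BACKUP_KEEP_DAYS=30
    BACKUP_RUN_ON_START=0
    RUSTFS_ENDPOINT=https://rustfs.example.internal
    RUSTFS_ACCESS_KEY=change-me
    RUSTFS_SECRET_KEY=change-me
    RUSTFS_BUCKET=fleet-db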