services:
  # TimescaleDB (Postgres 16 + Timescale 2.15) — backing store for all app services.
  timescale_db:
    image: timescale/timescaledb-ha:pg16-ts2.15
    restart: always
    environment:
      - POSTGRES_DB=${POSTGRES_DB}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
    volumes:
      - timescale-data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
      interval: 10s
      timeout: 5s
      retries: 5

  # NOTE(review): ingest_movement, ingest_events, and webhook_receiver all run
  # run_migrations.py on startup and may start concurrently once the DB is
  # healthy — confirm the migration runner is safe to run in parallel (e.g.
  # uses an advisory lock), or serialize it behind a dedicated one-shot service.
  ingest_movement:
    build:
      context: .
      dockerfile: Dockerfile
    command: sh -c "python run_migrations.py && python ingest_movement_rev.py"
    restart: always
    depends_on:
      timescale_db:
        condition: service_healthy
    env_file: .env

  ingest_events:
    build:
      context: .
      dockerfile: Dockerfile
    command: sh -c "python run_migrations.py && python ingest_events_rev.py"
    restart: always
    depends_on:
      timescale_db:
        condition: service_healthy
    env_file: .env

  webhook_receiver:
    build:
      context: .
      dockerfile: Dockerfile
    command: sh -c "python run_migrations.py && uvicorn webhook_receiver_rev:app --host 0.0.0.0 --port 8888 --workers 2"
    restart: always
    depends_on:
      timescale_db:
        condition: service_healthy
    env_file: .env
    # No host port binding — Coolify's Traefik proxy routes traffic internally.
    # Set the webhook domain in Coolify UI pointing to this service on port 8888.
    healthcheck:
      # NOTE(review): requires `curl` inside the image built from ./Dockerfile — confirm it is installed.
      test: ["CMD", "curl", "-f", "http://localhost:8888/health"]
      interval: 30s
      timeout: 5s
      retries: 3

  grafana:
    build:
      context: ./grafana
      dockerfile: Dockerfile
    restart: always
    depends_on:
      timescale_db:
        condition: service_healthy
    env_file: .env
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD}
      - GF_USERS_DEFAULT_THEME=dark
      - GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH=/etc/grafana/provisioning/dashboards-json/noc_fleet_dashboard.json
    volumes:
      - grafana-data:/var/lib/grafana
    # Provisioning is baked into the image via grafana/Dockerfile — no bind mount needed.
    # COOLIFY DOMAIN LOGIC:
    # You will set the actual URL in the Coolify UI,
    # but the service needs to expose port 3000 internally.
# Named volumes with fixed names so data survives stack re-creation under the
# same volume name (rather than a project-prefixed one).
volumes:
  timescale-data:
    name: timescale-data
  grafana-data:
    name: grafana-data