Add db_migrate init service to auto-run SQL schema on deploy
- New run_migrations.py: executes 02_*.sql and 03_*.sql in order - New db_migrate service: runs once before all other services start - All services now depend on db_migrate (service_completed_successfully) - Tolerates re-deploy: catches errors from already-existing objects Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
parent
b59616c7aa
commit
4a31de30b1
2 changed files with 88 additions and 11 deletions
|
|
@ -14,6 +14,17 @@ services:
|
|||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
db_migrate:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
command: python run_migrations.py
|
||||
depends_on:
|
||||
timescale_db:
|
||||
condition: service_healthy
|
||||
env_file: .env
|
||||
restart: "no"
|
||||
|
||||
ingest_movement:
|
||||
build:
|
||||
context: .
|
||||
|
|
@ -21,9 +32,9 @@ services:
|
|||
command: python ingest_movement_rev.py
|
||||
restart: always
|
||||
depends_on:
|
||||
timescale_db:
|
||||
condition: service_healthy
|
||||
env_file: .env # Coolify will inject variables here
|
||||
db_migrate:
|
||||
condition: service_completed_successfully
|
||||
env_file: .env
|
||||
|
||||
ingest_events:
|
||||
build:
|
||||
|
|
@ -32,8 +43,8 @@ services:
|
|||
command: python ingest_events_rev.py
|
||||
restart: always
|
||||
depends_on:
|
||||
timescale_db:
|
||||
condition: service_healthy
|
||||
db_migrate:
|
||||
condition: service_completed_successfully
|
||||
env_file: .env
|
||||
|
||||
webhook_receiver:
|
||||
|
|
@ -43,8 +54,8 @@ services:
|
|||
command: uvicorn webhook_receiver_rev:app --host 0.0.0.0 --port 8000 --workers 2
|
||||
restart: always
|
||||
depends_on:
|
||||
timescale_db:
|
||||
condition: service_healthy
|
||||
db_migrate:
|
||||
condition: service_completed_successfully
|
||||
env_file: .env
|
||||
# No host port binding — Coolify's Traefik proxy routes traffic internally.
|
||||
# Set the webhook domain in Coolify UI pointing to this service on port 8000.
|
||||
|
|
@ -58,18 +69,18 @@ services:
|
|||
image: grafana/grafana:11.0.0
|
||||
restart: always
|
||||
depends_on:
|
||||
timescale_db:
|
||||
condition: service_healthy
|
||||
db_migrate:
|
||||
condition: service_completed_successfully
|
||||
environment:
|
||||
- GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_ADMIN_PASSWORD}
|
||||
volumes:
|
||||
- grafana-data:/var/lib/grafana
|
||||
# COOLIFY DOMAIN LOGIC:
|
||||
# You will set the actual URL in the Coolify UI,
# but the service needs to expose port 3000 internally.
|
||||
|
||||
volumes:
|
||||
timescale-data:
|
||||
name: timescale-data
|
||||
grafana-data:
|
||||
name: grafana-data
|
||||
|
|
|
|||
66
run_migrations.py
Normal file
66
run_migrations.py
Normal file
|
|
@ -0,0 +1,66 @@
|
|||
"""
|
||||
run_migrations.py — Idempotent SQL migration runner for Docker init service.
|
||||
Executes each .sql migration file in order using psycopg2.
|
||||
Tolerates re-run errors (e.g. "policy already exists") so deploys are safe.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import psycopg2
|
||||
|
||||
DATABASE_URL = os.environ["DATABASE_URL"]
|
||||
|
||||
MIGRATIONS = [
|
||||
"02_tracksolid_full_schema_rev.sql",
|
||||
"03_webhook_schema_migration.sql",
|
||||
]
|
||||
|
||||
|
||||
def run_file(conn, path, filename):
    """Execute one SQL migration file on the given connection.

    Args:
        conn: Open psycopg2 connection; caller is expected to have
            enabled autocommit so each file commits independently.
        path: Filesystem path of the .sql file to execute.
        filename: Short name used only for log output.

    Returns:
        True on success, False on any psycopg2 error. On error the
        connection is closed (psycopg2 leaves it in a failed state),
        so the caller must reconnect before running further files.
    """
    with open(path) as f:
        sql = f.read()
    try:
        with conn.cursor() as cur:
            cur.execute(sql)
        # BUGFIX: original printed the literal "(unknown)" — the
        # filename parameter was never interpolated into the log line.
        print(f" OK: {filename}")
        return True
    except psycopg2.Error as e:
        # psycopg2 error messages are multi-line; keep only the first line.
        msg = (e.pgerror or str(e)).strip().split("\n")[0]
        print(f" WARN: {filename}: {msg}")
        # Connection is now in error state — must reset
        conn.close()
        return False
|
||||
|
||||
|
||||
def main():
    """Apply every file in MIGRATIONS in order, tolerating re-run errors.

    Missing files are skipped; a failed file counts as a warning and the
    database connection is re-opened (run_file closes it on error).
    Always exits 0 so the Docker init service is considered successful
    even when objects already exist from a previous deploy.
    """
    print("=== Database Migration Runner ===")
    conn = psycopg2.connect(DATABASE_URL)
    conn.autocommit = True

    warnings = 0
    for sql_file in MIGRATIONS:
        migration_path = os.path.join("/app", sql_file)
        if not os.path.exists(migration_path):
            print(f" SKIP: {sql_file} (not found)")
            continue

        print(f"Running {sql_file}...")
        if not run_file(conn, migration_path, sql_file):
            warnings += 1
            # run_file closed the broken connection; open a fresh one
            # so the remaining migrations can still be attempted.
            conn = psycopg2.connect(DATABASE_URL)
            conn.autocommit = True

    conn.close()

    if warnings:
        print(f"Completed with {warnings} warning(s) (expected on re-deploy).")
    else:
        print("All migrations applied cleanly.")
    sys.exit(0)
|
||||
|
||||
|
||||
# Entry point for the Docker init service: run all migrations once.
if __name__ == "__main__":
    main()
|
||||
Loading…
Reference in a new issue