diff --git a/Dockerfile b/Dockerfile
index 1b54ecb..a3cb6ff 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -7,6 +7,7 @@ COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/uv
 # Install system dependencies (Required for Postgres and Healthchecks)
 RUN apt-get update && apt-get install -y \
     libpq5 \
+    postgresql-client \
    curl \
     && rm -rf /var/lib/apt/lists/*
 
diff --git a/docker-compose.yaml b/docker-compose.yaml
index b279e0e..23745d4 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -1,6 +1,6 @@
 services:
   timescale_db:
-    image: timescale/timescaledb-ha:pg16-ts2.15-oss
+    image: timescale/timescaledb-ha:pg16-ts2.15
     restart: always
     environment:
       - POSTGRES_DB=${POSTGRES_DB}
diff --git a/run_migrations.py b/run_migrations.py
index f4d13a6..84e3b39 100644
--- a/run_migrations.py
+++ b/run_migrations.py
@@ -1,11 +1,13 @@
 """
-run_migrations.py — Idempotent SQL migration runner for Docker init service.
-Executes each .sql migration file in order using psycopg2.
-Tolerates re-run errors (e.g. "policy already exists") so deploys are safe.
+run_migrations.py — Idempotent SQL migration runner for Docker init.
+Uses psql (not psycopg2) so each statement runs independently —
+one error doesn't roll back the entire file.
 """
 
 import os
+import subprocess
 import sys
+
 import psycopg2
 
 DATABASE_URL = os.environ["DATABASE_URL"]
@@ -15,51 +17,71 @@ MIGRATIONS = [
     "03_webhook_schema_migration.sql",
 ]
 
+CRITICAL_TABLES = [
+    "tracksolid.devices",
+    "tracksolid.api_token_cache",
+    "tracksolid.ingestion_log",
+    "tracksolid.live_positions",
+    "tracksolid.position_history",
+    "tracksolid.trips",
+    "tracksolid.alarms",
+    "tracksolid.obd_readings",
+]
 
-def run_file(conn, path, filename):
-    """Execute a SQL file. Returns True on success, False on error."""
-    with open(path) as f:
-        sql = f.read()
-    try:
-        with conn.cursor() as cur:
-            cur.execute(sql)
-        print(f"  OK: {filename}")
-        return True
-    except psycopg2.Error as e:
-        msg = (e.pgerror or str(e)).strip().split("\n")[0]
-        print(f"  WARN: {filename}: {msg}")
-        # Connection is now in error state — must reset
-        conn.close()
+
+def run_file(path, filename):
+    """Execute a SQL file via psql. Returns True on success."""
+    print(f"Running {filename}...")
+    result = subprocess.run(
+        ["psql", DATABASE_URL, "-f", path],
+        capture_output=True, text=True,
+    )
+    # psql prints errors to stderr but continues by default
+    errors = [l for l in result.stderr.splitlines() if "ERROR:" in l]
+    if errors:
+        for e in errors:
+            print(f"  WARN: {e.strip()}")
         return False
+
+    print(f"  OK: {filename}")
+    return True
+
+
+def verify_schema():
+    """Verify critical tables exist. Exit 1 if not — prevents services from starting."""
+    print("Verifying schema...")
+    conn = psycopg2.connect(DATABASE_URL)
+    cur = conn.cursor()
+    missing = []
+    for table in CRITICAL_TABLES:
+        schema, name = table.split(".")
+        cur.execute(
+            "SELECT 1 FROM information_schema.tables WHERE table_schema=%s AND table_name=%s",
+            (schema, name),
+        )
+        if not cur.fetchone():
+            missing.append(table)
+    cur.close()
+    conn.close()
+
+    if missing:
+        print(f"FATAL: Missing critical tables: {', '.join(missing)}")
+        print("Schema bootstrap failed. Services cannot start.")
+        sys.exit(1)
+    print("  All critical tables verified.")
 
 
 def main():
     print("=== Database Migration Runner ===")
-    conn = psycopg2.connect(DATABASE_URL)
-    conn.autocommit = True
-
-    warnings = 0
     for sql_file in MIGRATIONS:
         path = os.path.join("/app", sql_file)
         if not os.path.exists(path):
             print(f"  SKIP: {sql_file} (not found)")
             continue
+        run_file(path, sql_file)
 
-        print(f"Running {sql_file}...")
-        ok = run_file(conn, path, sql_file)
-        if not ok:
-            warnings += 1
-            # Reconnect for the next file
-            conn = psycopg2.connect(DATABASE_URL)
-            conn.autocommit = True
-
-    conn.close()
-
-    if warnings:
-        print(f"Completed with {warnings} warning(s) (expected on re-deploy).")
-    else:
-        print("All migrations applied cleanly.")
-    sys.exit(0)
+    verify_schema()
+    print("Migrations complete.")
 
 
 if __name__ == "__main__":