2026-04-08 14:02:09 +00:00
|
|
|
"""
|
2026-04-08 14:17:58 +00:00
|
|
|
run_migrations.py — Idempotent SQL migration runner for Docker init.
|
2026-04-10 20:34:57 +00:00
|
|
|
|
|
|
|
|
Runs automatically on every container startup via docker-compose command:
|
|
|
|
|
sh -c "python run_migrations.py && python <service>.py"
|
|
|
|
|
|
|
|
|
|
How it works:
|
|
|
|
|
1. Creates tracksolid.schema_migrations table on first run.
|
|
|
|
|
2. Skips any migration already recorded in that table.
|
|
|
|
|
3. Applies pending migrations in filename order.
|
|
|
|
|
4. Records each successful migration so it never runs twice.
|
|
|
|
|
5. Verifies critical tables exist before allowing the service to start.
|
|
|
|
|
|
|
|
|
|
To add a new migration: create NN_description.sql in the repo and add
|
|
|
|
|
the filename to MIGRATIONS below. Coolify will apply it on next deploy.
|
2026-04-08 14:02:09 +00:00
|
|
|
"""
|
|
|
|
|
|
|
|
|
|
import os
|
2026-04-08 14:17:58 +00:00
|
|
|
import subprocess
|
2026-04-08 14:02:09 +00:00
|
|
|
import sys
|
2026-04-08 14:17:58 +00:00
|
|
|
|
2026-04-08 14:02:09 +00:00
|
|
|
import psycopg2
|
|
|
|
|
|
|
|
|
|
# Postgres connection string. os.environ[...] (not .get) raises KeyError at
# import time when the variable is unset, so the container fails fast before
# any migration is attempted.
DATABASE_URL = os.environ["DATABASE_URL"]
|
|
|
|
|
|
2026-04-10 20:34:57 +00:00
|
|
|
# ── Add new migration filenames here in order ─────────────────────────────────
# Applied top-to-bottom by main(); each filename is resolved relative to /app.
MIGRATIONS = [
    "02_tracksolid_full_schema_rev.sql",
    "03_webhook_schema_migration.sql",
    "04_bug_fix_migration.sql",         # distance_m → distance_km rename + correction
    "05_enhancement_migration.sql",     # new tables, OBD columns, dwh_gold expansion
]
|
|
|
|
|
|
2026-04-10 20:34:57 +00:00
|
|
|
# ── Tables that must exist before the service is allowed to start ─────────────
# verify_schema() checks each "schema.table" entry and exits 1 if any is
# missing, which blocks the dependent service from starting.
CRITICAL_TABLES = [
    "tracksolid.devices",
    "tracksolid.api_token_cache",
    "tracksolid.ingestion_log",
    "tracksolid.live_positions",
    "tracksolid.position_history",
    "tracksolid.trips",
    "tracksolid.alarms",
    "tracksolid.obd_readings",
    "tracksolid.device_events",
    "tracksolid.fuel_readings",
    "tracksolid.temperature_readings",
    "tracksolid.lbs_readings",
    "tracksolid.geofences",
]
|
|
|
|
|
|
2026-04-08 14:02:09 +00:00
|
|
|
|
2026-04-10 20:34:57 +00:00
|
|
|
def get_conn():
    """Open and return a new psycopg2 connection to DATABASE_URL."""
    conn = psycopg2.connect(DATABASE_URL)
    return conn
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def ensure_tracking_table(conn):
    """Create schema and schema_migrations tracking table if they don't exist."""
    # The tracksolid schema is normally created by migration 02, but the
    # tracking table has to exist before any migration runs on a fresh DB,
    # so the schema is created here first.
    ddl_statements = (
        "CREATE SCHEMA IF NOT EXISTS tracksolid",
        """
        CREATE TABLE IF NOT EXISTS tracksolid.schema_migrations (
            filename TEXT PRIMARY KEY,
            applied_at TIMESTAMPTZ NOT NULL DEFAULT now()
        )
        """,
    )
    with conn.cursor() as cur:
        for stmt in ddl_statements:
            cur.execute(stmt)
    conn.commit()
|
|
|
|
|
|
|
|
|
|
|
2026-04-10 20:43:44 +00:00
|
|
|
def seed_pre_tracking_migrations(conn):
    """Mark migrations as applied when their schema objects already exist.

    Runs on every startup and is safe to repeat (the INSERT uses ON CONFLICT
    DO NOTHING). It prevents re-running non-idempotent statements when a
    second container starts after another has already applied a migration,
    and when the tracking table is introduced to a database that was
    migrated before the table existed.

    Each migration is detected by probing for one sentinel object it creates:
        02 — tracksolid.devices table exists
        03 — position_history.altitude column exists
        04 — trips.distance_km column exists (renamed from distance_m)
        05 — tracksolid.device_events table exists (new in 05)
    """
    # filename -> existence probe; dicts preserve insertion order, so probes
    # run in the same migration order as before.
    sentinel_probes = {
        "02_tracksolid_full_schema_rev.sql": (
            "SELECT 1 FROM information_schema.tables "
            "WHERE table_schema='tracksolid' AND table_name='devices'"
        ),
        "03_webhook_schema_migration.sql": (
            "SELECT 1 FROM information_schema.columns "
            "WHERE table_schema='tracksolid' AND table_name='position_history' "
            "AND column_name='altitude'"
        ),
        "04_bug_fix_migration.sql": (
            "SELECT 1 FROM information_schema.columns "
            "WHERE table_schema='tracksolid' AND table_name='trips' "
            "AND column_name='distance_km'"
        ),
        "05_enhancement_migration.sql": (
            "SELECT 1 FROM information_schema.tables "
            "WHERE table_schema='tracksolid' AND table_name='device_events'"
        ),
    }

    seeded = []
    with conn.cursor() as cur:
        for fname, probe in sentinel_probes.items():
            cur.execute(probe)
            if cur.fetchone() is None:
                continue  # sentinel absent: migration not yet applied
            cur.execute(
                "INSERT INTO tracksolid.schema_migrations (filename) "
                "VALUES (%s) ON CONFLICT DO NOTHING",
                (fname,),
            )
            seeded.append(fname)

    conn.commit()
    if seeded:
        print(f" Seeded as applied: {', '.join(seeded)}")
|
2026-04-10 20:43:44 +00:00
|
|
|
|
|
|
|
|
|
2026-04-10 20:34:57 +00:00
|
|
|
def already_applied(conn, filename):
    """Return True if *filename* is recorded in schema_migrations."""
    with conn.cursor() as cur:
        cur.execute(
            "SELECT 1 FROM tracksolid.schema_migrations WHERE filename = %s",
            (filename,),
        )
        row = cur.fetchone()
    return row is not None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def record_applied(conn, filename):
    """Record *filename* in schema_migrations (idempotent) and commit."""
    insert_sql = (
        "INSERT INTO tracksolid.schema_migrations (filename) VALUES (%s) ON CONFLICT DO NOTHING"
    )
    with conn.cursor() as cur:
        cur.execute(insert_sql, (filename,))
    conn.commit()
|
|
|
|
|
|
|
|
|
|
|
2026-04-08 14:17:58 +00:00
|
|
|
def run_file(path, filename):
    """Execute a SQL file via psql. Returns True on success.

    Failure is detected two ways:
      * any "ERROR:" line on stderr — psql continues past per-statement
        errors (ON_ERROR_STOP is not set), so stderr must be scanned; and
      * a non-zero exit code — a connection failure emits
        "psql: error: ..." with no "ERROR:" line, and previously would have
        been reported (and then recorded) as a successful migration.
    """
    # Bug fix: messages previously printed the literal "(unknown)" and the
    # filename parameter was unused.
    print(f" APPLY {filename} ...")
    result = subprocess.run(
        ["psql", DATABASE_URL, "-f", path],
        capture_output=True, text=True,
    )
    errors = [line for line in result.stderr.splitlines() if "ERROR:" in line]
    if errors or result.returncode != 0:
        for err in errors:
            print(f" ERROR: {err.strip()}")
        if not errors:
            # Non-zero exit with no ERROR: lines, e.g. could not connect.
            print(f" ERROR: psql exited {result.returncode}: {result.stderr.strip()}")
        return False
    print(f" OK {filename}")
    return True
|
|
|
|
|
|
|
|
|
|
|
2026-04-10 20:34:57 +00:00
|
|
|
def verify_schema(conn):
    """Verify critical tables exist. Exit 1 if missing — blocks service start."""
    print("Verifying schema...")
    missing = []
    with conn.cursor() as cur:
        for qualified in CRITICAL_TABLES:
            schema_name, table_name = qualified.split(".")
            cur.execute(
                "SELECT 1 FROM information_schema.tables "
                "WHERE table_schema=%s AND table_name=%s",
                (schema_name, table_name),
            )
            if cur.fetchone() is None:
                missing.append(qualified)

    if missing:
        print(f"FATAL: missing tables after migrations: {', '.join(missing)}")
        sys.exit(1)
    print(f" All {len(CRITICAL_TABLES)} critical tables verified.")
|
2026-04-08 14:02:09 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
def main():
    """Apply pending migrations, then verify the schema.

    Exits with status 1 — blocking the dependent service from starting — if
    any migration fails or a critical table is missing afterwards.
    """
    print("=== Database Migration Runner ===")

    conn = get_conn()
    # Robustness fix: close the connection even when a helper raises or
    # sys.exit() fires; previously conn leaked on any exception path.
    try:
        ensure_tracking_table(conn)
        seed_pre_tracking_migrations(conn)

        applied = skipped = 0
        for sql_file in MIGRATIONS:
            path = os.path.join("/app", sql_file)

            if not os.path.exists(path):
                print(f" SKIP {sql_file} (file not found in /app)")
                skipped += 1
                continue

            if already_applied(conn, sql_file):
                print(f" SKIP {sql_file} (already applied)")
                skipped += 1
                continue

            if run_file(path, sql_file):
                record_applied(conn, sql_file)
                applied += 1
            else:
                print(f"FATAL: migration {sql_file} failed — aborting.")
                sys.exit(1)

        print(f"\nMigrations: {applied} applied, {skipped} skipped.")

        # verify_schema() calls sys.exit(1) itself on failure.
        verify_schema(conn)
    finally:
        conn.close()
    print("Startup checks passed.\n")
|
2026-04-08 14:02:09 +00:00
|
|
|
|
|
|
|
|
|
|
|
|
|
# Entry point: run as a standalone script during container startup.
if __name__ == "__main__":
    main()
|