From 108c1be05793a7017679b96c9c87b3f95bb8f12e Mon Sep 17 00:00:00 2001 From: David Kiania Date: Tue, 21 Apr 2026 12:53:23 +0300 Subject: [PATCH 1/6] feat: nightly pg_dump sidecar uploads to rustfs fleet-db bucket Adds a `db_backup` sidecar that dumps tracksolid_db every night at 02:30 UTC (configurable via BACKUP_HOUR/BACKUP_MINUTE), gzips the output, and uploads to s3://fleet-db/daily/_.sql.gz on the rustfs S3-compatible instance (s3.rahamafresh.com). Prunes objects older than BACKUP_KEEP_DAYS (default 30). Required .env additions (Coolify UI): RUSTFS_ENDPOINT=https://s3.rahamafresh.com RUSTFS_ACCESS_KEY=... RUSTFS_SECRET_KEY=... RUSTFS_BUCKET=fleet-db Mitigates data loss when Coolify service recreation wipes the service-ID-scoped timescale-data volume. Co-Authored-By: Claude Sonnet 4.6 --- backup/Dockerfile | 16 ++++++++++++ backup/backup_db.sh | 58 ++++++++++++++++++++++++++++++++++++++++++++ backup/entrypoint.sh | 26 ++++++++++++++++++++ docker-compose.yaml | 20 +++++++++++++++ 4 files changed, 120 insertions(+) create mode 100644 backup/Dockerfile create mode 100755 backup/backup_db.sh create mode 100755 backup/entrypoint.sh diff --git a/backup/Dockerfile b/backup/Dockerfile new file mode 100644 index 0000000..e9c1eae --- /dev/null +++ b/backup/Dockerfile @@ -0,0 +1,16 @@ +FROM alpine:3.20 + +RUN apk add --no-cache \ + postgresql16-client \ + aws-cli \ + gzip \ + tzdata \ + bash \ + coreutils + +WORKDIR /app +COPY backup_db.sh /app/backup_db.sh +COPY entrypoint.sh /app/entrypoint.sh +RUN chmod +x /app/backup_db.sh /app/entrypoint.sh + +ENTRYPOINT ["/app/entrypoint.sh"] diff --git a/backup/backup_db.sh b/backup/backup_db.sh new file mode 100755 index 0000000..7a724ce --- /dev/null +++ b/backup/backup_db.sh @@ -0,0 +1,58 @@ +#!/bin/sh +# Nightly pg_dump → rustfs (S3-compatible). +# Required env: POSTGRES_USER, POSTGRES_PASSWORD, POSTGRES_DB, +# RUSTFS_ENDPOINT, RUSTFS_ACCESS_KEY, RUSTFS_SECRET_KEY, RUSTFS_BUCKET. +# Optional: BACKUP_KEEP_DAYS (default 30), PGHOST (default timescale_db). + +set -eu + +: "${POSTGRES_USER:?}" +: "${POSTGRES_PASSWORD:?}" +: "${POSTGRES_DB:?}" +: "${RUSTFS_ENDPOINT:?}" +: "${RUSTFS_ACCESS_KEY:?}" +: "${RUSTFS_SECRET_KEY:?}" +: "${RUSTFS_BUCKET:?}" + +PGHOST="${PGHOST:-timescale_db}" +PGPORT="${PGPORT:-5432}" +KEEP_DAYS="${BACKUP_KEEP_DAYS:-30}" + +TS="$(date -u +%Y%m%d_%H%M%SZ)" +FILE="${POSTGRES_DB}_${TS}.sql.gz" +TMP="/tmp/${FILE}" + +export AWS_ACCESS_KEY_ID="$RUSTFS_ACCESS_KEY" +export AWS_SECRET_ACCESS_KEY="$RUSTFS_SECRET_KEY" +export AWS_DEFAULT_REGION="${RUSTFS_REGION:-us-east-1}" +export PGPASSWORD="$POSTGRES_PASSWORD" + +echo "[$(date -u +%FT%TZ)] pg_dump ${POSTGRES_DB}@${PGHOST} -> ${FILE}" +pg_dump -h "$PGHOST" -p "$PGPORT" -U "$POSTGRES_USER" -d "$POSTGRES_DB" \ + --no-owner --no-privileges --format=plain \ + | gzip -9 > "$TMP" + +SIZE=$(wc -c < "$TMP") +echo "[$(date -u +%FT%TZ)] dump size: ${SIZE} bytes" + +KEY="daily/${FILE}" +echo "[$(date -u +%FT%TZ)] uploading s3://${RUSTFS_BUCKET}/${KEY}" +aws --endpoint-url "$RUSTFS_ENDPOINT" s3 cp "$TMP" "s3://${RUSTFS_BUCKET}/${KEY}" + +rm -f "$TMP" + +# Prune anything older than KEEP_DAYS. 
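+# CUTOFF is a plain YYYYMMDD number so it can be compared numerically against the
+# date embedded in each object name below. The first date invocation is GNU
+# coreutils syntax (the image installs coreutils); the "-v" form is the BSD/macOS
+# equivalent, kept as a fallback for running the script outside the Alpine image.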
+CUTOFF="$(date -u -d "-${KEEP_DAYS} days" +%Y%m%d 2>/dev/null || date -u -v -"${KEEP_DAYS}"d +%Y%m%d)" +aws --endpoint-url "$RUSTFS_ENDPOINT" s3 ls "s3://${RUSTFS_BUCKET}/daily/" \ + | awk '{print $4}' \ + | while read -r OBJ; do + [ -z "$OBJ" ] && continue + OBJ_DATE=$(echo "$OBJ" | sed -n 's/.*_\([0-9]\{8\}\)_.*/\1/p') + [ -z "$OBJ_DATE" ] && continue + if [ "$OBJ_DATE" -lt "$CUTOFF" ]; then + echo "[$(date -u +%FT%TZ)] prune s3://${RUSTFS_BUCKET}/daily/${OBJ}" + aws --endpoint-url "$RUSTFS_ENDPOINT" s3 rm "s3://${RUSTFS_BUCKET}/daily/${OBJ}" + fi + done + +echo "[$(date -u +%FT%TZ)] backup complete" diff --git a/backup/entrypoint.sh b/backup/entrypoint.sh new file mode 100755 index 0000000..d107577 --- /dev/null +++ b/backup/entrypoint.sh @@ -0,0 +1,26 @@ +#!/bin/sh +# Loops forever: sleeps until the next BACKUP_HOUR:BACKUP_MINUTE UTC, then runs backup_db.sh. +# Defaults: 02:30 UTC nightly. + +set -eu + +HOUR="${BACKUP_HOUR:-2}" +MINUTE="${BACKUP_MINUTE:-30}" + +if [ "${BACKUP_RUN_ON_START:-0}" = "1" ]; then + echo "[$(date -u +%FT%TZ)] BACKUP_RUN_ON_START=1 — running backup immediately" + /app/backup_db.sh || echo "[$(date -u +%FT%TZ)] initial backup failed (continuing)" +fi + +while true; do + NOW_EPOCH=$(date -u +%s) + TARGET=$(date -u -d "today ${HOUR}:${MINUTE}:00" +%s 2>/dev/null \ + || date -u -j -f "%H:%M:%S" "${HOUR}:${MINUTE}:00" +%s) + if [ "$TARGET" -le "$NOW_EPOCH" ]; then + TARGET=$((TARGET + 86400)) + fi + SLEEP=$((TARGET - NOW_EPOCH)) + echo "[$(date -u +%FT%TZ)] next backup in ${SLEEP}s (at $(date -u -d "@${TARGET}" +%FT%TZ 2>/dev/null || date -u -r "${TARGET}" +%FT%TZ))" + sleep "$SLEEP" + /app/backup_db.sh || echo "[$(date -u +%FT%TZ)] backup failed (will retry tomorrow)" +done diff --git a/docker-compose.yaml b/docker-compose.yaml index df4b87d..b14559c 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -76,6 +76,26 @@ services: # You will set the actual URL in the Coolify UI, # but the service needs to expose port 3000 internally. + db_backup: + build: + context: ./backup + dockerfile: Dockerfile + restart: always + depends_on: + timescale_db: + condition: service_healthy + env_file: .env + environment: + # Nightly pg_dump → rustfs. Credentials from .env (RUSTFS_*). 
+ - BACKUP_HOUR=${BACKUP_HOUR:-2} + - BACKUP_MINUTE=${BACKUP_MINUTE:-30} + - BACKUP_KEEP_DAYS=${BACKUP_KEEP_DAYS:-30} + - BACKUP_RUN_ON_START=${BACKUP_RUN_ON_START:-0} + - RUSTFS_ENDPOINT=${RUSTFS_ENDPOINT} + - RUSTFS_ACCESS_KEY=${RUSTFS_ACCESS_KEY} + - RUSTFS_SECRET_KEY=${RUSTFS_SECRET_KEY} + - RUSTFS_BUCKET=${RUSTFS_BUCKET:-fleet-db} + volumes: timescale-data: name: timescale-data From 778686e7ce011ff84939e2cd8a628fd717d7d10c Mon Sep 17 00:00:00 2001 From: David Kiania Date: Tue, 21 Apr 2026 16:01:38 +0300 Subject: [PATCH 2/6] =?UTF-8?q?docs:=20CLAUDE.md=20audit=20=E2=80=94=20add?= =?UTF-8?q?=20backup=20sidecar,=20missing=20files,=20update=20open=20items?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-Authored-By: Claude Sonnet 4.6 --- CLAUDE.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/CLAUDE.md b/CLAUDE.md index 586282a..2b46201 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -45,6 +45,7 @@ This repository ingests the Tracksolid Pro API into a TimescaleDB/PostGIS databa | Visualisation | Grafana (provisioned via custom image) | | Workflow automation | n8n | | API source | Tracksolid Pro / Jimi IoT Open Platform (`eu-open.tracksolidpro.com/route/rest`) | +| Backup | pg_dump sidecar → rustfs S3 (`fleet-db` bucket), nightly | | Version control | Forgejo at `repo.rahamafresh.com` | --- @@ -84,6 +85,11 @@ docs/ # Reference docs (connections, API, KPIs, project co docs/superpowers/ # Pitch specs and implementation plans (not deployed code) 02_tracksolid_full_schema_rev.sql # Full schema bootstrap 03..06_*.sql # Incremental migrations (06 adds assigned_city, dispatch_log, ops.*) +07_analytics_views.sql # Analytics views migration (applied 2026-04-21) +Dockerfile # Custom image for ingest/webhook containers +pyproject.toml # Python project + uv dependency spec +OPERATIONS_MANUAL.md # Day-to-day ops runbook +backup/ # pg_dump sidecar scripts and config 01_BusinessAnalytics.md # SQL analytics library — read before writing queries 20260414_FS__Logistics - final_fixed.csv # 144-device driver/vehicle source data tracksolidApiDocumentation.md # API endpoint reference @@ -209,7 +215,7 @@ Latest full snapshot: `260412_baseline_report.md` | Priority | Item | |---|---| | HIGH | Run `import_drivers_csv.py --apply` — 144 X3/JC400P devices with names + plates waiting | -| HIGH | Register webhooks: `/pushobd` `/pushoil` `/pushtem` `/pushlbs` `/pushevent` | +| HIGH | Register webhooks: `/pushoil` `/pushtem` `/pushlbs` (auto-register on push now done — commit 257643c) | | HIGH | Investigate X3-63282 in Kampala — legitimate or unauthorised? | | MEDIUM | Set `fuel_100km` per vehicle type to activate fuel cost calculations | | MEDIUM | Investigate 44 silent devices (only 19 of 63 reporting) — SIM installed? Activated? | From 417627675ebdafa4f646d767a730048af7143738 Mon Sep 17 00:00:00 2001 From: David Kiania Date: Wed, 22 Apr 2026 18:21:25 +0300 Subject: [PATCH 3/6] fix: [FIX-M18] pull driverName/vehicleNumber/sim from detail endpoint MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit jimi.user.device.list returns null for vehicleName, vehicleNumber, driverName, driverPhone, and sim even after those fields are set via jimi.open.device.update — the values only surface through jimi.track.device.detail. sync_devices() now reads from dtl first with d as fallback, which unblocks backfill of the 144 CSV-driven updates pushed on 2026-04-22. 
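
For reference, the precedence rule this introduces (detail payload first, list
payload as fallback) as a minimal standalone sketch. merge_device_fields and
DETAIL_FIRST are hypothetical names used only for illustration, not code in
this repo:

    # Sketch of [FIX-M18]: prefer jimi.track.device.detail (dtl) over
    # jimi.user.device.list (d) for the fields the list endpoint nulls out.
    DETAIL_FIRST = ("vehicleName", "vehicleNumber", "driverName", "driverPhone", "sim")

    def merge_device_fields(d: dict, dtl: dict) -> dict:
        merged = dict(d)
        for field in DETAIL_FIRST:
            # a non-empty detail value wins; otherwise keep whatever the list returned
            merged[field] = dtl.get(field) or d.get(field)
        return merged
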
Co-Authored-By: Claude Opus 4.7 --- CLAUDE.md | 1 + ingest_movement_rev.py | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 2b46201..04f917b 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -171,6 +171,7 @@ All views carry a `COMMENT ON VIEW` referencing their spec — `\d+ tracksolid.v | FIX-M15 | `ingest_movement_rev.py` | `get_device_locations()` — on-demand precision refresh | | FIX-M16 | `ingest_movement_rev.py` | `distance` from API is metres → divide by 1000 before storing | | FIX-M17 | `ingest_movement_rev.py` | `sync_devices()` ON CONFLICT now updates all 26 fields (was 5) | +| FIX-M18 | `ingest_movement_rev.py` | `sync_devices()` pulls `vehicleName`/`vehicleNumber`/`driverName`/`driverPhone`/`sim` from `jimi.track.device.detail` — list endpoint returns null for these even when set | | FIX-E06 | `ingest_events_rev.py` | Alarm field mapping: `alertTypeId`/`alarmTypeName`/`alertTime` | | BUG-02 | Migration 04 | Historical `distance_m` rows ÷1,000,000 → renamed to `distance_km` | diff --git a/ingest_movement_rev.py b/ingest_movement_rev.py index 67ad1b2..9ccc332 100644 --- a/ingest_movement_rev.py +++ b/ingest_movement_rev.py @@ -30,6 +30,10 @@ REVISIONS (QA-Verified): jimi.device.location.get for up to 50 specific IMEIs on demand. Used for precision refreshes (alarm enrichment, stale device recovery) without waiting for the next full fleet sweep. + [FIX-M18] sync_devices: Pull vehicleName / vehicleNumber / driverName / + driverPhone / sim from jimi.track.device.detail first (list + endpoint returns null for these even when populated via + jimi.open.device.update). ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ """ @@ -134,10 +138,17 @@ def sync_devices(): last_synced_at = NOW(), updated_at = NOW() """, ( + # [FIX-M18] vehicleName/vehicleNumber/driverName/driverPhone/sim + # only surface via jimi.track.device.detail — list returns null. 
imei, clean(d.get("deviceName")), clean(d.get("mcType")), clean(d.get("mcTypeUseScope")), - clean(d.get("vehicleName")), clean(d.get("vehicleNumber")), clean(d.get("vehicleModels")), clean(d.get("vehicleIcon")), + clean(dtl.get("vehicleName") or d.get("vehicleName")), + clean(dtl.get("vehicleNumber") or d.get("vehicleNumber")), + clean(d.get("vehicleModels")), clean(d.get("vehicleIcon")), clean(dtl.get("vin")), clean(dtl.get("engineNumber")), clean(dtl.get("vehicleBrand")), clean_num(dtl.get("fuel_100km")), - clean(d.get("driverName")), clean(d.get("driverPhone")), clean(d.get("sim")), clean(dtl.get("iccid")), clean(dtl.get("imsi")), + clean(dtl.get("driverName") or d.get("driverName")), + clean(dtl.get("driverPhone") or d.get("driverPhone")), + clean(dtl.get("sim") or d.get("sim")), + clean(dtl.get("iccid")), clean(dtl.get("imsi")), clean(dtl.get("account")), clean(dtl.get("customerName")), clean(d.get("deviceGroupId")), clean(d.get("deviceGroup")), clean_ts(d.get("activationTime")), clean_ts(d.get("expiration")), clean_int(d.get("enabledFlag", 1)), clean(dtl.get("status", "active")), clean_num(dtl.get("currentMileage")) From fa110f4313485a34ae6cc9ead165fbb7ad1050a7 Mon Sep 17 00:00:00 2001 From: David Kiania Date: Fri, 24 Apr 2026 10:43:07 +0300 Subject: [PATCH 4/6] feat: [FIX-M19] multi-account ingest across fireside sub-accounts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fleet lives across three Tracksolid sub-accounts: fireside — 63 devices Fireside@HQ — 52 devices Fireside_MSA — 41 devices Previously sync_devices / poll_live_positions / poll_parking only queried a single TARGET_ACCOUNT, so ~64% of the fleet was invisible to the pipeline. Changes: - ts_shared_rev.py: new TARGETS list (env TRACKSOLID_TARGETS, comma-separated; falls back to the single TARGET_ACCOUNT). - ts_shared_rev.py: new get_active_imeis_by_target() helper that groups active IMEIs by their stored account so parking calls can pass the right account param per batch. - ingest_movement_rev.py: sync_devices and poll_live_positions loop over every target and dedupe by IMEI before upserting. poll_parking loops over imeis_by_target so each batch carries the matching account. - CLAUDE.md: FIX-M19 entry. Requires new env var TRACKSOLID_TARGETS="fireside,Fireside@HQ,Fireside_MSA" on the ingest services in Coolify. Co-Authored-By: Claude Opus 4.7 --- CLAUDE.md | 1 + ingest_movement_rev.py | 138 +++++++++++++++++++++++++---------------- ts_shared_rev.py | 23 +++++++ 3 files changed, 109 insertions(+), 53 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 04f917b..f7b27ac 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -172,6 +172,7 @@ All views carry a `COMMENT ON VIEW` referencing their spec — `\d+ tracksolid.v | FIX-M16 | `ingest_movement_rev.py` | `distance` from API is metres → divide by 1000 before storing | | FIX-M17 | `ingest_movement_rev.py` | `sync_devices()` ON CONFLICT now updates all 26 fields (was 5) | | FIX-M18 | `ingest_movement_rev.py` | `sync_devices()` pulls `vehicleName`/`vehicleNumber`/`driverName`/`driverPhone`/`sim` from `jimi.track.device.detail` — list endpoint returns null for these even when set | +| FIX-M19 | `ts_shared_rev.py`, `ingest_movement_rev.py` | Multi-account support: fleet spans `fireside`, `Fireside@HQ`, `Fireside_MSA` (156 devices total). `sync_devices`, `poll_live_positions`, `poll_parking` iterate `TRACKSOLID_TARGETS` (comma-separated env var). 
New helper `get_active_imeis_by_target()` scopes parking calls to the right account | | FIX-E06 | `ingest_events_rev.py` | Alarm field mapping: `alertTypeId`/`alarmTypeName`/`alertTime` | | BUG-02 | Migration 04 | Historical `distance_m` rows ÷1,000,000 → renamed to `distance_km` | diff --git a/ingest_movement_rev.py b/ingest_movement_rev.py index 9ccc332..90651d5 100644 --- a/ingest_movement_rev.py +++ b/ingest_movement_rev.py @@ -34,6 +34,10 @@ REVISIONS (QA-Verified): driverPhone / sim from jimi.track.device.detail first (list endpoint returns null for these even when populated via jimi.open.device.update). + [FIX-M19] Multi-account support: the fleet is split across multiple + Tracksolid sub-accounts. sync_devices, poll_live_positions + and poll_parking now iterate every target in TRACKSOLID_TARGETS + and dedupe/scope per-target before writing. ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ """ @@ -46,8 +50,10 @@ from psycopg2.extras import execute_values from ts_shared_rev import ( TARGET_ACCOUNT, + TARGETS, api_post, get_active_imeis, + get_active_imeis_by_target, get_conn, get_token, is_valid_fix, @@ -67,14 +73,26 @@ setup_shutdown(log) # ── 1. Device Registry Sync (Daily) ────────────────────────────────────────── def sync_devices(): - log.info("Syncing device registry...") + log.info("Syncing device registry across %d target(s): %s", len(TARGETS), TARGETS) t0, token = time.time(), get_token() if not token: return - resp = api_post("jimi.user.device.list", {"target": TARGET_ACCOUNT}, token) - if resp.get("code") != 0: return + # [FIX-M19] Fleet is split across multiple sub-accounts. Aggregate the + # device list from every configured target and dedupe by IMEI. + devices_by_imei: dict[str, dict] = {} + for target in TARGETS: + resp = api_post("jimi.user.device.list", {"target": target}, token) + if resp.get("code") != 0: + log.warning("device.list failed for target=%s: code=%s msg=%s", + target, resp.get("code"), resp.get("message")) + continue + for d in (resp.get("result") or []): + imei = d.get("imei") + if imei: + devices_by_imei[imei] = d - devices = resp.get("result") or [] + devices = list(devices_by_imei.values()) + log.info("Aggregated %d unique devices across targets.", len(devices)) upserted = 0 # Fetch per-device detail in parallel — previously an N+1 blocker where @@ -165,10 +183,21 @@ def poll_live_positions(): t0, token = time.time(), get_token() if not token: return - resp = api_post("jimi.user.device.location.list", {"target": TARGET_ACCOUNT, "map_type": "GOOGLE"}, token) - if resp.get("code") != 0: return + # [FIX-M19] Iterate every target and dedupe by IMEI (keep last). + positions_by_imei: dict[str, dict] = {} + for target in TARGETS: + resp = api_post("jimi.user.device.location.list", + {"target": target, "map_type": "GOOGLE"}, token) + if resp.get("code") != 0: + log.warning("location.list failed for target=%s: code=%s msg=%s", + target, resp.get("code"), resp.get("message")) + continue + for p in (resp.get("result") or []): + imei = p.get("imei") + if imei: + positions_by_imei[imei] = p - positions = resp.get("result") or [] + positions = list(positions_by_imei.values()) upserted, inserted = 0, 0 with get_conn() as conn: @@ -288,63 +317,66 @@ def poll_trips(): def poll_parking(): t0 = time.time() - token, imeis = get_token(), get_active_imeis() - if not token or not imeis: return + # [FIX-M19] Parking requires an `account` param tied to the IMEI's + # sub-account — call per target with that target's IMEIs only. 
+ token, imeis_by_target = get_token(), get_active_imeis_by_target() + if not token or not imeis_by_target: return end_ts = datetime.now(timezone.utc) start_ts = end_ts - timedelta(hours=1) + total_imei = sum(len(v) for v in imeis_by_target.values()) inserted = 0 with get_conn() as conn: with conn.cursor() as cur: - for i in range(0, len(imeis), 50): - batch = imeis[i:i+50] - # [FIX-M13] Added account + acc_type=0 (all stop types). Without these - # the API returns empty results even when parking events exist. - resp = api_post("jimi.open.platform.report.parking", { - "account": TARGET_ACCOUNT, - "imeis": ",".join(batch), - "begin_time": start_ts.strftime("%Y-%m-%d %H:%M:%S"), - "end_time": end_ts.strftime("%Y-%m-%d %H:%M:%S"), - "acc_type": 0, - }, token) + for target, target_imeis in imeis_by_target.items(): + for i in range(0, len(target_imeis), 50): + batch = target_imeis[i:i+50] + # [FIX-M13] account + acc_type=0 required for non-empty results. + resp = api_post("jimi.open.platform.report.parking", { + "account": target, + "imeis": ",".join(batch), + "begin_time": start_ts.strftime("%Y-%m-%d %H:%M:%S"), + "end_time": end_ts.strftime("%Y-%m-%d %H:%M:%S"), + "acc_type": 0, + }, token) - events = resp.get("result") or [] - for p in events: - try: - cur.execute("SAVEPOINT sp") - imei = p.get("imei") - start_time = clean_ts(p.get("startTime")) - if not imei or not start_time: + events = resp.get("result") or [] + for p in events: + try: + cur.execute("SAVEPOINT sp") + imei = p.get("imei") + start_time = clean_ts(p.get("startTime")) + if not imei or not start_time: + cur.execute("RELEASE SAVEPOINT sp") + continue + lat, lng = clean_num(p.get("lat")), clean_num(p.get("lng")) + cur.execute(""" + INSERT INTO tracksolid.parking_events ( + imei, event_type, start_time, end_time, + duration_seconds, geom, address + ) VALUES ( + %s, 'parking', %s, %s, %s, + CASE WHEN %s IS NOT NULL AND %s IS NOT NULL + THEN ST_SetSRID(ST_MakePoint(%s, %s), 4326) + ELSE NULL END, + %s + ) ON CONFLICT (imei, start_time, event_type) DO NOTHING + """, ( + imei, start_time, clean_ts(p.get("endTime")), + clean_int(p.get("durSecond")), # [FIX-M13] API returns durSecond, not seconds + lng, lat, lng, lat, + clean(p.get("address")) + )) cur.execute("RELEASE SAVEPOINT sp") - continue - lat, lng = clean_num(p.get("lat")), clean_num(p.get("lng")) - cur.execute(""" - INSERT INTO tracksolid.parking_events ( - imei, event_type, start_time, end_time, - duration_seconds, geom, address - ) VALUES ( - %s, 'parking', %s, %s, %s, - CASE WHEN %s IS NOT NULL AND %s IS NOT NULL - THEN ST_SetSRID(ST_MakePoint(%s, %s), 4326) - ELSE NULL END, - %s - ) ON CONFLICT (imei, start_time, event_type) DO NOTHING - """, ( - imei, start_time, clean_ts(p.get("endTime")), - clean_int(p.get("durSecond")), # [FIX-M13] API returns durSecond, not seconds - lng, lat, lng, lat, - clean(p.get("address")) - )) - cur.execute("RELEASE SAVEPOINT sp") - inserted += cur.rowcount - except Exception: - cur.execute("ROLLBACK TO SAVEPOINT sp") - log.warning("Failed to process parking for %s", p.get("imei"), exc_info=True) + inserted += cur.rowcount + except Exception: + cur.execute("ROLLBACK TO SAVEPOINT sp") + log.warning("Failed to process parking for %s", p.get("imei"), exc_info=True) - log_ingestion(cur, "jimi.open.platform.report.parking", len(imeis), 0, inserted, + log_ingestion(cur, "jimi.open.platform.report.parking", total_imei, 0, inserted, int((time.time() - t0) * 1000), True) - log.info("Parking: %d events processed.", inserted) + log.info("Parking: %d 
events processed across %d target(s).", inserted, len(imeis_by_target)) # ── 5. High-Resolution GPS Trail (Every 30m) — POLL-01 ─────────────────────── diff --git a/ts_shared_rev.py b/ts_shared_rev.py index 251575f..e782faf 100644 --- a/ts_shared_rev.py +++ b/ts_shared_rev.py @@ -48,6 +48,12 @@ APP_KEY = _require_env("TRACKSOLID_APP_KEY") APP_SECRET = _require_env("TRACKSOLID_APP_SECRET") USER_ID = _require_env("TRACKSOLID_USER_ID") TARGET_ACCOUNT = os.getenv("TRACKSOLID_TARGET_ACCOUNT", USER_ID) +# [FIX-M19] Multi-account support: the fleet is split across multiple +# Tracksolid sub-accounts (e.g. fireside, Fireside@HQ, Fireside_MSA). +# TRACKSOLID_TARGETS is a comma-separated list; falls back to TARGET_ACCOUNT. +TARGETS = [ + t.strip() for t in os.getenv("TRACKSOLID_TARGETS", "").split(",") if t.strip() +] or [TARGET_ACCOUNT] PWD_MD5 = _require_env("TRACKSOLID_PWD_MD5") DATABASE_URL = _require_env("DATABASE_URL") API_BASE_URL = os.getenv("TRACKSOLID_API_URL", "https://eu-open.tracksolidpro.com/route/rest") @@ -229,6 +235,23 @@ def get_active_imeis() -> list[str]: cur.execute("SELECT imei FROM tracksolid.devices WHERE enabled_flag = 1") return [r[0] for r in cur.fetchall()] +def get_active_imeis_by_target() -> dict[str, list[str]]: + """[FIX-M19] Group active IMEIs by their Tracksolid sub-account so + endpoints that require an `account`/`target` param (e.g. parking) can + scope per-target calls. IMEIs with a NULL account are bucketed under + the primary TARGET_ACCOUNT as a safe default.""" + with get_conn() as conn: + with conn.cursor() as cur: + cur.execute(""" + SELECT COALESCE(account, %s) AS target, imei + FROM tracksolid.devices + WHERE enabled_flag = 1 + """, (TARGET_ACCOUNT,)) + out: dict[str, list[str]] = {} + for target, imei in cur.fetchall(): + out.setdefault(target, []).append(imei) + return out + def log_ingestion(cur, endpoint: str, imei_count: int, upserted: int, inserted: int, duration_ms: int, success: bool, error_code: str = None, error_msg: str = None): cur.execute(""" INSERT INTO tracksolid.ingestion_log From 3807d9554c839edb4b00651f399fcfeddf38e14d Mon Sep 17 00:00:00 2001 From: David Kiania Date: Fri, 24 Apr 2026 10:59:53 +0300 Subject: [PATCH 5/6] fix(db): mount TimescaleDB HA volume at correct PGDATA path The timescale/timescaledb-ha image uses /home/postgres/pgdata/data as PGDATA, not /var/lib/postgresql/data. The previous mount pointed at an empty directory that postgres never wrote to, so Coolify redeploys destroyed all data with the container's overlay filesystem. Pin PGDATA explicitly and move the named timescale-data volume to /home/postgres/pgdata so the real data dir is persisted. Co-Authored-By: Claude Opus 4.7 --- docker-compose.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index b14559c..34b5b80 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -6,10 +6,13 @@ services: - POSTGRES_DB=${POSTGRES_DB} - POSTGRES_USER=${POSTGRES_USER} - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + # HA image's PGDATA is /home/postgres/pgdata/data, not /var/lib/postgresql/data. + # Mount the named volume there so data survives container rebuilds. 
+ - PGDATA=/home/postgres/pgdata/data ports: - "5433:5432" volumes: - - timescale-data:/var/lib/postgresql/data + - timescale-data:/home/postgres/pgdata healthcheck: test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"] interval: 10s From c585e674821b0c0340071bccb8f51361119ea6fe Mon Sep 17 00:00:00 2001 From: David Kiania Date: Fri, 24 Apr 2026 11:00:02 +0300 Subject: [PATCH 6/6] feat(backup): run pg_dump multiple times per day via BACKUP_TIMES_UTC Replace the single BACKUP_HOUR/BACKUP_MINUTE slot with a comma-separated list of UTC times. Scheduler walks all slots and sleeps until the soonest future one, so four daily backups become a one-line env change: BACKUP_TIMES_UTC=02:30,08:30,14:30,20:30 (default) Legacy BACKUP_HOUR/BACKUP_MINUTE still honored as a single slot for backwards compatibility with existing .env files. Co-Authored-By: Claude Opus 4.7 --- backup/entrypoint.sh | 58 +++++++++++++++++++++++++++++++++++--------- docker-compose.yaml | 6 ++--- 2 files changed, 50 insertions(+), 14 deletions(-) diff --git a/backup/entrypoint.sh b/backup/entrypoint.sh index d107577..f74eaed 100755 --- a/backup/entrypoint.sh +++ b/backup/entrypoint.sh @@ -1,26 +1,62 @@ #!/bin/sh -# Loops forever: sleeps until the next BACKUP_HOUR:BACKUP_MINUTE UTC, then runs backup_db.sh. -# Defaults: 02:30 UTC nightly. +# Runs backup_db.sh at each time in BACKUP_TIMES_UTC (comma-separated HH:MM list). +# Defaults: 02:30, 08:30, 14:30, 20:30 UTC — four backups per day. +# +# Back-compat: if BACKUP_TIMES_UTC is unset but legacy BACKUP_HOUR/BACKUP_MINUTE are, +# those are honored as a single slot. set -eu -HOUR="${BACKUP_HOUR:-2}" -MINUTE="${BACKUP_MINUTE:-30}" +if [ -n "${BACKUP_TIMES_UTC:-}" ]; then + TIMES="$BACKUP_TIMES_UTC" +elif [ -n "${BACKUP_HOUR:-}" ] || [ -n "${BACKUP_MINUTE:-}" ]; then + TIMES="$(printf '%02d:%02d' "${BACKUP_HOUR:-2}" "${BACKUP_MINUTE:-30}")" +else + TIMES="02:30,08:30,14:30,20:30" +fi + +echo "[$(date -u +%FT%TZ)] backup schedule (UTC): ${TIMES}" if [ "${BACKUP_RUN_ON_START:-0}" = "1" ]; then echo "[$(date -u +%FT%TZ)] BACKUP_RUN_ON_START=1 — running backup immediately" /app/backup_db.sh || echo "[$(date -u +%FT%TZ)] initial backup failed (continuing)" fi +# Compute epoch for "today HH:MM UTC" on both GNU and BSD date. +slot_to_epoch_today() { + HM="$1" + date -u -d "today ${HM}:00" +%s 2>/dev/null \ + || date -u -j -f "%H:%M:%S" "${HM}:00" +%s +} + while true; do NOW_EPOCH=$(date -u +%s) - TARGET=$(date -u -d "today ${HOUR}:${MINUTE}:00" +%s 2>/dev/null \ - || date -u -j -f "%H:%M:%S" "${HOUR}:${MINUTE}:00" +%s) - if [ "$TARGET" -le "$NOW_EPOCH" ]; then - TARGET=$((TARGET + 86400)) + NEXT="" + # Find the smallest TARGET > NOW across all configured slots (rolling to tomorrow if needed). 
+ OLDIFS="$IFS" + IFS=',' + for HM in $TIMES; do + HM="$(echo "$HM" | tr -d ' ')" + [ -z "$HM" ] && continue + T=$(slot_to_epoch_today "$HM") + if [ "$T" -le "$NOW_EPOCH" ]; then + T=$((T + 86400)) + fi + if [ -z "$NEXT" ] || [ "$T" -lt "$NEXT" ]; then + NEXT="$T" + fi + done + IFS="$OLDIFS" + + if [ -z "$NEXT" ]; then + echo "[$(date -u +%FT%TZ)] no valid times in BACKUP_TIMES_UTC='${TIMES}'; sleeping 1h" + sleep 3600 + continue fi - SLEEP=$((TARGET - NOW_EPOCH)) - echo "[$(date -u +%FT%TZ)] next backup in ${SLEEP}s (at $(date -u -d "@${TARGET}" +%FT%TZ 2>/dev/null || date -u -r "${TARGET}" +%FT%TZ))" + + SLEEP=$((NEXT - NOW_EPOCH)) + NEXT_ISO=$(date -u -d "@${NEXT}" +%FT%TZ 2>/dev/null || date -u -r "${NEXT}" +%FT%TZ) + echo "[$(date -u +%FT%TZ)] next backup in ${SLEEP}s (at ${NEXT_ISO})" sleep "$SLEEP" - /app/backup_db.sh || echo "[$(date -u +%FT%TZ)] backup failed (will retry tomorrow)" + /app/backup_db.sh || echo "[$(date -u +%FT%TZ)] backup failed (will retry at next slot)" done diff --git a/docker-compose.yaml b/docker-compose.yaml index 34b5b80..26618b1 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -89,9 +89,9 @@ services: condition: service_healthy env_file: .env environment: - # Nightly pg_dump → rustfs. Credentials from .env (RUSTFS_*). - - BACKUP_HOUR=${BACKUP_HOUR:-2} - - BACKUP_MINUTE=${BACKUP_MINUTE:-30} + # pg_dump → rustfs. Credentials from .env (RUSTFS_*). + # BACKUP_TIMES_UTC: comma-separated HH:MM list. Default: 4×/day. + - BACKUP_TIMES_UTC=${BACKUP_TIMES_UTC:-02:30,08:30,14:30,20:30} - BACKUP_KEEP_DAYS=${BACKUP_KEEP_DAYS:-30} - BACKUP_RUN_ON_START=${BACKUP_RUN_ON_START:-0} - RUSTFS_ENDPOINT=${RUSTFS_ENDPOINT}
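
For clarity, the slot-selection rule the new entrypoint.sh implements (take the
soonest future HH:MM slot from BACKUP_TIMES_UTC, rolling already-passed slots to
tomorrow), restated as a small Python sketch. Illustrative only; next_backup is a
name chosen here and is not part of the sidecar:

    from datetime import datetime, timedelta, timezone

    def next_backup(now_utc: datetime, times_utc: str = "02:30,08:30,14:30,20:30") -> datetime:
        # Soonest future slot; a slot already past today rolls over to tomorrow.
        candidates = []
        for slot in times_utc.split(","):
            hh, mm = slot.strip().split(":")
            t = now_utc.replace(hour=int(hh), minute=int(mm), second=0, microsecond=0)
            if t <= now_utc:
                t += timedelta(days=1)
            candidates.append(t)
        return min(candidates)

    # e.g. at 09:00 UTC the next run is the 14:30 slot the same day
    print(next_backup(datetime(2026, 4, 24, 9, 0, tzinfo=timezone.utc)))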