#!/usr/bin/env bash
# =============================================================================
# migrate-to-postgres.sh
#
# Migrates the lean101-clients production stack from SQLite to PostgreSQL.
# Safe: backs up everything before touching anything. Old stack stays live
# until you confirm migration succeeded.
#
# HOW TO USE:
#   1. Copy this file to the server:
#        scp deploy/migrate-to-postgres.sh root@<host>:/srv/lean101-clients/deploy/
#
#   2. SSH in and run it directly (must be interactive — not piped):
#        ssh root@<host>
#        bash /srv/lean101-clients/deploy/migrate-to-postgres.sh
# =============================================================================

set -euo pipefail

# ── Config ────────────────────────────────────────────────────────────────────
WORK_DIR="/srv/lean101-clients"
BACKEND="lean101-clients-backend"
FRONTEND="lean101-clients-frontend"
NGINX="lean101-clients"
OLD_COMPOSE="docker-compose.yml"
NEW_COMPOSE="docker-compose.production.yml"
TIMESTAMP=$(date +%Y%m%d-%H%M%S)
BACKUP_DIR="/srv/lean101-clients-backup-$TIMESTAMP"
MIGRATE_SCRIPT="/tmp/lean101_migrate_data.py"
PG_DB_SERVICE="lean101-clients-db"
PG_IMAGE="postgres:16-alpine"

# ── Colour helpers ────────────────────────────────────────────────────────────
RED='\033[0;31m'; YELLOW='\033[1;33m'; GREEN='\033[0;32m'
CYAN='\033[0;36m'; BOLD='\033[1m'; RESET='\033[0m'

sep() { echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"; }
h1() { sep; echo -e "${BOLD}${CYAN} $1${RESET}"; sep; }
ok() { echo -e "   ${GREEN}✔${RESET} $1"; }
warn() { echo -e "   ${YELLOW}⚠${RESET} $1"; }
die() { echo -e "   ${RED}✘ FATAL: $1${RESET}" >&2; exit 1; }
info() { echo -e "   ${CYAN}→${RESET} $1"; }
prompt() { echo -e "\n   ${BOLD}$1${RESET}"; }

# ── Rollback instructions ─────────────────────────────────────────────────────
print_rollback() {
  echo ""
  echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
  echo -e "${YELLOW} ROLLBACK — to restore the original SQLite stack:${RESET}"
  echo ""
  echo "    cd $WORK_DIR"
  echo "    docker compose -f $NEW_COMPOSE --env-file .env.production down 2>/dev/null || true"
  echo "    docker compose -f $OLD_COMPOSE --env-file .env up -d"
  echo ""
  echo "    SQLite backup is at: $BACKUP_DIR/data_entry_app.db"
  echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
  echo ""
}
trap print_rollback ERR
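# NOTE: with `set -e`, the ERR trap fires when any unguarded command fails, so
# the rollback instructions above are printed automatically before the script exits.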

# =============================================================================
h1 "PHASE 0 — PRE-FLIGHT"
# =============================================================================

[[ $EUID -eq 0 ]] || die "Run this script as root."
[[ -t 0 ]] || die "This script must be run interactively (not piped via stdin)."

cd "$WORK_DIR" || die "Cannot cd to $WORK_DIR"

# Check required containers are running
for C in "$BACKEND" "$FRONTEND" "$NGINX"; do
  STATUS=$(docker inspect --format='{{.State.Status}}' "$C" 2>/dev/null || echo missing)
  [[ "$STATUS" == "running" ]] || die "Container $C is not running (status: $STATUS). Cannot migrate."
  ok "$C is running"
done

# Confirm SQLite DB is reachable inside backend container
SQLITE_EXISTS=$(docker exec "$BACKEND" python -c \
  "import os; print('yes' if os.path.exists('/data/data_entry_app.db') else 'no')" 2>/dev/null || echo no)
[[ "$SQLITE_EXISTS" == "yes" ]] || die "SQLite DB not found at /data/data_entry_app.db inside $BACKEND"
ok "SQLite DB reachable inside backend container"

# Check production compose file
[[ -f "$WORK_DIR/$NEW_COMPOSE" ]] && ok "$NEW_COMPOSE already present" || warn "$NEW_COMPOSE not present — will write it"

echo ""
echo -e "   ${BOLD}Everything looks good. Starting migration wizard.${RESET}"
echo ""

# =============================================================================
h1 "PHASE 1 — GATHER CONFIGURATION"
# =============================================================================

info "Reading current config from running backend container..."

get_env() { docker exec "$BACKEND" printenv "$1" 2>/dev/null || echo ""; }

APP_NAME=$(get_env APP_NAME)
CLIENT_NAME=$(get_env CLIENT_NAME)
CLIENT_EMAIL=$(get_env CLIENT_EMAIL)
CLIENT_TENANT_ID=$(get_env CLIENT_TENANT_ID)
ADMIN_NAME=$(get_env ADMIN_NAME)
ADMIN_EMAIL=$(get_env ADMIN_EMAIL)
CORS_ALLOW_ORIGINS=$(get_env CORS_ALLOW_ORIGINS)
ORIGIN=$(docker exec "$FRONTEND" printenv ORIGIN 2>/dev/null || echo "https://clients.lean-101.com.au")
PUBLIC_API_BASE_URL=$(docker exec "$FRONTEND" printenv PUBLIC_API_BASE_URL 2>/dev/null || echo "https://clients.lean-101.com.au")
PUBLIC_MIX_CALC_HISTORY=$(docker exec "$FRONTEND" printenv PUBLIC_MIX_CALCULATOR_SESSION_HISTORY 2>/dev/null || echo "false")
PUBLIC_MIX_CALC_SAVE=$(docker exec "$FRONTEND" printenv PUBLIC_MIX_CALCULATOR_SESSION_SAVE 2>/dev/null || echo "false")
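# The inspect template below pulls the first published host port off the running
# nginx container; it falls back to 8092 only if the lookup itself fails.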
CLIENTS_APP_PORT=$(docker inspect "$NGINX" --format='{{range $p, $conf := .NetworkSettings.Ports}}{{if $conf}}{{(index $conf 0).HostPort}}{{end}}{{end}}' 2>/dev/null || echo "8092")

# Auth secret
EXISTING_AUTH_SECRET=$(get_env AUTH_SECRET)

echo ""
echo "  Current values extracted from container:"
echo "    APP_NAME           = $APP_NAME"
echo "    CLIENT_NAME        = $CLIENT_NAME"
echo "    CLIENT_EMAIL       = $CLIENT_EMAIL"
echo "    CLIENT_TENANT_ID   = $CLIENT_TENANT_ID"
echo "    ADMIN_NAME         = $ADMIN_NAME"
echo "    ADMIN_EMAIL        = $ADMIN_EMAIL"
echo "    CORS_ALLOW_ORIGINS = $CORS_ALLOW_ORIGINS"
echo "    CLIENTS_APP_PORT   = $CLIENTS_APP_PORT"

# Prompt for secrets
prompt "Enter PostgreSQL password for user 'lean101' (new — you choose this):"
read -r -s POSTGRES_PASSWORD
[[ -n "$POSTGRES_PASSWORD" ]] || die "Postgres password cannot be empty."
echo ""

prompt "Enter CLIENT_PASSWORD (current app client password — press Enter to reuse existing):"
EXISTING_CLIENT_PW=$(get_env CLIENT_PASSWORD)
read -r -s CLIENT_PASSWORD_INPUT
echo ""
CLIENT_PASSWORD="${CLIENT_PASSWORD_INPUT:-$EXISTING_CLIENT_PW}"
[[ -n "$CLIENT_PASSWORD" ]] || die "Client password cannot be empty."

prompt "Enter ADMIN_PASSWORD (current app admin password — press Enter to reuse existing):"
EXISTING_ADMIN_PW=$(get_env ADMIN_PASSWORD)
read -r -s ADMIN_PASSWORD_INPUT
echo ""
ADMIN_PASSWORD="${ADMIN_PASSWORD_INPUT:-$EXISTING_ADMIN_PW}"
[[ -n "$ADMIN_PASSWORD" ]] || die "Admin password cannot be empty."

prompt "Enter AUTH_SECRET (press Enter to reuse existing: ${EXISTING_AUTH_SECRET:0:8}...):"
read -r -s AUTH_SECRET_INPUT
echo ""
AUTH_SECRET="${AUTH_SECRET_INPUT:-$EXISTING_AUTH_SECRET}"
[[ -n "$AUTH_SECRET" ]] || die "Auth secret cannot be empty."

echo ""
ok "All credentials collected."

# =============================================================================
h1 "PHASE 2 — BACKUP"
# =============================================================================

info "Creating backup at $BACKUP_DIR ..."
mkdir -p "$BACKUP_DIR"

# Back up SQLite DB from inside the container
info "Copying SQLite DB from container..."
docker cp "$BACKEND":/data/data_entry_app.db "$BACKUP_DIR/data_entry_app.db"
SQLITE_SIZE=$(du -sh "$BACKUP_DIR/data_entry_app.db" | cut -f1)
ok "SQLite DB backed up ($SQLITE_SIZE) → $BACKUP_DIR/data_entry_app.db"

# Back up env and compose files
[[ -f .env ]] && cp .env "$BACKUP_DIR/.env.original" && ok ".env backed up"
[[ -f .env.alpha ]] && cp .env.alpha "$BACKUP_DIR/.env.alpha.original"
cp "$OLD_COMPOSE" "$BACKUP_DIR/$OLD_COMPOSE.original" && ok "$OLD_COMPOSE backed up"

# Record current container state
docker ps -a > "$BACKUP_DIR/containers_before.txt"
docker volume ls > "$BACKUP_DIR/volumes_before.txt"
ok "Container/volume state recorded"

# SQLite row counts for comparison later
info "Recording SQLite row counts..."
docker exec "$BACKEND" python -c "
import sqlite3
conn = sqlite3.connect('/data/data_entry_app.db')
tables = conn.execute(\"SELECT name FROM sqlite_master WHERE type='table' ORDER BY name\").fetchall()
print('SQLite row counts:')
for (t,) in tables:
    try:
        count = conn.execute(f'SELECT COUNT(*) FROM {t}').fetchone()[0]
        print(f'  {t}: {count}')
    except Exception as e:
        print(f'  {t}: ERROR ({e})')
conn.close()
" 2>/dev/null | tee "$BACKUP_DIR/sqlite_row_counts.txt"

echo ""
ok "Backup complete at $BACKUP_DIR"

# =============================================================================
h1 "PHASE 3 — WRITE PRODUCTION CONFIG"
# =============================================================================

DATABASE_URL="postgresql+psycopg://lean101:${POSTGRES_PASSWORD}@db:5432/lean101"
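# NOTE: if the Postgres password contains URL-reserved characters (e.g. @ : / %),
# it must be percent-encoded before being embedded in the URL above, for example
# with something like:
#   python3 -c "import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], safe=''))" "$POSTGRES_PASSWORD"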

# Write .env.production
info "Writing .env.production..."
cat > "$WORK_DIR/.env.production" <<ENVEOF
APP_NAME=${APP_NAME}
DATABASE_URL=${DATABASE_URL}
CLIENT_NAME=${CLIENT_NAME}
CLIENT_EMAIL=${CLIENT_EMAIL}
CLIENT_PASSWORD=${CLIENT_PASSWORD}
CLIENT_TENANT_ID=${CLIENT_TENANT_ID}
ADMIN_NAME=${ADMIN_NAME}
ADMIN_EMAIL=${ADMIN_EMAIL}
ADMIN_PASSWORD=${ADMIN_PASSWORD}
AUTH_SECRET=${AUTH_SECRET}
CORS_ALLOW_ORIGINS=${CORS_ALLOW_ORIGINS}
ORIGIN=${ORIGIN}
PUBLIC_API_BASE_URL=${PUBLIC_API_BASE_URL}
PUBLIC_MIX_CALCULATOR_SESSION_HISTORY=${PUBLIC_MIX_CALC_HISTORY}
PUBLIC_MIX_CALCULATOR_SESSION_SAVE=${PUBLIC_MIX_CALC_SAVE}
CLIENTS_APP_PORT=${CLIENTS_APP_PORT}
POSTGRES_USER=lean101
POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
POSTGRES_DB=lean101
ENVEOF
chmod 600 "$WORK_DIR/.env.production"
ok ".env.production written (600 permissions)"

# Write docker-compose.production.yml if not present
if [[ ! -f "$WORK_DIR/$NEW_COMPOSE" ]]; then
  info "Writing $NEW_COMPOSE..."
  cat > "$WORK_DIR/$NEW_COMPOSE" <<'COMPOSEEOF'
services:
  db:
    container_name: lean101-clients-db
    image: postgres:16-alpine
    restart: unless-stopped
    environment:
      POSTGRES_USER: ${POSTGRES_USER:-lean101}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required}
      POSTGRES_DB: ${POSTGRES_DB:-lean101}
    volumes:
      - clients_db_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-lean101} -d ${POSTGRES_DB:-lean101}"]
      interval: 10s
      timeout: 5s
      retries: 10
      start_period: 15s

  backend:
    container_name: lean101-clients-backend
    build:
      context: .
      dockerfile: backend/Dockerfile
    restart: unless-stopped
    environment:
      APP_NAME: ${APP_NAME:-Lean 101 Clients API}
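      # NOTE (assumption): the nested ${VAR:-default} interpolation on the next
      # line relies on Docker Compose v2-style variable substitution.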
      DATABASE_URL: ${DATABASE_URL:-postgresql+psycopg://${POSTGRES_USER:-lean101}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB:-lean101}}
      CLIENT_NAME: ${CLIENT_NAME:-Hunter Premium Produce}
      CLIENT_EMAIL: ${CLIENT_EMAIL:-operator@example.com}
      CLIENT_PASSWORD: ${CLIENT_PASSWORD:?CLIENT_PASSWORD is required}
      CLIENT_TENANT_ID: ${CLIENT_TENANT_ID:-hunter-premium-produce}
      ADMIN_NAME: ${ADMIN_NAME:-Lean 101}
      ADMIN_EMAIL: ${ADMIN_EMAIL:-admin@lean101.local}
      ADMIN_PASSWORD: ${ADMIN_PASSWORD:?ADMIN_PASSWORD is required}
      AUTH_SECRET: ${AUTH_SECRET:?AUTH_SECRET is required}
      CORS_ALLOW_ORIGINS: ${CORS_ALLOW_ORIGINS:-https://clients.lean-101.com.au}
    depends_on:
      db:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health')"]
      interval: 30s
      timeout: 5s
      retries: 5
      start_period: 25s

  frontend:
    container_name: lean101-clients-frontend
    build:
      context: .
      dockerfile: frontend/Dockerfile
    restart: unless-stopped
    environment:
      ORIGIN: ${ORIGIN:-https://clients.lean-101.com.au}
      PORT: 3000
      HOST: 0.0.0.0
      PUBLIC_API_BASE_URL: ${PUBLIC_API_BASE_URL:-https://clients.lean-101.com.au}
      INTERNAL_API_BASE_URL: ${INTERNAL_API_BASE_URL:-http://backend:8000}
      PUBLIC_API_PORT: ${PUBLIC_API_PORT:-8000}
      PUBLIC_MIX_CALCULATOR_SESSION_HISTORY: ${PUBLIC_MIX_CALCULATOR_SESSION_HISTORY:-false}
      PUBLIC_MIX_CALCULATOR_SESSION_SAVE: ${PUBLIC_MIX_CALCULATOR_SESSION_SAVE:-false}
    depends_on:
      backend:
        condition: service_healthy

  nginx:
    container_name: lean101-clients
    image: nginx:1.27-alpine
    restart: unless-stopped
    depends_on:
      frontend:
        condition: service_started
      backend:
        condition: service_healthy
    ports:
      - "${CLIENTS_APP_PORT:-8092}:80"
    volumes:
      - ./deploy/nginx/clients.lean-101.conf:/etc/nginx/conf.d/default.conf:ro

volumes:
  clients_db_data:
COMPOSEEOF
  ok "$NEW_COMPOSE written"
else
  ok "$NEW_COMPOSE already exists — not overwritten"
fi

# =============================================================================
h1 "PHASE 4 — START POSTGRES"
# =============================================================================

info "Starting database service from $NEW_COMPOSE..."
docker compose --env-file .env.production -f "$NEW_COMPOSE" up -d db

info "Waiting for Postgres to be healthy (up to 60s)..."
for i in $(seq 1 30); do
  HEALTH=$(docker inspect --format='{{if .State.Health}}{{.State.Health.Status}}{{else}}starting{{end}}' "$PG_DB_SERVICE" 2>/dev/null || echo missing)
  if [[ "$HEALTH" == "healthy" ]]; then
    ok "Postgres is healthy"
    break
  fi
  printf "   attempt %d/30: %s\n" "$i" "$HEALTH"
  sleep 2
  if [[ $i -eq 30 ]]; then
    docker logs "$PG_DB_SERVICE" --tail=20
    die "Postgres did not become healthy in time."
  fi
done

# =============================================================================
h1 "PHASE 5 — BOOTSTRAP POSTGRES SCHEMA"
# =============================================================================

info "Running bootstrap_schema on Postgres via backend container..."

docker exec \
  -e DATABASE_URL="$DATABASE_URL" \
  "$BACKEND" \
  python -c "
import os
from sqlalchemy import create_engine
from app.db.migrations import bootstrap_schema
from app.db.session import Base
import app.models  # registers all models onto Base.metadata

url = os.environ['DATABASE_URL']
print(f' Connecting to: {url.split(\"@\")[1] if \"@\" in url else url}')
pg_engine = create_engine(url)
result = bootstrap_schema(pg_engine, Base.metadata)
print(f' Result: {result.summary()}')
"

ok "Postgres schema bootstrapped"

# =============================================================================
h1 "PHASE 6 — MIGRATE DATA (SQLite → PostgreSQL)"
# =============================================================================

info "Writing Python migration script..."

cat > "$MIGRATE_SCRIPT" <<'PYEOF'
#!/usr/bin/env python3
"""
Migrate all rows from SQLite (/data/data_entry_app.db) to PostgreSQL.
Runs inside the lean101-clients-backend container.
"""
import os
import sys
from sqlalchemy import create_engine, text, inspect

SQLITE_URL = "sqlite:////data/data_entry_app.db"
PG_URL = os.environ["PG_DATABASE_URL"]

# FK-safe insertion order based on model relationships
TABLE_ORDER = [
    "roles",
    "permissions",
    "role_permissions",
    "users",
    "client_accounts",
    "client_users",
    "client_feature_access",
    "client_user_module_permissions",
    "client_access_audit_events",
    "raw_materials",
    "raw_material_price_versions",
    "mixes",
    "mix_ingredients",
    "process_cost_rules",
    "packaging_cost_rules",
    "freight_cost_rules",
    "products",
    "scenarios",
    "costing_results",
    "mix_calculator_sessions",
    "mix_calculator_session_lines",
]
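# NOTE: SQLite tables that are NOT listed above are never visited by the loop
# below, so they are silently left unmigrated. If the schema grows, append the
# new tables here in FK-safe order before running this script.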

def migrate():
    from app.db.session import Base
    import app.models  # registers all models onto Base.metadata

    src = create_engine(SQLITE_URL, connect_args={"check_same_thread": False})
    dst = create_engine(PG_URL)

    sqlite_tables = set(inspect(src).get_table_names())
    print(f"\nFound {len(sqlite_tables)} tables in SQLite: {', '.join(sorted(sqlite_tables))}\n")

    totals = {}

    with dst.begin() as dst_conn:
        # Disable FK checks for bulk insert
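        # NOTE (assumption): setting session_replication_role needs superuser
        # rights; the role created via POSTGRES_USER in the official postgres
        # image is a superuser, so this is expected to work. Replica mode also
        # disables ordinary triggers for the session.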
        dst_conn.execute(text("SET session_replication_role = 'replica'"))

        for table_name in TABLE_ORDER:
            if table_name not in sqlite_tables:
                print(f"  SKIP  {table_name:<45} (not in SQLite)")
                continue

            table = Base.metadata.tables.get(table_name)
            if table is None:
                print(f"  SKIP  {table_name:<45} (not in SQLAlchemy metadata)")
                continue

            try:
                with src.connect() as src_conn:
                    rows = src_conn.execute(table.select()).fetchall()
            except Exception as e:
                print(f"  ERROR {table_name:<45} SQLite read failed: {e}")
                continue

            if not rows:
                print(f"  SKIP  {table_name:<45} (0 rows)")
                continue

            try:
                dst_conn.execute(table.insert(), [dict(row._mapping) for row in rows])
                print(f"  OK    {table_name:<45} {len(rows):>6} rows")
                totals[table_name] = len(rows)
            except Exception as e:
                print(f"  ERROR {table_name:<45} Insert failed: {e}")
                sys.exit(1)

        # Re-enable FK checks
        dst_conn.execute(text("SET session_replication_role = 'origin'"))

    # Reset auto-increment sequences
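    # Rows above were inserted with their original primary keys, so the serial
    # sequences still sit at their initial values; bump each one to MAX(id) so
    # the next application INSERT does not collide with migrated rows.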
    print("\n  Resetting sequences...")
    with dst.begin() as conn:
        for table_name in TABLE_ORDER:
            try:
                conn.execute(text(
                    f"SELECT setval("
                    f"  pg_get_serial_sequence('{table_name}', 'id'),"
                    f"  COALESCE((SELECT MAX(id) FROM {table_name}), 1)"
                    f")"
                ))
            except Exception:
                pass

    print(f"\n  Migration complete. {sum(totals.values())} rows across {len(totals)} tables.")
    return totals

if __name__ == "__main__":
    migrate()
PYEOF

info "Copying migration script into backend container..."
docker cp "$MIGRATE_SCRIPT" "$BACKEND:$MIGRATE_SCRIPT"

info "Running migration..."
docker exec \
  -e DATABASE_URL="$DATABASE_URL" \
  -e PG_DATABASE_URL="$DATABASE_URL" \
  "$BACKEND" \
  python "$MIGRATE_SCRIPT"

ok "Data migration complete"

# =============================================================================
h1 "PHASE 7 — VERIFY MIGRATION"
# =============================================================================

info "Postgres row counts:"
docker exec "$PG_DB_SERVICE" psql -U lean101 -d lean101 -c "
SELECT
    relname AS table_name,
    n_live_tup AS row_count
FROM pg_stat_user_tables
ORDER BY n_live_tup DESC;
" | sed 's/^/ /'
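# NOTE: n_live_tup is an estimate maintained by the statistics collector and can
# lag briefly after a bulk load. If a number looks off, take an exact count for
# that table, e.g.:
#   docker exec lean101-clients-db psql -U lean101 -d lean101 -c "SELECT COUNT(*) FROM users;"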

echo ""
echo "  SQLite row counts (from backup):"
sed 's/^/ /' "$BACKUP_DIR/sqlite_row_counts.txt"

echo ""
prompt "Review the counts above. Do they match? Type 'yes' to proceed with cutover, anything else to abort:"
read -r CONFIRM
if [[ "$CONFIRM" != "yes" ]]; then
  echo ""
  warn "Cutover aborted by user. Old SQLite stack is still running."
  warn "Postgres is running but old stack is untouched."
  echo ""
  echo "  To retry, re-run this script (it starts again from pre-flight):"
  echo "    bash $0"
  echo ""
  echo "  To tear down the Postgres container:"
  echo "    docker compose -f $NEW_COMPOSE --env-file .env.production down"
  exit 0
fi

# =============================================================================
h1 "PHASE 8 — CUTOVER (Stop SQLite stack, Start Postgres stack)"
# =============================================================================

info "Stopping old SQLite stack (backend, frontend, nginx)..."
docker stop "$BACKEND" "$FRONTEND" "$NGINX" 2>/dev/null || true
docker rm "$BACKEND" "$FRONTEND" "$NGINX" 2>/dev/null || true
ok "Old containers stopped and removed"

info "Starting full production stack from $NEW_COMPOSE..."
docker compose --env-file .env.production -f "$NEW_COMPOSE" up -d --build

info "Waiting for production backend to become healthy (up to 90s)..."
for i in $(seq 1 30); do
  HEALTH=$(docker inspect --format='{{if .State.Health}}{{.State.Health.Status}}{{else}}starting{{end}}' "$BACKEND" 2>/dev/null || echo missing)
  if [[ "$HEALTH" == "healthy" ]]; then
    ok "Backend healthy"
    break
  fi
  printf "   attempt %d/30: %s\n" "$i" "$HEALTH"
  sleep 3
  if [[ $i -eq 30 ]]; then
    docker logs "$BACKEND" --tail=30
    die "Backend did not become healthy. Check logs above. Run rollback if needed."
  fi
done

# =============================================================================
h1 "PHASE 9 — FINAL VERIFICATION"
# =============================================================================

info "Stack status:"
docker compose --env-file .env.production -f "$NEW_COMPOSE" ps | sed 's/^/ /'

echo ""
info "Backend health endpoint:"
docker exec "$BACKEND" python -c \
  "import urllib.request; r=urllib.request.urlopen('http://127.0.0.1:8000/health'); print('  ', r.read().decode())"

echo ""
info "DATABASE_URL in production backend:"
docker exec "$BACKEND" printenv DATABASE_URL | \
  sed 's|://[^:]*:[^@]*@|://***:***@|g' | sed 's/^/ /'

echo ""
info "Final Postgres table row counts:"
docker exec "$PG_DB_SERVICE" psql -U lean101 -d lean101 -c "
SELECT relname AS table_name, n_live_tup AS rows
FROM pg_stat_user_tables
ORDER BY n_live_tup DESC;
" | sed 's/^/ /'

# Clean up migration script from container
docker exec "$BACKEND" rm -f "$MIGRATE_SCRIPT" 2>/dev/null || true
rm -f "$MIGRATE_SCRIPT"

# Remove the error trap since we succeeded
trap - ERR

sep
echo -e "${GREEN}${BOLD} Migration complete!${RESET}"
echo ""
echo -e "  ${BOLD}Production is now running on PostgreSQL.${RESET}"
echo ""
echo "  Backup preserved at: $BACKUP_DIR"
echo "  SQLite volume (lean101-clients_clients_app_data) was NOT deleted."
echo "  Once you're confident in production, you can remove it with:"
echo "    docker volume rm lean101-clients_clients_app_data"
echo ""
echo "  Next steps:"
echo "    1. Verify the app at https://clients.lean-101.com.au"
echo "    2. Bootstrap git repo: ./deploy/Deploy.ps1 -RemoteHost <ip> -Bootstrap -RepoUrl <url>"
echo "    3. Future deploys use: ./deploy/Deploy.ps1 -RemoteHost <ip>"
sep
echo ""