Deployment Script, Postgres migration, UX improvements
This commit is contained in:
+102
-139
@@ -1,219 +1,182 @@
|
||||
<#
|
||||
.SYNOPSIS
|
||||
Build and deploy the Lean 101 Clients app to a Digital Ocean droplet over SSH.
|
||||
Deploy the Lean 101 Clients app to a Digital Ocean droplet over SSH.
|
||||
|
||||
.DESCRIPTION
|
||||
Runs `docker compose` against `docker-compose.production.yml` on the remote
|
||||
host. The same script handles first-time bootstrap and subsequent updates:
|
||||
Tars the local source tree, uploads it to the droplet, and runs
|
||||
docker compose up --build. No git required on the server.
|
||||
|
||||
* On bootstrap (-Bootstrap): creates the remote directory, clones the
|
||||
repo (or updates if already present), uploads the local env file, and
|
||||
brings the stack up with `docker compose ... up -d --build`.
|
||||
|
||||
* On update (default): SSHes to the host, fetches the requested branch,
|
||||
uploads a refreshed env file (if changed), then runs
|
||||
`docker compose ... up -d --build` followed by a healthcheck.
|
||||
|
||||
The script never executes destructive commands without asking, except for
|
||||
recreating containers (which preserves the named Postgres volume).
|
||||
The same script handles first-time setup and subsequent updates.
|
||||
|
||||
.PARAMETER RemoteHost
|
||||
Hostname or IP of the Digital Ocean droplet. Required.
|
||||
|
||||
.PARAMETER RemoteUser
|
||||
SSH user on the droplet. Defaults to `root`.
|
||||
SSH user. Defaults to 'root'.
|
||||
|
||||
.PARAMETER RemotePath
|
||||
Absolute path on the droplet where the repo lives. Defaults to
|
||||
`/srv/lean101-clients`.
|
||||
|
||||
.PARAMETER Branch
|
||||
Git branch to deploy. Defaults to `main`.
|
||||
|
||||
.PARAMETER RepoUrl
|
||||
Git URL used during bootstrap when the remote directory is empty.
|
||||
Required only with -Bootstrap.
|
||||
Absolute path on the droplet. Defaults to '/srv/lean101-clients'.
|
||||
|
||||
.PARAMETER EnvFile
|
||||
Local path to the env file that should land on the droplet as
|
||||
`<RemotePath>/.env.production`. Defaults to `.env.production`.
|
||||
Local path to the production env file. Defaults to '.env.production'.
|
||||
|
||||
.PARAMETER SshKey
|
||||
Optional path to an SSH private key. If omitted, the script relies on
|
||||
ssh-agent / default keys.
|
||||
Optional path to an SSH private key.
|
||||
|
||||
.PARAMETER ComposeFile
|
||||
Compose file name on the remote host. Defaults to
|
||||
`docker-compose.production.yml`.
|
||||
|
||||
.PARAMETER Bootstrap
|
||||
Run first-time setup (clone, upload env, build, up).
|
||||
|
||||
.PARAMETER SkipBuild
|
||||
Pass `--no-build` to docker compose (use when only env changed).
|
||||
Compose file name on the remote host. Defaults to 'docker-compose.production.yml'.
|
||||
|
||||
.PARAMETER Seed
|
||||
Run `python -m app.seed` inside the backend container after the stack is up.
|
||||
Run 'python -m app.seed' inside the backend container after the stack is up.
|
||||
|
||||
.PARAMETER Logs
|
||||
After deploy, tail logs for ~20 lines so you can verify the stack came up.
|
||||
Tail logs for ~60 lines after deploy to verify the stack came up.
|
||||
|
||||
.PARAMETER SkipBuild
|
||||
Pass --no-build to docker compose (use when only env changed).
|
||||
|
||||
.EXAMPLE
|
||||
./deploy/Deploy.ps1 -RemoteHost 203.0.113.10 -Bootstrap -RepoUrl git@github.com:ponzischeme89/data-entry-app.git
|
||||
./deploy/Deploy.ps1 -RemoteHost 209.38.24.231
|
||||
|
||||
.EXAMPLE
|
||||
./deploy/Deploy.ps1 -RemoteHost 203.0.113.10
|
||||
./deploy/Deploy.ps1 -RemoteHost 209.38.24.231 -Seed -Logs
|
||||
#>
|
||||
|
||||
[CmdletBinding()]
|
||||
param(
|
||||
[Parameter(Mandatory = $true)] [string] $RemoteHost,
|
||||
[string] $RemoteUser = "root",
|
||||
[string] $RemotePath = "/srv/lean101-clients",
|
||||
[string] $Branch = "main",
|
||||
[string] $RepoUrl,
|
||||
[string] $EnvFile = ".env.production",
|
||||
[string] $RemoteUser = "root",
|
||||
[string] $RemotePath = "/srv/lean101-clients",
|
||||
[string] $EnvFile = ".env.production",
|
||||
[string] $SshKey,
|
||||
[string] $ComposeFile = "docker-compose.production.yml",
|
||||
[switch] $Bootstrap,
|
||||
[switch] $SkipBuild,
|
||||
[string] $ComposeFile = "docker-compose.production.yml",
|
||||
[switch] $Seed,
|
||||
[switch] $Logs
|
||||
[switch] $Logs,
|
||||
[switch] $SkipBuild
|
||||
)
|
||||
|
||||
$ErrorActionPreference = "Stop"
|
||||
Set-StrictMode -Version Latest
|
||||
|
||||
function Write-Step($message) {
|
||||
Write-Host "==> $message" -ForegroundColor Cyan
|
||||
# ── Helpers ───────────────────────────────────────────────────────────────────
|
||||
function Write-Step($msg) { Write-Host "==> $msg" -ForegroundColor Cyan }
|
||||
function Write-Warn($msg) { Write-Host "!! $msg" -ForegroundColor Yellow }
|
||||
|
||||
function Get-RepoRoot {
|
||||
$dir = Split-Path -Parent $PSScriptRoot
|
||||
if (-not $dir) { $dir = (Get-Location).Path }
|
||||
return $dir
|
||||
}
|
||||
|
||||
function Write-Warn($message) {
|
||||
Write-Host "!! $message" -ForegroundColor Yellow
|
||||
$RepoRoot = Get-RepoRoot
|
||||
$SshTarget = "$RemoteUser@$RemoteHost"
|
||||
$SshOpts = @("-o", "StrictHostKeyChecking=accept-new", "-o", "BatchMode=no")
|
||||
if ($SshKey) { $SshOpts += @("-i", $SshKey) }
|
||||
|
||||
function Invoke-Ssh([string] $cmd) {
|
||||
& ssh @SshOpts $SshTarget $cmd
|
||||
if ($LASTEXITCODE -ne 0) { throw "Remote command failed (exit $LASTEXITCODE): $cmd" }
|
||||
}
|
||||
|
||||
function Resolve-RepoRoot {
|
||||
$scriptDir = Split-Path -Parent $MyInvocation.ScriptName
|
||||
if (-not $scriptDir) { $scriptDir = $PSScriptRoot }
|
||||
return (Resolve-Path (Join-Path $scriptDir "..")).Path
|
||||
function Invoke-Scp([string] $local, [string] $remote) {
|
||||
& scp @SshOpts $local "${SshTarget}:${remote}"
|
||||
if ($LASTEXITCODE -ne 0) { throw "scp failed: $local -> $remote" }
|
||||
}
|
||||
|
||||
$RepoRoot = Resolve-RepoRoot
|
||||
# ── Resolve paths ─────────────────────────────────────────────────────────────
|
||||
Push-Location $RepoRoot
|
||||
try {
|
||||
$envPath = if ([System.IO.Path]::IsPathRooted($EnvFile)) { $EnvFile } else { Join-Path $RepoRoot $EnvFile }
|
||||
if (-not (Test-Path $envPath)) {
|
||||
throw "Env file not found at '$envPath'. Copy .env.production.example to $EnvFile and fill in production secrets first."
|
||||
$EnvPath = if ([System.IO.Path]::IsPathRooted($EnvFile)) { $EnvFile } else { Join-Path $RepoRoot $EnvFile }
|
||||
if (-not (Test-Path $EnvPath)) {
|
||||
throw "Env file not found at '$EnvPath'. Copy .env.production.example and fill in secrets."
|
||||
}
|
||||
|
||||
$sshTarget = "$RemoteUser@$RemoteHost"
|
||||
$sshOpts = @("-o", "StrictHostKeyChecking=accept-new")
|
||||
if ($SshKey) { $sshOpts += @("-i", $SshKey) }
|
||||
|
||||
function Invoke-Ssh([string] $remoteCommand) {
|
||||
& ssh @sshOpts $sshTarget $remoteCommand
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
throw "Remote command failed (exit $LASTEXITCODE): $remoteCommand"
|
||||
}
|
||||
}
|
||||
|
||||
function Invoke-Scp([string] $localPath, [string] $remoteDest) {
|
||||
& scp @sshOpts $localPath "$($sshTarget):$remoteDest"
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
throw "scp failed for $localPath -> $remoteDest"
|
||||
}
|
||||
}
|
||||
|
||||
Write-Step "Verifying SSH connectivity to $sshTarget"
|
||||
# ── Connectivity check ──────────────────────────────────────────────────────
|
||||
Write-Step "Checking SSH connectivity to $SshTarget"
|
||||
Invoke-Ssh "echo connected as `$(whoami) on `$(hostname)"
|
||||
|
||||
Write-Step "Verifying Docker is installed on the droplet"
|
||||
Invoke-Ssh "command -v docker >/dev/null 2>&1 && docker --version && docker compose version"
|
||||
# ── Package source files ────────────────────────────────────────────────────
|
||||
Write-Step "Packaging source files (excluding node_modules, caches, etc.)"
|
||||
|
||||
if ($Bootstrap) {
|
||||
if (-not $RepoUrl) {
|
||||
throw "-RepoUrl is required when using -Bootstrap."
|
||||
}
|
||||
Write-Step "Bootstrapping $RemotePath from $RepoUrl ($Branch)"
|
||||
$bootstrapScript = @"
|
||||
set -euo pipefail
|
||||
mkdir -p '$RemotePath'
|
||||
cd '$RemotePath'
|
||||
if [ ! -d .git ]; then
|
||||
git clone --branch '$Branch' '$RepoUrl' .
|
||||
else
|
||||
git remote set-url origin '$RepoUrl'
|
||||
git fetch origin '$Branch'
|
||||
git checkout '$Branch'
|
||||
git reset --hard 'origin/$Branch'
|
||||
fi
|
||||
"@
|
||||
Invoke-Ssh $bootstrapScript
|
||||
} else {
|
||||
Write-Step "Updating $RemotePath to latest $Branch"
|
||||
$updateScript = @"
|
||||
set -euo pipefail
|
||||
cd '$RemotePath'
|
||||
git fetch origin '$Branch'
|
||||
git checkout '$Branch'
|
||||
git reset --hard 'origin/$Branch'
|
||||
"@
|
||||
Invoke-Ssh $updateScript
|
||||
}
|
||||
$TarFile = Join-Path $env:TEMP "lean101-deploy-$(Get-Date -Format 'yyyyMMdd-HHmmss').tar.gz"
|
||||
|
||||
Write-Step "Uploading $EnvFile to $RemotePath/.env.production"
|
||||
Invoke-Scp $envPath "$RemotePath/.env.production"
|
||||
$excludes = @(
|
||||
"--exclude=./node_modules",
|
||||
"--exclude=./frontend/node_modules",
|
||||
"--exclude=./frontend/.svelte-kit",
|
||||
"--exclude=./frontend/build",
|
||||
"--exclude=./.git",
|
||||
"--exclude=./__pycache__",
|
||||
"--exclude=./backend/__pycache__",
|
||||
"--exclude=./backend/app/__pycache__",
|
||||
"--exclude=./**/__pycache__",
|
||||
"--exclude=./*.pyc",
|
||||
"--exclude=./.env",
|
||||
"--exclude=./.env.production",
|
||||
"--exclude=./.env.alpha",
|
||||
"--exclude=./data_entry_app.db",
|
||||
"--exclude=./*.db"
|
||||
)
|
||||
|
||||
& tar -czf $TarFile @excludes -C $RepoRoot .
|
||||
if ($LASTEXITCODE -ne 0) { throw "tar failed" }
|
||||
|
||||
$TarSize = [math]::Round((Get-Item $TarFile).Length / 1MB, 1)
|
||||
Write-Host " Archive: $TarFile ($TarSize MB)"
|
||||
|
||||
# ── Upload env file ─────────────────────────────────────────────────────────
|
||||
Write-Step "Uploading env file"
|
||||
Invoke-Scp $EnvPath "$RemotePath/.env.production"
|
||||
Invoke-Ssh "chmod 600 '$RemotePath/.env.production'"
|
||||
|
||||
$composeArgs = @(
|
||||
"--env-file", ".env.production",
|
||||
"-f", $ComposeFile
|
||||
) -join " "
|
||||
# ── Upload and extract source ────────────────────────────────────────────────
|
||||
Write-Step "Uploading source archive"
|
||||
Invoke-Scp $TarFile "/tmp/lean101-deploy.tar.gz"
|
||||
Remove-Item $TarFile -Force
|
||||
|
||||
$buildFlag = if ($SkipBuild) { "" } else { "--build" }
|
||||
Write-Step "Extracting on server"
|
||||
Invoke-Ssh "mkdir -p '$RemotePath' && tar -xzf /tmp/lean101-deploy.tar.gz -C '$RemotePath' && rm /tmp/lean101-deploy.tar.gz"
|
||||
|
||||
Write-Step "Pulling base images"
|
||||
Invoke-Ssh "cd '$RemotePath' && docker compose $composeArgs pull --ignore-pull-failures || true"
|
||||
# ── Docker compose up ───────────────────────────────────────────────────────
|
||||
$ComposeArgs = "--env-file .env.production -f $ComposeFile"
|
||||
$BuildFlag = if ($SkipBuild) { "--no-build" } else { "--build" }
|
||||
|
||||
Write-Step "Bringing the stack up (build=$([bool](-not $SkipBuild)))"
|
||||
Invoke-Ssh "cd '$RemotePath' && docker compose $composeArgs up -d $buildFlag --remove-orphans"
|
||||
Write-Step "Bringing stack up (build=$(-not $SkipBuild))"
|
||||
Invoke-Ssh "cd '$RemotePath' && docker compose $ComposeArgs up -d $BuildFlag --remove-orphans"
|
||||
|
||||
Write-Step "Waiting for backend healthcheck"
|
||||
# ── Health check ────────────────────────────────────────────────────────────
|
||||
Write-Step "Waiting for backend health check"
|
||||
$healthScript = @"
|
||||
set -e
|
||||
cd '$RemotePath'
|
||||
for attempt in `$(seq 1 30); do
|
||||
for i in `$(seq 1 30); do
|
||||
status=`$(docker inspect --format='{{if .State.Health}}{{.State.Health.Status}}{{else}}{{.State.Status}}{{end}}' lean101-clients-backend 2>/dev/null || echo missing)
|
||||
case "`$status" in
|
||||
healthy|running)
|
||||
echo "backend is `$status"
|
||||
exit 0;;
|
||||
*)
|
||||
printf '.'
|
||||
sleep 4;;
|
||||
healthy|running) echo "backend is `$status"; exit 0 ;;
|
||||
*) printf '.'; sleep 4 ;;
|
||||
esac
|
||||
done
|
||||
echo
|
||||
echo 'backend did not become healthy in time' >&2
|
||||
docker compose $composeArgs ps backend
|
||||
exit 1
|
||||
echo; echo 'backend did not become healthy in time' >&2; exit 1
|
||||
"@
|
||||
Invoke-Ssh $healthScript
|
||||
|
||||
# ── Optional seed ───────────────────────────────────────────────────────────
|
||||
if ($Seed) {
|
||||
Write-Step "Seeding reference data"
|
||||
Invoke-Ssh "cd '$RemotePath' && docker compose $composeArgs exec -T backend python -m app.seed"
|
||||
Invoke-Ssh "cd '$RemotePath' && docker compose $ComposeArgs exec -T backend python -m app.seed"
|
||||
}
|
||||
|
||||
Write-Step "Final container status"
|
||||
Invoke-Ssh "cd '$RemotePath' && docker compose $composeArgs ps"
|
||||
# ── Final status ────────────────────────────────────────────────────────────
|
||||
Write-Step "Stack status"
|
||||
Invoke-Ssh "cd '$RemotePath' && docker compose $ComposeArgs ps"
|
||||
|
||||
if ($Logs) {
|
||||
Write-Step "Recent logs (last 60 lines)"
|
||||
Invoke-Ssh "cd '$RemotePath' && docker compose $composeArgs logs --tail=60"
|
||||
Invoke-Ssh "cd '$RemotePath' && docker compose $ComposeArgs logs --tail=60"
|
||||
}
|
||||
|
||||
Write-Host "Deployment complete." -ForegroundColor Green
|
||||
Write-Host ""
|
||||
Write-Host "Deployment complete -> https://clients.lean-101.com.au" -ForegroundColor Green
|
||||
}
|
||||
finally {
|
||||
Pop-Location
|
||||
|
||||
@@ -0,0 +1,598 @@
|
||||
#!/usr/bin/env bash
|
||||
# =============================================================================
|
||||
# migrate-to-postgres.sh
|
||||
#
|
||||
# Migrates the lean101-clients production stack from SQLite to PostgreSQL.
|
||||
# Safe: backs up everything before touching anything. Old stack stays live
|
||||
# until you confirm migration succeeded.
|
||||
#
|
||||
# HOW TO USE:
|
||||
# 1. Copy this file to the server:
|
||||
# scp deploy/migrate-to-postgres.sh root@<host>:/srv/lean101-clients/deploy/
|
||||
#
|
||||
# 2. SSH in and run it directly (must be interactive — not piped):
|
||||
# ssh root@<host>
|
||||
# bash /srv/lean101-clients/deploy/migrate-to-postgres.sh
|
||||
# =============================================================================
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# ── Config ────────────────────────────────────────────────────────────────────
|
||||
WORK_DIR="/srv/lean101-clients"
|
||||
BACKEND="lean101-clients-backend"
|
||||
FRONTEND="lean101-clients-frontend"
|
||||
NGINX="lean101-clients"
|
||||
OLD_COMPOSE="docker-compose.yml"
|
||||
NEW_COMPOSE="docker-compose.production.yml"
|
||||
TIMESTAMP=$(date +%Y%m%d-%H%M%S)
|
||||
BACKUP_DIR="/srv/lean101-clients-backup-$TIMESTAMP"
|
||||
MIGRATE_SCRIPT="/tmp/lean101_migrate_data.py"
|
||||
PG_DB_SERVICE="lean101-clients-db"
|
||||
PG_IMAGE="postgres:16-alpine"
|
||||
|
||||
# ── Colour helpers ────────────────────────────────────────────────────────────
|
||||
RED='\033[0;31m'; YELLOW='\033[1;33m'; GREEN='\033[0;32m'
|
||||
CYAN='\033[0;36m'; BOLD='\033[1m'; RESET='\033[0m'
|
||||
|
||||
sep() { echo -e "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"; }
|
||||
h1() { sep; echo -e "${BOLD}${CYAN} $1${RESET}"; sep; }
|
||||
ok() { echo -e " ${GREEN}✔${RESET} $1"; }
|
||||
warn() { echo -e " ${YELLOW}⚠${RESET} $1"; }
|
||||
die() { echo -e " ${RED}✘ FATAL: $1${RESET}" >&2; exit 1; }
|
||||
info() { echo -e " ${CYAN}→${RESET} $1"; }
|
||||
prompt() { echo -e "\n ${BOLD}$1${RESET}"; }
|
||||
|
||||
# ── Rollback instructions ─────────────────────────────────────────────────────
|
||||
print_rollback() {
|
||||
echo ""
|
||||
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
|
||||
echo -e "${YELLOW} ROLLBACK — to restore the original SQLite stack:${RESET}"
|
||||
echo ""
|
||||
echo " cd $WORK_DIR"
|
||||
echo " docker compose -f $NEW_COMPOSE --env-file .env.production down 2>/dev/null || true"
|
||||
echo " docker compose -f $OLD_COMPOSE --env-file .env up -d"
|
||||
echo ""
|
||||
echo " SQLite backup is at: $BACKUP_DIR/data_entry_app.db"
|
||||
echo -e "${YELLOW}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
|
||||
echo ""
|
||||
}
|
||||
trap print_rollback ERR
|
||||
|
||||
# =============================================================================
|
||||
h1 "PHASE 0 — PRE-FLIGHT"
|
||||
# =============================================================================
|
||||
|
||||
[[ $EUID -eq 0 ]] || die "Run this script as root."
|
||||
[[ -t 0 ]] || die "This script must be run interactively (not piped via stdin)."
|
||||
|
||||
cd "$WORK_DIR" || die "Cannot cd to $WORK_DIR"
|
||||
|
||||
# Check required containers are running
|
||||
for C in "$BACKEND" "$FRONTEND" "$NGINX"; do
|
||||
STATUS=$(docker inspect --format='{{.State.Status}}' "$C" 2>/dev/null || echo missing)
|
||||
[[ "$STATUS" == "running" ]] || die "Container $C is not running (status: $STATUS). Cannot migrate."
|
||||
ok "$C is running"
|
||||
done
|
||||
|
||||
# Confirm SQLite DB is reachable inside backend container
|
||||
SQLITE_EXISTS=$(docker exec "$BACKEND" python -c \
|
||||
"import os; print('yes' if os.path.exists('/data/data_entry_app.db') else 'no')" 2>/dev/null || echo no)
|
||||
[[ "$SQLITE_EXISTS" == "yes" ]] || die "SQLite DB not found at /data/data_entry_app.db inside $BACKEND"
|
||||
ok "SQLite DB reachable inside backend container"
|
||||
|
||||
# Check production compose file
|
||||
[[ -f "$WORK_DIR/$NEW_COMPOSE" ]] && ok "$NEW_COMPOSE already present" || warn "$NEW_COMPOSE not present — will write it"
|
||||
|
||||
echo ""
|
||||
echo -e " ${BOLD}Everything looks good. Starting migration wizard.${RESET}"
|
||||
echo ""
|
||||
|
||||
# =============================================================================
|
||||
h1 "PHASE 1 — GATHER CONFIGURATION"
|
||||
# =============================================================================
|
||||
|
||||
info "Reading current config from running backend container..."
|
||||
|
||||
get_env() { docker exec "$BACKEND" printenv "$1" 2>/dev/null || echo ""; }
|
||||
|
||||
APP_NAME=$(get_env APP_NAME)
|
||||
CLIENT_NAME=$(get_env CLIENT_NAME)
|
||||
CLIENT_EMAIL=$(get_env CLIENT_EMAIL)
|
||||
CLIENT_TENANT_ID=$(get_env CLIENT_TENANT_ID)
|
||||
ADMIN_NAME=$(get_env ADMIN_NAME)
|
||||
ADMIN_EMAIL=$(get_env ADMIN_EMAIL)
|
||||
CORS_ALLOW_ORIGINS=$(get_env CORS_ALLOW_ORIGINS)
|
||||
ORIGIN=$(docker exec "$FRONTEND" printenv ORIGIN 2>/dev/null || echo "https://clients.lean-101.com.au")
|
||||
PUBLIC_API_BASE_URL=$(docker exec "$FRONTEND" printenv PUBLIC_API_BASE_URL 2>/dev/null || echo "https://clients.lean-101.com.au")
|
||||
PUBLIC_MIX_CALC_HISTORY=$(docker exec "$FRONTEND" printenv PUBLIC_MIX_CALCULATOR_SESSION_HISTORY 2>/dev/null || echo "false")
|
||||
PUBLIC_MIX_CALC_SAVE=$(docker exec "$FRONTEND" printenv PUBLIC_MIX_CALCULATOR_SESSION_SAVE 2>/dev/null || echo "false")
|
||||
CLIENTS_APP_PORT=$(docker inspect "$NGINX" --format='{{range $p, $conf := .NetworkSettings.Ports}}{{if $conf}}{{(index $conf 0).HostPort}}{{end}}{{end}}' 2>/dev/null || echo "8092")
|
||||
|
||||
# Auth secret
|
||||
EXISTING_AUTH_SECRET=$(get_env AUTH_SECRET)
|
||||
|
||||
echo ""
|
||||
echo " Current values extracted from container:"
|
||||
echo " APP_NAME = $APP_NAME"
|
||||
echo " CLIENT_NAME = $CLIENT_NAME"
|
||||
echo " CLIENT_EMAIL = $CLIENT_EMAIL"
|
||||
echo " CLIENT_TENANT_ID = $CLIENT_TENANT_ID"
|
||||
echo " ADMIN_NAME = $ADMIN_NAME"
|
||||
echo " ADMIN_EMAIL = $ADMIN_EMAIL"
|
||||
echo " CORS_ALLOW_ORIGINS = $CORS_ALLOW_ORIGINS"
|
||||
echo " CLIENTS_APP_PORT = $CLIENTS_APP_PORT"
|
||||
|
||||
# Prompt for secrets
|
||||
prompt "Enter PostgreSQL password for user 'lean101' (new — you choose this):"
|
||||
read -r -s POSTGRES_PASSWORD
|
||||
[[ -n "$POSTGRES_PASSWORD" ]] || die "Postgres password cannot be empty."
|
||||
echo ""
|
||||
|
||||
prompt "Enter CLIENT_PASSWORD (current app client password — press Enter to reuse existing):"
|
||||
EXISTING_CLIENT_PW=$(get_env CLIENT_PASSWORD)
|
||||
read -r -s CLIENT_PASSWORD_INPUT
|
||||
echo ""
|
||||
CLIENT_PASSWORD="${CLIENT_PASSWORD_INPUT:-$EXISTING_CLIENT_PW}"
|
||||
[[ -n "$CLIENT_PASSWORD" ]] || die "Client password cannot be empty."
|
||||
|
||||
prompt "Enter ADMIN_PASSWORD (current app admin password — press Enter to reuse existing):"
|
||||
EXISTING_ADMIN_PW=$(get_env ADMIN_PASSWORD)
|
||||
read -r -s ADMIN_PASSWORD_INPUT
|
||||
echo ""
|
||||
ADMIN_PASSWORD="${ADMIN_PASSWORD_INPUT:-$EXISTING_ADMIN_PW}"
|
||||
[[ -n "$ADMIN_PASSWORD" ]] || die "Admin password cannot be empty."
|
||||
|
||||
prompt "Enter AUTH_SECRET (press Enter to reuse existing: ${EXISTING_AUTH_SECRET:0:8}...):"
|
||||
read -r -s AUTH_SECRET_INPUT
|
||||
echo ""
|
||||
AUTH_SECRET="${AUTH_SECRET_INPUT:-$EXISTING_AUTH_SECRET}"
|
||||
[[ -n "$AUTH_SECRET" ]] || die "Auth secret cannot be empty."
|
||||
|
||||
echo ""
|
||||
ok "All credentials collected."
|
||||
|
||||
# =============================================================================
|
||||
h1 "PHASE 2 — BACKUP"
|
||||
# =============================================================================
|
||||
|
||||
info "Creating backup at $BACKUP_DIR ..."
|
||||
mkdir -p "$BACKUP_DIR"
|
||||
|
||||
# Back up SQLite DB from inside the container
|
||||
info "Copying SQLite DB from container..."
|
||||
docker cp "$BACKEND":/data/data_entry_app.db "$BACKUP_DIR/data_entry_app.db"
|
||||
SQLITE_SIZE=$(du -sh "$BACKUP_DIR/data_entry_app.db" | cut -f1)
|
||||
ok "SQLite DB backed up ($SQLITE_SIZE) → $BACKUP_DIR/data_entry_app.db"
|
||||
|
||||
# Back up env and compose files
|
||||
[[ -f .env ]] && cp .env "$BACKUP_DIR/.env.original" && ok ".env backed up"
|
||||
[[ -f .env.alpha ]] && cp .env.alpha "$BACKUP_DIR/.env.alpha.original"
|
||||
cp "$OLD_COMPOSE" "$BACKUP_DIR/$OLD_COMPOSE.original" && ok "$OLD_COMPOSE backed up"
|
||||
|
||||
# Record current container state
|
||||
docker ps -a > "$BACKUP_DIR/containers_before.txt"
|
||||
docker volume ls > "$BACKUP_DIR/volumes_before.txt"
|
||||
ok "Container/volume state recorded"
|
||||
|
||||
# SQLite row counts for comparison later
|
||||
info "Recording SQLite row counts..."
|
||||
docker exec "$BACKEND" python -c "
|
||||
import sqlite3
|
||||
conn = sqlite3.connect('/data/data_entry_app.db')
|
||||
tables = conn.execute(\"SELECT name FROM sqlite_master WHERE type='table' ORDER BY name\").fetchall()
|
||||
print('SQLite row counts:')
|
||||
for (t,) in tables:
|
||||
try:
|
||||
count = conn.execute(f'SELECT COUNT(*) FROM {t}').fetchone()[0]
|
||||
print(f' {t}: {count}')
|
||||
except Exception as e:
|
||||
print(f' {t}: ERROR ({e})')
|
||||
conn.close()
|
||||
" 2>/dev/null | tee "$BACKUP_DIR/sqlite_row_counts.txt"
|
||||
|
||||
echo ""
|
||||
ok "Backup complete at $BACKUP_DIR"
|
||||
|
||||
# =============================================================================
|
||||
h1 "PHASE 3 — WRITE PRODUCTION CONFIG"
|
||||
# =============================================================================
|
||||
|
||||
DATABASE_URL="postgresql+psycopg://lean101:${POSTGRES_PASSWORD}@db:5432/lean101"
|
||||
|
||||
# Write .env.production
|
||||
info "Writing .env.production..."
|
||||
cat > "$WORK_DIR/.env.production" <<ENVEOF
|
||||
APP_NAME=${APP_NAME}
|
||||
DATABASE_URL=${DATABASE_URL}
|
||||
CLIENT_NAME=${CLIENT_NAME}
|
||||
CLIENT_EMAIL=${CLIENT_EMAIL}
|
||||
CLIENT_PASSWORD=${CLIENT_PASSWORD}
|
||||
CLIENT_TENANT_ID=${CLIENT_TENANT_ID}
|
||||
ADMIN_NAME=${ADMIN_NAME}
|
||||
ADMIN_EMAIL=${ADMIN_EMAIL}
|
||||
ADMIN_PASSWORD=${ADMIN_PASSWORD}
|
||||
AUTH_SECRET=${AUTH_SECRET}
|
||||
CORS_ALLOW_ORIGINS=${CORS_ALLOW_ORIGINS}
|
||||
ORIGIN=${ORIGIN}
|
||||
PUBLIC_API_BASE_URL=${PUBLIC_API_BASE_URL}
|
||||
PUBLIC_MIX_CALCULATOR_SESSION_HISTORY=${PUBLIC_MIX_CALC_HISTORY}
|
||||
PUBLIC_MIX_CALCULATOR_SESSION_SAVE=${PUBLIC_MIX_CALC_SAVE}
|
||||
CLIENTS_APP_PORT=${CLIENTS_APP_PORT}
|
||||
POSTGRES_USER=lean101
|
||||
POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
|
||||
POSTGRES_DB=lean101
|
||||
ENVEOF
|
||||
chmod 600 "$WORK_DIR/.env.production"
|
||||
ok ".env.production written (600 permissions)"
|
||||
|
||||
# Write docker-compose.production.yml if not present
|
||||
if [[ ! -f "$WORK_DIR/$NEW_COMPOSE" ]]; then
|
||||
info "Writing $NEW_COMPOSE..."
|
||||
cat > "$WORK_DIR/$NEW_COMPOSE" <<'COMPOSEEOF'
|
||||
services:
|
||||
db:
|
||||
container_name: lean101-clients-db
|
||||
image: postgres:16-alpine
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
POSTGRES_USER: ${POSTGRES_USER:-lean101}
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:?POSTGRES_PASSWORD is required}
|
||||
POSTGRES_DB: ${POSTGRES_DB:-lean101}
|
||||
volumes:
|
||||
- clients_db_data:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-lean101} -d ${POSTGRES_DB:-lean101}"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 10
|
||||
start_period: 15s
|
||||
|
||||
backend:
|
||||
container_name: lean101-clients-backend
|
||||
build:
|
||||
context: .
|
||||
dockerfile: backend/Dockerfile
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
APP_NAME: ${APP_NAME:-Lean 101 Clients API}
|
||||
DATABASE_URL: ${DATABASE_URL:-postgresql+psycopg://${POSTGRES_USER:-lean101}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_DB:-lean101}}
|
||||
CLIENT_NAME: ${CLIENT_NAME:-Hunter Premium Produce}
|
||||
CLIENT_EMAIL: ${CLIENT_EMAIL:-operator@example.com}
|
||||
CLIENT_PASSWORD: ${CLIENT_PASSWORD:?CLIENT_PASSWORD is required}
|
||||
CLIENT_TENANT_ID: ${CLIENT_TENANT_ID:-hunter-premium-produce}
|
||||
ADMIN_NAME: ${ADMIN_NAME:-Lean 101}
|
||||
ADMIN_EMAIL: ${ADMIN_EMAIL:-admin@lean101.local}
|
||||
ADMIN_PASSWORD: ${ADMIN_PASSWORD:?ADMIN_PASSWORD is required}
|
||||
AUTH_SECRET: ${AUTH_SECRET:?AUTH_SECRET is required}
|
||||
CORS_ALLOW_ORIGINS: ${CORS_ALLOW_ORIGINS:-https://clients.lean-101.com.au}
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://127.0.0.1:8000/health')"]
|
||||
interval: 30s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 25s
|
||||
|
||||
frontend:
|
||||
container_name: lean101-clients-frontend
|
||||
build:
|
||||
context: .
|
||||
dockerfile: frontend/Dockerfile
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
ORIGIN: ${ORIGIN:-https://clients.lean-101.com.au}
|
||||
PORT: 3000
|
||||
HOST: 0.0.0.0
|
||||
PUBLIC_API_BASE_URL: ${PUBLIC_API_BASE_URL:-https://clients.lean-101.com.au}
|
||||
INTERNAL_API_BASE_URL: ${INTERNAL_API_BASE_URL:-http://backend:8000}
|
||||
PUBLIC_API_PORT: ${PUBLIC_API_PORT:-8000}
|
||||
PUBLIC_MIX_CALCULATOR_SESSION_HISTORY: ${PUBLIC_MIX_CALCULATOR_SESSION_HISTORY:-false}
|
||||
PUBLIC_MIX_CALCULATOR_SESSION_SAVE: ${PUBLIC_MIX_CALCULATOR_SESSION_SAVE:-false}
|
||||
depends_on:
|
||||
backend:
|
||||
condition: service_healthy
|
||||
|
||||
nginx:
|
||||
container_name: lean101-clients
|
||||
image: nginx:1.27-alpine
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
frontend:
|
||||
condition: service_started
|
||||
backend:
|
||||
condition: service_healthy
|
||||
ports:
|
||||
- "${CLIENTS_APP_PORT:-8092}:80"
|
||||
volumes:
|
||||
- ./deploy/nginx/clients.lean-101.conf:/etc/nginx/conf.d/default.conf:ro
|
||||
|
||||
volumes:
|
||||
clients_db_data:
|
||||
COMPOSEEOF
|
||||
ok "$NEW_COMPOSE written"
|
||||
else
|
||||
ok "$NEW_COMPOSE already exists — not overwritten"
|
||||
fi
|
||||
|
||||
# =============================================================================
|
||||
h1 "PHASE 4 — START POSTGRES"
|
||||
# =============================================================================
|
||||
|
||||
info "Starting database service from $NEW_COMPOSE..."
|
||||
docker compose --env-file .env.production -f "$NEW_COMPOSE" up -d db
|
||||
|
||||
info "Waiting for Postgres to be healthy (up to 60s)..."
|
||||
for i in $(seq 1 30); do
|
||||
HEALTH=$(docker inspect --format='{{if .State.Health}}{{.State.Health.Status}}{{else}}starting{{end}}' "$PG_DB_SERVICE" 2>/dev/null || echo missing)
|
||||
if [[ "$HEALTH" == "healthy" ]]; then
|
||||
ok "Postgres is healthy"
|
||||
break
|
||||
fi
|
||||
printf " attempt %d/30: %s\n" "$i" "$HEALTH"
|
||||
sleep 2
|
||||
if [[ $i -eq 30 ]]; then
|
||||
docker logs "$PG_DB_SERVICE" --tail=20
|
||||
die "Postgres did not become healthy in time."
|
||||
fi
|
||||
done
|
||||
|
||||
# =============================================================================
|
||||
h1 "PHASE 5 — BOOTSTRAP POSTGRES SCHEMA"
|
||||
# =============================================================================
|
||||
|
||||
info "Running bootstrap_schema on Postgres via backend container..."
|
||||
|
||||
docker exec \
|
||||
-e DATABASE_URL="$DATABASE_URL" \
|
||||
"$BACKEND" \
|
||||
python -c "
|
||||
import os
|
||||
from sqlalchemy import create_engine
|
||||
from app.db.migrations import bootstrap_schema
|
||||
from app.db.session import Base
|
||||
import app.models # registers all models onto Base.metadata
|
||||
|
||||
url = os.environ['DATABASE_URL']
|
||||
print(f' Connecting to: {url.split(\"@\")[1] if \"@\" in url else url}')
|
||||
pg_engine = create_engine(url)
|
||||
result = bootstrap_schema(pg_engine, Base.metadata)
|
||||
print(f' Result: {result.summary()}')
|
||||
"
|
||||
|
||||
ok "Postgres schema bootstrapped"
|
||||
|
||||
# =============================================================================
h1 "PHASE 6 — MIGRATE DATA (SQLite → PostgreSQL)"
# =============================================================================

info "Writing Python migration script..."

# The migration must run *inside* the backend container (it needs the app's
# SQLAlchemy models), so write it to a temp file and docker-cp it in.
# The 'PYEOF' delimiter is quoted: nothing in the heredoc is shell-expanded.
cat > "$MIGRATE_SCRIPT" <<'PYEOF'
#!/usr/bin/env python3
"""
Migrate all rows from SQLite (/data/data_entry_app.db) to PostgreSQL.
Runs inside the lean101-clients-backend container.
"""
import os
import sys
from sqlalchemy import create_engine, text, inspect

SQLITE_URL = "sqlite:////data/data_entry_app.db"
PG_URL = os.environ["PG_DATABASE_URL"]

# FK-safe insertion order based on model relationships
TABLE_ORDER = [
    "roles",
    "permissions",
    "role_permissions",
    "users",
    "client_accounts",
    "client_users",
    "client_feature_access",
    "client_user_module_permissions",
    "client_access_audit_events",
    "raw_materials",
    "raw_material_price_versions",
    "mixes",
    "mix_ingredients",
    "process_cost_rules",
    "packaging_cost_rules",
    "freight_cost_rules",
    "products",
    "scenarios",
    "costing_results",
    "mix_calculator_sessions",
    "mix_calculator_session_lines",
]


def migrate():
    # Imported here (not at module top) so model registration only happens
    # when the script actually runs inside the app container.
    from app.db.session import Base
    import app.models  # registers all models onto Base.metadata

    src = create_engine(SQLITE_URL, connect_args={"check_same_thread": False})
    dst = create_engine(PG_URL)

    sqlite_tables = set(inspect(src).get_table_names())
    print(f"\nFound {len(sqlite_tables)} tables in SQLite: {', '.join(sorted(sqlite_tables))}\n")

    totals = {}

    # Single transaction for all inserts: the sys.exit(1) below rolls back
    # everything, leaving Postgres empty rather than half-migrated.
    with dst.begin() as dst_conn:
        # Disable FK checks for bulk insert
        dst_conn.execute(text("SET session_replication_role = 'replica'"))

        for table_name in TABLE_ORDER:
            if table_name not in sqlite_tables:
                print(f" SKIP {table_name:<45} (not in SQLite)")
                continue

            table = Base.metadata.tables.get(table_name)
            if table is None:
                print(f" SKIP {table_name:<45} (not in SQLAlchemy metadata)")
                continue

            try:
                with src.connect() as src_conn:
                    rows = src_conn.execute(table.select()).fetchall()
            except Exception as e:
                # A read failure skips the table but keeps migrating the rest.
                print(f" ERROR {table_name:<45} SQLite read failed: {e}")
                continue

            if not rows:
                print(f" SKIP {table_name:<45} (0 rows)")
                continue

            try:
                dst_conn.execute(table.insert(), [dict(row._mapping) for row in rows])
                print(f" OK {table_name:<45} {len(rows):>6} rows")
                totals[table_name] = len(rows)
            except Exception as e:
                # An insert failure aborts (and rolls back) the whole migration.
                print(f" ERROR {table_name:<45} Insert failed: {e}")
                sys.exit(1)

        # Re-enable FK checks
        dst_conn.execute(text("SET session_replication_role = 'origin'"))

    # Reset auto-increment sequences
    print("\n Resetting sequences...")
    with dst.begin() as conn:
        for table_name in TABLE_ORDER:
            try:
                # Table names come from the fixed TABLE_ORDER list above,
                # so this f-string SQL is not an injection risk.
                conn.execute(text(
                    f"SELECT setval("
                    f" pg_get_serial_sequence('{table_name}', 'id'),"
                    f" COALESCE((SELECT MAX(id) FROM {table_name}), 1)"
                    f")"
                ))
            except Exception:
                # Best-effort: tables without a serial 'id' column have no
                # sequence to reset.
                pass

    print(f"\n Migration complete. {sum(totals.values())} rows across {len(totals)} tables.")
    return totals


if __name__ == "__main__":
    migrate()
PYEOF

info "Copying migration script into backend container..."
docker cp "$MIGRATE_SCRIPT" "$BACKEND:$MIGRATE_SCRIPT"

info "Running migration..."
# Both variables point at the same Postgres URL: DATABASE_URL for the app's
# own configuration, PG_DATABASE_URL for the migration script above.
docker exec \
    -e DATABASE_URL="$DATABASE_URL" \
    -e PG_DATABASE_URL="$DATABASE_URL" \
    "$BACKEND" \
    python "$MIGRATE_SCRIPT"

ok "Data migration complete"
|
||||
|
||||
# =============================================================================
h1 "PHASE 7 — VERIFY MIGRATION"
# =============================================================================

# Show Postgres-side row counts so the operator can compare them against the
# SQLite counts captured during backup.
# NOTE: n_live_tup is a statistics-collector *estimate*, not COUNT(*); tiny
# discrepancies right after a bulk load can be normal.
info "Postgres row counts:"
docker exec "$PG_DB_SERVICE" psql -U lean101 -d lean101 -c "
SELECT
relname AS table_name,
n_live_tup AS row_count
FROM pg_stat_user_tables
ORDER BY n_live_tup DESC;
" | sed 's/^/ /'

echo ""
echo " SQLite row counts (from backup):"
# fixed: read the file directly instead of 'cat file | sed' (useless cat)
sed 's/^/ /' "$BACKUP_DIR/sqlite_row_counts.txt"

echo ""
# Manual gate: cutover only proceeds on the exact literal "yes".
prompt "Review the counts above. Do they match? Type 'yes' to proceed with cutover, anything else to abort:"
read -r CONFIRM
if [[ "$CONFIRM" != "yes" ]]; then
  echo ""
  warn "Cutover aborted by user. Old SQLite stack is still running."
  warn "Postgres is running but old stack is untouched."
  echo ""
  echo " To retry from data migration step:"
  echo " bash $0"
  echo ""
  echo " To tear down the Postgres container:"
  echo " docker compose -f $NEW_COMPOSE --env-file .env.production down"
  # Aborting here is a *successful* outcome (nothing was broken): exit 0.
  exit 0
fi
|
||||
|
||||
# =============================================================================
h1 "PHASE 8 — CUTOVER (Stop SQLite stack, Start Postgres stack)"
# =============================================================================

info "Stopping old SQLite stack (backend, frontend, nginx)..."
# '|| true': the containers may already be stopped/removed; that is fine.
docker stop "$BACKEND" "$FRONTEND" "$NGINX" 2>/dev/null || true
docker rm "$BACKEND" "$FRONTEND" "$NGINX" 2>/dev/null || true
ok "Old containers stopped and removed"

info "Starting full production stack from $NEW_COMPOSE..."
docker compose --env-file .env.production -f "$NEW_COMPOSE" up -d --build

# Poll the Docker healthcheck up to 30 times, 3s apart (~90s total).
info "Waiting for production backend to become healthy (up to 90s)..."
for (( i = 1; i <= 30; i++ )); do
  HEALTH=$(docker inspect --format='{{if .State.Health}}{{.State.Health.Status}}{{else}}starting{{end}}' "$BACKEND" 2>/dev/null || echo missing)
  if [[ "$HEALTH" == "healthy" ]]; then
    ok "Backend healthy"
    break
  fi
  printf " attempt %d/30: %s\n" "$i" "$HEALTH"
  # Fail fast on the final attempt instead of sleeping one more time
  # (the original slept 3s before checking for exhaustion).
  if (( i == 30 )); then
    docker logs "$BACKEND" --tail=30
    die "Backend did not become healthy. Check logs above. Run rollback if needed."
  fi
  sleep 3
done
|
||||
|
||||
# =============================================================================
h1 "PHASE 9 — FINAL VERIFICATION"
# =============================================================================

info "Stack status:"
docker compose --env-file .env.production -f "$NEW_COMPOSE" ps | sed 's/^/ /'

echo ""
info "Backend health endpoint:"
# Probe from *inside* the container so the check works even if the host
# port mapping is wrong.
docker exec "$BACKEND" python -c \
"import urllib.request; r=urllib.request.urlopen('http://127.0.0.1:8000/health'); print(' ', r.read().decode())"

echo ""
info "DATABASE_URL in production backend:"
# Mask the user:password portion before printing the connection string.
docker exec "$BACKEND" printenv DATABASE_URL | \
sed 's|://[^:]*:[^@]*@|://***:***@|g' | sed 's/^/ /'

echo ""
info "Final Postgres table row counts:"
# NOTE(review): n_live_tup is a stats estimate, not an exact COUNT(*).
docker exec "$PG_DB_SERVICE" psql -U lean101 -d lean101 -c "
SELECT relname AS table_name, n_live_tup AS rows
FROM pg_stat_user_tables
ORDER BY n_live_tup DESC;
" | sed 's/^/ /'

# Clean up migration script from container
docker exec "$BACKEND" rm -f "$MIGRATE_SCRIPT" 2>/dev/null || true
rm -f "$MIGRATE_SCRIPT"

# Remove the error trap since we succeeded
trap - ERR

sep
echo -e "${GREEN}${BOLD} Migration complete!${RESET}"
echo ""
echo -e " ${BOLD}Production is now running on PostgreSQL.${RESET}"
echo ""
echo " Backup preserved at: $BACKUP_DIR"
echo " SQLite volume (lean101-clients_clients_app_data) was NOT deleted."
echo " Once you're confident in production, you can remove it with:"
echo " docker volume rm lean101-clients_clients_app_data"
echo ""
echo " Next steps:"
echo " 1. Verify the app at https://clients.lean-101.com.au"
echo " 2. Bootstrap git repo: ./deploy/Deploy.ps1 -RemoteHost <ip> -Bootstrap -RepoUrl <url>"
echo " 3. Future deploys use: ./deploy/Deploy.ps1 -RemoteHost <ip>"
sep
echo ""
|
||||
@@ -0,0 +1,306 @@
|
||||
#!/usr/bin/env bash
|
||||
# =============================================================================
|
||||
# predeployment-check.sh
|
||||
#
|
||||
# Run on the production server to capture full environment state before making
|
||||
# changes. Safe — read-only, no writes, no restarts.
|
||||
#
|
||||
# Usage (from local machine):
|
||||
# ssh root@<host> 'bash -s' < deploy/predeployment-check.sh
|
||||
#
|
||||
# Usage (on the server directly):
|
||||
# bash /srv/lean101-clients/deploy/predeployment-check.sh
|
||||
# =============================================================================
|
||||
|
||||
# Strict mode: abort on errors, unset variables, and mid-pipeline failures.
set -euo pipefail

# Paths/files are overridable from the environment; the container names below
# are fixed by the production compose file.
REMOTE_PATH="${REMOTE_PATH:-/srv/lean101-clients}"
COMPOSE_FILE="${COMPOSE_FILE:-docker-compose.production.yml}"
ENV_FILE="${ENV_FILE:-.env.production}"
BACKEND_CONTAINER="lean101-clients-backend"
FRONTEND_CONTAINER="lean101-clients-frontend"
NGINX_CONTAINER="lean101-clients"
DB_CONTAINER="lean101-clients-db"

# Colours
RED='\033[0;31m'; YELLOW='\033[1;33m'; GREEN='\033[0;32m'
CYAN='\033[0;36m'; BOLD='\033[1m'; RESET='\033[0m'
|
||||
|
||||
# ── Pretty-print helpers ─────────────────────────────────────────────────────
# sep: horizontal rule · h1: section heading · ok/warn/fail: status lines ·
# kv: aligned key/value pair. printf '%b\n' expands the \033 colour escapes
# exactly as 'echo -e' did.
sep() {
  printf '%b\n' "${CYAN}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${RESET}"
}

h1() {
  sep
  printf '%b\n' "${BOLD}${CYAN} $1${RESET}"
  sep
}

ok()   { printf '%b\n' " ${GREEN}✔${RESET} $1"; }
warn() { printf '%b\n' " ${YELLOW}⚠${RESET} $1"; }
fail() { printf '%b\n' " ${RED}✘${RESET} $1"; }

kv()   { printf " %-30s %s\n" "$1" "$2"; }

# Run a command inside a container. Output passes through; a failing
# docker exec yields empty output and status 0, so callers can use the
# result unconditionally.
cexec() {
  local container=$1
  shift
  if ! docker exec "$container" "$@" 2>/dev/null; then
    return 0
  fi
}
|
||||
|
||||
echo ""
echo -e "${BOLD} LEAN 101 CLIENTS — Pre-Deployment Check${RESET}"
echo -e " Generated: $(date -u '+%Y-%m-%d %H:%M:%S UTC')"
echo -e " Host: $(hostname -f 2>/dev/null || hostname)"
echo ""

# =============================================================================
h1 "1. HOST SYSTEM"
# =============================================================================
# '. /etc/os-release' runs inside the $() subshell, so PRETTY_NAME cannot
# leak into the script's environment.
kv "OS:" "$(. /etc/os-release 2>/dev/null && echo "$PRETTY_NAME" || uname -s)"
kv "Kernel:" "$(uname -r)"
kv "Uptime:" "$(uptime -p 2>/dev/null || uptime)"

echo ""
echo " Disk usage (df -h):"
# fixed: under 'set -o pipefail' this pipeline used to abort the entire
# script when GNU df's --output is unavailable (e.g. BSD) or when grep
# filtered every line; a read-only diagnostic must stay best-effort.
# (also merged the two 'grep -v' passes into one)
df -h --output=source,size,used,avail,pcent,target 2>/dev/null | grep -vE 'tmpfs|udev' | sed 's/^/ /' || true

echo ""
echo " Memory (free -h):"
# free on Linux, vm_stat fallback on macOS; trailing '|| true' keeps pipefail
# from killing the script when neither tool is present.
free -h 2>/dev/null | sed 's/^/ /' || vm_stat 2>/dev/null | sed 's/^/ /' || true
|
||||
|
||||
# =============================================================================
h1 "2. DOCKER & COMPOSE"
# =============================================================================
# Report the Docker and Compose versions, or flag Docker as missing entirely.
if ! command -v docker >/dev/null 2>&1; then
  fail "Docker not found on PATH"
else
  ok "Docker installed"
  kv "Docker version:" "$(docker --version)"
  kv "Compose version:" "$(docker compose version 2>/dev/null || docker-compose --version 2>/dev/null || echo 'not found')"
fi
|
||||
|
||||
# =============================================================================
h1 "3. REPOSITORY STATE"
# =============================================================================
if [ -d "$REMOTE_PATH/.git" ]; then
  # NOTE(review): bare 'cd' — under 'set -e' a failure here aborts the whole
  # script; confirm that is acceptable for a read-only check.
  cd "$REMOTE_PATH"
  ok "Repo found at $REMOTE_PATH"
  kv "Branch:" "$(git rev-parse --abbrev-ref HEAD 2>/dev/null)"
  kv "Latest commit:" "$(git log -1 --format='%h %s (%cr)' 2>/dev/null)"
  kv "Commit author:" "$(git log -1 --format='%an <%ae>' 2>/dev/null)"
  kv "Remote origin:" "$(git remote get-url origin 2>/dev/null)"

  # Any --porcelain output at all means uncommitted changes.
  DIRTY=$(git status --porcelain 2>/dev/null)
  if [ -n "$DIRTY" ]; then
    warn "Working tree has uncommitted changes:"
    git status --short | sed 's/^/ /'
  else
    ok "Working tree clean"
  fi

  echo ""
  echo " Recent commits (last 5):"
  git log -5 --format=' %h %s (%cr)' 2>/dev/null
else
  fail "No git repo found at $REMOTE_PATH"
fi
|
||||
|
||||
# =============================================================================
h1 "4. ENVIRONMENT FILE"
# =============================================================================
ENV_PATH="$REMOTE_PATH/$ENV_FILE"
if [ -f "$ENV_PATH" ]; then
  ok "Env file: $ENV_PATH"
  # fixed: in 'stat -c ... | cut ... || stat -f ...' the BSD fallback could
  # never run — cut succeeds even on empty input, so the pipeline never
  # reported GNU stat's failure. Try each stat flavour on its own instead,
  # then strip fractional seconds (what the old 'cut -d. -f1' did).
  MODIFIED=$(stat -c '%y' "$ENV_PATH" 2>/dev/null || stat -f '%Sm' "$ENV_PATH" 2>/dev/null || true)
  kv "Modified:" "${MODIFIED%%.*}"
  kv "Permissions:" "$(stat -c '%a %U:%G' "$ENV_PATH" 2>/dev/null || stat -f '%Sp %Su:%Sg' "$ENV_PATH" 2>/dev/null)"
  echo ""
  echo " Env keys present (values redacted):"
  # fixed: '|| true' so an env file with no KEY=VALUE lines doesn't abort
  # the script via pipefail (grep exits 1 on no match).
  grep -v '^#' "$ENV_PATH" | grep '=' | cut -d'=' -f1 | sort | sed 's/^/ /' || true
else
  fail "Env file NOT found at $ENV_PATH"
fi
|
||||
|
||||
# =============================================================================
h1 "5. DOCKER STACK — CONTAINER STATUS"
# =============================================================================
cd "$REMOTE_PATH" 2>/dev/null || true

echo " All containers on this host:"
# '|| true': keep going even if the docker daemon is unreachable.
docker ps -a --format 'table {{.Names}}\t{{.Status}}\t{{.Image}}\t{{.Ports}}' | sed 's/^/ /' || true

echo ""
echo " Compose stack status ($COMPOSE_FILE):"
docker compose --env-file "$ENV_FILE" -f "$COMPOSE_FILE" ps 2>/dev/null | sed 's/^/ /' || \
  warn "Could not run docker compose ps (compose file or env file missing?)"

# Per-container inspection
for CNAME in "$BACKEND_CONTAINER" "$FRONTEND_CONTAINER" "$NGINX_CONTAINER" "$DB_CONTAINER"; do
  STATUS=$(docker inspect --format='{{.State.Status}}' "$CNAME" 2>/dev/null || echo "missing")
  HEALTH=$(docker inspect --format='{{if .State.Health}}{{.State.Health.Status}}{{else}}n/a{{end}}' "$CNAME" 2>/dev/null || echo "missing")
  IMAGE=$(docker inspect --format='{{.Config.Image}}' "$CNAME" 2>/dev/null || echo "unknown")
  # fixed: the old 'inspect | cut || echo unknown' never printed "unknown"
  # (cut succeeds on empty input, masking inspect's failure). Fall back
  # first, then strip the fractional-seconds suffix from the timestamp.
  STARTED=$(docker inspect --format='{{.State.StartedAt}}' "$CNAME" 2>/dev/null || echo "unknown")
  STARTED=${STARTED%%.*}

  echo ""
  echo -e " ${BOLD}$CNAME${RESET}"
  kv " Status:" "$STATUS"
  kv " Health:" "$HEALTH"
  kv " Image:" "$IMAGE"
  kv " Started:" "$STARTED"

  # Live resource usage only makes sense for running containers.
  if [ "$STATUS" = "running" ]; then
    MEM=$(docker stats "$CNAME" --no-stream --format '{{.MemUsage}}' 2>/dev/null || echo "n/a")
    CPU=$(docker stats "$CNAME" --no-stream --format '{{.CPUPerc}}' 2>/dev/null || echo "n/a")
    kv " CPU / Mem:" "$CPU / $MEM"
  fi
done
|
||||
|
||||
# =============================================================================
h1 "6. DOCKER VOLUMES"
# =============================================================================
echo " Named volumes:"
docker volume ls --format 'table {{.Name}}\t{{.Driver}}\t{{.Mountpoint}}' | grep -i lean | sed 's/^/ /' || echo " (none matching lean)"

echo ""
echo " All volumes:"
docker volume ls --format ' {{.Name}}' | head -30 || true

# Postgres data volume size — the volume name depends on the compose project
# prefix, so try both candidates.
PG_MOUNT=$(docker volume inspect lean101-clients_clients_db_data --format '{{.Mountpoint}}' 2>/dev/null || \
  docker volume inspect clients_db_data --format '{{.Mountpoint}}' 2>/dev/null || true)
if [ -n "$PG_MOUNT" ]; then
  # fixed: 'du | cut || echo n/a' could never print "n/a" (cut succeeds on
  # empty input, masking du's failure); guard the pipeline and substitute
  # the placeholder afterwards instead.
  PG_SIZE=$(du -sh "$PG_MOUNT" 2>/dev/null | cut -f1 || true)
  [ -n "$PG_SIZE" ] || PG_SIZE="n/a"
  kv " Postgres volume size:" "$PG_SIZE ($PG_MOUNT)"
fi
|
||||
|
||||
# =============================================================================
h1 "7. POSTGRESQL — DATABASE STATE"
# =============================================================================
DB_STATUS=$(docker inspect --format='{{.State.Status}}' "$DB_CONTAINER" 2>/dev/null || echo "missing")

if [ "$DB_STATUS" = "running" ]; then
  ok "Database container is running"

  # Credentials come from the container's own environment; fall back to the
  # deployment defaults if printenv fails.
  PG_USER=$(docker exec "$DB_CONTAINER" printenv POSTGRES_USER 2>/dev/null || echo "lean101")
  PG_DB=$(docker exec "$DB_CONTAINER" printenv POSTGRES_DB 2>/dev/null || echo "lean101")

  echo ""
  echo " PostgreSQL version:"
  cexec "$DB_CONTAINER" psql -U "$PG_USER" -d "$PG_DB" -c "SELECT version();" | sed 's/^/ /'

  echo ""
  echo " Database size:"
  cexec "$DB_CONTAINER" psql -U "$PG_USER" -d "$PG_DB" \
    -c "SELECT pg_database.datname, pg_size_pretty(pg_database_size(pg_database.datname)) AS size FROM pg_database ORDER BY pg_database_size(pg_database.datname) DESC;" \
    | sed 's/^/ /'

  echo ""
  echo " Tables and row counts:"
  # n_live_tup is a stats-collector estimate, not an exact COUNT(*).
  cexec "$DB_CONTAINER" psql -U "$PG_USER" -d "$PG_DB" -c "
SELECT
schemaname,
relname AS table_name,
n_live_tup AS row_count,
pg_size_pretty(pg_total_relation_size(quote_ident(schemaname)||'.'||quote_ident(relname))) AS total_size
FROM pg_stat_user_tables
ORDER BY n_live_tup DESC;
" | sed 's/^/ /'

  echo ""
  echo " Alembic migration state:"
  # fixed: the original chained 'cexec ... || cexec ... || warn', but cexec
  # ends in '|| true' and therefore never fails — the warn branch was dead
  # code and a missing table produced silent empty output. Run docker exec
  # directly so the failure is observable (pipefail propagates it), and drop
  # the no-op 'LEFT JOIN (SELECT true AS is_current)' decoration.
  if ! docker exec "$DB_CONTAINER" psql -U "$PG_USER" -d "$PG_DB" -c \
      "SELECT version_num FROM alembic_version;" 2>/dev/null | sed 's/^/ /'; then
    warn "alembic_version table not found or inaccessible"
  fi

  echo ""
  echo " Active connections:"
  # $PG_DB is interpolated into the SQL; it comes from the container's own
  # POSTGRES_DB env var, not from user input.
  cexec "$DB_CONTAINER" psql -U "$PG_USER" -d "$PG_DB" -c "
SELECT count(*) AS total_connections,
sum(CASE WHEN state = 'active' THEN 1 ELSE 0 END) AS active
FROM pg_stat_activity
WHERE datname = '$PG_DB';
" | sed 's/^/ /'
else
  fail "Database container status: $DB_STATUS — skipping DB checks"
fi
|
||||
|
||||
# =============================================================================
h1 "8. BACKEND — HEALTH & API"
# =============================================================================
if [ "$(docker inspect --format='{{.State.Status}}' "$BACKEND_CONTAINER" 2>/dev/null)" = "running" ]; then
  echo " Health endpoint (internal):"
  # fixed: cexec never fails (it ends in '|| true'), so the '|| echo failed'
  # fallback was dead code and a down endpoint printed an empty line. Call
  # docker exec directly so failure actually yields "failed".
  HEALTH_RESP=$(docker exec "$BACKEND_CONTAINER" python -c \
    "import urllib.request, json; r=urllib.request.urlopen('http://127.0.0.1:8000/health'); print(r.read().decode())" 2>/dev/null || echo "failed")
  echo " $HEALTH_RESP"

  echo ""
  echo " Python / package versions inside backend container:"
  # fixed: cexec discards the container's stderr — where some Python builds
  # write --version — so the caller's '2>&1' came too late; merge streams
  # via a direct docker exec instead.
  docker exec "$BACKEND_CONTAINER" python --version 2>&1 | sed 's/^/ /' || true
  # '|| true': grep exits 1 when none of the packages are installed, which
  # would otherwise kill the script via pipefail.
  cexec "$BACKEND_CONTAINER" pip show fastapi sqlalchemy alembic psycopg 2>/dev/null | \
    grep -E '^(Name|Version):' | sed 's/^/ /' || true

  echo ""
  echo " DATABASE_URL in backend (secret masked):"
  cexec "$BACKEND_CONTAINER" printenv DATABASE_URL | \
    sed 's|://[^:]*:[^@]*@|://***:***@|g' | sed 's/^/ /'
else
  fail "Backend container not running — skipping API checks"
fi
|
||||
|
||||
# =============================================================================
h1 "9. NGINX — CONFIGURATION"
# =============================================================================
if [ "$(docker inspect --format='{{.State.Status}}' "$NGINX_CONTAINER" 2>/dev/null)" = "running" ]; then
  ok "Nginx container is running"
  echo ""
  echo " nginx -t output:"
  # fixed: 'cexec ... 2>&1' redirected the *caller's* stderr, but cexec had
  # already discarded the container's stderr — which is where nginx -t writes
  # its report — so this section printed nothing. Run docker exec directly
  # and merge streams before anything can drop them.
  docker exec "$NGINX_CONTAINER" nginx -t 2>&1 | sed 's/^/ /' || true
  echo ""
  echo " Active listening ports (nginx container):"
  docker exec "$NGINX_CONTAINER" sh -c 'cat /etc/nginx/conf.d/default.conf 2>/dev/null | grep -E "listen|server_name|proxy_pass"' | sed 's/^/ /' || true
  echo ""
  echo " Host port binding:"
  # '|| true': don't abort the report if the container has no published ports.
  docker port "$NGINX_CONTAINER" | sed 's/^/ /' || true
else
  warn "Nginx container not running"
fi
|
||||
|
||||
# =============================================================================
h1 "10. RECENT CONTAINER LOGS (last 30 lines each)"
# =============================================================================
# Dump the log tail of every stack container; containers that don't exist
# are listed with their status only.
for container in "$BACKEND_CONTAINER" "$FRONTEND_CONTAINER" "$NGINX_CONTAINER" "$DB_CONTAINER"; do
  state=$(docker inspect --format='{{.State.Status}}' "$container" 2>/dev/null) || state="missing"
  echo ""
  echo -e " ${BOLD}$container${RESET} [$state]"
  [ "$state" = "missing" ] || docker logs "$container" --tail=30 2>&1 | sed 's/^/ /'
done
|
||||
|
||||
# =============================================================================
h1 "11. HOST NETWORK & FIREWALL"
# =============================================================================
echo " Listening ports on host (ss -tlnp):"
# Fallback chain: ss (modern) -> netstat (legacy) -> warn. A missing tool
# fails its pipeline (pipefail) and control falls through to the next option.
ss -tlnp 2>/dev/null | sed 's/^/ /' || \
netstat -tlnp 2>/dev/null | sed 's/^/ /' || \
warn "Neither ss nor netstat available"

echo ""
echo " UFW status:"
ufw status 2>/dev/null | sed 's/^/ /' || warn "UFW not available"

echo ""
echo " Docker network list:"
docker network ls | sed 's/^/ /'
|
||||
|
||||
# =============================================================================
h1 "12. COMPOSE FILE DIFF (local vs what would deploy)"
# =============================================================================
# Confirm the compose file a deploy would use actually exists on disk.
compose_path="$REMOTE_PATH/$COMPOSE_FILE"
echo " Compose file on server ($COMPOSE_FILE):"
if [ ! -f "$compose_path" ]; then
  fail "$COMPOSE_FILE not found at $REMOTE_PATH"
else
  kv " Modified:" "$(stat -c '%y' "$compose_path" 2>/dev/null | cut -d'.' -f1)"
  kv " Size:" "$(wc -l < "$compose_path") lines"
  ok "File exists"
fi
|
||||
|
||||
# =============================================================================
# Footer — remind the operator what to do with the captured report.
sep
echo -e "${BOLD} Check complete — paste this output into Claude for analysis.${RESET}"
sep
echo ""
|
||||
Reference in New Issue
Block a user