feat: first client onboarding — fix env, add loyalty admin, dev infra-check
- Fix .env: wizamart→orion/wizard.lu, Redis port→6380 - Fix .env.example: orion.lu→wizard.lu domain references - Add create_loyalty_admin() to init_production.py (platform-scoped admin for rewardflow.lu) - Add `make infra-check` target running verify-server.sh - Split verify-server.sh into dev/prod modes (auto-detected from DEBUG flag) - Dev checks: .env config, PostgreSQL, Redis, health endpoint, migrations - Remove stale init.sql volume mount from docker-compose.yml Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
14
.env.example
14
.env.example
@@ -24,7 +24,7 @@ DATABASE_URL=postgresql://orion_user:secure_password@localhost:5432/orion_db
|
|||||||
# =============================================================================
|
# =============================================================================
|
||||||
# These are used by init_production.py to create the platform admin
|
# These are used by init_production.py to create the platform admin
|
||||||
# ⚠️ CHANGE THESE IN PRODUCTION!
|
# ⚠️ CHANGE THESE IN PRODUCTION!
|
||||||
ADMIN_EMAIL=admin@orion.lu
|
ADMIN_EMAIL=admin@wizard.lu
|
||||||
ADMIN_USERNAME=admin
|
ADMIN_USERNAME=admin
|
||||||
ADMIN_PASSWORD=change-me-in-production
|
ADMIN_PASSWORD=change-me-in-production
|
||||||
ADMIN_FIRST_NAME=Platform
|
ADMIN_FIRST_NAME=Platform
|
||||||
@@ -49,9 +49,9 @@ API_PORT=8000
|
|||||||
# Development
|
# Development
|
||||||
DOCUMENTATION_URL=http://localhost:8001
|
DOCUMENTATION_URL=http://localhost:8001
|
||||||
# Staging
|
# Staging
|
||||||
# DOCUMENTATION_URL=https://staging-docs.orion.lu
|
# DOCUMENTATION_URL=https://staging-docs.wizard.lu
|
||||||
# Production
|
# Production
|
||||||
# DOCUMENTATION_URL=https://docs.orion.lu
|
# DOCUMENTATION_URL=https://docs.wizard.lu
|
||||||
|
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
# RATE LIMITING
|
# RATE LIMITING
|
||||||
@@ -70,7 +70,7 @@ LOG_FILE=logs/app.log
|
|||||||
# PLATFORM DOMAIN CONFIGURATION
|
# PLATFORM DOMAIN CONFIGURATION
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
# Your main platform domain
|
# Your main platform domain
|
||||||
PLATFORM_DOMAIN=orion.lu
|
PLATFORM_DOMAIN=wizard.lu
|
||||||
|
|
||||||
# Custom domain features
|
# Custom domain features
|
||||||
# Enable/disable custom domains
|
# Enable/disable custom domains
|
||||||
@@ -85,7 +85,7 @@ SSL_PROVIDER=letsencrypt
|
|||||||
AUTO_PROVISION_SSL=False
|
AUTO_PROVISION_SSL=False
|
||||||
|
|
||||||
# DNS verification
|
# DNS verification
|
||||||
DNS_VERIFICATION_PREFIX=_orion-verify
|
DNS_VERIFICATION_PREFIX=_wizard-verify
|
||||||
DNS_VERIFICATION_TTL=3600
|
DNS_VERIFICATION_TTL=3600
|
||||||
|
|
||||||
# =============================================================================
|
# =============================================================================
|
||||||
@@ -103,8 +103,8 @@ STRIPE_TRIAL_DAYS=30
|
|||||||
# =============================================================================
|
# =============================================================================
|
||||||
# Provider: smtp, sendgrid, mailgun, ses
|
# Provider: smtp, sendgrid, mailgun, ses
|
||||||
EMAIL_PROVIDER=smtp
|
EMAIL_PROVIDER=smtp
|
||||||
EMAIL_FROM_ADDRESS=noreply@orion.lu
|
EMAIL_FROM_ADDRESS=noreply@wizard.lu
|
||||||
EMAIL_FROM_NAME=Orion
|
EMAIL_FROM_NAME=Wizard
|
||||||
EMAIL_REPLY_TO=
|
EMAIL_REPLY_TO=
|
||||||
|
|
||||||
# SMTP Settings (used when EMAIL_PROVIDER=smtp)
|
# SMTP Settings (used when EMAIL_PROVIDER=smtp)
|
||||||
|
|||||||
7
Makefile
7
Makefile
@@ -1,7 +1,7 @@
|
|||||||
# Orion Multi-Tenant E-Commerce Platform Makefile
|
# Orion Multi-Tenant E-Commerce Platform Makefile
|
||||||
# Cross-platform compatible (Windows & Linux)
|
# Cross-platform compatible (Windows & Linux)
|
||||||
|
|
||||||
.PHONY: install install-dev install-docs install-all dev test test-coverage lint format check docker-build docker-up docker-down clean help tailwind-install tailwind-dev tailwind-build tailwind-watch arch-check arch-check-file arch-check-object test-db-up test-db-down test-db-reset test-db-status celery-worker celery-beat celery-dev flower celery-status celery-purge urls
|
.PHONY: install install-dev install-docs install-all dev test test-coverage lint format check docker-build docker-up docker-down clean help tailwind-install tailwind-dev tailwind-build tailwind-watch arch-check arch-check-file arch-check-object test-db-up test-db-down test-db-reset test-db-status celery-worker celery-beat celery-dev flower celery-status celery-purge urls infra-check
|
||||||
|
|
||||||
# Detect OS
|
# Detect OS
|
||||||
ifeq ($(OS),Windows_NT)
|
ifeq ($(OS),Windows_NT)
|
||||||
@@ -497,6 +497,10 @@ urls-prod:
|
|||||||
urls-check:
|
urls-check:
|
||||||
@$(PYTHON) scripts/show_urls.py --check
|
@$(PYTHON) scripts/show_urls.py --check
|
||||||
|
|
||||||
|
infra-check:
|
||||||
|
@echo "Running infrastructure verification..."
|
||||||
|
bash scripts/verify-server.sh
|
||||||
|
|
||||||
check-env:
|
check-env:
|
||||||
@echo "Checking Python environment..."
|
@echo "Checking Python environment..."
|
||||||
@echo "Detected OS: $(DETECTED_OS)"
|
@echo "Detected OS: $(DETECTED_OS)"
|
||||||
@@ -602,6 +606,7 @@ help:
|
|||||||
@echo " urls-dev - Show development URLs only"
|
@echo " urls-dev - Show development URLs only"
|
||||||
@echo " urls-prod - Show production URLs only"
|
@echo " urls-prod - Show production URLs only"
|
||||||
@echo " urls-check - Check dev URLs with curl (server must be running)"
|
@echo " urls-check - Check dev URLs with curl (server must be running)"
|
||||||
|
@echo " infra-check - Run infrastructure verification (verify-server.sh)"
|
||||||
@echo " clean - Clean build artifacts"
|
@echo " clean - Clean build artifacts"
|
||||||
@echo " check-env - Check Python environment and OS"
|
@echo " check-env - Check Python environment and OS"
|
||||||
@echo ""
|
@echo ""
|
||||||
|
|||||||
@@ -9,7 +9,6 @@ services:
|
|||||||
POSTGRES_PASSWORD: secure_password
|
POSTGRES_PASSWORD: secure_password
|
||||||
volumes:
|
volumes:
|
||||||
- postgres_data:/var/lib/postgresql/data
|
- postgres_data:/var/lib/postgresql/data
|
||||||
- ./init.sql:/docker-entrypoint-initdb.d/init.sql
|
|
||||||
ports:
|
ports:
|
||||||
- "5432:5432"
|
- "5432:5432"
|
||||||
mem_limit: 512m
|
mem_limit: 512m
|
||||||
|
|||||||
@@ -132,6 +132,44 @@ def create_admin_user(db: Session, auth_manager: AuthManager) -> User:
|
|||||||
return admin
|
return admin
|
||||||
|
|
||||||
|
|
||||||
|
def create_loyalty_admin(db: Session, auth_manager: AuthManager, loyalty_platform: Platform) -> User | None:
|
||||||
|
"""Create a platform admin for the Loyalty+ platform."""
|
||||||
|
from app.modules.tenancy.models.admin_platform import AdminPlatform
|
||||||
|
|
||||||
|
email = "admin@rewardflow.lu"
|
||||||
|
existing = db.execute(select(User).where(User.email == email)).scalar_one_or_none()
|
||||||
|
if existing:
|
||||||
|
print_warning(f"Loyalty admin already exists: {email}")
|
||||||
|
return existing
|
||||||
|
|
||||||
|
password = "admin123" # Dev default, change in production
|
||||||
|
admin = User(
|
||||||
|
username="loyalty_admin",
|
||||||
|
email=email,
|
||||||
|
hashed_password=auth_manager.hash_password(password),
|
||||||
|
role="admin",
|
||||||
|
is_super_admin=False,
|
||||||
|
first_name="Loyalty",
|
||||||
|
last_name="Administrator",
|
||||||
|
is_active=True,
|
||||||
|
is_email_verified=True,
|
||||||
|
)
|
||||||
|
db.add(admin)
|
||||||
|
db.flush()
|
||||||
|
|
||||||
|
# Assign to loyalty platform
|
||||||
|
assignment = AdminPlatform(
|
||||||
|
user_id=admin.id,
|
||||||
|
platform_id=loyalty_platform.id,
|
||||||
|
is_active=True,
|
||||||
|
)
|
||||||
|
db.add(assignment)
|
||||||
|
db.flush()
|
||||||
|
|
||||||
|
print_success(f"Created loyalty admin: {email} (password: {password})")
|
||||||
|
return admin
|
||||||
|
|
||||||
|
|
||||||
def create_default_platforms(db: Session) -> list[Platform]:
|
def create_default_platforms(db: Session) -> list[Platform]:
|
||||||
"""Create all default platforms (OMS, Main, Loyalty+)."""
|
"""Create all default platforms (OMS, Main, Loyalty+)."""
|
||||||
|
|
||||||
@@ -559,6 +597,14 @@ def initialize_production(db: Session, auth_manager: AuthManager):
|
|||||||
print_step(3, "Creating default platforms...")
|
print_step(3, "Creating default platforms...")
|
||||||
platforms = create_default_platforms(db)
|
platforms = create_default_platforms(db)
|
||||||
|
|
||||||
|
# Step 3b: Create loyalty platform admin
|
||||||
|
print_step("3b", "Creating loyalty platform admin...")
|
||||||
|
loyalty_platform = next((p for p in platforms if p.code == "loyalty"), None)
|
||||||
|
if loyalty_platform:
|
||||||
|
create_loyalty_admin(db, auth_manager, loyalty_platform)
|
||||||
|
else:
|
||||||
|
print_warning("Loyalty platform not found, skipping loyalty admin creation")
|
||||||
|
|
||||||
# Step 4: Set up default role templates
|
# Step 4: Set up default role templates
|
||||||
print_step(4, "Setting up role templates...")
|
print_step(4, "Setting up role templates...")
|
||||||
create_default_role_templates(db)
|
create_default_role_templates(db)
|
||||||
@@ -606,9 +652,15 @@ def print_summary(db: Session):
|
|||||||
print("\n" + "─" * 70)
|
print("\n" + "─" * 70)
|
||||||
print("🔐 ADMIN CREDENTIALS")
|
print("🔐 ADMIN CREDENTIALS")
|
||||||
print("─" * 70)
|
print("─" * 70)
|
||||||
print(" URL: /admin/login")
|
print(" Super Admin (all platforms):")
|
||||||
print(f" Username: {settings.admin_username}")
|
print(" URL: /admin/login")
|
||||||
print(f" Password: {settings.admin_password}") # noqa: SEC021
|
print(f" Username: {settings.admin_username}")
|
||||||
|
print(f" Password: {settings.admin_password}") # noqa: SEC021
|
||||||
|
print()
|
||||||
|
print(" Loyalty Platform Admin (loyalty only):")
|
||||||
|
print(" URL: /admin/login")
|
||||||
|
print(" Username: loyalty_admin")
|
||||||
|
print(" Password: admin123")
|
||||||
print("─" * 70)
|
print("─" * 70)
|
||||||
|
|
||||||
# Show security warnings if in production
|
# Show security warnings if in production
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
# verify-server.sh — Check all Orion infrastructure is properly deployed
|
# verify-server.sh — Check Orion infrastructure health
|
||||||
# Run on the production server: bash scripts/verify-server.sh
|
# Automatically detects dev vs production from .env DEBUG flag.
|
||||||
|
# Override with: bash scripts/verify-server.sh --dev | --prod
|
||||||
set -uo pipefail
|
set -uo pipefail
|
||||||
|
|
||||||
PASS=0
|
PASS=0
|
||||||
@@ -14,238 +15,403 @@ warn() { echo " [WARN] $1"; WARN=$((WARN + 1)); }
|
|||||||
section() { echo ""; echo "=== $1 ==="; }
|
section() { echo ""; echo "=== $1 ==="; }
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
section "1. fail2ban"
|
# Detect environment
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
|
|
||||||
if systemctl is-active --quiet fail2ban; then
|
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||||
pass "fail2ban service running"
|
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
|
||||||
else
|
ENV_FILE="$PROJECT_DIR/.env"
|
||||||
fail "fail2ban service not running"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if sudo fail2ban-client status sshd &>/dev/null; then
|
MODE=""
|
||||||
pass "SSH jail active"
|
if [ "${1:-}" = "--dev" ]; then
|
||||||
else
|
MODE="dev"
|
||||||
fail "SSH jail not active"
|
elif [ "${1:-}" = "--prod" ]; then
|
||||||
fi
|
MODE="prod"
|
||||||
|
elif [ -f "$ENV_FILE" ]; then
|
||||||
if sudo fail2ban-client status caddy-auth &>/dev/null; then
|
if grep -qE '^DEBUG=True' "$ENV_FILE" 2>/dev/null; then
|
||||||
pass "Caddy auth jail active"
|
MODE="dev"
|
||||||
else
|
|
||||||
fail "Caddy auth jail not active — deploy /etc/fail2ban/jail.d/caddy.conf"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
section "2. Unattended Upgrades"
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
if dpkg -l unattended-upgrades &>/dev/null; then
|
|
||||||
pass "unattended-upgrades package installed"
|
|
||||||
else
|
|
||||||
fail "unattended-upgrades not installed"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -f /etc/apt/apt.conf.d/20auto-upgrades ]; then
|
|
||||||
if grep -q 'Unattended-Upgrade "1"' /etc/apt/apt.conf.d/20auto-upgrades; then
|
|
||||||
pass "Automatic upgrades enabled"
|
|
||||||
else
|
else
|
||||||
fail "Automatic upgrades not enabled in 20auto-upgrades"
|
MODE="prod"
|
||||||
fi
|
fi
|
||||||
else
|
else
|
||||||
fail "/etc/apt/apt.conf.d/20auto-upgrades missing"
|
# No .env found — assume production (server deployment)
|
||||||
|
MODE="prod"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
echo "==========================================="
|
||||||
section "3. Docker Containers"
|
echo " Orion Infrastructure Check (${MODE})"
|
||||||
# ---------------------------------------------------------------------------
|
echo "==========================================="
|
||||||
|
|
||||||
ORION_DIR="${ORION_DIR:-$HOME/apps/orion}"
|
# Helper: read a value from .env
|
||||||
|
env_val() {
|
||||||
|
grep -E "^${1}=" "$ENV_FILE" 2>/dev/null | head -1 | cut -d= -f2-
|
||||||
|
}
|
||||||
|
|
||||||
EXPECTED_CONTAINERS="db redis api celery-worker celery-beat flower prometheus grafana node-exporter cadvisor alertmanager"
|
# ===========================================================================
|
||||||
for name in $EXPECTED_CONTAINERS; do
|
# DEVELOPMENT CHECKS
|
||||||
container=$(docker compose --profile full -f "$ORION_DIR/docker-compose.yml" ps --format '{{.Name}}' 2>/dev/null | grep "$name" || true)
|
# ===========================================================================
|
||||||
if [ -n "$container" ]; then
|
|
||||||
state=$(docker inspect --format='{{.State.Status}}' "$container" 2>/dev/null || echo "unknown")
|
if [ "$MODE" = "dev" ]; then
|
||||||
if [ "$state" = "running" ]; then
|
|
||||||
pass "Container $name: running"
|
# -----------------------------------------------------------------------
|
||||||
|
section "1. .env Configuration"
|
||||||
|
# -----------------------------------------------------------------------
|
||||||
|
|
||||||
|
if [ -f "$ENV_FILE" ]; then
|
||||||
|
pass ".env file exists"
|
||||||
|
else
|
||||||
|
fail ".env file not found — copy from .env.example"
|
||||||
|
fi
|
||||||
|
|
||||||
|
REQUIRED_KEYS="DATABASE_URL REDIS_URL JWT_SECRET_KEY ADMIN_EMAIL PLATFORM_DOMAIN"
|
||||||
|
for key in $REQUIRED_KEYS; do
|
||||||
|
val=$(env_val "$key")
|
||||||
|
if [ -n "$val" ]; then
|
||||||
|
pass "$key is set"
|
||||||
else
|
else
|
||||||
fail "Container $name: $state (expected running)"
|
fail "$key is missing or empty"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Check for stale wizamart references
|
||||||
|
if grep -qiE 'wizamart' "$ENV_FILE" 2>/dev/null; then
|
||||||
|
fail "Stale 'wizamart' references found in .env"
|
||||||
|
else
|
||||||
|
pass "No stale wizamart references"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------------
|
||||||
|
section "2. PostgreSQL"
|
||||||
|
# -----------------------------------------------------------------------
|
||||||
|
|
||||||
|
DB_URL=$(env_val "DATABASE_URL")
|
||||||
|
if [ -n "$DB_URL" ]; then
|
||||||
|
# Extract host and port from DATABASE_URL
|
||||||
|
DB_HOST=$(echo "$DB_URL" | sed -E 's|.*@([^:/]+).*|\1|')
|
||||||
|
DB_PORT=$(echo "$DB_URL" | sed -E 's|.*:([0-9]+)/.*|\1|')
|
||||||
|
DB_PORT="${DB_PORT:-5432}"
|
||||||
|
|
||||||
|
if command -v pg_isready &>/dev/null; then
|
||||||
|
if pg_isready -h "$DB_HOST" -p "$DB_PORT" &>/dev/null; then
|
||||||
|
pass "PostgreSQL reachable at $DB_HOST:$DB_PORT"
|
||||||
|
else
|
||||||
|
fail "PostgreSQL not reachable at $DB_HOST:$DB_PORT — start with: docker compose up -d db"
|
||||||
|
fi
|
||||||
|
elif (echo > /dev/tcp/"$DB_HOST"/"$DB_PORT") &>/dev/null; then
|
||||||
|
pass "PostgreSQL port open at $DB_HOST:$DB_PORT"
|
||||||
|
else
|
||||||
|
fail "PostgreSQL not reachable at $DB_HOST:$DB_PORT — start with: docker compose up -d db"
|
||||||
fi
|
fi
|
||||||
else
|
else
|
||||||
fail "Container $name: not found"
|
fail "DATABASE_URL not set"
|
||||||
fi
|
fi
|
||||||
done
|
|
||||||
|
|
||||||
# Check for healthy status on containers with healthchecks
|
# -----------------------------------------------------------------------
|
||||||
for name in db redis api celery-worker; do
|
section "3. Redis"
|
||||||
container=$(docker compose --profile full -f "$ORION_DIR/docker-compose.yml" ps --format '{{.Name}}' 2>/dev/null | grep "$name" || true)
|
# -----------------------------------------------------------------------
|
||||||
if [ -n "$container" ]; then
|
|
||||||
health=$(docker inspect --format='{{.State.Health.Status}}' "$container" 2>/dev/null || echo "none")
|
REDIS_URL=$(env_val "REDIS_URL")
|
||||||
if [ "$health" = "healthy" ]; then
|
if [ -n "$REDIS_URL" ]; then
|
||||||
pass "Container $name: healthy"
|
# Extract host and port from redis://host:port/db
|
||||||
elif [ "$health" = "none" ]; then
|
REDIS_HOST=$(echo "$REDIS_URL" | sed -E 's|redis://([^:/]+).*|\1|')
|
||||||
warn "Container $name: no healthcheck configured"
|
REDIS_PORT=$(echo "$REDIS_URL" | sed -E 's|redis://[^:]+:([0-9]+).*|\1|')
|
||||||
|
REDIS_PORT="${REDIS_PORT:-6379}"
|
||||||
|
|
||||||
|
if redis-cli -h "$REDIS_HOST" -p "$REDIS_PORT" ping &>/dev/null; then
|
||||||
|
pass "Redis reachable at $REDIS_HOST:$REDIS_PORT"
|
||||||
else
|
else
|
||||||
fail "Container $name: $health (expected healthy)"
|
fail "Redis not reachable at $REDIS_HOST:$REDIS_PORT — start with: docker compose up -d redis"
|
||||||
fi
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
section "4. Caddy"
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
if systemctl is-active --quiet caddy; then
|
|
||||||
pass "Caddy service running"
|
|
||||||
else
|
|
||||||
fail "Caddy service not running"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -f /etc/caddy/Caddyfile ]; then
|
|
||||||
pass "Caddyfile exists"
|
|
||||||
else
|
|
||||||
fail "Caddyfile not found"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
section "5. Backup Timer"
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
if systemctl is-active --quiet orion-backup.timer; then
|
|
||||||
pass "Backup timer active"
|
|
||||||
else
|
|
||||||
fail "Backup timer not active — enable with: sudo systemctl enable --now orion-backup.timer"
|
|
||||||
fi
|
|
||||||
|
|
||||||
LATEST_BACKUP=$(find "$HOME/backups/orion/daily/" -name "*.sql.gz" -mtime -2 2>/dev/null | head -1)
|
|
||||||
if [ -n "$LATEST_BACKUP" ]; then
|
|
||||||
pass "Recent backup found: $(basename "$LATEST_BACKUP")"
|
|
||||||
else
|
|
||||||
warn "No backup found from the last 2 days"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
section "6. Gitea Runner"
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
if systemctl is-active --quiet gitea-runner; then
|
|
||||||
pass "Gitea runner service running"
|
|
||||||
else
|
|
||||||
fail "Gitea runner service not running"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
section "7. SSL Certificates"
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
DOMAINS="wizard.lu api.wizard.lu git.wizard.lu omsflow.lu rewardflow.lu"
|
|
||||||
for domain in $DOMAINS; do
|
|
||||||
expiry=$(echo | openssl s_client -servername "$domain" -connect "$domain":443 2>/dev/null | openssl x509 -noout -enddate 2>/dev/null | cut -d= -f2)
|
|
||||||
if [ -n "$expiry" ]; then
|
|
||||||
expiry_epoch=$(date -d "$expiry" +%s 2>/dev/null || echo 0)
|
|
||||||
now_epoch=$(date +%s)
|
|
||||||
days_left=$(( (expiry_epoch - now_epoch) / 86400 ))
|
|
||||||
if [ "$days_left" -gt 14 ]; then
|
|
||||||
pass "SSL $domain: valid ($days_left days remaining)"
|
|
||||||
elif [ "$days_left" -gt 0 ]; then
|
|
||||||
warn "SSL $domain: expiring soon ($days_left days remaining)"
|
|
||||||
else
|
|
||||||
fail "SSL $domain: expired"
|
|
||||||
fi
|
fi
|
||||||
else
|
else
|
||||||
fail "SSL $domain: could not check certificate"
|
fail "REDIS_URL not set"
|
||||||
fi
|
fi
|
||||||
done
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# -----------------------------------------------------------------------
|
||||||
section "8. Flower Password"
|
section "4. Dev Server Health"
|
||||||
# ---------------------------------------------------------------------------
|
# -----------------------------------------------------------------------
|
||||||
|
|
||||||
if [ -f "$ORION_DIR/.env" ]; then
|
# Dev server runs on port 9999 (make dev)
|
||||||
FLOWER_PW=$(grep -E '^FLOWER_PASSWORD=' "$ORION_DIR/.env" 2>/dev/null | cut -d= -f2- || echo "")
|
DEV_PORT=9999
|
||||||
if [ -z "$FLOWER_PW" ] || [ "$FLOWER_PW" = "changeme" ]; then
|
HEALTH_URL="http://localhost:$DEV_PORT/health"
|
||||||
fail "Flower password is default or empty — change FLOWER_PASSWORD in .env"
|
READY_URL="http://localhost:$DEV_PORT/health/ready"
|
||||||
|
|
||||||
|
status=$(curl -s -o /dev/null -w '%{http_code}' "$HEALTH_URL" 2>/dev/null || echo "000")
|
||||||
|
if [ "$status" = "200" ]; then
|
||||||
|
pass "/health endpoint: HTTP 200 (port $DEV_PORT)"
|
||||||
|
elif [ "$status" = "000" ]; then
|
||||||
|
warn "Dev server not running on port $DEV_PORT — start with: make dev"
|
||||||
else
|
else
|
||||||
pass "Flower password changed from default"
|
fail "/health endpoint: HTTP $status (port $DEV_PORT)"
|
||||||
fi
|
fi
|
||||||
else
|
|
||||||
warn ".env file not found at $ORION_DIR/.env"
|
if [ "$status" = "200" ]; then
|
||||||
|
ready_response=$(curl -s "$READY_URL" 2>/dev/null || echo "")
|
||||||
|
if echo "$ready_response" | grep -q '"healthy"'; then
|
||||||
|
pass "/health/ready: healthy"
|
||||||
|
else
|
||||||
|
fail "/health/ready: not healthy"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------------
|
||||||
|
section "5. Migrations"
|
||||||
|
# -----------------------------------------------------------------------
|
||||||
|
|
||||||
|
if command -v python3 &>/dev/null; then
|
||||||
|
alembic_output=$(cd "$PROJECT_DIR" && python3 -m alembic current 2>&1 || echo "ERROR")
|
||||||
|
if echo "$alembic_output" | grep -q "head"; then
|
||||||
|
pass "Alembic migrations at head"
|
||||||
|
elif echo "$alembic_output" | grep -q "ERROR"; then
|
||||||
|
fail "Could not check migration status"
|
||||||
|
else
|
||||||
|
warn "Migrations may not be at head — run: make migrate-up"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
warn "python3 not found, cannot check migrations"
|
||||||
|
fi
|
||||||
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# ===========================================================================
|
||||||
section "9. DNS Resolution"
|
# PRODUCTION CHECKS
|
||||||
# ---------------------------------------------------------------------------
|
# ===========================================================================
|
||||||
|
|
||||||
EXPECTED_DOMAINS="wizard.lu api.wizard.lu git.wizard.lu grafana.wizard.lu flower.wizard.lu omsflow.lu rewardflow.lu"
|
if [ "$MODE" = "prod" ]; then
|
||||||
for domain in $EXPECTED_DOMAINS; do
|
|
||||||
resolved=$(dig +short "$domain" A 2>/dev/null | head -1)
|
ORION_DIR="${ORION_DIR:-$HOME/apps/orion}"
|
||||||
if [ -n "$resolved" ]; then
|
|
||||||
pass "DNS $domain: $resolved"
|
# -----------------------------------------------------------------------
|
||||||
|
section "1. fail2ban"
|
||||||
|
# -----------------------------------------------------------------------
|
||||||
|
|
||||||
|
if systemctl is-active --quiet fail2ban; then
|
||||||
|
pass "fail2ban service running"
|
||||||
else
|
else
|
||||||
fail "DNS $domain: no A record found"
|
fail "fail2ban service not running"
|
||||||
fi
|
fi
|
||||||
done
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
if sudo fail2ban-client status sshd &>/dev/null; then
|
||||||
section "10. Health Endpoints"
|
pass "SSH jail active"
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
HEALTH_URL="http://localhost:8001/health"
|
|
||||||
READY_URL="http://localhost:8001/health/ready"
|
|
||||||
|
|
||||||
status=$(curl -s -o /dev/null -w '%{http_code}' "$HEALTH_URL" 2>/dev/null || echo "000")
|
|
||||||
if [ "$status" = "200" ]; then
|
|
||||||
pass "/health endpoint: HTTP 200"
|
|
||||||
else
|
|
||||||
fail "/health endpoint: HTTP $status"
|
|
||||||
fi
|
|
||||||
|
|
||||||
ready_response=$(curl -s "$READY_URL" 2>/dev/null || echo "")
|
|
||||||
if echo "$ready_response" | grep -q '"healthy"'; then
|
|
||||||
pass "/health/ready: healthy"
|
|
||||||
# Check individual checks
|
|
||||||
if echo "$ready_response" | grep -q '"database"'; then
|
|
||||||
pass "/health/ready: database check registered"
|
|
||||||
else
|
else
|
||||||
warn "/health/ready: database check not found"
|
fail "SSH jail not active"
|
||||||
fi
|
fi
|
||||||
if echo "$ready_response" | grep -q '"redis"'; then
|
|
||||||
pass "/health/ready: redis check registered"
|
if sudo fail2ban-client status caddy-auth &>/dev/null; then
|
||||||
|
pass "Caddy auth jail active"
|
||||||
else
|
else
|
||||||
warn "/health/ready: redis check not found"
|
fail "Caddy auth jail not active — deploy /etc/fail2ban/jail.d/caddy.conf"
|
||||||
fi
|
fi
|
||||||
else
|
|
||||||
fail "/health/ready: not healthy — $ready_response"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# -----------------------------------------------------------------------
|
||||||
section "11. Prometheus Targets"
|
section "2. Unattended Upgrades"
|
||||||
# ---------------------------------------------------------------------------
|
# -----------------------------------------------------------------------
|
||||||
|
|
||||||
targets=$(curl -s http://localhost:9090/api/v1/targets 2>/dev/null || echo "")
|
if dpkg -l unattended-upgrades &>/dev/null; then
|
||||||
if [ -n "$targets" ]; then
|
pass "unattended-upgrades package installed"
|
||||||
up_count=$(echo "$targets" | grep -o '"health":"up"' | wc -l)
|
|
||||||
down_count=$(echo "$targets" | grep -o '"health":"down"' | wc -l)
|
|
||||||
if [ "$down_count" -eq 0 ] && [ "$up_count" -gt 0 ]; then
|
|
||||||
pass "Prometheus: all $up_count targets up"
|
|
||||||
elif [ "$down_count" -gt 0 ]; then
|
|
||||||
fail "Prometheus: $down_count target(s) down ($up_count up)"
|
|
||||||
else
|
else
|
||||||
warn "Prometheus: no targets found"
|
fail "unattended-upgrades not installed"
|
||||||
fi
|
fi
|
||||||
else
|
|
||||||
fail "Prometheus: could not reach API at localhost:9090"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
if [ -f /etc/apt/apt.conf.d/20auto-upgrades ]; then
|
||||||
section "12. Grafana"
|
if grep -q 'Unattended-Upgrade "1"' /etc/apt/apt.conf.d/20auto-upgrades; then
|
||||||
# ---------------------------------------------------------------------------
|
pass "Automatic upgrades enabled"
|
||||||
|
else
|
||||||
|
fail "Automatic upgrades not enabled in 20auto-upgrades"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
fail "/etc/apt/apt.conf.d/20auto-upgrades missing"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------------
|
||||||
|
section "3. Docker Containers"
|
||||||
|
# -----------------------------------------------------------------------
|
||||||
|
|
||||||
|
EXPECTED_CONTAINERS="db redis api celery-worker celery-beat flower prometheus grafana node-exporter cadvisor alertmanager"
|
||||||
|
for name in $EXPECTED_CONTAINERS; do
|
||||||
|
container=$(docker compose --profile full -f "$ORION_DIR/docker-compose.yml" ps --format '{{.Name}}' 2>/dev/null | grep "$name" || true)
|
||||||
|
if [ -n "$container" ]; then
|
||||||
|
state=$(docker inspect --format='{{.State.Status}}' "$container" 2>/dev/null || echo "unknown")
|
||||||
|
if [ "$state" = "running" ]; then
|
||||||
|
pass "Container $name: running"
|
||||||
|
else
|
||||||
|
fail "Container $name: $state (expected running)"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
fail "Container $name: not found"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Check for healthy status on containers with healthchecks
|
||||||
|
for name in db redis api celery-worker; do
|
||||||
|
container=$(docker compose --profile full -f "$ORION_DIR/docker-compose.yml" ps --format '{{.Name}}' 2>/dev/null | grep "$name" || true)
|
||||||
|
if [ -n "$container" ]; then
|
||||||
|
health=$(docker inspect --format='{{.State.Health.Status}}' "$container" 2>/dev/null || echo "none")
|
||||||
|
if [ "$health" = "healthy" ]; then
|
||||||
|
pass "Container $name: healthy"
|
||||||
|
elif [ "$health" = "none" ]; then
|
||||||
|
warn "Container $name: no healthcheck configured"
|
||||||
|
else
|
||||||
|
fail "Container $name: $health (expected healthy)"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------------
|
||||||
|
section "4. Caddy"
|
||||||
|
# -----------------------------------------------------------------------
|
||||||
|
|
||||||
|
if systemctl is-active --quiet caddy; then
|
||||||
|
pass "Caddy service running"
|
||||||
|
else
|
||||||
|
fail "Caddy service not running"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -f /etc/caddy/Caddyfile ]; then
|
||||||
|
pass "Caddyfile exists"
|
||||||
|
else
|
||||||
|
fail "Caddyfile not found"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------------
|
||||||
|
section "5. Backup Timer"
|
||||||
|
# -----------------------------------------------------------------------
|
||||||
|
|
||||||
|
if systemctl is-active --quiet orion-backup.timer; then
|
||||||
|
pass "Backup timer active"
|
||||||
|
else
|
||||||
|
fail "Backup timer not active — enable with: sudo systemctl enable --now orion-backup.timer"
|
||||||
|
fi
|
||||||
|
|
||||||
|
LATEST_BACKUP=$(find "$HOME/backups/orion/daily/" -name "*.sql.gz" -mtime -2 2>/dev/null | head -1)
|
||||||
|
if [ -n "$LATEST_BACKUP" ]; then
|
||||||
|
pass "Recent backup found: $(basename "$LATEST_BACKUP")"
|
||||||
|
else
|
||||||
|
warn "No backup found from the last 2 days"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------------
section "6. Gitea Runner"
# -----------------------------------------------------------------------

# CI runner for the self-hosted Gitea instance.
if ! systemctl is-active --quiet gitea-runner; then
    fail "Gitea runner service not running"
else
    pass "Gitea runner service running"
fi

# -----------------------------------------------------------------------
section "7. SSL Certificates"
# -----------------------------------------------------------------------

DOMAINS="wizard.lu api.wizard.lu git.wizard.lu omsflow.lu rewardflow.lu"
for domain in $DOMAINS; do
    # Pull the notAfter date from the live certificate; the `echo |`
    # closes the TLS session right after the handshake.
    not_after=$(echo | openssl s_client -servername "$domain" -connect "$domain":443 2>/dev/null | openssl x509 -noout -enddate 2>/dev/null | cut -d= -f2)
    if [ -z "$not_after" ]; then
        fail "SSL $domain: could not check certificate"
        continue
    fi

    # GNU date parses the openssl date format; fall back to epoch 0 so
    # an unparseable date reads as expired instead of aborting the loop.
    cert_epoch=$(date -d "$not_after" +%s 2>/dev/null || echo 0)
    days_left=$(( (cert_epoch - $(date +%s)) / 86400 ))
    if [ "$days_left" -gt 14 ]; then
        pass "SSL $domain: valid ($days_left days remaining)"
    elif [ "$days_left" -gt 0 ]; then
        warn "SSL $domain: expiring soon ($days_left days remaining)"
    else
        fail "SSL $domain: expired"
    fi
done

# -----------------------------------------------------------------------
section "8. Flower Password"
# -----------------------------------------------------------------------

# Flower is protected by basic auth taken from .env; a default or empty
# password is effectively no protection at all.
if [ ! -f "$ORION_DIR/.env" ]; then
    warn ".env file not found at $ORION_DIR/.env"
else
    FLOWER_PW=$(grep -E '^FLOWER_PASSWORD=' "$ORION_DIR/.env" 2>/dev/null | cut -d= -f2- || echo "")
    case "$FLOWER_PW" in
        ""|changeme)
            fail "Flower password is default or empty — change FLOWER_PASSWORD in .env"
            ;;
        *)
            pass "Flower password changed from default"
            ;;
    esac
fi

# -----------------------------------------------------------------------
section "9. DNS Resolution"
# -----------------------------------------------------------------------

EXPECTED_DOMAINS="wizard.lu api.wizard.lu git.wizard.lu grafana.wizard.lu flower.wizard.lu omsflow.lu rewardflow.lu"
for domain in $EXPECTED_DOMAINS; do
    # `dig +short ... A` also prints any CNAME chain before the final A
    # record, so keep only dotted-quad lines — otherwise a dangling CNAME
    # (with no A record behind it) would be reported as a pass, and the
    # printed "resolved" value could be a hostname rather than an IP.
    resolved=$(dig +short "$domain" A 2>/dev/null | grep -E '^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$' | head -1)
    if [ -n "$resolved" ]; then
        pass "DNS $domain: $resolved"
    else
        fail "DNS $domain: no A record found"
    fi
done

# -----------------------------------------------------------------------
section "10. Health Endpoints"
# -----------------------------------------------------------------------

HEALTH_URL="http://localhost:8001/health"
READY_URL="http://localhost:8001/health/ready"

# Liveness: anything other than a plain 200 is a failure.
status=$(curl -s -o /dev/null -w '%{http_code}' "$HEALTH_URL" 2>/dev/null || echo "000")
case "$status" in
    200) pass "/health endpoint: HTTP 200" ;;
    *)   fail "/health endpoint: HTTP $status" ;;
esac

# Readiness: the JSON body should report overall health plus one entry
# per registered dependency check (database, redis).
ready_response=$(curl -s "$READY_URL" 2>/dev/null || echo "")
if ! echo "$ready_response" | grep -q '"healthy"'; then
    fail "/health/ready: not healthy — $ready_response"
else
    pass "/health/ready: healthy"
    for dep in database redis; do
        if echo "$ready_response" | grep -q "\"$dep\""; then
            pass "/health/ready: $dep check registered"
        else
            warn "/health/ready: $dep check not found"
        fi
    done
fi

# -----------------------------------------------------------------------
section "11. Prometheus Targets"
# -----------------------------------------------------------------------

# Scrape-target state comes from the Prometheus HTTP API; counting the
# serialized "health" fields avoids a jq dependency.
targets=$(curl -s http://localhost:9090/api/v1/targets 2>/dev/null || echo "")
if [ -z "$targets" ]; then
    fail "Prometheus: could not reach API at localhost:9090"
else
    up_count=$(echo "$targets" | grep -o '"health":"up"' | wc -l)
    down_count=$(echo "$targets" | grep -o '"health":"down"' | wc -l)
    if [ "$down_count" -gt 0 ]; then
        fail "Prometheus: $down_count target(s) down ($up_count up)"
    elif [ "$up_count" -gt 0 ]; then
        pass "Prometheus: all $up_count targets up"
    else
        warn "Prometheus: no targets found"
    fi
fi

# -----------------------------------------------------------------------
section "12. Grafana"
# -----------------------------------------------------------------------

# Merge residue had duplicated this exact check verbatim and left an
# unmatched trailing `fi`, which made the script un-parseable and
# double-reported Grafana; a single check is sufficient.
grafana_status=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:3001/api/health 2>/dev/null || echo "000")
if [ "$grafana_status" = "200" ]; then
    pass "Grafana: accessible (HTTP 200)"
else
    fail "Grafana: HTTP $grafana_status (expected 200)"
fi

|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
|
|||||||
Reference in New Issue
Block a user