chore: PostgreSQL migration compatibility and infrastructure improvements

Database & Migrations:
- Update all Alembic migrations for PostgreSQL compatibility
- Remove SQLite-specific syntax (AUTOINCREMENT, etc.)
- Add database utility helpers for PostgreSQL operations
- Fix services to use PostgreSQL-compatible queries

Documentation:
- Add comprehensive Docker deployment guide
- Add production deployment documentation
- Add infrastructure architecture documentation
- Update database setup guide for PostgreSQL-only
- Expand troubleshooting guide

Architecture & Validation:
- Add migration.yaml rules for SQL compatibility checking
- Enhance validate_architecture.py with migration validation
- Update architecture rules to validate Alembic migrations

Development:
- Fix duplicate install-all target in Makefile
- Add Celery/Redis validation to install.py script
- Add docker-compose.test.yml for CI testing
- Add squash_migrations.py utility script
- Update tests for PostgreSQL compatibility
- Improve test fixtures in conftest.py

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-11 17:52:28 +01:00
parent 2792414395
commit 3614d448e4
45 changed files with 3179 additions and 507 deletions

View File

@@ -47,6 +47,7 @@ includes:
- language.yaml
- quality.yaml
- money.yaml
- migration.yaml
# ============================================================================
# VALIDATION SEVERITY LEVELS
@@ -75,7 +76,7 @@ ignore:
- "**/test_*.py"
- "**/__pycache__/**"
- "**/migrations/**"
- "**/alembic/versions/**"
# Note: alembic/versions is NOT ignored - we validate migrations for PostgreSQL compatibility
- "**/node_modules/**"
- "**/.venv/**"
- "**/venv/**"

View File

@@ -0,0 +1,68 @@
# Architecture Rules - Database Migration Rules
# Rules for alembic/versions/*.py migration files
#
# NOTE: This project uses PostgreSQL only. SQLite is not supported.
# PostgreSQL supports native ALTER TABLE operations, so batch_alter_table
# is not required. These rules ensure migrations are clean and reversible.
migration_rules:
- id: "MIG-001"
name: "Migrations should have meaningful downgrade functions"
severity: "warning"
description: |
All migrations should have a proper downgrade() function that reverses
the upgrade changes. This enables rolling back deployments and resetting
development databases.
WRONG:
def downgrade() -> None:
pass
CORRECT:
def downgrade() -> None:
op.drop_column('users', 'email')
pattern:
file_pattern: "alembic/versions/**/*.py"
check: "migration_has_downgrade"
- id: "MIG-002"
name: "Foreign keys must have explicit constraint names"
severity: "error"
description: |
Foreign key constraints must have explicit names for easier debugging
and consistent schema management.
WRONG:
op.create_foreign_key(None, 'other_table', ...)
CORRECT:
op.create_foreign_key('fk_table_column', 'other_table', ...)
pattern:
file_pattern: "alembic/versions/**/*.py"
anti_patterns:
- "create_foreign_key\\(None,"
- id: "MIG-003"
name: "Indexes must have explicit names"
severity: "warning"
description: |
Index names should be explicit for clarity and easier debugging.
CORRECT:
op.create_index('idx_users_email', 'users', ['email'])
pattern:
file_pattern: "alembic/versions/**/*.py"
check: "migration_explicit_index_names"
- id: "MIG-004"
name: "Avoid batch_alter_table (not needed for PostgreSQL)"
severity: "info"
description: |
This project uses PostgreSQL only. The batch_alter_table context manager
is a SQLite workaround and is not needed for PostgreSQL.
PostgreSQL supports native ALTER TABLE operations.
pattern:
file_pattern: "alembic/versions/**/*.py"
note: "batch_alter_table is acceptable but unnecessary for PostgreSQL"

View File

@@ -38,9 +38,24 @@ ruff:
pytest:
stage: test
image: python:${PYTHON_VERSION}
services:
- name: postgres:15
alias: postgres
variables:
# PostgreSQL service configuration
POSTGRES_DB: wizamart_test
POSTGRES_USER: test_user
POSTGRES_PASSWORD: test_password
# Application database URL for tests
TEST_DATABASE_URL: "postgresql://test_user:test_password@postgres:5432/wizamart_test"
# Satisfy import-time DATABASE_URL validation (tests read TEST_DATABASE_URL)
DATABASE_URL: "postgresql://test_user:test_password@postgres:5432/wizamart_test"
before_script:
- pip install uv
- uv sync --frozen
# Wait for PostgreSQL to be ready
- apt-get update && apt-get install -y postgresql-client
- for i in $(seq 1 30); do pg_isready -h postgres -U test_user && break || sleep 1; done
script:
- .venv/bin/python -m pytest tests/ -v --tb=short
coverage: '/TOTAL.*\s+(\d+%)/'
@@ -57,6 +72,9 @@ pytest:
architecture:
stage: test
image: python:${PYTHON_VERSION}
variables:
# Set DATABASE_URL to satisfy validation (not actually used by validator)
DATABASE_URL: "postgresql://dummy:dummy@localhost:5432/dummy"
before_script:
- pip install uv
- uv sync --frozen
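The pg_isready loop works, but it installs postgresql-client via apt-get on every pipeline run. A Python-only readiness wait would avoid that; a sketch, assuming psycopg2 is already among the app's dependencies:

import os
import time

import psycopg2  # assumed available, since the app itself talks to PostgreSQL


def wait_for_postgres(timeout: float = 30.0) -> None:
    """Poll TEST_DATABASE_URL until PostgreSQL accepts connections or time runs out."""
    url = os.environ["TEST_DATABASE_URL"]
    deadline = time.monotonic() + timeout
    while True:
        try:
            psycopg2.connect(url).close()
            return
        except psycopg2.OperationalError:
            if time.monotonic() >= deadline:
                raise
            time.sleep(1)


if __name__ == "__main__":
    wait_for_postgres()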

View File

@@ -21,10 +21,10 @@ export PYTHONPATH := $(shell pwd)
# INSTALLATION & SETUP
# =============================================================================
install-all:
install:
$(PIP) install -r requirements.txt
install-dev: install-all
install-dev: install
$(PIP) install -r requirements-dev.txt
install-test:

View File

@@ -9,7 +9,7 @@ from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import sqlite
# Removed: from sqlalchemy.dialects import sqlite (using sa.JSON for PostgreSQL)
# revision identifiers, used by Alembic.
revision: str = '204273a59d73'
@@ -34,8 +34,8 @@ def upgrade() -> None:
sa.Column('orders_skipped', sa.Integer(), nullable=True),
sa.Column('products_matched', sa.Integer(), nullable=True),
sa.Column('products_not_found', sa.Integer(), nullable=True),
sa.Column('confirmed_stats', sqlite.JSON(), nullable=True),
sa.Column('declined_stats', sqlite.JSON(), nullable=True),
sa.Column('confirmed_stats', sa.JSON(), nullable=True),
sa.Column('declined_stats', sa.JSON(), nullable=True),
sa.Column('error_message', sa.Text(), nullable=True),
sa.Column('started_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('completed_at', sa.DateTime(timezone=True), nullable=True),
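sa.JSON compiles to the plain JSON type on PostgreSQL. If JSONB semantics (GIN indexing, containment operators) were wanted instead, the dialect type could be used directly; a sketch, not something this commit does:

import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

# JSONB columns support GIN indexes and the @> containment operator; plain JSON does not.
sa.Column('confirmed_stats', postgresql.JSONB(), nullable=True)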

View File

@@ -10,7 +10,7 @@ from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import sqlite
# Removed: from sqlalchemy.dialects import sqlite (using sa.JSON for PostgreSQL)
# revision identifiers, used by Alembic.
revision: str = '2953ed10d22c'
@@ -36,7 +36,7 @@ def upgrade() -> None:
sa.Column('products_limit', sa.Integer(), nullable=True),
sa.Column('team_members', sa.Integer(), nullable=True),
sa.Column('order_history_months', sa.Integer(), nullable=True),
sa.Column('features', sqlite.JSON(), nullable=True),
sa.Column('features', sa.JSON(), nullable=True),
sa.Column('stripe_product_id', sa.String(length=100), nullable=True),
sa.Column('stripe_price_monthly_id', sa.String(length=100), nullable=True),
sa.Column('stripe_price_annual_id', sa.String(length=100), nullable=True),
@@ -91,7 +91,7 @@ def upgrade() -> None:
sa.Column('invoice_pdf_url', sa.String(length=500), nullable=True),
sa.Column('hosted_invoice_url', sa.String(length=500), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('line_items', sqlite.JSON(), nullable=True),
sa.Column('line_items', sa.JSON(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['vendor_id'], ['vendors.id'], ),
@@ -182,7 +182,7 @@ def upgrade() -> None:
sa.column('products_limit', sa.Integer),
sa.column('team_members', sa.Integer),
sa.column('order_history_months', sa.Integer),
sa.column('features', sqlite.JSON),
sa.column('features', sa.JSON),
sa.column('display_order', sa.Integer),
sa.column('is_active', sa.Boolean),
sa.column('is_public', sa.Boolean),

View File

@@ -28,7 +28,7 @@ def upgrade() -> None:
sa.Column(
"timestamp",
sa.DateTime(timezone=True),
server_default=sa.text("(datetime('now'))"),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
),
sa.Column("total_files", sa.Integer(), nullable=True),
@@ -64,13 +64,13 @@ def upgrade() -> None:
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("(datetime('now'))"),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("(datetime('now'))"),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
),
sa.PrimaryKeyConstraint("id"),
@@ -107,7 +107,7 @@ def upgrade() -> None:
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("(datetime('now'))"),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
),
sa.ForeignKeyConstraint(
@@ -170,7 +170,7 @@ def upgrade() -> None:
sa.Column(
"assigned_at",
sa.DateTime(timezone=True),
server_default=sa.text("(datetime('now'))"),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
),
sa.Column("assigned_by", sa.Integer(), nullable=True),
@@ -215,7 +215,7 @@ def upgrade() -> None:
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("(datetime('now'))"),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
),
sa.ForeignKeyConstraint(
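A raw CURRENT_TIMESTAMP string is fine on PostgreSQL; the same default can also be written without raw SQL via SQLAlchemy's func.now(), which compiles to now() on PostgreSQL. A sketch of the equivalent column definition:

import sqlalchemy as sa

sa.Column(
    "created_at",
    sa.DateTime(timezone=True),
    server_default=sa.func.now(),  # compiles to now() on PostgreSQL
    nullable=False,
)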

View File

@@ -29,13 +29,12 @@ depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# Use batch mode for SQLite compatibility
with op.batch_alter_table("products", schema=None) as batch_op:
# Rename product_id to vendor_sku for clarity
batch_op.alter_column(
"product_id",
new_column_name="vendor_sku",
)
# Rename product_id to vendor_sku for clarity (PostgreSQL supports direct rename)
op.alter_column(
"products",
"product_id",
new_column_name="vendor_sku",
)
# Add new override fields
op.add_column(
@@ -119,10 +118,9 @@ def downgrade() -> None:
op.drop_column("products", "primary_image_url")
op.drop_column("products", "brand")
# Use batch mode for SQLite compatibility
with op.batch_alter_table("products", schema=None) as batch_op:
# Rename vendor_sku back to product_id
batch_op.alter_column(
"vendor_sku",
new_column_name="product_id",
)
# Rename vendor_sku back to product_id (PostgreSQL supports direct rename)
op.alter_column(
"products",
"vendor_sku",
new_column_name="product_id",
)

View File

@@ -29,11 +29,11 @@ def upgrade() -> None:
sa.Column('show_in_legal', sa.Boolean(), nullable=True, default=False)
)
# Set default value for existing rows
op.execute("UPDATE content_pages SET show_in_legal = 0 WHERE show_in_legal IS NULL")
# Set default value for existing rows (PostgreSQL uses true/false for boolean)
op.execute("UPDATE content_pages SET show_in_legal = false WHERE show_in_legal IS NULL")
# Set privacy and terms pages to show in legal by default
op.execute("UPDATE content_pages SET show_in_legal = 1 WHERE slug IN ('privacy', 'terms')")
op.execute("UPDATE content_pages SET show_in_legal = true WHERE slug IN ('privacy', 'terms')")
def downgrade() -> None:

View File

@@ -9,6 +9,7 @@ from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
from sqlalchemy import text
# revision identifiers, used by Alembic.
@@ -19,59 +20,60 @@ depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
# Create email_templates table
op.create_table('email_templates',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('code', sa.String(length=100), nullable=False),
sa.Column('language', sa.String(length=5), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('category', sa.String(length=50), nullable=False),
sa.Column('subject', sa.String(length=500), nullable=False),
sa.Column('body_html', sa.Text(), nullable=False),
sa.Column('body_text', sa.Text(), nullable=True),
sa.Column('variables', sa.Text(), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sqlite_autoincrement=True
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('code', sa.String(length=100), nullable=False),
sa.Column('language', sa.String(length=5), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('category', sa.String(length=50), nullable=False),
sa.Column('subject', sa.String(length=500), nullable=False),
sa.Column('body_html', sa.Text(), nullable=False),
sa.Column('body_text', sa.Text(), nullable=True),
sa.Column('variables', sa.Text(), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint('id'),
)
op.create_index(op.f('ix_email_templates_category'), 'email_templates', ['category'], unique=False)
op.create_index(op.f('ix_email_templates_code'), 'email_templates', ['code'], unique=False)
op.create_index(op.f('ix_email_templates_id'), 'email_templates', ['id'], unique=False)
# Create email_logs table
op.create_table('email_logs',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('template_code', sa.String(length=100), nullable=True),
sa.Column('template_id', sa.Integer(), nullable=True),
sa.Column('recipient_email', sa.String(length=255), nullable=False),
sa.Column('recipient_name', sa.String(length=255), nullable=True),
sa.Column('subject', sa.String(length=500), nullable=False),
sa.Column('body_html', sa.Text(), nullable=True),
sa.Column('body_text', sa.Text(), nullable=True),
sa.Column('from_email', sa.String(length=255), nullable=False),
sa.Column('from_name', sa.String(length=255), nullable=True),
sa.Column('reply_to', sa.String(length=255), nullable=True),
sa.Column('status', sa.String(length=20), nullable=False),
sa.Column('sent_at', sa.DateTime(), nullable=True),
sa.Column('delivered_at', sa.DateTime(), nullable=True),
sa.Column('opened_at', sa.DateTime(), nullable=True),
sa.Column('clicked_at', sa.DateTime(), nullable=True),
sa.Column('error_message', sa.Text(), nullable=True),
sa.Column('retry_count', sa.Integer(), nullable=False),
sa.Column('provider', sa.String(length=50), nullable=True),
sa.Column('provider_message_id', sa.String(length=255), nullable=True),
sa.Column('vendor_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('related_type', sa.String(length=50), nullable=True),
sa.Column('related_id', sa.Integer(), nullable=True),
sa.Column('extra_data', sa.Text(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['template_id'], ['email_templates.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['vendor_id'], ['vendors.id'], ),
sa.PrimaryKeyConstraint('id')
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('template_code', sa.String(length=100), nullable=True),
sa.Column('template_id', sa.Integer(), nullable=True),
sa.Column('recipient_email', sa.String(length=255), nullable=False),
sa.Column('recipient_name', sa.String(length=255), nullable=True),
sa.Column('subject', sa.String(length=500), nullable=False),
sa.Column('body_html', sa.Text(), nullable=True),
sa.Column('body_text', sa.Text(), nullable=True),
sa.Column('from_email', sa.String(length=255), nullable=False),
sa.Column('from_name', sa.String(length=255), nullable=True),
sa.Column('reply_to', sa.String(length=255), nullable=True),
sa.Column('status', sa.String(length=20), nullable=False),
sa.Column('sent_at', sa.DateTime(), nullable=True),
sa.Column('delivered_at', sa.DateTime(), nullable=True),
sa.Column('opened_at', sa.DateTime(), nullable=True),
sa.Column('clicked_at', sa.DateTime(), nullable=True),
sa.Column('error_message', sa.Text(), nullable=True),
sa.Column('retry_count', sa.Integer(), nullable=False),
sa.Column('provider', sa.String(length=50), nullable=True),
sa.Column('provider_message_id', sa.String(length=255), nullable=True),
sa.Column('vendor_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('related_type', sa.String(length=50), nullable=True),
sa.Column('related_id', sa.Integer(), nullable=True),
sa.Column('extra_data', sa.Text(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['template_id'], ['email_templates.id']),
sa.ForeignKeyConstraint(['user_id'], ['users.id']),
sa.ForeignKeyConstraint(['vendor_id'], ['vendors.id']),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_email_logs_id'), 'email_logs', ['id'], unique=False)
op.create_index(op.f('ix_email_logs_provider_message_id'), 'email_logs', ['provider_message_id'], unique=False)
@@ -80,181 +82,242 @@ def upgrade() -> None:
op.create_index(op.f('ix_email_logs_template_code'), 'email_logs', ['template_code'], unique=False)
op.create_index(op.f('ix_email_logs_user_id'), 'email_logs', ['user_id'], unique=False)
op.create_index(op.f('ix_email_logs_vendor_id'), 'email_logs', ['vendor_id'], unique=False)
op.alter_column('application_logs', 'created_at',
existing_type=sa.DATETIME(),
nullable=False)
op.alter_column('application_logs', 'updated_at',
existing_type=sa.DATETIME(),
nullable=False)
op.drop_index(op.f('ix_capacity_snapshots_date'), table_name='capacity_snapshots')
op.create_index('ix_capacity_snapshots_date', 'capacity_snapshots', ['snapshot_date'], unique=False)
op.create_index(op.f('ix_capacity_snapshots_snapshot_date'), 'capacity_snapshots', ['snapshot_date'], unique=True)
op.alter_column('cart_items', 'created_at',
existing_type=sa.DATETIME(),
nullable=False)
op.alter_column('cart_items', 'updated_at',
existing_type=sa.DATETIME(),
nullable=False)
op.drop_index(op.f('ix_customers_addresses_id'), table_name='customer_addresses')
op.create_index(op.f('ix_customer_addresses_id'), 'customer_addresses', ['id'], unique=False)
op.alter_column('inventory', 'warehouse',
existing_type=sa.VARCHAR(),
nullable=False)
op.alter_column('inventory', 'bin_location',
existing_type=sa.VARCHAR(),
nullable=False)
op.alter_column('inventory', 'location',
existing_type=sa.VARCHAR(),
nullable=True)
op.drop_index(op.f('idx_inventory_product_location'), table_name='inventory')
op.drop_constraint(op.f('uq_inventory_product_location'), 'inventory', type_='unique')
op.create_unique_constraint('uq_inventory_product_warehouse_bin', 'inventory', ['product_id', 'warehouse', 'bin_location'])
op.create_index(op.f('ix_marketplace_import_errors_import_job_id'), 'marketplace_import_errors', ['import_job_id'], unique=False)
op.create_index(op.f('ix_marketplace_product_translations_id'), 'marketplace_product_translations', ['id'], unique=False)
op.alter_column('marketplace_products', 'is_digital',
existing_type=sa.BOOLEAN(),
nullable=True,
existing_server_default=sa.text('0'))
op.alter_column('marketplace_products', 'is_active',
existing_type=sa.BOOLEAN(),
nullable=True,
existing_server_default=sa.text('1'))
op.drop_index(op.f('idx_mp_is_active'), table_name='marketplace_products')
op.drop_index(op.f('idx_mp_platform'), table_name='marketplace_products')
op.drop_index(op.f('idx_mp_sku'), table_name='marketplace_products')
op.create_index(op.f('ix_marketplace_products_is_active'), 'marketplace_products', ['is_active'], unique=False)
op.create_index(op.f('ix_marketplace_products_is_digital'), 'marketplace_products', ['is_digital'], unique=False)
op.create_index(op.f('ix_marketplace_products_mpn'), 'marketplace_products', ['mpn'], unique=False)
op.create_index(op.f('ix_marketplace_products_platform'), 'marketplace_products', ['platform'], unique=False)
op.create_index(op.f('ix_marketplace_products_sku'), 'marketplace_products', ['sku'], unique=False)
op.drop_index(op.f('uq_order_item_exception'), table_name='order_item_exceptions')
op.create_index(op.f('ix_order_item_exceptions_original_gtin'), 'order_item_exceptions', ['original_gtin'], unique=False)
op.create_unique_constraint(None, 'order_item_exceptions', ['order_item_id'])
op.alter_column('order_items', 'needs_product_match',
existing_type=sa.BOOLEAN(),
nullable=True,
existing_server_default=sa.text("'0'"))
op.drop_index(op.f('ix_order_items_gtin'), table_name='order_items')
op.drop_index(op.f('ix_order_items_product_id'), table_name='order_items')
op.create_index(op.f('ix_product_translations_id'), 'product_translations', ['id'], unique=False)
op.drop_index(op.f('idx_product_active'), table_name='products')
op.drop_index(op.f('idx_product_featured'), table_name='products')
op.drop_index(op.f('idx_product_gtin'), table_name='products')
op.drop_index(op.f('idx_product_vendor_gtin'), table_name='products')
op.drop_constraint(op.f('uq_product'), 'products', type_='unique')
op.create_index('idx_product_vendor_active', 'products', ['vendor_id', 'is_active'], unique=False)
op.create_index('idx_product_vendor_featured', 'products', ['vendor_id', 'is_featured'], unique=False)
op.create_index(op.f('ix_products_gtin'), 'products', ['gtin'], unique=False)
op.create_index(op.f('ix_products_vendor_sku'), 'products', ['vendor_sku'], unique=False)
op.create_unique_constraint('uq_vendor_marketplace_product', 'products', ['vendor_id', 'marketplace_product_id'])
op.drop_index(op.f('ix_vendors_domains_domain'), table_name='vendor_domains')
op.drop_index(op.f('ix_vendors_domains_id'), table_name='vendor_domains')
op.create_index(op.f('ix_vendor_domains_domain'), 'vendor_domains', ['domain'], unique=True)
op.create_index(op.f('ix_vendor_domains_id'), 'vendor_domains', ['id'], unique=False)
op.alter_column('vendor_subscriptions', 'payment_retry_count',
existing_type=sa.INTEGER(),
nullable=False,
existing_server_default=sa.text('0'))
op.create_foreign_key(None, 'vendor_subscriptions', 'subscription_tiers', ['tier_id'], ['id'])
op.drop_index(op.f('ix_vendors_themes_id'), table_name='vendor_themes')
op.create_index(op.f('ix_vendor_themes_id'), 'vendor_themes', ['id'], unique=False)
op.drop_index(op.f('ix_vendors_users_id'), table_name='vendor_users')
op.drop_index(op.f('ix_vendors_users_invitation_token'), table_name='vendor_users')
op.create_index(op.f('ix_vendor_users_id'), 'vendor_users', ['id'], unique=False)
op.create_index(op.f('ix_vendor_users_invitation_token'), 'vendor_users', ['invitation_token'], unique=False)
op.alter_column('vendors', 'company_id',
existing_type=sa.INTEGER(),
nullable=False)
# ### end Alembic commands ###
# application_logs - alter columns
op.alter_column('application_logs', 'created_at', existing_type=sa.DATETIME(), nullable=False)
op.alter_column('application_logs', 'updated_at', existing_type=sa.DATETIME(), nullable=False)
# capacity_snapshots indexes (PostgreSQL IF EXISTS/IF NOT EXISTS)
op.execute(text("DROP INDEX IF EXISTS ix_capacity_snapshots_date"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_capacity_snapshots_date ON capacity_snapshots (snapshot_date)"))
op.execute(text("CREATE UNIQUE INDEX IF NOT EXISTS ix_capacity_snapshots_snapshot_date ON capacity_snapshots (snapshot_date)"))
# cart_items - alter columns
op.alter_column('cart_items', 'created_at', existing_type=sa.DATETIME(), nullable=False)
op.alter_column('cart_items', 'updated_at', existing_type=sa.DATETIME(), nullable=False)
# customer_addresses index rename
op.execute(text("DROP INDEX IF EXISTS ix_customers_addresses_id"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_customer_addresses_id ON customer_addresses (id)"))
# inventory - alter columns and constraints
op.alter_column('inventory', 'warehouse', existing_type=sa.VARCHAR(), nullable=False)
op.alter_column('inventory', 'bin_location', existing_type=sa.VARCHAR(), nullable=False)
op.alter_column('inventory', 'location', existing_type=sa.VARCHAR(), nullable=True)
op.execute(text("DROP INDEX IF EXISTS idx_inventory_product_location"))
op.execute(text("ALTER TABLE inventory DROP CONSTRAINT IF EXISTS uq_inventory_product_location"))
op.execute(text("""
DO $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'uq_inventory_product_warehouse_bin') THEN
ALTER TABLE inventory ADD CONSTRAINT uq_inventory_product_warehouse_bin UNIQUE (product_id, warehouse, bin_location);
END IF;
END $$;
"""))
# marketplace_import_errors and translations indexes
op.execute(text("CREATE INDEX IF NOT EXISTS ix_marketplace_import_errors_import_job_id ON marketplace_import_errors (import_job_id)"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_marketplace_product_translations_id ON marketplace_product_translations (id)"))
# marketplace_products - alter columns
op.alter_column('marketplace_products', 'is_digital', existing_type=sa.BOOLEAN(), nullable=True)
op.alter_column('marketplace_products', 'is_active', existing_type=sa.BOOLEAN(), nullable=True)
# marketplace_products indexes
op.execute(text("DROP INDEX IF EXISTS idx_mp_is_active"))
op.execute(text("DROP INDEX IF EXISTS idx_mp_platform"))
op.execute(text("DROP INDEX IF EXISTS idx_mp_sku"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_marketplace_products_is_active ON marketplace_products (is_active)"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_marketplace_products_is_digital ON marketplace_products (is_digital)"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_marketplace_products_mpn ON marketplace_products (mpn)"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_marketplace_products_platform ON marketplace_products (platform)"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_marketplace_products_sku ON marketplace_products (sku)"))
# order_item_exceptions - constraints and indexes
op.execute(text("DROP INDEX IF EXISTS uq_order_item_exception"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_order_item_exceptions_original_gtin ON order_item_exceptions (original_gtin)"))
op.execute(text("""
DO $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'uq_order_item_exceptions_order_item_id') THEN
ALTER TABLE order_item_exceptions ADD CONSTRAINT uq_order_item_exceptions_order_item_id UNIQUE (order_item_id);
END IF;
END $$;
"""))
# order_items - alter column
op.alter_column('order_items', 'needs_product_match', existing_type=sa.BOOLEAN(), nullable=True)
# order_items indexes
op.execute(text("DROP INDEX IF EXISTS ix_order_items_gtin"))
op.execute(text("DROP INDEX IF EXISTS ix_order_items_product_id"))
# product_translations index
op.execute(text("CREATE INDEX IF NOT EXISTS ix_product_translations_id ON product_translations (id)"))
# products indexes
op.execute(text("DROP INDEX IF EXISTS idx_product_active"))
op.execute(text("DROP INDEX IF EXISTS idx_product_featured"))
op.execute(text("DROP INDEX IF EXISTS idx_product_gtin"))
op.execute(text("DROP INDEX IF EXISTS idx_product_vendor_gtin"))
# products constraint
op.execute(text("ALTER TABLE products DROP CONSTRAINT IF EXISTS uq_product"))
op.execute(text("""
DO $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'uq_vendor_marketplace_product') THEN
ALTER TABLE products ADD CONSTRAINT uq_vendor_marketplace_product UNIQUE (vendor_id, marketplace_product_id);
END IF;
END $$;
"""))
# products new indexes
op.execute(text("CREATE INDEX IF NOT EXISTS idx_product_vendor_active ON products (vendor_id, is_active)"))
op.execute(text("CREATE INDEX IF NOT EXISTS idx_product_vendor_featured ON products (vendor_id, is_featured)"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_products_gtin ON products (gtin)"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_products_vendor_sku ON products (vendor_sku)"))
# vendor_domains indexes
op.execute(text("DROP INDEX IF EXISTS ix_vendors_domains_domain"))
op.execute(text("DROP INDEX IF EXISTS ix_vendors_domains_id"))
op.execute(text("CREATE UNIQUE INDEX IF NOT EXISTS ix_vendor_domains_domain ON vendor_domains (domain)"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_vendor_domains_id ON vendor_domains (id)"))
# vendor_subscriptions - alter column and FK
op.alter_column('vendor_subscriptions', 'payment_retry_count', existing_type=sa.INTEGER(), nullable=False)
op.execute(text("""
DO $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'fk_vendor_subscriptions_tier_id') THEN
ALTER TABLE vendor_subscriptions ADD CONSTRAINT fk_vendor_subscriptions_tier_id
FOREIGN KEY (tier_id) REFERENCES subscription_tiers(id);
END IF;
END $$;
"""))
# vendor_themes indexes
op.execute(text("DROP INDEX IF EXISTS ix_vendors_themes_id"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_vendor_themes_id ON vendor_themes (id)"))
# vendor_users indexes
op.execute(text("DROP INDEX IF EXISTS ix_vendors_users_id"))
op.execute(text("DROP INDEX IF EXISTS ix_vendors_users_invitation_token"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_vendor_users_id ON vendor_users (id)"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_vendor_users_invitation_token ON vendor_users (invitation_token)"))
# vendors - alter column
op.alter_column('vendors', 'company_id', existing_type=sa.INTEGER(), nullable=False)
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('vendors', 'company_id',
existing_type=sa.INTEGER(),
nullable=True)
op.drop_index(op.f('ix_vendor_users_invitation_token'), table_name='vendor_users')
op.drop_index(op.f('ix_vendor_users_id'), table_name='vendor_users')
op.create_index(op.f('ix_vendors_users_invitation_token'), 'vendor_users', ['invitation_token'], unique=False)
op.create_index(op.f('ix_vendors_users_id'), 'vendor_users', ['id'], unique=False)
op.drop_index(op.f('ix_vendor_themes_id'), table_name='vendor_themes')
op.create_index(op.f('ix_vendors_themes_id'), 'vendor_themes', ['id'], unique=False)
op.drop_constraint(None, 'vendor_subscriptions', type_='foreignkey')
op.alter_column('vendor_subscriptions', 'payment_retry_count',
existing_type=sa.INTEGER(),
nullable=True,
existing_server_default=sa.text('0'))
op.drop_index(op.f('ix_vendor_domains_id'), table_name='vendor_domains')
op.drop_index(op.f('ix_vendor_domains_domain'), table_name='vendor_domains')
op.create_index(op.f('ix_vendors_domains_id'), 'vendor_domains', ['id'], unique=False)
op.create_index(op.f('ix_vendors_domains_domain'), 'vendor_domains', ['domain'], unique=1)
op.drop_constraint('uq_vendor_marketplace_product', 'products', type_='unique')
op.drop_index(op.f('ix_products_vendor_sku'), table_name='products')
op.drop_index(op.f('ix_products_gtin'), table_name='products')
op.drop_index('idx_product_vendor_featured', table_name='products')
op.drop_index('idx_product_vendor_active', table_name='products')
op.create_unique_constraint(op.f('uq_product'), 'products', ['vendor_id', 'marketplace_product_id'])
op.create_index(op.f('idx_product_vendor_gtin'), 'products', ['vendor_id', 'gtin'], unique=False)
op.create_index(op.f('idx_product_gtin'), 'products', ['gtin'], unique=False)
op.create_index(op.f('idx_product_featured'), 'products', ['vendor_id', 'is_featured'], unique=False)
op.create_index(op.f('idx_product_active'), 'products', ['vendor_id', 'is_active'], unique=False)
op.drop_index(op.f('ix_product_translations_id'), table_name='product_translations')
op.create_index(op.f('ix_order_items_product_id'), 'order_items', ['product_id'], unique=False)
op.create_index(op.f('ix_order_items_gtin'), 'order_items', ['gtin'], unique=False)
op.alter_column('order_items', 'needs_product_match',
existing_type=sa.BOOLEAN(),
nullable=False,
existing_server_default=sa.text("'0'"))
op.drop_constraint(None, 'order_item_exceptions', type_='unique')
op.drop_index(op.f('ix_order_item_exceptions_original_gtin'), table_name='order_item_exceptions')
op.create_index(op.f('uq_order_item_exception'), 'order_item_exceptions', ['order_item_id'], unique=1)
op.drop_index(op.f('ix_marketplace_products_sku'), table_name='marketplace_products')
op.drop_index(op.f('ix_marketplace_products_platform'), table_name='marketplace_products')
op.drop_index(op.f('ix_marketplace_products_mpn'), table_name='marketplace_products')
op.drop_index(op.f('ix_marketplace_products_is_digital'), table_name='marketplace_products')
op.drop_index(op.f('ix_marketplace_products_is_active'), table_name='marketplace_products')
op.create_index(op.f('idx_mp_sku'), 'marketplace_products', ['sku'], unique=False)
op.create_index(op.f('idx_mp_platform'), 'marketplace_products', ['platform'], unique=False)
op.create_index(op.f('idx_mp_is_active'), 'marketplace_products', ['is_active'], unique=False)
op.alter_column('marketplace_products', 'is_active',
existing_type=sa.BOOLEAN(),
nullable=False,
existing_server_default=sa.text('1'))
op.alter_column('marketplace_products', 'is_digital',
existing_type=sa.BOOLEAN(),
nullable=False,
existing_server_default=sa.text('0'))
op.drop_index(op.f('ix_marketplace_product_translations_id'), table_name='marketplace_product_translations')
op.drop_index(op.f('ix_marketplace_import_errors_import_job_id'), table_name='marketplace_import_errors')
op.drop_constraint('uq_inventory_product_warehouse_bin', 'inventory', type_='unique')
op.create_unique_constraint(op.f('uq_inventory_product_location'), 'inventory', ['product_id', 'location'])
op.create_index(op.f('idx_inventory_product_location'), 'inventory', ['product_id', 'location'], unique=False)
op.alter_column('inventory', 'location',
existing_type=sa.VARCHAR(),
nullable=False)
op.alter_column('inventory', 'bin_location',
existing_type=sa.VARCHAR(),
nullable=True)
op.alter_column('inventory', 'warehouse',
existing_type=sa.VARCHAR(),
nullable=True)
op.drop_index(op.f('ix_customer_addresses_id'), table_name='customer_addresses')
op.create_index(op.f('ix_customers_addresses_id'), 'customer_addresses', ['id'], unique=False)
op.alter_column('cart_items', 'updated_at',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column('cart_items', 'created_at',
existing_type=sa.DATETIME(),
nullable=True)
op.drop_index(op.f('ix_capacity_snapshots_snapshot_date'), table_name='capacity_snapshots')
op.drop_index('ix_capacity_snapshots_date', table_name='capacity_snapshots')
op.create_index(op.f('ix_capacity_snapshots_date'), 'capacity_snapshots', ['snapshot_date'], unique=1)
op.alter_column('application_logs', 'updated_at',
existing_type=sa.DATETIME(),
nullable=True)
op.alter_column('application_logs', 'created_at',
existing_type=sa.DATETIME(),
nullable=True)
# vendors
op.alter_column('vendors', 'company_id', existing_type=sa.INTEGER(), nullable=True)
# vendor_users indexes
op.execute(text("DROP INDEX IF EXISTS ix_vendor_users_invitation_token"))
op.execute(text("DROP INDEX IF EXISTS ix_vendor_users_id"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_vendors_users_invitation_token ON vendor_users (invitation_token)"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_vendors_users_id ON vendor_users (id)"))
# vendor_themes indexes
op.execute(text("DROP INDEX IF EXISTS ix_vendor_themes_id"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_vendors_themes_id ON vendor_themes (id)"))
# vendor_subscriptions
op.execute(text("ALTER TABLE vendor_subscriptions DROP CONSTRAINT IF EXISTS fk_vendor_subscriptions_tier_id"))
op.alter_column('vendor_subscriptions', 'payment_retry_count', existing_type=sa.INTEGER(), nullable=True)
# vendor_domains indexes
op.execute(text("DROP INDEX IF EXISTS ix_vendor_domains_id"))
op.execute(text("DROP INDEX IF EXISTS ix_vendor_domains_domain"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_vendors_domains_id ON vendor_domains (id)"))
op.execute(text("CREATE UNIQUE INDEX IF NOT EXISTS ix_vendors_domains_domain ON vendor_domains (domain)"))
# products constraint and indexes
op.execute(text("ALTER TABLE products DROP CONSTRAINT IF EXISTS uq_vendor_marketplace_product"))
op.execute(text("""
DO $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'uq_product') THEN
ALTER TABLE products ADD CONSTRAINT uq_product UNIQUE (vendor_id, marketplace_product_id);
END IF;
END $$;
"""))
op.execute(text("DROP INDEX IF EXISTS ix_products_vendor_sku"))
op.execute(text("DROP INDEX IF EXISTS ix_products_gtin"))
op.execute(text("DROP INDEX IF EXISTS idx_product_vendor_featured"))
op.execute(text("DROP INDEX IF EXISTS idx_product_vendor_active"))
op.execute(text("CREATE INDEX IF NOT EXISTS idx_product_vendor_gtin ON products (vendor_id, gtin)"))
op.execute(text("CREATE INDEX IF NOT EXISTS idx_product_gtin ON products (gtin)"))
op.execute(text("CREATE INDEX IF NOT EXISTS idx_product_featured ON products (vendor_id, is_featured)"))
op.execute(text("CREATE INDEX IF NOT EXISTS idx_product_active ON products (vendor_id, is_active)"))
# product_translations
op.execute(text("DROP INDEX IF EXISTS ix_product_translations_id"))
# order_items
op.execute(text("CREATE INDEX IF NOT EXISTS ix_order_items_product_id ON order_items (product_id)"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_order_items_gtin ON order_items (gtin)"))
op.alter_column('order_items', 'needs_product_match', existing_type=sa.BOOLEAN(), nullable=False)
# order_item_exceptions
op.execute(text("ALTER TABLE order_item_exceptions DROP CONSTRAINT IF EXISTS uq_order_item_exceptions_order_item_id"))
op.execute(text("DROP INDEX IF EXISTS ix_order_item_exceptions_original_gtin"))
op.execute(text("CREATE UNIQUE INDEX IF NOT EXISTS uq_order_item_exception ON order_item_exceptions (order_item_id)"))
# marketplace_products indexes
op.execute(text("DROP INDEX IF EXISTS ix_marketplace_products_sku"))
op.execute(text("DROP INDEX IF EXISTS ix_marketplace_products_platform"))
op.execute(text("DROP INDEX IF EXISTS ix_marketplace_products_mpn"))
op.execute(text("DROP INDEX IF EXISTS ix_marketplace_products_is_digital"))
op.execute(text("DROP INDEX IF EXISTS ix_marketplace_products_is_active"))
op.execute(text("CREATE INDEX IF NOT EXISTS idx_mp_sku ON marketplace_products (sku)"))
op.execute(text("CREATE INDEX IF NOT EXISTS idx_mp_platform ON marketplace_products (platform)"))
op.execute(text("CREATE INDEX IF NOT EXISTS idx_mp_is_active ON marketplace_products (is_active)"))
# marketplace_products columns
op.alter_column('marketplace_products', 'is_active', existing_type=sa.BOOLEAN(), nullable=False)
op.alter_column('marketplace_products', 'is_digital', existing_type=sa.BOOLEAN(), nullable=False)
# marketplace imports
op.execute(text("DROP INDEX IF EXISTS ix_marketplace_product_translations_id"))
op.execute(text("DROP INDEX IF EXISTS ix_marketplace_import_errors_import_job_id"))
# inventory
op.execute(text("ALTER TABLE inventory DROP CONSTRAINT IF EXISTS uq_inventory_product_warehouse_bin"))
op.execute(text("""
DO $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'uq_inventory_product_location') THEN
ALTER TABLE inventory ADD CONSTRAINT uq_inventory_product_location UNIQUE (product_id, location);
END IF;
END $$;
"""))
op.execute(text("CREATE INDEX IF NOT EXISTS idx_inventory_product_location ON inventory (product_id, location)"))
op.alter_column('inventory', 'location', existing_type=sa.VARCHAR(), nullable=False)
op.alter_column('inventory', 'bin_location', existing_type=sa.VARCHAR(), nullable=True)
op.alter_column('inventory', 'warehouse', existing_type=sa.VARCHAR(), nullable=True)
# customer_addresses
op.execute(text("DROP INDEX IF EXISTS ix_customer_addresses_id"))
op.execute(text("CREATE INDEX IF NOT EXISTS ix_customers_addresses_id ON customer_addresses (id)"))
# cart_items
op.alter_column('cart_items', 'updated_at', existing_type=sa.DATETIME(), nullable=True)
op.alter_column('cart_items', 'created_at', existing_type=sa.DATETIME(), nullable=True)
# capacity_snapshots
op.execute(text("DROP INDEX IF EXISTS ix_capacity_snapshots_snapshot_date"))
op.execute(text("DROP INDEX IF EXISTS ix_capacity_snapshots_date"))
op.execute(text("CREATE UNIQUE INDEX IF NOT EXISTS ix_capacity_snapshots_date ON capacity_snapshots (snapshot_date)"))
# application_logs
op.alter_column('application_logs', 'updated_at', existing_type=sa.DATETIME(), nullable=True)
op.alter_column('application_logs', 'created_at', existing_type=sa.DATETIME(), nullable=True)
# Drop email tables
op.drop_index(op.f('ix_email_logs_vendor_id'), table_name='email_logs')
op.drop_index(op.f('ix_email_logs_user_id'), table_name='email_logs')
op.drop_index(op.f('ix_email_logs_template_code'), table_name='email_logs')
@@ -267,4 +330,3 @@ def downgrade() -> None:
op.drop_index(op.f('ix_email_templates_code'), table_name='email_templates')
op.drop_index(op.f('ix_email_templates_category'), table_name='email_templates')
op.drop_table('email_templates')
# ### end Alembic commands ###
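The DO $$ ... IF NOT EXISTS blocks above repeat the same shape for every constraint. If this migration is revisited, a small helper could collapse the duplication; a sketch (the helper name is an assumption, and the identifiers are interpolated directly into DDL, so they must come from the migration author, never from user input):

from alembic import op
from sqlalchemy import text


def add_unique_constraint_if_absent(name: str, table: str, columns: list[str]) -> None:
    """Hypothetical helper: add a UNIQUE constraint only if it does not already exist."""
    cols = ", ".join(columns)
    op.execute(text(f"""
        DO $$
        BEGIN
            IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = '{name}') THEN
                ALTER TABLE {table} ADD CONSTRAINT {name} UNIQUE ({cols});
            END IF;
        END $$;
    """))


# Usage matching one of the blocks above:
# add_unique_constraint_if_absent(
#     'uq_inventory_product_warehouse_bin', 'inventory',
#     ['product_id', 'warehouse', 'bin_location'])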

View File

@@ -20,8 +20,6 @@ Google Shopping feed value while using 'product_type' for the new enum.
from typing import Sequence, Union
import sqlalchemy as sa
from sqlalchemy.dialects import sqlite
from alembic import op
# revision identifiers, used by Alembic.
@@ -56,7 +54,7 @@ def upgrade() -> None:
"is_digital",
sa.Boolean(),
nullable=False,
server_default=sa.text("0"),
server_default=sa.text("false"),
),
)
@@ -113,7 +111,7 @@ def upgrade() -> None:
"is_active",
sa.Boolean(),
nullable=False,
server_default=sa.text("1"),
server_default=sa.text("true"),
),
)

View File

@@ -19,12 +19,29 @@ branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def get_column_names(conn, table_name: str) -> set:
"""Get column names for a table (PostgreSQL)."""
result = conn.execute(text(
"SELECT column_name FROM information_schema.columns "
"WHERE table_name = :table AND table_schema = 'public'"
), {"table": table_name})
return {row[0] for row in result.fetchall()}
def get_index_names(conn, table_name: str) -> set:
"""Get index names for a table (PostgreSQL)."""
result = conn.execute(text(
"SELECT indexname FROM pg_indexes "
"WHERE tablename = :table AND schemaname = 'public'"
), {"table": table_name})
return {row[0] for row in result.fetchall()}
def upgrade() -> None:
conn = op.get_bind()
# Check if columns already exist (idempotent)
result = conn.execute(text("PRAGMA table_info(inventory)"))
columns = {row[1] for row in result.fetchall()}
columns = get_column_names(conn, "inventory")
if 'warehouse' not in columns:
op.add_column('inventory', sa.Column('warehouse', sa.String(), nullable=False, server_default='strassen'))
@@ -41,8 +58,7 @@ def upgrade() -> None:
"""))
# Create indexes if they don't exist
indexes = conn.execute(text("PRAGMA index_list(inventory)"))
existing_indexes = {row[1] for row in indexes.fetchall()}
existing_indexes = get_index_names(conn, "inventory")
if 'idx_inventory_warehouse_bin' not in existing_indexes:
op.create_index('idx_inventory_warehouse_bin', 'inventory', ['warehouse', 'bin_location'], unique=False)
@@ -56,8 +72,7 @@ def downgrade() -> None:
conn = op.get_bind()
# Check which indexes exist before dropping
indexes = conn.execute(text("PRAGMA index_list(inventory)"))
existing_indexes = {row[1] for row in indexes.fetchall()}
existing_indexes = get_index_names(conn, "inventory")
if 'ix_inventory_warehouse' in existing_indexes:
op.drop_index(op.f('ix_inventory_warehouse'), table_name='inventory')
@@ -67,8 +82,7 @@ def downgrade() -> None:
op.drop_index('idx_inventory_warehouse_bin', table_name='inventory')
# Check if columns exist before dropping
result = conn.execute(text("PRAGMA table_info(inventory)"))
columns = {row[1] for row in result.fetchall()}
columns = get_column_names(conn, "inventory")
if 'bin_location' in columns:
op.drop_column('inventory', 'bin_location')
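SQLAlchemy's runtime inspection API returns the same information without hand-written catalog queries, and would keep these helpers shorter; an equivalent sketch using sa.inspect (not what this migration uses):

import sqlalchemy as sa


def get_column_names(conn, table_name: str) -> set:
    """Same result as the information_schema query, via SQLAlchemy's Inspector."""
    return {col["name"] for col in sa.inspect(conn).get_columns(table_name)}


def get_index_names(conn, table_name: str) -> set:
    return {ix["name"] for ix in sa.inspect(conn).get_indexes(table_name)}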

View File

@@ -62,12 +62,12 @@ def upgrade() -> None:
)
# Update existing records to have proper started_at and completed_at
# This is done via raw SQL for efficiency
# This is done via raw SQL for efficiency (PostgreSQL syntax)
op.execute(
"""
UPDATE architecture_scans
SET started_at = timestamp,
completed_at = datetime(timestamp, '+' || CAST(duration_seconds AS TEXT) || ' seconds')
completed_at = timestamp + (COALESCE(duration_seconds, 0) || ' seconds')::interval
WHERE started_at IS NULL
"""
)
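PostgreSQL can also build the interval numerically with make_interval(), which avoids the string concatenation and cast; an equivalent sketch of the same backfill:

from alembic import op

# Same backfill using make_interval(secs => ...) instead of '... seconds'::interval.
op.execute(
    """
    UPDATE architecture_scans
    SET started_at = timestamp,
        completed_at = timestamp + make_interval(secs => COALESCE(duration_seconds, 0))
    WHERE started_at IS NULL
    """
)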

View File

@@ -59,13 +59,11 @@ def upgrade() -> None:
def downgrade() -> None:
# In SQLite batch mode, we must explicitly drop the index before dropping
# the column, otherwise batch mode will try to recreate the index on the
# new table (which won't have the column).
with op.batch_alter_table("vendor_subscriptions", schema=None) as batch_op:
# Drop FK constraint
batch_op.drop_constraint(
"fk_vendor_subscriptions_tier_id",
type_="foreignkey",
)
# Drop index
# First drop the index on tier_id
batch_op.drop_index("ix_vendor_subscriptions_tier_id")
# Drop column
# Then drop the column (FK is automatically removed with the column)
batch_op.drop_column("tier_id")

View File

@@ -26,26 +26,26 @@ def upgrade() -> None:
sa.Column('status', sa.String(length=20), nullable=False, server_default='not_started'),
sa.Column('current_step', sa.String(length=30), nullable=False, server_default='company_profile'),
# Step 1: Company Profile
sa.Column('step_company_profile_completed', sa.Boolean(), nullable=False, server_default=sa.text('0')),
sa.Column('step_company_profile_completed', sa.Boolean(), nullable=False, server_default=sa.text('false')),
sa.Column('step_company_profile_completed_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('step_company_profile_data', sa.JSON(), nullable=True),
# Step 2: Letzshop API Configuration
sa.Column('step_letzshop_api_completed', sa.Boolean(), nullable=False, server_default=sa.text('0')),
sa.Column('step_letzshop_api_completed', sa.Boolean(), nullable=False, server_default=sa.text('false')),
sa.Column('step_letzshop_api_completed_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('step_letzshop_api_connection_verified', sa.Boolean(), nullable=False, server_default=sa.text('0')),
sa.Column('step_letzshop_api_connection_verified', sa.Boolean(), nullable=False, server_default=sa.text('false')),
# Step 3: Product Import
sa.Column('step_product_import_completed', sa.Boolean(), nullable=False, server_default=sa.text('0')),
sa.Column('step_product_import_completed', sa.Boolean(), nullable=False, server_default=sa.text('false')),
sa.Column('step_product_import_completed_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('step_product_import_csv_url_set', sa.Boolean(), nullable=False, server_default=sa.text('0')),
sa.Column('step_product_import_csv_url_set', sa.Boolean(), nullable=False, server_default=sa.text('false')),
# Step 4: Order Sync
sa.Column('step_order_sync_completed', sa.Boolean(), nullable=False, server_default=sa.text('0')),
sa.Column('step_order_sync_completed', sa.Boolean(), nullable=False, server_default=sa.text('false')),
sa.Column('step_order_sync_completed_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('step_order_sync_job_id', sa.Integer(), nullable=True),
# Completion tracking
sa.Column('started_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('completed_at', sa.DateTime(timezone=True), nullable=True),
# Admin override
sa.Column('skipped_by_admin', sa.Boolean(), nullable=False, server_default=sa.text('0')),
sa.Column('skipped_by_admin', sa.Boolean(), nullable=False, server_default=sa.text('false')),
sa.Column('skipped_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('skipped_reason', sa.Text(), nullable=True),
sa.Column('skipped_by_user_id', sa.Integer(), nullable=True),
@@ -56,7 +56,6 @@ def upgrade() -> None:
sa.ForeignKeyConstraint(['vendor_id'], ['vendors.id'], ondelete='CASCADE'),
sa.ForeignKeyConstraint(['skipped_by_user_id'], ['users.id']),
sa.PrimaryKeyConstraint('id'),
sqlite_autoincrement=True
)
op.create_index(op.f('ix_vendor_onboarding_id'), 'vendor_onboarding', ['id'], unique=False)
op.create_index(op.f('ix_vendor_onboarding_vendor_id'), 'vendor_onboarding', ['vendor_id'], unique=True)
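Dropping sqlite_autoincrement=True is safe here because SQLAlchemy already renders an Integer primary key as SERIAL on the PostgreSQL dialect; roughly:

import sqlalchemy as sa

# On PostgreSQL this emits: id SERIAL NOT NULL ... PRIMARY KEY (id)
sa.Column('id', sa.Integer(), primary_key=True)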

View File

@@ -255,7 +255,7 @@ def upgrade() -> None:
INSERT INTO features (code, name, description, category, ui_location, ui_icon, ui_route,
minimum_tier_id, is_active, is_visible, display_order, created_at, updated_at)
VALUES (:code, :name, :description, :category, :ui_location, :ui_icon, :ui_route,
:minimum_tier_id, 1, 1, :display_order, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)
:minimum_tier_id, true, true, :display_order, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)
"""),
{
"code": code,

View File

@@ -31,7 +31,7 @@ def upgrade() -> None:
op.execute("""
UPDATE order_items
SET shipped_quantity = quantity
WHERE inventory_fulfilled = 1
WHERE inventory_fulfilled = true
""")

View File

@@ -13,6 +13,7 @@ making changes.
from alembic import op
import sqlalchemy as sa
from sqlalchemy import text
# revision identifiers, used by Alembic.
@@ -59,9 +60,12 @@ COUNTRY_ISO_MAP = {
def get_column_names(connection, table_name):
"""Get list of column names for a table."""
result = connection.execute(sa.text(f"PRAGMA table_info({table_name})"))
return [row[1] for row in result]
"""Get list of column names for a table (PostgreSQL)."""
result = connection.execute(text(
"SELECT column_name FROM information_schema.columns "
"WHERE table_name = :table AND table_schema = 'public'"
), {"table": table_name})
return [row[0] for row in result]
def upgrade() -> None:
@@ -78,25 +82,25 @@ def upgrade() -> None:
print(" Columns country_name and country_iso already exist, skipping")
return
# If has old 'country' column, rename it and add country_iso
# If has old 'country' column, rename it (PostgreSQL supports direct rename)
if has_country and not has_country_name:
with op.batch_alter_table("customer_addresses") as batch_op:
batch_op.alter_column(
"country",
new_column_name="country_name",
)
op.alter_column(
"customer_addresses",
"country",
new_column_name="country_name",
)
# Add country_iso if it doesn't exist
if not has_country_iso:
with op.batch_alter_table("customer_addresses") as batch_op:
batch_op.add_column(
sa.Column("country_iso", sa.String(5), nullable=True)
)
op.add_column(
"customer_addresses",
sa.Column("country_iso", sa.String(5), nullable=True)
)
# Backfill country_iso from country_name
for country_name, iso_code in COUNTRY_ISO_MAP.items():
connection.execute(
sa.text(
text(
"UPDATE customer_addresses SET country_iso = :iso "
"WHERE country_name = :name"
),
@@ -105,19 +109,19 @@ def upgrade() -> None:
# Set default for any remaining NULL values
connection.execute(
sa.text(
text(
"UPDATE customer_addresses SET country_iso = 'LU' "
"WHERE country_iso IS NULL"
)
)
# Make country_iso NOT NULL using batch operation
with op.batch_alter_table("customer_addresses") as batch_op:
batch_op.alter_column(
"country_iso",
existing_type=sa.String(5),
nullable=False,
)
# Make country_iso NOT NULL (PostgreSQL supports direct alter)
op.alter_column(
"customer_addresses",
"country_iso",
existing_type=sa.String(5),
nullable=False,
)
def downgrade() -> None:
@@ -130,12 +134,11 @@ def downgrade() -> None:
# Only downgrade if in the new state
if has_country_name and not has_country:
with op.batch_alter_table("customer_addresses") as batch_op:
batch_op.alter_column(
"country_name",
new_column_name="country",
)
op.alter_column(
"customer_addresses",
"country_name",
new_column_name="country",
)
if has_country_iso:
with op.batch_alter_table("customer_addresses") as batch_op:
batch_op.drop_column("country_iso")
op.drop_column("customer_addresses", "country_iso")

View File

@@ -3,45 +3,33 @@
Database configuration and session management.
This module provides classes and functions for:
- Database engine creation and configuration
- PostgreSQL database engine creation and configuration
- Session management with connection pooling
- Database dependency for FastAPI routes
Note: This project uses PostgreSQL only. SQLite is not supported.
"""
import logging
from sqlalchemy import create_engine, event
from sqlalchemy import create_engine
from sqlalchemy.orm import declarative_base, sessionmaker
from sqlalchemy.pool import QueuePool
from .config import settings
from .config import settings, validate_database_url
# Validate database URL on import
validate_database_url()
def _configure_sqlite_connection(dbapi_connection, connection_record):
"""Configure SQLite connection for better concurrency.
- WAL mode: Allows concurrent reads during writes
- busy_timeout: Wait up to 30 seconds if database is locked
- synchronous=NORMAL: Balance between safety and performance
"""
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA journal_mode=WAL")
cursor.execute("PRAGMA busy_timeout=30000")
cursor.execute("PRAGMA synchronous=NORMAL")
cursor.close()
# Create engine with SQLite-specific configuration
engine_kwargs = {}
# Add SQLite-specific settings for better concurrent access
if settings.database_url.startswith("sqlite"):
engine_kwargs["connect_args"] = {"check_same_thread": False}
engine = create_engine(settings.database_url, **engine_kwargs)
# Configure SQLite pragmas on connection
if settings.database_url.startswith("sqlite"):
event.listen(engine, "connect", _configure_sqlite_connection)
# Create PostgreSQL engine with connection pooling
engine = create_engine(
settings.database_url,
poolclass=QueuePool,
pool_size=10,
max_overflow=20,
pool_pre_ping=True,
echo=False,
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
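validate_database_url() is imported from .config but its body is not part of this diff; its likely shape is a fail-fast guard. A hypothetical sketch, assuming it reads the same settings object:

def validate_database_url() -> None:
    """Hypothetical sketch: fail at import time on a non-PostgreSQL DATABASE_URL."""
    if not settings.database_url.startswith(("postgresql://", "postgresql+psycopg2://")):
        raise ValueError(
            "This project is PostgreSQL-only; DATABASE_URL must be a postgresql:// URL."
        )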

View File

@@ -1012,10 +1012,12 @@ class EmailService:
def _has_feature(self, vendor_id: int, feature_code: str) -> bool:
"""Check if vendor has a specific feature enabled."""
if vendor_id not in self._feature_cache:
from app.core.feature_gate import get_vendor_features
from app.services.feature_service import feature_service
try:
self._feature_cache[vendor_id] = get_vendor_features(self.db, vendor_id)
features = feature_service.get_vendor_features(self.db, vendor_id)
# Convert to set of feature codes
self._feature_cache[vendor_id] = {f.code for f in features.features}
except Exception:
self._feature_cache[vendor_id] = set()
@@ -1161,10 +1163,10 @@ class EmailService:
# Whitelabel: use vendor branding throughout
return BrandingContext(
platform_name=vendor.name,
platform_logo_url=vendor.logo_url,
platform_logo_url=vendor.get_logo_url(),
support_email=vendor.support_email or PLATFORM_SUPPORT_EMAIL,
vendor_name=vendor.name,
vendor_logo_url=vendor.logo_url,
vendor_logo_url=vendor.get_logo_url(),
is_whitelabel=True,
)
else:
@@ -1174,7 +1176,7 @@ class EmailService:
platform_logo_url=None, # Use default platform logo
support_email=PLATFORM_SUPPORT_EMAIL,
vendor_name=vendor.name if vendor else None,
vendor_logo_url=vendor.logo_url if vendor else None,
vendor_logo_url=vendor.get_logo_url() if vendor else None,
is_whitelabel=False,
)

View File

@@ -755,8 +755,17 @@ class InventoryService:
) -> AdminVendorsWithInventoryResponse:
"""Get list of vendors that have inventory entries (admin only)."""
# noqa: SVC-005 - Admin function, intentionally cross-vendor
# Use subquery to avoid DISTINCT on JSON columns (PostgreSQL can't compare JSON)
vendor_ids_subquery = (
db.query(Inventory.vendor_id)
.distinct()
.subquery()
)
vendors = (
db.query(Vendor).join(Inventory).distinct().order_by(Vendor.name).all()
db.query(Vendor)
.filter(Vendor.id.in_(db.query(vendor_ids_subquery.c.vendor_id)))
.order_by(Vendor.name)
.all()
)
return AdminVendorsWithInventoryResponse(
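Background for the comment above: PostgreSQL's json type defines no equality operator, so DISTINCT (or GROUP BY) over rows containing a json column fails with "could not identify an equality operator for type json"; jsonb does define equality. Selecting DISTINCT over the scalar id column, as here, sidesteps that. Casting is the other escape hatch; a sketch, assuming a hypothetical json column named Vendor.settings:

from sqlalchemy import cast
from sqlalchemy.dialects.postgresql import JSONB

# Hypothetical alternative: compare the JSON column as jsonb so DISTINCT works.
db.query(Vendor.id, cast(Vendor.settings, JSONB)).distinct()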

View File

@@ -246,20 +246,27 @@ class MarketplaceProductService:
if search:
# Search in marketplace, vendor_name, brand, and translations
search_term = f"%{search}%"
# Join with translations for title/description search
query = query.outerjoin(MarketplaceProductTranslation).filter(
or_(
MarketplaceProduct.marketplace.ilike(search_term),
MarketplaceProduct.vendor_name.ilike(search_term),
MarketplaceProduct.brand.ilike(search_term),
MarketplaceProduct.gtin.ilike(search_term),
MarketplaceProduct.marketplace_product_id.ilike(search_term),
MarketplaceProductTranslation.title.ilike(search_term),
MarketplaceProductTranslation.description.ilike(search_term),
# Use subquery to get distinct IDs (PostgreSQL can't compare JSON for DISTINCT)
id_subquery = (
db.query(MarketplaceProduct.id)
.outerjoin(MarketplaceProductTranslation)
.filter(
or_(
MarketplaceProduct.marketplace.ilike(search_term),
MarketplaceProduct.vendor_name.ilike(search_term),
MarketplaceProduct.brand.ilike(search_term),
MarketplaceProduct.gtin.ilike(search_term),
MarketplaceProduct.marketplace_product_id.ilike(search_term),
MarketplaceProductTranslation.title.ilike(search_term),
MarketplaceProductTranslation.description.ilike(search_term),
)
)
.distinct()
.subquery()
)
# Remove duplicates from join
query = query.distinct()
query = query.filter(MarketplaceProduct.id.in_(
db.query(id_subquery.c.id)
))
total = query.count()
products = query.offset(skip).limit(limit).all()
@@ -634,8 +641,10 @@ class MarketplaceProductService:
if search:
search_term = f"%{search}%"
query = (
query.outerjoin(MarketplaceProductTranslation)
# Use subquery to get distinct IDs (PostgreSQL can't compare JSON for DISTINCT)
id_subquery = (
db.query(MarketplaceProduct.id)
.outerjoin(MarketplaceProductTranslation)
.filter(
or_(
MarketplaceProductTranslation.title.ilike(search_term),
@@ -647,7 +656,11 @@ class MarketplaceProductService:
)
)
.distinct()
.subquery()
)
query = query.filter(MarketplaceProduct.id.in_(
db.query(id_subquery.c.id)
))
if marketplace:
query = query.filter(MarketplaceProduct.marketplace == marketplace)

View File

@@ -203,6 +203,11 @@ class OnboardingService:
Returns response with next step information.
"""
# Check vendor exists BEFORE creating onboarding record (FK constraint)
vendor = self.db.query(Vendor).filter(Vendor.id == vendor_id).first()
if not vendor:
raise VendorNotFoundException(vendor_id)
onboarding = self.get_or_create_onboarding(vendor_id)
# Update onboarding status if this is the first step
@@ -210,11 +215,6 @@ class OnboardingService:
onboarding.status = OnboardingStatus.IN_PROGRESS.value
onboarding.started_at = datetime.now(UTC)
# Get vendor and company
vendor = self.db.query(Vendor).filter(Vendor.id == vendor_id).first()
if not vendor:
raise VendorNotFoundException(vendor_id)
company = vendor.company
# Update company name if provided

View File

@@ -278,9 +278,9 @@ class ProductService:
# Prepare search pattern for LIKE queries
search_pattern = f"%{query}%"
# Build base query with translation join
base_query = (
db.query(Product)
# Use subquery to get distinct IDs (PostgreSQL can't compare JSON for DISTINCT)
id_subquery = (
db.query(Product.id)
.outerjoin(
ProductTranslation,
(Product.id == ProductTranslation.product_id)
@@ -303,6 +303,11 @@ class ProductService:
)
)
.distinct()
.subquery()
)
base_query = db.query(Product).filter(
Product.id.in_(db.query(id_subquery.c.id))
)
# Get total count

View File

@@ -207,6 +207,18 @@ class SubscriptionService:
)
return subscription
def get_current_tier(
self, db: Session, vendor_id: int
) -> TierCode | None:
"""Get vendor's current subscription tier code."""
subscription = self.get_subscription(db, vendor_id)
if subscription:
try:
return TierCode(subscription.tier)
except ValueError:
return None
return None
def get_or_create_subscription(
self,
db: Session,

View File

@@ -141,7 +141,7 @@ class VendorEmailSettingsService:
raise AuthorizationException(
message=f"Provider '{provider}' requires Business or Enterprise tier. "
"Upgrade your plan to use advanced email providers.",
required_permission="business_tier",
details={"required_permission": "business_tier"},
)
settings = self.get_settings(vendor_id)

View File

@@ -2,8 +2,10 @@
"""Database utilities for database operations.
This module provides utility functions and classes to interact with a database using SQLAlchemy. It includes:
- Creating a database engine with connection pooling.
- Creating a PostgreSQL database engine with connection pooling.
- Generating a session factory for creating sessions.
Note: This project uses PostgreSQL only. SQLite is not supported.
"""
import logging
@@ -16,31 +18,35 @@ logger = logging.getLogger(__name__)
def get_db_engine(database_url: str):
"""Create a database engine with connection pooling.
"""Create a PostgreSQL database engine with connection pooling.
Args:
database_url (str): The URL string to connect to the database. It can be for SQLite or PostgreSQL databases.
database_url (str): The PostgreSQL URL string to connect to the database.
Returns:
sqlalchemy.engine.Engine: A SQLAlchemy Engine instance configured according to the provided database URL.
sqlalchemy.engine.Engine: A SQLAlchemy Engine instance configured for PostgreSQL.
Raises:
ValueError: If database_url is not a PostgreSQL URL.
"""
if database_url.startswith("sqlite"):
# Configuration for SQLite database
engine = create_engine(
database_url, connect_args={"check_same_thread": False}, echo=False
)
else:
# Configuration for PostgreSQL databases with connection pooling
engine = create_engine(
database_url,
poolclass=QueuePool,
pool_size=10,
max_overflow=20,
pool_pre_ping=True,
echo=False,
if not database_url.startswith("postgresql"):
raise ValueError(
f"Unsupported database: {database_url.split(':')[0]}. "
"Only PostgreSQL is supported."
)
logger.info(f"Database engine created for: {database_url.split('@')[0]}@...")
engine = create_engine(
database_url,
poolclass=QueuePool,
pool_size=10,
max_overflow=20,
pool_pre_ping=True,
echo=False,
)
# Log URL without credentials (keep only the part after the last '@')
safe_url = "...@" + database_url.rsplit("@", 1)[-1] if "@" in database_url else database_url
logger.info(f"Database engine created for: {safe_url}")
return engine

docker-compose.test.yml
View File

@@ -0,0 +1,19 @@
# docker-compose.test.yml
# Test database for pytest - uses tmpfs for speed
services:
test_db:
image: postgres:15
restart: "no"
environment:
POSTGRES_DB: wizamart_test
POSTGRES_USER: test_user
POSTGRES_PASSWORD: test_password
ports:
- "5433:5432" # Different port to avoid conflict with dev db
tmpfs:
- /var/lib/postgresql/data # Use RAM for faster tests
healthcheck:
test: ["CMD-SHELL", "pg_isready -U test_user -d wizamart_test"]
interval: 5s
timeout: 5s
retries: 5
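
For reference, a sketch of how a test session could target this container (fixture names are illustrative; the project's actual conftest.py may differ):

```python
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

# Matches the docker-compose.test.yml service above (port 5433 on the host)
TEST_DATABASE_URL = "postgresql://test_user:test_password@localhost:5433/wizamart_test"

@pytest.fixture(scope="session")
def test_engine():
    engine = create_engine(TEST_DATABASE_URL, pool_pre_ping=True)
    yield engine
    engine.dispose()

@pytest.fixture()
def db_session(test_engine):
    # One transaction per test, rolled back afterwards for isolation
    connection = test_engine.connect()
    transaction = connection.begin()
    session = sessionmaker(bind=connection)()
    yield session
    session.close()
    transaction.rollback()
    connection.close()
```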

View File

@@ -0,0 +1,524 @@
# Docker Deployment
This guide covers deploying Wizamart using Docker and Docker Compose.
**Best for:** Teams who want consistent environments and easy rollbacks.
---
## Development vs Production
| Aspect | Development | Production |
|--------|-------------|------------|
| Compose file | `docker-compose.yml` | `docker-compose.prod.yml` |
| App server | Hot reload enabled | Multiple workers |
| Database | Local volume | Persistent volume with backups |
| SSL | Not needed | Required (via Nginx) |
| Logging | Console | File + centralized |
---
## Development Setup
```bash
# Start all services
make docker-up
# Or manually
docker compose up -d
# View logs
docker compose logs -f
# Stop services
make docker-down
```
### Current Services
| Service | Port | Purpose |
|---------|------|---------|
| db | 5432 | PostgreSQL database |
| redis | 6379 | Cache and queue broker |
| api | 8000 | FastAPI application |
---
## Production Deployment
### 1. Create Production Compose File
```yaml
# docker-compose.prod.yml
services:
api:
build:
context: .
dockerfile: Dockerfile
restart: always
ports:
- "127.0.0.1:8000:8000"
environment:
DATABASE_URL: postgresql://wizamart_user:${DB_PASSWORD}@db:5432/wizamart_db
REDIS_URL: redis://redis:6379/0
CELERY_BROKER_URL: redis://redis:6379/1
env_file:
- .env
depends_on:
db:
condition: service_healthy
redis:
condition: service_healthy
volumes:
- uploads:/app/uploads
- logs:/app/logs
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
interval: 30s
timeout: 10s
retries: 3
deploy:
resources:
limits:
memory: 1G
celery:
build: .
restart: always
command: celery -A app.celery worker --loglevel=info --concurrency=4
environment:
DATABASE_URL: postgresql://wizamart_user:${DB_PASSWORD}@db:5432/wizamart_db
REDIS_URL: redis://redis:6379/0
CELERY_BROKER_URL: redis://redis:6379/1
env_file:
- .env
depends_on:
- db
- redis
volumes:
- logs:/app/logs
deploy:
resources:
limits:
memory: 512M
celery-beat:
build: .
restart: always
command: celery -A app.celery beat --loglevel=info
environment:
CELERY_BROKER_URL: redis://redis:6379/1
env_file:
- .env
depends_on:
- redis
deploy:
resources:
limits:
memory: 256M
db:
image: postgres:15-alpine
restart: always
environment:
POSTGRES_DB: wizamart_db
POSTGRES_USER: wizamart_user
POSTGRES_PASSWORD: ${DB_PASSWORD}
volumes:
- postgres_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U wizamart_user -d wizamart_db"]
interval: 10s
timeout: 5s
retries: 5
deploy:
resources:
limits:
memory: 512M
redis:
image: redis:7-alpine
restart: always
command: redis-server --appendonly yes --maxmemory 256mb --maxmemory-policy allkeys-lru
volumes:
- redis_data:/data
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
deploy:
resources:
limits:
memory: 300M
nginx:
image: nginx:alpine
restart: always
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- ./nginx/conf.d:/etc/nginx/conf.d:ro
- ./static:/app/static:ro
- uploads:/app/uploads:ro
- /etc/letsencrypt:/etc/letsencrypt:ro
depends_on:
- api
deploy:
resources:
limits:
memory: 128M
volumes:
postgres_data:
redis_data:
uploads:
logs:
```
### 2. Create Dockerfile
```dockerfile
# Dockerfile
FROM python:3.11-slim
# Install system dependencies
RUN apt-get update && apt-get install -y \
curl \
&& rm -rf /var/lib/apt/lists/*
# Install Tailwind CLI
RUN curl -sLO https://github.com/tailwindlabs/tailwindcss/releases/latest/download/tailwindcss-linux-x64 \
&& chmod +x tailwindcss-linux-x64 \
&& mv tailwindcss-linux-x64 /usr/local/bin/tailwindcss
WORKDIR /app
# Install Python dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy application
COPY . .
# Build Tailwind CSS
RUN tailwindcss -i ./static/admin/css/tailwind.css -o ./static/admin/css/tailwind.output.css --minify \
&& tailwindcss -i ./static/vendor/css/tailwind.css -o ./static/vendor/css/tailwind.output.css --minify \
&& tailwindcss -i ./static/shop/css/tailwind.css -o ./static/shop/css/tailwind.output.css --minify \
&& tailwindcss -i ./static/platform/css/tailwind.css -o ./static/platform/css/tailwind.output.css --minify
# Create non-root user
RUN useradd -m -u 1000 wizamart && chown -R wizamart:wizamart /app
USER wizamart
EXPOSE 8000
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000", "--workers", "4"]
```
### 3. Nginx Configuration
```bash
mkdir -p nginx/conf.d
```
```nginx
# nginx/conf.d/wizamart.conf
upstream api {
server api:8000;
}
server {
listen 80;
server_name yourdomain.com;
return 301 https://$server_name$request_uri;
}
server {
listen 443 ssl http2;
server_name yourdomain.com;
ssl_certificate /etc/letsencrypt/live/yourdomain.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/yourdomain.com/privkey.pem;
# Security headers
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
# Static files
location /static {
alias /app/static;
expires 30d;
add_header Cache-Control "public, immutable";
}
location /uploads {
alias /app/uploads;
expires 7d;
}
location / {
proxy_pass http://api;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
```
### 4. Deploy
```bash
# Create .env file with production values
cp .env.example .env
nano .env
# Set database password
export DB_PASSWORD=$(openssl rand -hex 16)
echo "DB_PASSWORD=$DB_PASSWORD" >> .env
# Build and start
docker compose -f docker-compose.prod.yml build
docker compose -f docker-compose.prod.yml up -d
# Run migrations
docker compose -f docker-compose.prod.yml exec api alembic upgrade head
# Initialize data
docker compose -f docker-compose.prod.yml exec api python scripts/init_production.py
```
---
## Daily Operations
### View Logs
```bash
# All services
docker compose -f docker-compose.prod.yml logs -f
# Specific service
docker compose -f docker-compose.prod.yml logs -f api
docker compose -f docker-compose.prod.yml logs -f celery
# Last 100 lines
docker compose -f docker-compose.prod.yml logs --tail 100 api
```
### Access Container Shell
```bash
# API container
docker compose -f docker-compose.prod.yml exec api bash
# Database
docker compose -f docker-compose.prod.yml exec db psql -U wizamart_user -d wizamart_db
# Redis
docker compose -f docker-compose.prod.yml exec redis redis-cli
```
### Restart Services
```bash
# Single service
docker compose -f docker-compose.prod.yml restart api
# All services
docker compose -f docker-compose.prod.yml restart
```
### Deploy Updates
```bash
# Pull latest code
git pull origin main
# Rebuild and restart
docker compose -f docker-compose.prod.yml build api celery
docker compose -f docker-compose.prod.yml up -d api celery
# Run migrations if needed
docker compose -f docker-compose.prod.yml exec api alembic upgrade head
```
### Rollback
```bash
# View image history
docker images wizamart-api

# Before deploying an update, tag the running image as a rollback target
docker tag wizamart-api:latest wizamart-api:previous

# Roll back: retag the previous image and recreate the container
docker tag wizamart-api:previous wizamart-api:latest
docker compose -f docker-compose.prod.yml up -d --force-recreate api
```
---
## Backups
### Database Backup
```bash
# Create backup
docker compose -f docker-compose.prod.yml exec db pg_dump -U wizamart_user wizamart_db | gzip > backup_$(date +%Y%m%d).sql.gz
# Restore backup
gunzip -c backup_20240115.sql.gz | docker compose -f docker-compose.prod.yml exec -T db psql -U wizamart_user -d wizamart_db
```
### Volume Backup
```bash
# Backup all volumes
docker run --rm \
-v wizamart_postgres_data:/data \
-v $(pwd)/backups:/backup \
alpine tar czf /backup/postgres_$(date +%Y%m%d).tar.gz /data
```
---
## Monitoring
### Resource Usage
```bash
docker stats
```
### Health Checks
```bash
# Check service health
docker compose -f docker-compose.prod.yml ps
# Test API health
curl -s http://localhost:8000/health | jq
```
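
The healthchecks above assume the application exposes `GET /health`. If it does not yet, a minimal sketch of the shape (checking database connectivity in the probe is an assumption, not a requirement):

```python
from fastapi import FastAPI
from sqlalchemy import text

from app.core.database import engine  # module referenced elsewhere in this guide

app = FastAPI()

@app.get("/health")
def health() -> dict:
    # Fail the probe if the database does not respond
    with engine.connect() as conn:
        conn.execute(text("SELECT 1"))
    return {"status": "ok"}
```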
---
## Troubleshooting
### Container Won't Start
```bash
# Check logs
docker compose -f docker-compose.prod.yml logs api
# Check container status
docker compose -f docker-compose.prod.yml ps -a
# Inspect container
docker inspect <container_id>
```
### Database Connection Issues
```bash
# Test from API container
docker compose -f docker-compose.prod.yml exec api python -c "
from app.core.database import engine
with engine.connect() as conn:
print('Connected!')
"
```
### Out of Disk Space
```bash
# Check disk usage
docker system df
# Clean up
docker system prune -a --volumes
```
### Memory Issues
```bash
# Check memory usage
docker stats --no-stream
```

Then raise the limit in `docker-compose.prod.yml`:

```yaml
deploy:
  resources:
    limits:
      memory: 2G
```
---
## Security
### Non-Root User
All containers run as non-root users. The Dockerfile creates a `wizamart` user.
### Secret Management
```bash
# Use Docker secrets (Swarm mode)
echo "your-password" | docker secret create db_password -
# Or use environment files
# Never commit .env to git
```
### Network Isolation
```yaml
# Add to docker-compose.prod.yml
networks:
frontend:
backend:
services:
nginx:
networks:
- frontend
api:
networks:
- frontend
- backend
db:
networks:
- backend
redis:
networks:
- backend
```
---
## Scaling
### Horizontal Scaling
```bash
# Scale API containers
# Note: remove the fixed "127.0.0.1:8000:8000" port mapping first,
# or the replicas will conflict over host port 8000
docker compose -f docker-compose.prod.yml up -d --scale api=3
```

Then update the nginx upstream (container names follow `<project>-api-N` under Compose v2; adjust to your project name):

```nginx
upstream api {
    server wizamart-api-1:8000;
    server wizamart-api-2:8000;
    server wizamart-api-3:8000;
}
```
### Moving to Kubernetes
When you outgrow Docker Compose, see our Kubernetes migration guide (coming soon).

View File

@@ -2,13 +2,24 @@
This guide covers deploying the Wizamart platform to production environments.
!!! tip "New to deployment?"
Start with the [Infrastructure Guide](infrastructure.md) for a complete overview of architecture options.
## Deployment Options
| Option | Best For | Guide |
|--------|----------|-------|
| **Traditional VPS** | Direct server access, debugging | [Production Guide](production.md) |
| **Docker Compose** | Consistent environments, easy rollbacks | [Docker Guide](docker.md) |
| **Managed Services** | Minimal ops, small teams | See [Infrastructure Guide](infrastructure.md#option-3-managed-services-minimal-ops) |
## Prerequisites
- Python 3.11+
- PostgreSQL 14+ (production) or SQLite (development)
- Redis (optional, for caching/sessions)
- PostgreSQL 15+ (required - SQLite not supported)
- Redis (required for Celery background jobs)
- Docker (for development database)
- Tailwind CSS CLI (standalone binary)
- uv package manager
## Environment Configuration

View File

@@ -0,0 +1,845 @@
# Infrastructure Guide
This guide documents the complete infrastructure for the Wizamart platform, from development to high-end production.
**Philosophy:** We prioritize **debuggability and operational simplicity** over complexity. Every component should be directly accessible for troubleshooting.
---
## Table of Contents
- [Architecture Overview](#architecture-overview)
- [Current State](#current-state)
- [Development Environment](#development-environment)
- [Production Options](#production-options)
- [Future High-End Architecture](#future-high-end-architecture)
- [Component Deep Dives](#component-deep-dives)
- [Troubleshooting Guide](#troubleshooting-guide)
- [Decision Matrix](#decision-matrix)
---
## Architecture Overview
### System Components
```
┌─────────────────────────────────────────────────────────────────────────┐
│ CLIENTS │
│ (Browsers, Mobile Apps, API Consumers) │
└─────────────────────────────────────────────────────────────────────────┘
┌─────────────────────────────────────────────────────────────────────────┐
│ LOAD BALANCER / PROXY │
│ (Nginx, Caddy, or Cloud LB) │
│ - SSL termination │
│ - Static file serving │
│ - Rate limiting │
└─────────────────────────────────────────────────────────────────────────┘
┌───────────────┼───────────────┐
▼ ▼ ▼
┌─────────────────────────────────────────────────────────────────────────┐
│ APPLICATION SERVERS │
│ (FastAPI + Uvicorn) │
│ - API endpoints │
│ - HTML rendering (Jinja2) │
│ - WebSocket connections │
└─────────────────────────────────────────────────────────────────────────┘
│ │ │
▼ ▼ ▼
┌──────────────────┐ ┌──────────────────┐ ┌──────────────────┐
│ PostgreSQL │ │ Redis │ │ File Storage │
│ (Primary DB) │ │ (Cache/Queue) │ │ (S3/Local) │
└──────────────────┘ └──────────────────┘ └──────────────────┘
┌──────────────────┐
│ Celery Workers │
│ (Background Jobs)│
└──────────────────┘
```
### Data Flow
1. **Request** → Nginx → Uvicorn → FastAPI → Service Layer → Database
2. **Background Job** → API creates task → Redis Queue → Celery Worker → Database
3. **Static Files** → Nginx serves directly (or CDN in production)
---
## Current State
### What We Have Now
| Component | Technology | Status |
|-----------|------------|--------|
| Web Framework | FastAPI + Uvicorn | ✅ Production Ready |
| Database | PostgreSQL 15 | ✅ Production Ready |
| ORM | SQLAlchemy 2.0 | ✅ Production Ready |
| Migrations | Alembic | ✅ Production Ready |
| Templates | Jinja2 + Tailwind CSS | ✅ Production Ready |
| Authentication | JWT (PyJWT) | ✅ Production Ready |
| Email | SMTP/SendGrid/Mailgun/SES | ✅ Production Ready |
| Payments | Stripe | ✅ Production Ready |
| Background Jobs | - | ⏳ Planned (Celery) |
| Caching | - | ⏳ Planned (Redis) |
| File Storage | Local filesystem | ⏳ Needs S3 for prod |
### What We Need to Add
| Component | Priority | Reason |
|-----------|----------|--------|
| Redis | High | Session cache, Celery broker |
| Celery | High | Background jobs (imports, emails, reports) |
| S3/MinIO | Medium | Scalable file storage |
| Sentry | Medium | Error tracking |
| Prometheus/Grafana | Low | Metrics and dashboards |
---
## Development Environment
### Local Setup (Recommended)
```bash
# 1. Start PostgreSQL
make docker-up
# 2. Run migrations
make migrate-up
# 3. Initialize data
make init-prod
# 4. Start development server
make dev
# 5. (Optional) Run tests
make test
```
### Services Running Locally
| Service | Host | Port | Purpose |
|---------|------|------|---------|
| FastAPI | localhost | 8000 | Main application |
| PostgreSQL | localhost | 5432 | Development database |
| PostgreSQL (test) | localhost | 5433 | Test database |
| MkDocs | localhost | 8001 | Documentation |
### Docker Compose Services
```yaml
# docker-compose.yml
services:
db: # PostgreSQL for development
redis: # Redis for cache/queue (coming soon)
api: # FastAPI application (optional)
```
---
## Production Options
### Option 1: Traditional VPS (Recommended for Troubleshooting)
**Best for:** Teams who want direct server access, familiar with Linux administration.
```
┌─────────────────────────────────────────────────────────────┐
│ VPS (4GB+ RAM) │
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
│ │ Nginx │ │ Uvicorn │ │ PostgreSQL │ │
│ │ (reverse │ │ (4 workers)│ │ (local) │ │
│ │ proxy) │ │ │ │ │ │
│ └─────────────┘ └─────────────┘ └─────────────┘ │
│ │ │ │ │
│ └────────────────┼────────────────┘ │
│ │ │
│ ┌─────────────┐ ┌─────────────┐ │
│ │ Redis │ │ Celery │ │
│ │ (local) │ │ (workers) │ │
│ └─────────────┘ └─────────────┘ │
└─────────────────────────────────────────────────────────────┘
```
**Setup:**
```bash
# On Ubuntu 22.04+ VPS
# 1. Install system packages
sudo apt update
# Note: stock Ubuntu 22.04 ships PostgreSQL 14; postgresql-15 may need the PGDG apt repository
sudo apt install -y nginx postgresql-15 redis-server python3.11 python3.11-venv
# 2. Create application user
sudo useradd -m -s /bin/bash wizamart
sudo su - wizamart
# 3. Clone and setup
git clone <repo> /home/wizamart/app
cd /home/wizamart/app
python3.11 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
# 4. Configure environment
cp .env.example .env
nano .env # Edit with production values
# 5. Setup database
sudo -u postgres createuser wizamart_user
sudo -u postgres createdb wizamart_db -O wizamart_user
alembic upgrade head
python scripts/init_production.py
# 6. Create systemd service
sudo nano /etc/systemd/system/wizamart.service
```
**Systemd Service:**
```ini
# /etc/systemd/system/wizamart.service
[Unit]
Description=Wizamart API
After=network.target postgresql.service redis.service
[Service]
User=wizamart
Group=wizamart
WorkingDirectory=/home/wizamart/app
Environment="PATH=/home/wizamart/app/.venv/bin"
EnvironmentFile=/home/wizamart/app/.env
ExecStart=/home/wizamart/app/.venv/bin/uvicorn main:app --host 127.0.0.1 --port 8000 --workers 4
Restart=always
RestartSec=3
[Install]
WantedBy=multi-user.target
```
**Celery Workers:**
```ini
# /etc/systemd/system/wizamart-celery.service
[Unit]
Description=Wizamart Celery Worker
After=network.target redis.service
[Service]
User=wizamart
Group=wizamart
WorkingDirectory=/home/wizamart/app
Environment="PATH=/home/wizamart/app/.venv/bin"
EnvironmentFile=/home/wizamart/app/.env
ExecStart=/home/wizamart/app/.venv/bin/celery -A app.celery worker --loglevel=info --concurrency=4
Restart=always
RestartSec=3
[Install]
WantedBy=multi-user.target
```
**Nginx Configuration:**
```nginx
# /etc/nginx/sites-available/wizamart
server {
listen 80;
server_name yourdomain.com;
return 301 https://$server_name$request_uri;
}
server {
listen 443 ssl http2;
server_name yourdomain.com;
ssl_certificate /etc/letsencrypt/live/yourdomain.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/yourdomain.com/privkey.pem;
# Security headers
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
# Static files (served directly by Nginx)
location /static {
alias /home/wizamart/app/static;
expires 30d;
add_header Cache-Control "public, immutable";
}
# Uploaded files
location /uploads {
alias /home/wizamart/app/uploads;
expires 7d;
}
# API and application
location / {
proxy_pass http://127.0.0.1:8000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# WebSocket support (for future real-time features)
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}
}
```
**Troubleshooting Commands:**
```bash
# Check service status
sudo systemctl status wizamart
sudo systemctl status wizamart-celery
sudo systemctl status postgresql
sudo systemctl status redis
# View logs
sudo journalctl -u wizamart -f
sudo journalctl -u wizamart-celery -f
# Connect to database directly
sudo -u postgres psql wizamart_db
# Check Redis
redis-cli ping
redis-cli monitor # Watch commands in real-time
# Restart services
sudo systemctl restart wizamart
sudo systemctl restart wizamart-celery
```
---
### Option 2: Docker Compose Production
**Best for:** Consistent environments, easy rollbacks, container familiarity.
```yaml
# docker-compose.prod.yml
services:
api:
build: .
restart: always
ports:
- "127.0.0.1:8000:8000"
environment:
DATABASE_URL: postgresql://wizamart_user:${DB_PASSWORD}@db:5432/wizamart_db
REDIS_URL: redis://redis:6379/0
CELERY_BROKER_URL: redis://redis:6379/1
depends_on:
db:
condition: service_healthy
redis:
condition: service_healthy
volumes:
- ./uploads:/app/uploads
- ./logs:/app/logs
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
interval: 30s
timeout: 10s
retries: 3
celery:
build: .
restart: always
command: celery -A app.celery worker --loglevel=info --concurrency=4
environment:
DATABASE_URL: postgresql://wizamart_user:${DB_PASSWORD}@db:5432/wizamart_db
REDIS_URL: redis://redis:6379/0
CELERY_BROKER_URL: redis://redis:6379/1
depends_on:
- db
- redis
volumes:
- ./logs:/app/logs
celery-beat:
build: .
restart: always
command: celery -A app.celery beat --loglevel=info
environment:
DATABASE_URL: postgresql://wizamart_user:${DB_PASSWORD}@db:5432/wizamart_db
CELERY_BROKER_URL: redis://redis:6379/1
depends_on:
- redis
db:
image: postgres:15
restart: always
environment:
POSTGRES_DB: wizamart_db
POSTGRES_USER: wizamart_user
POSTGRES_PASSWORD: ${DB_PASSWORD}
volumes:
- postgres_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U wizamart_user -d wizamart_db"]
interval: 10s
timeout: 5s
retries: 5
redis:
image: redis:7-alpine
restart: always
volumes:
- redis_data:/data
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 5
nginx:
image: nginx:alpine
restart: always
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf:ro
- ./static:/app/static:ro
- ./uploads:/app/uploads:ro
- /etc/letsencrypt:/etc/letsencrypt:ro
depends_on:
- api
volumes:
postgres_data:
redis_data:
```
**Troubleshooting Commands:**
```bash
# View all containers
docker compose -f docker-compose.prod.yml ps
# View logs
docker compose -f docker-compose.prod.yml logs -f api
docker compose -f docker-compose.prod.yml logs -f celery
# Access container shell
docker compose -f docker-compose.prod.yml exec api bash
docker compose -f docker-compose.prod.yml exec db psql -U wizamart_user -d wizamart_db
# Restart specific service
docker compose -f docker-compose.prod.yml restart api
# View resource usage
docker stats
```
---
### Option 3: Managed Services (Minimal Ops)
**Best for:** Small teams, focus on product not infrastructure.
| Component | Service | Cost (approx) |
|-----------|---------|---------------|
| App Hosting | Railway / Render / Fly.io | $5-25/mo |
| Database | Neon / Supabase / PlanetScale | $0-25/mo |
| Redis | Upstash / Redis Cloud | $0-10/mo |
| File Storage | Cloudflare R2 / AWS S3 | $0-5/mo |
| Email | Resend / SendGrid | $0-20/mo |
**Example: Railway + Neon**
```bash
# Deploy to Railway
railway login
railway init
railway up
# Configure environment
railway variables set DATABASE_URL="postgresql://..."
railway variables set REDIS_URL="redis://..."
```
---
## Future High-End Architecture
### Target Production Architecture
```
┌─────────────────┐
│ CloudFlare │
│ (CDN + WAF) │
└────────┬────────┘
┌────────▼────────┐
│ Load Balancer │
│ (HA Proxy/ALB) │
└────────┬────────┘
┌──────────────────────┼──────────────────────┐
│ │ │
┌────────▼────────┐ ┌────────▼────────┐ ┌────────▼────────┐
│ API Server 1 │ │ API Server 2 │ │ API Server N │
│ (Uvicorn) │ │ (Uvicorn) │ │ (Uvicorn) │
└────────┬────────┘ └────────┬────────┘ └────────┬────────┘
│ │ │
└──────────────────────┼──────────────────────┘
┌───────────────────────────┼───────────────────────────┐
│ │ │
┌────────▼────────┐ ┌────────▼────────┐ ┌────────▼────────┐
│ PostgreSQL │ │ Redis │ │ S3 / MinIO │
│ (Primary) │ │ (Cluster) │ │ (Files) │
│ │ │ │ │ │ │
│ ┌────▼────┐ │ │ ┌─────────┐ │ │ │
│ │ Replica │ │ │ │ Sentinel│ │ │ │
│ └─────────┘ │ │ └─────────┘ │ │ │
└─────────────────┘ └─────────────────┘ └─────────────────┘
┌──────────────────────┼──────────────────────┐
│ │ │
┌────────▼────────┐ ┌────────▼────────┐ ┌────────▼────────┐
│ Celery Worker 1 │ │ Celery Worker 2 │ │ Celery Beat │
│ (General) │ │ (Import Jobs) │ │ (Scheduler) │
└─────────────────┘ └─────────────────┘ └─────────────────┘
┌─────────────────────────────┐
│ Monitoring Stack │
│ ┌─────────┐ ┌───────────┐ │
│ │Prometheus│ │ Grafana │ │
│ └─────────┘ └───────────┘ │
│ ┌─────────┐ ┌───────────┐ │
│ │ Sentry │ │ Loki │ │
│ └─────────┘ └───────────┘ │
└─────────────────────────────┘
```
### Celery Task Queues
```python
# app/celery.py (to be implemented)
from celery import Celery

from app.core.config import settings  # assumed: config module exposing the broker/result URLs
celery_app = Celery(
"wizamart",
broker=settings.celery_broker_url,
backend=settings.celery_result_backend,
)
celery_app.conf.task_queues = {
"default": {"exchange": "default", "routing_key": "default"},
"imports": {"exchange": "imports", "routing_key": "imports"},
"emails": {"exchange": "emails", "routing_key": "emails"},
"reports": {"exchange": "reports", "routing_key": "reports"},
}
celery_app.conf.task_routes = {
"app.tasks.import_letzshop_products": {"queue": "imports"},
"app.tasks.send_email": {"queue": "emails"},
"app.tasks.generate_report": {"queue": "reports"},
}
```
### Background Tasks to Implement
| Task | Queue | Priority | Description |
|------|-------|----------|-------------|
| `import_letzshop_products` | imports | High | Marketplace product sync |
| `import_letzshop_orders` | imports | High | Order sync from Letzshop |
| `send_order_confirmation` | emails | High | Order emails |
| `send_password_reset` | emails | High | Auth emails |
| `send_invoice_email` | emails | Medium | Invoice delivery |
| `generate_sales_report` | reports | Low | Analytics reports |
| `cleanup_expired_sessions` | default | Low | Maintenance |
| `sync_stripe_subscriptions` | default | Medium | Billing sync |
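To make the routing concrete, a sketch of one of these tasks and how the API would enqueue it (module paths follow the `app/celery.py` sketch above and the `app.tasks.*` names in the routing config; the body is illustrative):

```python
# app/tasks.py (illustrative)
from app.celery import celery_app

@celery_app.task(bind=True, max_retries=3, default_retry_delay=30)
def send_email(self, recipient: str, template: str, context: dict) -> None:
    """Routed to the 'emails' queue via task_routes."""
    try:
        ...  # render the template and hand off to the email provider
    except Exception as exc:
        raise self.retry(exc=exc)

# Enqueueing from the API layer:
send_email.delay("vendor@example.com", "order_confirmation", {"order_id": 42})

# Or with an explicit queue override:
send_email.apply_async(
    args=["vendor@example.com", "order_confirmation", {"order_id": 42}],
    queue="emails",
)
```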
---
## Component Deep Dives
### PostgreSQL Configuration
**Production Settings (`postgresql.conf`):**
```ini
# Memory (adjust based on server RAM)
shared_buffers = 256MB # 25% of RAM for dedicated DB server
effective_cache_size = 768MB # 75% of RAM
work_mem = 16MB
maintenance_work_mem = 128MB
# Connections
max_connections = 100
# Write-Ahead Log
wal_level = replica
max_wal_senders = 3
# Query Planning
random_page_cost = 1.1 # For SSD storage
effective_io_concurrency = 200 # For SSD storage
# Logging
log_min_duration_statement = 1000 # Log queries > 1 second
log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d '
```
**Backup Strategy:**
```bash
#!/bin/bash
# Daily backup script
BACKUP_DIR=/backups/postgresql
DATE=$(date +%Y%m%d_%H%M%S)
pg_dump -U wizamart_user wizamart_db | gzip > $BACKUP_DIR/wizamart_$DATE.sql.gz
# Keep last 7 days
find $BACKUP_DIR -name "*.sql.gz" -mtime +7 -delete
```
### Redis Configuration
**Use Cases:**
| Use Case | Database | TTL | Description |
|----------|----------|-----|-------------|
| Session Cache | 0 | 24h | User sessions |
| API Rate Limiting | 0 | 1h | Request counters |
| Celery Broker | 1 | - | Task queue |
| Celery Results | 2 | 24h | Task results |
| Feature Flags | 3 | 5m | Feature gate cache |
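A sketch of addressing these logical databases from application code (using the `redis` client; key names are illustrative):

```python
import redis

# One client per purpose, matching the database numbers above
session_cache = redis.Redis.from_url("redis://localhost:6379/0")
feature_flags = redis.Redis.from_url("redis://localhost:6379/3")

# Session entry with a 24h TTL
session_cache.setex("session:abc123", 60 * 60 * 24, "user:42")

# Feature-flag cache entry with a 5m TTL
feature_flags.setex("features:vendor:42", 300, "whitelabel,analytics")
```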
**Configuration (`redis.conf`):**
```ini
maxmemory 256mb
maxmemory-policy allkeys-lru
appendonly yes
appendfsync everysec
```
### Nginx Tuning
```nginx
# /etc/nginx/nginx.conf
worker_processes auto;
worker_rlimit_nofile 65535;
events {
worker_connections 4096;
use epoll;
multi_accept on;
}
http {
# Buffers
client_body_buffer_size 10K;
client_header_buffer_size 1k;
client_max_body_size 50M;
large_client_header_buffers 2 1k;
# Timeouts
client_body_timeout 12;
client_header_timeout 12;
keepalive_timeout 15;
send_timeout 10;
# Gzip
gzip on;
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_types text/plain text/css text/xml application/json application/javascript;
}
```
---
## Troubleshooting Guide
### Quick Diagnostics
```bash
# Check all services
systemctl status wizamart wizamart-celery postgresql redis nginx
# Check ports
ss -tlnp | grep -E '(8000|5432|6379|80|443)'
# Check disk space
df -h
# Check memory
free -h
# Check CPU/processes
htop
```
### Database Issues
```bash
# Connect to database
sudo -u postgres psql wizamart_db
# Check active connections
SELECT count(*) FROM pg_stat_activity;
# Find slow queries
SELECT pid, now() - pg_stat_activity.query_start AS duration, query
FROM pg_stat_activity
WHERE state != 'idle'
ORDER BY duration DESC;
# Kill stuck query
SELECT pg_terminate_backend(pid);
# Check table sizes
SELECT relname, pg_size_pretty(pg_total_relation_size(relid))
FROM pg_catalog.pg_statio_user_tables
ORDER BY pg_total_relation_size(relid) DESC;
# Analyze query performance
EXPLAIN ANALYZE SELECT ...;
```
### Redis Issues
```bash
# Check connectivity
redis-cli ping
# Monitor real-time commands
redis-cli monitor
# Check memory usage
redis-cli info memory
# List all keys (careful in production!)
redis-cli --scan
# Check queue lengths
redis-cli llen celery
# Flush specific database
redis-cli -n 1 flushdb # Flush Celery broker
```
### Celery Issues
```bash
# Check worker status
celery -A app.celery inspect active
celery -A app.celery inspect reserved
celery -A app.celery inspect stats
# Purge all pending tasks
celery -A app.celery purge
# List registered tasks
celery -A app.celery inspect registered
```
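
The same checks are available from Python, which can be handy in a debugging session (assuming the `celery_app` from the sketch earlier in this guide):

```python
from app.celery import celery_app  # assumed module from the Celery sketch above

insp = celery_app.control.inspect(timeout=2.0)
print(insp.active())      # tasks currently executing, per worker
print(insp.reserved())    # tasks prefetched but not yet started
print(insp.stats())       # per-worker stats (pool size, task counts, ...)
```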
### Application Issues
```bash
# Check API health
curl -s http://localhost:8000/health | jq
# View recent logs
journalctl -u wizamart --since "10 minutes ago"
# Check for Python errors
journalctl -u wizamart | grep -i error | tail -20
# Test database connection
python -c "from app.core.database import engine; print(engine.connect())"
```
### Common Problems & Solutions
| Problem | Diagnosis | Solution |
|---------|-----------|----------|
| 502 Bad Gateway | `systemctl status wizamart` | Restart app: `systemctl restart wizamart` |
| Database connection refused | `pg_isready` | Start PostgreSQL: `systemctl start postgresql` |
| High memory usage | `free -h`, `ps aux --sort=-%mem` | Restart app, check for memory leaks |
| Slow queries | PostgreSQL slow query log | Add indexes, optimize queries |
| Celery tasks stuck | `celery inspect active` | Restart workers, check Redis |
| Disk full | `df -h` | Clean logs, backups, temp files |
---
## Decision Matrix
### When to Use Each Option
| Scenario | Recommended | Reason |
|----------|-------------|--------|
| Solo developer, MVP | Managed (Railway) | Focus on product |
| Small team, budget conscious | Traditional VPS | Full control, low cost |
| Need direct DB access for debugging | Traditional VPS | Direct psql access |
| Familiar with Docker, want consistency | Docker Compose | Reproducible environments |
| High availability required | Docker + Orchestration | Easy scaling |
| Enterprise, compliance requirements | Kubernetes | Full orchestration |
### Cost Comparison (Monthly)
| Setup | Low Traffic | Medium | High |
|-------|-------------|--------|------|
| Managed (Railway + Neon) | $10 | $50 | $200+ |
| VPS (Hetzner/DigitalOcean) | $5 | $20 | $80 |
| Docker on VPS | $5 | $20 | $80 |
| AWS/GCP Full Stack | $50 | $200 | $1000+ |
---
## Migration Path
### Phase 1: Current (Development)
- ✅ PostgreSQL (Docker)
- ✅ FastAPI + Uvicorn
- ✅ Local file storage
### Phase 2: Production MVP
- ✅ PostgreSQL (managed or VPS)
- ✅ FastAPI + Uvicorn (systemd or Docker)
- ⏳ Redis (session cache)
- ⏳ Celery (background jobs)
- ⏳ S3/MinIO (file storage)
### Phase 3: Scale
- Horizontal app scaling (multiple Uvicorn instances)
- PostgreSQL read replicas
- Redis cluster
- CDN for static assets
- Dedicated Celery workers per queue
### Phase 4: High Availability
- Multi-region deployment
- Database failover
- Container orchestration (Kubernetes)
- Full monitoring stack
---
## Next Steps
1. **Add Redis to docker-compose.yml** - For session cache
2. **Implement Celery** - Start with email and import tasks
3. **Configure S3/MinIO** - For production file storage
4. **Set up Sentry** - Error tracking
5. **Choose production deployment** - VPS or Docker based on team preference
See also:
- [Production Deployment Guide](production.md)
- [Docker Deployment](docker.md)
- [Environment Configuration](environment.md)

View File

@@ -0,0 +1,399 @@
# Traditional VPS Deployment
This guide covers deploying Wizamart to a traditional VPS (Ubuntu 22.04+) without containers.
**Best for:** Teams who want direct server access and familiar Linux administration.
---
## Prerequisites
- Ubuntu 22.04 LTS or newer
- 4GB+ RAM recommended
- Root or sudo access
- Domain name with DNS configured
---
## Quick Start
```bash
# 1. Install system packages
sudo apt update && sudo apt upgrade -y
# Note: stock Ubuntu 22.04 ships PostgreSQL 14; postgresql-15 may need the PGDG apt repository
sudo apt install -y nginx postgresql-15 redis-server python3.11 python3.11-venv git curl
# 2. Create application user
sudo useradd -m -s /bin/bash wizamart
# 3. Setup PostgreSQL
sudo -u postgres createuser wizamart_user
sudo -u postgres createdb wizamart_db -O wizamart_user
sudo -u postgres psql -c "ALTER USER wizamart_user WITH PASSWORD 'your-secure-password';"
# 4. Clone and setup application
sudo su - wizamart
git clone <repository-url> ~/app
cd ~/app
python3.11 -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
# 5. Configure environment
cp .env.example .env
nano .env # Edit with production values
# 6. Initialize database
alembic upgrade head
python scripts/init_production.py
# 7. Exit wizamart user
exit
```
---
## Systemd Services
### Main Application
```bash
sudo nano /etc/systemd/system/wizamart.service
```
```ini
[Unit]
Description=Wizamart API Server
After=network.target postgresql.service redis.service
[Service]
User=wizamart
Group=wizamart
WorkingDirectory=/home/wizamart/app
Environment="PATH=/home/wizamart/app/.venv/bin"
EnvironmentFile=/home/wizamart/app/.env
ExecStart=/home/wizamart/app/.venv/bin/uvicorn main:app --host 127.0.0.1 --port 8000 --workers 4
Restart=always
RestartSec=3
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
```
### Celery Worker (when implemented)
```bash
sudo nano /etc/systemd/system/wizamart-celery.service
```
```ini
[Unit]
Description=Wizamart Celery Worker
After=network.target redis.service postgresql.service
[Service]
User=wizamart
Group=wizamart
WorkingDirectory=/home/wizamart/app
Environment="PATH=/home/wizamart/app/.venv/bin"
EnvironmentFile=/home/wizamart/app/.env
ExecStart=/home/wizamart/app/.venv/bin/celery -A app.celery worker --loglevel=info --concurrency=4
Restart=always
RestartSec=3
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
```
### Celery Beat (Scheduler)
```bash
sudo nano /etc/systemd/system/wizamart-celery-beat.service
```
```ini
[Unit]
Description=Wizamart Celery Beat Scheduler
After=network.target redis.service
[Service]
User=wizamart
Group=wizamart
WorkingDirectory=/home/wizamart/app
Environment="PATH=/home/wizamart/app/.venv/bin"
EnvironmentFile=/home/wizamart/app/.env
ExecStart=/home/wizamart/app/.venv/bin/celery -A app.celery beat --loglevel=info
Restart=always
RestartSec=3
[Install]
WantedBy=multi-user.target
```
### Enable Services
```bash
sudo systemctl daemon-reload
sudo systemctl enable wizamart wizamart-celery wizamart-celery-beat
sudo systemctl start wizamart wizamart-celery wizamart-celery-beat
```
---
## Nginx Configuration
```bash
sudo nano /etc/nginx/sites-available/wizamart
```
```nginx
# Redirect HTTP to HTTPS
server {
listen 80;
server_name yourdomain.com www.yourdomain.com;
return 301 https://$server_name$request_uri;
}
# Main HTTPS server
server {
listen 443 ssl http2;
server_name yourdomain.com www.yourdomain.com;
# SSL (managed by Certbot)
ssl_certificate /etc/letsencrypt/live/yourdomain.com/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/yourdomain.com/privkey.pem;
include /etc/letsencrypt/options-ssl-nginx.conf;
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem;
# Security headers
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-XSS-Protection "1; mode=block" always;
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
# Logging
access_log /var/log/nginx/wizamart.access.log;
error_log /var/log/nginx/wizamart.error.log;
# Static files (served directly)
location /static {
alias /home/wizamart/app/static;
expires 30d;
add_header Cache-Control "public, immutable";
access_log off;
}
# Uploaded files
location /uploads {
alias /home/wizamart/app/uploads;
expires 7d;
add_header Cache-Control "public";
}
# Application
location / {
proxy_pass http://127.0.0.1:8000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# WebSocket support
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
# Timeouts
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
}
# Block sensitive files
location ~ /\. {
deny all;
}
location ~ \.env$ {
deny all;
}
}
```
### Enable Site
```bash
sudo ln -s /etc/nginx/sites-available/wizamart /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx
```
### SSL with Certbot
```bash
sudo apt install certbot python3-certbot-nginx
sudo certbot --nginx -d yourdomain.com -d www.yourdomain.com
```
---
## Firewall
```bash
sudo ufw allow OpenSSH
sudo ufw allow 'Nginx Full'
sudo ufw enable
```
---
## Daily Operations
### View Logs
```bash
# Application logs
sudo journalctl -u wizamart -f
# Celery logs
sudo journalctl -u wizamart-celery -f
# Nginx logs
sudo tail -f /var/log/nginx/wizamart.access.log
sudo tail -f /var/log/nginx/wizamart.error.log
# PostgreSQL logs
sudo tail -f /var/log/postgresql/postgresql-15-main.log
```
### Restart Services
```bash
sudo systemctl restart wizamart
sudo systemctl restart wizamart-celery
sudo systemctl restart nginx
```
### Database Access
```bash
# Connect as wizamart user
sudo -u postgres psql wizamart_db
# Or with password
psql -h localhost -U wizamart_user -d wizamart_db
```
### Deploy Updates
```bash
sudo su - wizamart
cd ~/app
git pull origin main
source .venv/bin/activate
pip install -r requirements.txt
alembic upgrade head
exit
sudo systemctl restart wizamart wizamart-celery
```
---
## Backups
### Database Backup Script
```bash
sudo nano /home/wizamart/backup.sh
```
```bash
#!/bin/bash
BACKUP_DIR=/home/wizamart/backups
DATE=$(date +%Y%m%d_%H%M%S)
# Create backup directory
mkdir -p $BACKUP_DIR
# Backup database (assumes peer auth or a ~/.pgpass entry so pg_dump runs non-interactively)
pg_dump -U wizamart_user wizamart_db | gzip > $BACKUP_DIR/db_$DATE.sql.gz
# Backup uploads
tar -czf $BACKUP_DIR/uploads_$DATE.tar.gz -C /home/wizamart/app uploads/
# Keep last 7 days
find $BACKUP_DIR -name "*.gz" -mtime +7 -delete
echo "Backup completed: $DATE"
```
```bash
chmod +x /home/wizamart/backup.sh
```
### Cron Job
```bash
sudo -u wizamart crontab -e
```
```cron
# Daily backup at 2 AM
0 2 * * * /home/wizamart/backup.sh >> /home/wizamart/backup.log 2>&1
```
---
## Monitoring
### Basic Health Check
```bash
curl -s http://localhost:8000/health | jq
```
### Process Monitoring
```bash
# Check all services
systemctl status wizamart wizamart-celery postgresql redis nginx
# Resource usage
htop
df -h
free -h
```
### Set Up Sentry (Error Tracking)
Add to `.env`:
```env
SENTRY_DSN=https://your-sentry-dsn
```
---
## Troubleshooting
See [Infrastructure Guide - Troubleshooting](infrastructure.md#troubleshooting-guide) for detailed diagnostics.
### Quick Checks
```bash
# Is the app running?
systemctl status wizamart
# Can we connect to the database?
pg_isready -h localhost -U wizamart_user
# Is Redis running?
redis-cli ping
# Check open ports
ss -tlnp | grep -E '(8000|5432|6379|80|443)'
# View recent errors
journalctl -u wizamart --since "1 hour ago" | grep -i error
```

View File

@@ -54,6 +54,100 @@ make backup-db
make verify-setup
```
## Alembic Commands Explained
Understanding what each Alembic command does is essential for managing database migrations effectively.
### Core Commands Reference
| Command | Description |
|---------|-------------|
| `alembic upgrade head` | Apply **all** pending migrations to get to the latest version |
| `alembic upgrade +1` | Apply only the **next** single migration (one step forward) |
| `alembic downgrade -1` | Roll back the **last** applied migration (one step back) |
| `alembic downgrade base` | Roll back **all** migrations (returns to empty schema) |
| `alembic current` | Show which migration the database is currently at |
| `alembic history` | List all migrations in the chain |
| `alembic heads` | Show the latest migration revision(s) |
### Visual Example
```
Migration Chain: [base] → [A] → [B] → [C] → [D] (head)
Current state:   revision [B]
Command Result
─────────────────────────────────────────────────────────
alembic upgrade head → Database moves to [D]
alembic upgrade +1 → Database moves to [C]
alembic downgrade -1 → Database moves to [A]
alembic downgrade base → Database returns to [base] (empty schema)
alembic current → Shows "B" (current revision)
```
### Makefile Shortcuts
| Make Command | Alembic Equivalent | Description |
|--------------|-------------------|-------------|
| `make migrate-up` | `alembic upgrade head` | Apply all pending migrations |
| `make migrate-down` | `alembic downgrade -1` | Roll back last migration |
| `make migrate-status` | `alembic current` + `alembic history` | Show current state and history |
### Additional Useful Commands
```bash
# Show detailed migration history
alembic history --verbose
# Upgrade/downgrade to a specific revision
alembic upgrade abc123def456
alembic downgrade abc123def456
# Show what SQL would be generated (without executing)
alembic upgrade head --sql
# Mark database as being at a specific revision (without running migrations)
alembic stamp head
alembic stamp abc123def456
# Show the current revision ID only
alembic current --verbose
```
### Database Initialization Workflow
For setting up a new database:
```bash
# Option 1: Empty schema only (just tables, no data)
make migrate-up
# Option 2: Schema + production essentials (admin user, settings, CMS, email templates)
make init-prod
# Option 3: Full development setup (schema + production data + demo data)
make db-setup
# Or step by step:
make init-prod
make seed-demo
```
### Reset Workflow
For completely resetting the database:
```bash
# Nuclear reset: drops all tables, recreates schema, seeds all data
make db-reset
```
> **Note**: `make db-reset` rolls back all migrations and recreates the schema from scratch.
!!! note "PostgreSQL Required"
This project uses PostgreSQL exclusively. SQLite is not supported.
Start the development database with: `make docker-up`
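
For orientation, a minimal migration file sketch (revision ids and column are illustrative) showing the paired `upgrade()`/`downgrade()` that the commands above rely on:

```python
"""add phone column to vendors (illustrative)

Revision ID: abc123def456
Revises: 9f8e7d6c5b4a
"""
import sqlalchemy as sa
from alembic import op

revision = "abc123def456"
down_revision = "9f8e7d6c5b4a"
branch_labels = None
depends_on = None

def upgrade() -> None:
    op.add_column("vendors", sa.Column("phone", sa.String(32), nullable=True))

def downgrade() -> None:
    # Mirror of upgrade() so `alembic downgrade -1` works
    op.drop_column("vendors", "phone")
```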
## Development Workflows
### Adding New Database Fields

View File

@@ -1,11 +1,12 @@
# PyCharm Troubleshooting Guide
# Development Troubleshooting Guide
Common PyCharm issues and their solutions for the development team.
Common development issues and their solutions for the team.
---
## Table of Contents
- [Docker DNS/WiFi Issues (Digital Nomads)](#docker-dnswifi-issues-digital-nomads)
- [Go to Declaration Not Working (Ctrl+B)](#go-to-declaration-not-working-ctrlb)
- [Import Errors and Red Underlines](#import-errors-and-red-underlines)
- [Python Interpreter Issues](#python-interpreter-issues)
@@ -17,6 +18,150 @@ Common PyCharm issues and their solutions for the development team.
---
## Docker DNS/WiFi Issues (Digital Nomads)
### Problem
When using Docker on Linux (Ubuntu/Xubuntu), the `docker0` bridge network interface can interfere with DNS resolution, causing:
- Hotel/public WiFi captive portals not loading
- DNS lookups failing or timing out
- Internet connectivity issues when Docker is running
- WiFi works fine until Docker starts
This happens because Docker's bridge network (`docker0`) can take priority over your WiFi connection for DNS resolution.
### Solution: Lower docker0 Priority (Recommended)
This solution lets Docker work normally while ensuring your WiFi DNS takes priority. Perfect for digital nomads working from hotels, cafes, and co-working spaces.
**Step 1: Create the NetworkManager dispatcher script**
```bash
sudo tee /etc/NetworkManager/dispatcher.d/99-docker-dns-fix << 'EOF'
#!/bin/bash
# Lower docker0 metric so WiFi takes priority for DNS
# This prevents Docker from interfering with hotel/public WiFi captive portals
if [ "$1" = "docker0" ]; then
ip route del default via 172.17.0.1 dev docker0 2>/dev/null || true
fi
EOF
```
**Step 2: Make it executable**
```bash
sudo chmod +x /etc/NetworkManager/dispatcher.d/99-docker-dns-fix
```
**Step 3: Restart NetworkManager**
```bash
sudo systemctl restart NetworkManager
```
**Step 4: Verify it works**
```bash
# Start Docker
sudo systemctl start docker
# Check routes - docker0 should NOT have a default route
ip route | grep docker0
# Should return nothing or non-default routes only
# Your WiFi should still work
ping -c 3 google.com
```
### Alternative Solutions
#### Option A: Use Google DNS (Simple but less flexible)
If you don't need hotel captive portal DNS:
```bash
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json << 'EOF'
{
"dns": ["8.8.8.8", "8.8.4.4"],
"dns-opts": ["ndots:1"]
}
EOF
sudo systemctl restart docker
```
!!! warning "Captive Portals"
This option uses hardcoded Google DNS, which means hotel WiFi captive portals
(login pages) may not work properly since they rely on DNS hijacking.
#### Option B: Stop Docker when not in use
```bash
# When you need WiFi without Docker interference
sudo systemctl stop docker
sudo ip link set docker0 down
# When you need Docker again
sudo systemctl start docker
```
### Installing Docker on Xubuntu/Ubuntu
If you haven't installed Docker yet:
```bash
# Update package index
sudo apt update
# Install prerequisites
sudo apt install -y ca-certificates curl gnupg
# Add Docker's official GPG key
sudo install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg
# Add the repository
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
# Install Docker
sudo apt update
sudo apt install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
# Add your user to docker group (run docker without sudo)
sudo usermod -aG docker $USER
# Apply the DNS fix BEFORE using Docker
sudo tee /etc/NetworkManager/dispatcher.d/99-docker-dns-fix << 'EOF'
#!/bin/bash
if [ "$1" = "docker0" ]; then
ip route del default via 172.17.0.1 dev docker0 2>/dev/null || true
fi
EOF
sudo chmod +x /etc/NetworkManager/dispatcher.d/99-docker-dns-fix
# Log out and back in for docker group, then verify
docker run hello-world
```
### Using Docker with this Project
After installing Docker with the DNS fix:
```bash
# Start PostgreSQL for development
make docker-up
# Run tests (auto-starts test database)
make test
# Stop when done
make docker-down
```
---
## Go to Declaration Not Working (Ctrl+B)
### Problem

View File

@@ -12,19 +12,29 @@ After cloning the repository, follow these steps to get your database ready:
make install-all
```
### 2. Set Up Environment
### 2. Start PostgreSQL Database
```bash
# Start the development database with Docker
make docker-up
# Or manually:
docker-compose up -d db
```
### 3. Set Up Environment
```bash
# Copy the example environment file
cp .env.example .env
# Edit .env with your database configuration
# For development, you can use SQLite:
DATABASE_URL=sqlite:///./wizamart.db
# For PostgreSQL (recommended for production-like development):
# DATABASE_URL=postgresql://username:password@localhost:5432/ecommerce_dev
# The default works with docker-compose:
DATABASE_URL=postgresql://wizamart_user:secure_password@localhost:5432/wizamart_db
```
!!! note "PostgreSQL Required"
This project requires PostgreSQL. SQLite is not supported.
Docker Compose provides the easiest way to run PostgreSQL locally.
### 4. Run Database Migrations
```bash
# Apply all migrations to create the database schema
@@ -136,27 +146,26 @@ make migrate-up
## Environment-Specific Setup
### Development (SQLite)
### Development (Docker - Recommended)
```env
DATABASE_URL=sqlite:///./wizamart.db
DATABASE_URL=postgresql://wizamart_user:secure_password@localhost:5432/wizamart_db
```
- Quick setup, no additional software needed
- File-based database, easy to backup/restore
- Good for local development and testing
- Start with `make docker-up`
- Consistent environment matching production
- Easy to reset with `make db-reset`
### Development (PostgreSQL)
### Development (Local PostgreSQL)
```env
DATABASE_URL=postgresql://user:password@localhost:5432/wizamart_dev
```
- More production-like environment
- Better for testing complex queries
- Required for certain advanced features
- Use if you prefer a local PostgreSQL installation
- Ensure PostgreSQL 15+ is installed
### Production
```env
DATABASE_URL=postgresql://user:password@production-host:5432/wizamart_prod
```
- Always use PostgreSQL in production
- PostgreSQL is required for production
- Migrations are applied automatically during deployment
## Troubleshooting
@@ -312,19 +321,19 @@ python scripts/create_test_data.py
To start completely fresh:
```bash
# 1. Backup first (optional but recommended)
cp wizamart.db wizamart.db.backup
# Complete reset (drops all data and recreates)
make db-reset
# 2. Delete database
rm wizamart.db
# Or step by step:
# 1. Rollback all migrations
make migrate-down # Run multiple times or use: alembic downgrade base
# 3. Recreate schema
# 2. Recreate schema
make migrate-up
# 4. Seed all data
python scripts/create_test_data.py
python scripts/create_inventory.py
python scripts/create_landing_page.py
# 3. Seed data
make init-prod
make seed-demo
```
## Next Steps

View File

@@ -7,9 +7,13 @@ This guide will help you set up the Wizamart Platform for development or product
Before you begin, ensure you have the following installed:
- **Python 3.11 or higher**
- **PostgreSQL 15 or higher**
- **Docker and Docker Compose** (required for database)
- **Git**
- **Docker** (optional, for containerized deployment)
- **PostgreSQL client tools** (optional, for debugging)
!!! note "PostgreSQL Only"
This project uses PostgreSQL exclusively. SQLite is not supported.
Docker provides the easiest way to run PostgreSQL locally.
## Development Setup

View File

@@ -184,9 +184,10 @@ nav:
# --- Deployment & Operations ---
- Deployment:
- Overview: deployment/index.md
- Infrastructure Guide: deployment/infrastructure.md
- Launch Readiness: deployment/launch-readiness.md
- Traditional VPS: deployment/production.md
- Docker: deployment/docker.md
- Production: deployment/production.md
- GitLab CI/CD: deployment/gitlab.md
- Environment Variables: deployment/environment.md
- Stripe Integration: deployment/stripe-integration.md

View File

@@ -355,6 +355,52 @@ def validate_configuration(env_vars: dict) -> dict:
"items": ["Set PLATFORM_DOMAIN for your deployment"]
}
# -------------------------------------------------------------------------
# Celery / Redis Task Queue
# -------------------------------------------------------------------------
redis_url = env_vars.get("REDIS_URL", "")
use_celery = env_vars.get("USE_CELERY", "false").lower() == "true"
flower_url = env_vars.get("FLOWER_URL", "")
flower_password = env_vars.get("FLOWER_PASSWORD", "")
if use_celery:
if redis_url:
celery_items = [f"Redis: {redis_url.split('@')[-1] if '@' in redis_url else redis_url}"]
if flower_url:
celery_items.append(f"Flower: {flower_url}")
else:
celery_items.append("FLOWER_URL not set (monitoring disabled)")
if flower_password and flower_password != "changeme":
celery_items.append("Flower password configured")
elif flower_password == "changeme":
celery_items.append("WARNING: Change FLOWER_PASSWORD for production!")
results["celery"] = {
"status": "ok",
"message": "Celery enabled with Redis",
"items": celery_items
}
else:
results["celery"] = {
"status": "missing",
"message": "Celery enabled but Redis not configured",
"items": [
"Set REDIS_URL (e.g., redis://localhost:6379/0)",
"Or disable Celery: USE_CELERY=false"
]
}
else:
results["celery"] = {
"status": "warning",
"message": "Celery disabled (using FastAPI BackgroundTasks)",
"items": [
"Set USE_CELERY=true for production",
"Requires Redis: docker-compose up -d redis"
]
}
return results
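
A quick sketch of exercising this check (environment values are illustrative):

```python
# Illustrative: feed env vars to the validator and inspect the Celery section
env = {
    "USE_CELERY": "true",
    "REDIS_URL": "redis://localhost:6379/0",
    "FLOWER_URL": "http://localhost:5555",
    "FLOWER_PASSWORD": "changeme",  # triggers the production warning above
}
results = validate_configuration(env)
print(results["celery"]["status"])  # -> "ok"
print(results["celery"]["items"])
```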

View File

@@ -0,0 +1,192 @@
#!/usr/bin/env python3
"""
Migration Squash Script
This script squashes all existing migrations into a single initial migration.
Run this after setting up PostgreSQL to simplify the migration history.
Prerequisites:
- PostgreSQL must be running: make docker-up
- DATABASE_URL environment variable must be set to PostgreSQL
Usage:
python scripts/squash_migrations.py
What this script does:
1. Backs up existing migrations to alembic/versions_backup_YYYYMMDD_HHMMSS/
2. Creates a fresh initial migration from current models
3. Stamps the database as being at the new migration
After running:
1. Review the new migration in alembic/versions/
2. Test with: make migrate-up (on a fresh database)
3. If satisfied, delete the backup directory
"""
import os
import shutil
import subprocess
import sys
from datetime import datetime
from pathlib import Path
# Add project root to path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))
VERSIONS_DIR = project_root / "alembic" / "versions"
def check_prerequisites():
"""Verify PostgreSQL is configured."""
database_url = os.getenv("DATABASE_URL", "")
if not database_url.startswith("postgresql"):
print("ERROR: DATABASE_URL must be a PostgreSQL URL")
print(f"Current: {database_url[:50]}...")
print("")
print("Set DATABASE_URL or start PostgreSQL with: make docker-up")
sys.exit(1)
print(f"Database: {database_url.split('@')[0]}@...")
return True
def backup_migrations():
"""Backup existing migrations."""
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
backup_dir = project_root / "alembic" / f"versions_backup_{timestamp}"
if not VERSIONS_DIR.exists():
print("No existing migrations to backup")
return None
migration_files = list(VERSIONS_DIR.glob("*.py"))
if not migration_files:
print("No migration files found")
return None
print(f"Backing up {len(migration_files)} migrations to {backup_dir.name}/")
shutil.copytree(VERSIONS_DIR, backup_dir)
# Clear versions directory (keep __pycache__ if exists)
for f in VERSIONS_DIR.glob("*.py"):
f.unlink()
return backup_dir
def create_fresh_migration():
"""Generate fresh initial migration from models."""
print("Generating fresh initial migration...")
result = subprocess.run(
[
sys.executable, "-m", "alembic", "revision",
"--autogenerate", "-m", "initial_postgresql_schema"
],
cwd=project_root,
capture_output=True,
text=True
)
if result.returncode != 0:
print("ERROR: Failed to generate migration")
print(result.stderr)
sys.exit(1)
print(result.stdout)
# Find the new migration file
new_migrations = list(VERSIONS_DIR.glob("*initial_postgresql_schema*.py"))
if new_migrations:
print(f"Created: {new_migrations[0].name}")
return new_migrations[0]
return None
def clean_migration_file(migration_path: Path):
"""Remove SQLite-specific patterns from migration."""
if not migration_path:
return
content = migration_path.read_text()
# Remove batch_alter_table references (not needed for PostgreSQL)
if "batch_alter_table" in content:
print("Note: Migration contains batch_alter_table - this is not needed for PostgreSQL")
# We don't auto-remove as it might be intentional
print(f"Review migration at: {migration_path}")
def stamp_database():
"""Stamp the database as being at the new migration."""
print("Stamping database with new migration...")
result = subprocess.run(
[sys.executable, "-m", "alembic", "stamp", "head"],
cwd=project_root,
capture_output=True,
text=True
)
if result.returncode != 0:
print("WARNING: Could not stamp database (may need to run migrate-up first)")
print(result.stderr)
else:
print("Database stamped at head")
def main():
print("=" * 60)
print("MIGRATION SQUASH SCRIPT")
print("=" * 60)
print("")
# Check prerequisites
check_prerequisites()
print("")
# Confirm with user
response = input("This will backup and replace all migrations. Continue? [y/N] ")
if response.lower() != 'y':
print("Aborted")
sys.exit(0)
print("")
# Backup existing migrations
backup_dir = backup_migrations()
print("")
# Create fresh migration
new_migration = create_fresh_migration()
print("")
# Clean up the migration file
clean_migration_file(new_migration)
print("")
# Summary
print("=" * 60)
print("SQUASH COMPLETE")
print("=" * 60)
print("")
if backup_dir:
print(f"Backup location: {backup_dir}")
print("")
print("Next steps:")
print("1. Review the new migration file")
print("2. On a fresh database, run: make migrate-up")
print("3. Verify all tables are created correctly")
print("4. If satisfied, delete the backup directory")
print("")
print("To restore from backup:")
print(f" rm -rf alembic/versions/*.py")
print(f" cp -r {backup_dir}/* alembic/versions/")
if __name__ == "__main__":
main()

View File

@@ -380,6 +380,12 @@ class ArchitectureValidator:
suggestion="Keep SQLAlchemy models and Pydantic models separate",
)
# Alembic migrations
elif "/alembic/versions/" in file_path_str or "\\alembic\\versions\\" in file_path_str:
print("🔄 Validating as Alembic migration...")
self._check_migration_batch_mode(file_path, content, lines)
self._check_migration_constraint_names(file_path, content, lines)
# Generic Python file - check exception handling
print("⚠️ Validating exception handling...")
for i, line in enumerate(lines, 1):
@@ -3760,6 +3766,126 @@ class ArchitectureValidator:
suggestion="Fix JSON syntax error (check for trailing commas, missing quotes)",
)
def _check_migration_batch_mode(
self, file_path: Path, content: str, lines: list[str]
):
"""MIG-001: Check that alter_column, drop_constraint, create_foreign_key use batch mode"""
# Track if we're inside a batch_alter_table context
in_batch_context = False
batch_indent = 0
for i, line in enumerate(lines, 1):
stripped = line.strip()
# Track batch_alter_table context entry
if "batch_alter_table(" in line or "with op.batch_alter_table" in line:
in_batch_context = True
# Get indent level of the 'with' statement
batch_indent = len(line) - len(line.lstrip())
continue
# Track batch_alter_table context exit (dedent)
if in_batch_context and stripped and not stripped.startswith("#"):
current_indent = len(line) - len(line.lstrip())
# If we're back at or before the 'with' indent level, we've exited
if current_indent <= batch_indent and not line.strip().startswith(
"with"
):
in_batch_context = False
# Skip comments
if stripped.startswith("#"):
continue
# Check for direct op.alter_column (not batch_op.alter_column)
if re.search(r"\bop\.alter_column\(", line):
self._add_violation(
rule_id="MIG-001",
rule_name="Use batch_alter_table for column modifications",
severity=Severity.ERROR,
file_path=file_path,
line_number=i,
message="op.alter_column() not supported by SQLite - use batch mode",
context=stripped[:100],
suggestion="Use: with op.batch_alter_table('table') as batch_op: batch_op.alter_column(...)",
)
# Check for direct op.drop_constraint (not batch_op.drop_constraint)
if re.search(r"\bop\.drop_constraint\(", line):
self._add_violation(
rule_id="MIG-001",
rule_name="Use batch_alter_table for constraint modifications",
severity=Severity.ERROR,
file_path=file_path,
line_number=i,
message="op.drop_constraint() not supported by SQLite - use batch mode",
context=stripped[:100],
suggestion="Use: with op.batch_alter_table('table') as batch_op: batch_op.drop_constraint(...)",
)
# Check for direct op.create_foreign_key (not batch_op.create_foreign_key)
if re.search(r"\bop\.create_foreign_key\(", line):
self._add_violation(
rule_id="MIG-001",
rule_name="Use batch_alter_table for foreign key creation",
severity=Severity.ERROR,
file_path=file_path,
line_number=i,
message="op.create_foreign_key() not supported by SQLite - use batch mode",
context=stripped[:100],
suggestion="Use: with op.batch_alter_table('table') as batch_op: batch_op.create_foreign_key(...)",
)
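
    # Illustrative migration lines for the check above (hypothetical table and
    # column names, shown here for reference only):
    #
    #   op.alter_column('users', 'email', nullable=False)    # flagged: direct op call
    #
    #   with op.batch_alter_table('users') as batch_op:      # passes: batch mode
    #       batch_op.alter_column('email', nullable=False)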
    def _check_migration_constraint_names(
        self, file_path: Path, content: str, lines: list[str]
    ):
        """MIG-002: Check that constraints have explicit names (not None)"""
        for i, line in enumerate(lines, 1):
            stripped = line.strip()

            # Skip comments
            if stripped.startswith("#"):
                continue

            # Check for create_foreign_key(None, ...)
            if re.search(r"create_foreign_key\s*\(\s*None\s*,", line):
                self._add_violation(
                    rule_id="MIG-002",
                    rule_name="Constraints must have explicit names",
                    severity=Severity.ERROR,
                    file_path=file_path,
                    line_number=i,
                    message="Foreign key constraint must have an explicit name, not None",
                    context=stripped[:100],
                    suggestion="Use: create_foreign_key('fk_table_column', ...)",
                )

            # Check for create_unique_constraint(None, ...)
            if re.search(r"create_unique_constraint\s*\(\s*None\s*,", line):
                self._add_violation(
                    rule_id="MIG-002",
                    rule_name="Constraints must have explicit names",
                    severity=Severity.ERROR,
                    file_path=file_path,
                    line_number=i,
                    message="Unique constraint must have an explicit name, not None",
                    context=stripped[:100],
                    suggestion="Use: create_unique_constraint('uq_table_columns', ...)",
                )

            # Check for drop_constraint(None, ...)
            if re.search(r"drop_constraint\s*\(\s*None\s*,", line):
                self._add_violation(
                    rule_id="MIG-002",
                    rule_name="Constraints must have explicit names",
                    severity=Severity.ERROR,
                    file_path=file_path,
                    line_number=i,
                    message="Cannot drop constraint with None name",
                    context=stripped[:100],
                    suggestion="Specify the constraint name to drop",
                )
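
    # Illustrative migration lines for the check above (hypothetical names):
    #
    #   op.create_foreign_key(None, 'orders', 'users', ['user_id'], ['id'])   # flagged
    #   op.create_foreign_key('fk_orders_user_id', 'orders', 'users',
    #                         ['user_id'], ['id'])                            # passes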
    def _get_rule(self, rule_id: str) -> dict[str, Any]:
        """Get rule configuration by ID"""
        # Look in different rule categories
@@ -3772,6 +3898,7 @@ class ArchitectureValidator:
"template_rules",
"frontend_component_rules",
"language_rules",
"migration_rules",
]:
rules = self.config.get(category, [])
for rule in rules:

View File

@@ -1,14 +1,17 @@
# tests/conftest.py - PostgreSQL test configuration
"""
Core pytest configuration and fixtures.

This project uses PostgreSQL for testing. Start the test database with:

    make test-db-up

IMPORTANT - Fixture Best Practices:
===================================
1. DO NOT use db.expunge() on fixtures - it detaches objects from the session
   and breaks lazy loading of relationships (e.g., product.marketplace_product).
2. Test isolation is achieved through TRUNCATE CASCADE after each test,
   which is much faster than dropping/recreating tables.
3. If you need to ensure an object has fresh data, use db.refresh(obj) instead
   of expunge/re-query patterns.
@@ -19,31 +22,53 @@ IMPORTANT - Fixture Best Practices:
See docs/testing/testing-guide.md for comprehensive testing documentation.
"""
import os

import pytest
from fastapi.testclient import TestClient
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker

from app.core.database import Base, get_db
from main import app

# Import all models to ensure they're registered with Base metadata

# PostgreSQL test database URL
# Use environment variable or default to local Docker test database
TEST_DATABASE_URL = os.getenv(
    "TEST_DATABASE_URL",
    "postgresql://test_user:test_password@localhost:5433/wizamart_test"
)
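# Example override for CI (hypothetical host and credentials):
#   TEST_DATABASE_URL=postgresql://ci:ci@postgres:5432/wizamart_test pytest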
@pytest.fixture(scope="session")
def engine():
"""Create test database engine."""
return create_engine(
SQLALCHEMY_TEST_DATABASE_URL,
connect_args={"check_same_thread": False},
poolclass=StaticPool,
"""Create test database engine.
Verifies PostgreSQL connection on startup and provides helpful
error message if the test database is not running.
"""
engine = create_engine(
TEST_DATABASE_URL,
pool_pre_ping=True,
echo=False, # Set to True for SQL debugging
)
# Verify connection on startup
try:
with engine.connect() as conn:
conn.execute(text("SELECT 1"))
except Exception as e:
pytest.exit(
f"\n\nCannot connect to test database at {TEST_DATABASE_URL}\n"
f"Error: {e}\n\n"
"Start the test database with:\n"
" make test-db-up\n\n"
"Or manually:\n"
" docker-compose -f docker-compose.test.yml up -d\n"
)
return engine
@pytest.fixture(scope="session")
def testing_session_local(engine):
@@ -62,32 +87,42 @@ def testing_session_local(engine):
    )


@pytest.fixture(scope="session", autouse=True)
def setup_database(engine):
    """Create all tables once at the start of the test session."""
    Base.metadata.create_all(bind=engine)
    yield
    # Optionally drop tables after all tests (commented out for debugging)
    # Base.metadata.drop_all(bind=engine)
@pytest.fixture(scope="function")
def db(engine, testing_session_local):
"""
Create a database session for each test function.
Provides test isolation by:
- Creating fresh tables before each test
- Dropping all tables after each test completes
- Using a fresh session for each test
- Truncating all tables after each test (fast cleanup)
Note: Fixtures should NOT use db.expunge() as this detaches objects
from the session and breaks lazy loading. The table drop/create cycle
provides sufficient isolation between tests.
from the session and breaks lazy loading. The TRUNCATE provides
sufficient isolation between tests.
"""
# Create all tables
Base.metadata.create_all(bind=engine)
# Create session
db_session = testing_session_local()
try:
yield db_session
finally:
db_session.close()
# Clean up all data after each test
Base.metadata.drop_all(bind=engine)
Base.metadata.create_all(bind=engine)
# Fast cleanup with TRUNCATE CASCADE
with engine.connect() as conn:
# Disable FK checks temporarily for fast truncation
conn.execute(text("SET session_replication_role = 'replica'"))
for table in reversed(Base.metadata.sorted_tables):
conn.execute(text(f'TRUNCATE TABLE "{table.name}" CASCADE'))
conn.execute(text("SET session_replication_role = 'origin'"))
conn.commit()
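
# Note: SET session_replication_role normally requires superuser rights. If the
# test role lacks them, a slower but permission-friendly fallback (a sketch,
# reusing the same fixtures) is to delete rows in FK-safe order instead:
#
#   with engine.connect() as conn:
#       for table in reversed(Base.metadata.sorted_tables):
#           conn.execute(table.delete())
#       conn.commit()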
@pytest.fixture(scope="function")

View File

@@ -133,7 +133,8 @@ class TestCustomerAddressModel:
address_line_1="123 Main St",
city="Luxembourg",
postal_code="L-1234",
country="Luxembourg",
country_name="Luxembourg",
country_iso="LU",
is_default=True,
)
@@ -158,7 +159,8 @@ class TestCustomerAddressModel:
address_line_1="123 Shipping St",
city="Luxembourg",
postal_code="L-1234",
country="Luxembourg",
country_name="Luxembourg",
country_iso="LU",
)
db.add(shipping_address)
@@ -171,7 +173,8 @@ class TestCustomerAddressModel:
address_line_1="456 Billing Ave",
city="Luxembourg",
postal_code="L-5678",
country="Luxembourg",
country_name="Luxembourg",
country_iso="LU",
)
db.add(billing_address)
db.commit()
@@ -192,7 +195,8 @@ class TestCustomerAddressModel:
address_line_2="Suite 100",
city="Luxembourg",
postal_code="L-1234",
country="Luxembourg",
country_name="Luxembourg",
country_iso="LU",
)
db.add(address)
db.commit()
@@ -212,7 +216,8 @@ class TestCustomerAddressModel:
address_line_1="123 Main St",
city="Luxembourg",
postal_code="L-1234",
country="Luxembourg",
country_name="Luxembourg",
country_iso="LU",
)
db.add(address)
db.commit()
@@ -231,7 +236,8 @@ class TestCustomerAddressModel:
address_line_1="123 Main St",
city="Luxembourg",
postal_code="L-1234",
country="Luxembourg",
country_name="Luxembourg",
country_iso="LU",
)
db.add(address)
db.commit()

View File

@@ -188,7 +188,8 @@ class TestCustomerAddressCreateSchema:
address_line_1="123 Main St",
city="Luxembourg",
postal_code="L-1234",
country="Luxembourg",
country_name="Luxembourg",
country_iso="LU",
)
assert address.address_type == "shipping"
assert address.city == "Luxembourg"
@@ -202,7 +203,8 @@ class TestCustomerAddressCreateSchema:
address_line_1="123 Main St",
city="Luxembourg",
postal_code="L-1234",
country="Luxembourg",
country_name="Luxembourg",
country_iso="LU",
)
assert address.address_type == "billing"
@@ -216,7 +218,8 @@ class TestCustomerAddressCreateSchema:
address_line_1="123 Main St",
city="Luxembourg",
postal_code="L-1234",
country="Luxembourg",
country_name="Luxembourg",
country_iso="LU",
)
assert "address_type" in str(exc_info.value).lower()
@@ -229,7 +232,8 @@ class TestCustomerAddressCreateSchema:
address_line_1="123 Main St",
city="Luxembourg",
postal_code="L-1234",
country="Luxembourg",
country_name="Luxembourg",
country_iso="LU",
)
assert address.is_default is False
@@ -243,7 +247,8 @@ class TestCustomerAddressCreateSchema:
address_line_1="123 Main St",
city="Luxembourg",
postal_code="L-1234",
country="Luxembourg",
country_name="Luxembourg",
country_iso="LU",
)
assert address.company == "Tech Corp"
@@ -257,7 +262,8 @@ class TestCustomerAddressCreateSchema:
address_line_2="Apt 4B",
city="Luxembourg",
postal_code="L-1234",
country="Luxembourg",
country_name="Luxembourg",
country_iso="LU",
)
assert address.address_line_2 == "Apt 4B"
@@ -321,7 +327,8 @@ class TestCustomerAddressResponseSchema:
"address_line_2": None,
"city": "Luxembourg",
"postal_code": "L-1234",
"country": "Luxembourg",
"country_name": "Luxembourg",
"country_iso": "LU",
"is_default": True,
"created_at": datetime.now(),
"updated_at": datetime.now(),

View File

@@ -211,20 +211,23 @@ class TestEmailService:
class TestEmailSending:
    """Test suite for email sending functionality."""

    @patch("app.services.email_service.get_platform_provider")
    @patch("app.services.email_service.get_platform_email_config")
    def test_send_raw_success(self, mock_get_config, mock_get_platform_provider, db):
        """Test successful raw email sending."""
        # Setup mocks
        mock_get_config.return_value = {
            "enabled": True,
            "debug": False,
            "provider": "smtp",
            "from_email": "noreply@test.com",
            "from_name": "Test",
            "reply_to": "",
        }
        mock_provider = MagicMock()
        mock_provider.send.return_value = (True, "msg-123", None)
        mock_get_platform_provider.return_value = mock_provider

        service = EmailService(db)
@@ -240,20 +243,23 @@ class TestEmailSending:
assert log.subject == "Test Subject"
assert log.provider_message_id == "msg-123"
@patch("app.services.email_service.get_provider")
@patch("app.services.email_service.settings")
def test_send_raw_failure(self, mock_settings, mock_get_provider, db):
@patch("app.services.email_service.get_platform_provider")
@patch("app.services.email_service.get_platform_email_config")
def test_send_raw_failure(self, mock_get_config, mock_get_platform_provider, db):
"""Test failed raw email sending."""
# Setup mocks
mock_settings.email_enabled = True
mock_settings.email_from_address = "noreply@test.com"
mock_settings.email_from_name = "Test"
mock_settings.email_reply_to = ""
mock_settings.email_provider = "smtp"
mock_get_config.return_value = {
"enabled": True,
"debug": False,
"provider": "smtp",
"from_email": "noreply@test.com",
"from_name": "Test",
"reply_to": "",
}
mock_provider = MagicMock()
mock_provider.send.return_value = (False, None, "Connection refused")
mock_get_provider.return_value = mock_provider
mock_get_platform_provider.return_value = mock_provider
service = EmailService(db)
@@ -287,9 +293,9 @@ class TestEmailSending:
        assert log.status == EmailStatus.FAILED.value
        assert "disabled" in log.error_message.lower()

    @patch("app.services.email_service.get_platform_provider")
    @patch("app.services.email_service.get_platform_email_config")
    def test_send_template_success(self, mock_get_config, mock_get_platform_provider, db):
        """Test successful template email sending."""
        # Create test template
        template = EmailTemplate(
@@ -305,15 +311,18 @@ class TestEmailSending:
        db.commit()

        # Setup mocks
        mock_get_config.return_value = {
            "enabled": True,
            "debug": False,
            "provider": "smtp",
            "from_email": "noreply@test.com",
            "from_name": "Test",
            "reply_to": "",
        }
        mock_provider = MagicMock()
        mock_provider.send.return_value = (True, "msg-456", None)
        mock_get_platform_provider.return_value = mock_provider

        service = EmailService(db)
@@ -331,7 +340,8 @@ class TestEmailSending:
        assert log.template_code == "test_send_template"
        assert log.subject == "Hello John"

        # Cleanup - delete log first due to FK constraint
        db.delete(log)
        db.delete(template)
        db.commit()
@@ -531,7 +541,8 @@ class TestSignupWelcomeEmail:
        yield template

        # Cleanup - delete email logs referencing this template first
        db.query(EmailLog).filter(EmailLog.template_id == template.id).delete()
        db.delete(template)
        db.commit()
@@ -575,20 +586,23 @@ class TestSignupWelcomeEmail:
        for var in required_vars:
            assert var in template.variables_list, f"Missing variable: {var}"

    @patch("app.services.email_service.get_platform_provider")
    @patch("app.services.email_service.get_platform_email_config")
    def test_welcome_email_send(
        self, mock_get_config, mock_get_platform_provider, db, welcome_template, test_vendor, test_user
    ):
        """Test sending welcome email."""
        # Setup mocks
        mock_get_config.return_value = {
            "enabled": True,
            "debug": False,
            "provider": "smtp",
            "from_email": "noreply@test.com",
            "from_name": "Test",
            "reply_to": "",
        }
        mock_provider = MagicMock()
        mock_provider.send.return_value = (True, "welcome-msg-123", None)
        mock_get_platform_provider.return_value = mock_provider

        service = EmailService(db)
@@ -606,8 +620,8 @@ class TestSignupWelcomeEmail:
"trial_days": 30,
"tier_name": "Essential",
},
vendor_id=1,
user_id=1,
vendor_id=test_vendor.id,
user_id=test_user.id,
related_type="signup",
)

View File

@@ -218,17 +218,13 @@ class TestOnboardingServiceStep1:
        service = OnboardingService(db)

        # Use a vendor_id that doesn't exist
        # The service should check vendor exists before doing anything
        non_existent_vendor_id = 999999

        with pytest.raises(VendorNotFoundException):
            service.complete_company_profile(
                vendor_id=non_existent_vendor_id,
                default_language="en",
                dashboard_language="en",
            )