refactor: migrate templates and static files to self-contained modules

Templates Migration:
- Migrate admin templates to modules (tenancy, billing, monitoring, marketplace, etc.)
- Migrate vendor templates to modules (tenancy, billing, orders, messaging, etc.)
- Migrate storefront templates to modules (catalog, customers, orders, cart, checkout, cms)
- Migrate public templates to modules (billing, marketplace, cms)
- Keep shared templates in app/templates/ (base.html, errors/, partials/, macros/)
- Migrate letzshop partials to marketplace module

Static Files Migration:
- Migrate admin JS to modules: tenancy (23 files), core (5 files), monitoring (1 file)
- Migrate vendor JS to modules: tenancy (4 files), core (2 files)
- Migrate shared JS: vendor-selector.js to core, media-picker.js to cms
- Migrate storefront JS: storefront-layout.js to core
- Keep framework JS in static/ (api-client, utils, money, icons, log-config, lib/)
- Update all template references to use module_static paths

Naming Consistency:
- Rename static/platform/ to static/public/
- Rename app/templates/platform/ to app/templates/public/
- Update all extends and static references

Documentation:
- Update module-system.md with shared templates documentation
- Update frontend-structure.md with new module JS organization

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-02-01 14:34:16 +01:00
parent 843703258f
commit 4e28d91a78
542 changed files with 11603 additions and 9037 deletions

View File

@@ -0,0 +1,19 @@
# app/tasks/__init__.py
"""
Task dispatcher for Celery background tasks.
All Celery tasks are defined in their respective modules:
- app.modules.marketplace.tasks: Import/export/sync tasks
- app.modules.billing.tasks: Subscription management tasks
- app.modules.dev_tools.tasks: Code quality and test runner tasks
- app.modules.monitoring.tasks: Capacity monitoring tasks
Use the task_dispatcher for dispatching tasks from API routes:
from app.tasks import task_dispatcher
task_id = task_dispatcher.dispatch_marketplace_import(...)
"""
from app.tasks.dispatcher import task_dispatcher, TaskDispatcher
__all__ = ["task_dispatcher", "TaskDispatcher"]

View File

@@ -1,136 +0,0 @@
# app/tasks/background_tasks.py
"""Background tasks for marketplace imports."""
import logging
from datetime import UTC, datetime
from app.core.database import SessionLocal
from app.services.admin_notification_service import admin_notification_service
from app.utils.csv_processor import CSVProcessor
from app.modules.marketplace.models import MarketplaceImportJob
from models.database.vendor import Vendor
logger = logging.getLogger(__name__)
async def process_marketplace_import(
    job_id: int,
    url: str,
    marketplace: str,
    vendor_id: int,
    batch_size: int = 1000,
    language: str = "en",
):
    """Background task to process marketplace CSV import.

    Loads the import job and vendor, streams the CSV at ``url`` through the
    CSV processor, and records counts and final status on the job row.
    Notifies admins when the import fails or completes with a significant
    number of row errors (>= 5).

    Args:
        job_id: ID of the MarketplaceImportJob record
        url: URL to the CSV file
        marketplace: Name of the marketplace (e.g., 'Letzshop')
        vendor_id: ID of the vendor
        batch_size: Number of rows to process per batch
        language: Language code for translations (default: 'en')
    """
    db = SessionLocal()
    csv_processor = CSVProcessor()
    job = None
    # Fix: initialize `vendor` before the try block. The failure handler below
    # references it, and an exception raised before the vendor query would
    # otherwise turn into a NameError inside the handler, preventing the job
    # from being marked as failed.
    vendor = None
    try:
        # Get the import job
        job = (
            db.query(MarketplaceImportJob)
            .filter(MarketplaceImportJob.id == job_id)
            .first()
        )
        if not job:
            logger.error(f"Import job {job_id} not found")
            return
        # Get vendor information; without a vendor the import cannot proceed,
        # so the job is terminally failed right here.
        vendor = db.query(Vendor).filter(Vendor.id == vendor_id).first()
        if not vendor:
            logger.error(f"Vendor {vendor_id} not found for import job {job_id}")
            job.status = "failed"
            job.error_message = f"Vendor {vendor_id} not found"
            job.completed_at = datetime.now(UTC)
            db.commit()
            return
        # Mark the job as in-progress so pollers see it running
        job.status = "processing"
        job.started_at = datetime.now(UTC)
        db.commit()
        logger.info(
            f"Processing import: Job {job_id}, Marketplace: {marketplace}, "
            f"Vendor: {vendor.name} ({vendor.vendor_code}), Language: {language}"
        )
        # Process CSV with vendor name and language
        result = await csv_processor.process_marketplace_csv_from_url(
            url=url,
            marketplace=marketplace,
            vendor_name=vendor.name,  # Pass vendor name to CSV processor
            batch_size=batch_size,
            db=db,
            language=language,  # Pass language for translations
            import_job_id=job_id,  # Pass job ID for error tracking
        )
        # Record results on the job row
        job.status = "completed"
        job.completed_at = datetime.now(UTC)
        job.imported_count = result["imported"]
        job.updated_count = result["updated"]
        job.error_count = result.get("errors", 0)
        job.total_processed = result["total_processed"]
        if result.get("errors", 0) > 0:
            job.status = "completed_with_errors"
            job.error_message = f"{result['errors']} rows had errors"
            # Notify admin only when the error count is significant
            if result.get("errors", 0) >= 5:
                admin_notification_service.notify_import_failure(
                    db=db,
                    vendor_name=vendor.name,
                    job_id=job_id,
                    error_message=f"Import completed with {result['errors']} errors out of {result['total_processed']} rows",
                    vendor_id=vendor_id,
                )
        db.commit()
        logger.info(
            f"Import job {job_id} completed: "
            f"imported={result['imported']}, updated={result['updated']}, "
            f"errors={result.get('errors', 0)}"
        )
    except Exception as e:
        logger.error(f"Import job {job_id} failed: {e}", exc_info=True)
        if job is not None:
            try:
                job.status = "failed"
                job.error_message = str(e)
                job.completed_at = datetime.now(UTC)
                # Create admin notification for import failure; `vendor` is
                # guaranteed bound (None at worst) by the init above.
                vendor_name = vendor.name if vendor else f"Vendor {vendor_id}"
                admin_notification_service.notify_import_failure(
                    db=db,
                    vendor_name=vendor_name,
                    job_id=job_id,
                    error_message=str(e)[:200],  # Truncate long errors
                    vendor_id=vendor_id,
                )
                db.commit()
            except Exception as commit_error:
                logger.error(f"Failed to update job status: {commit_error}")
                db.rollback()
    finally:
        # Defensive close: the hasattr guard suggests db may be replaced by a
        # stub without close() in some contexts — NOTE(review): confirm.
        if hasattr(db, "close") and callable(db.close):
            try:
                db.close()
            except Exception as close_error:
                logger.error(f"Error closing database session: {close_error}")

View File

@@ -1,16 +0,0 @@
# app/tasks/celery_tasks/__init__.py
"""
Celery task modules for Wizamart.
This package contains Celery task wrappers for background processing:
- marketplace: Product import tasks
- letzshop: Historical import tasks
- subscription: Scheduled subscription management
- export: Product export tasks
- code_quality: Code quality scan tasks
- test_runner: Test execution tasks
"""
from app.tasks.celery_tasks.base import DatabaseTask
__all__ = ["DatabaseTask"]

View File

@@ -1,91 +0,0 @@
# app/tasks/celery_tasks/base.py
"""
Base Celery task class with database session management.
Provides a DatabaseTask base class that handles:
- Database session lifecycle (create/close)
- Context manager pattern for session usage
- Proper cleanup on task completion or failure
"""
import logging
from contextlib import contextmanager
from celery import Task
from app.core.database import SessionLocal
logger = logging.getLogger(__name__)
class DatabaseTask(Task):
    """Celery base task that manages a SQLAlchemy session per invocation.

    Tasks declared with ``base=DatabaseTask`` obtain a session via the
    :meth:`get_db` context manager, which guarantees rollback on error and
    closure on exit. Lifecycle hooks log failure, success, and retry events.

    Usage:
        @celery_app.task(bind=True, base=DatabaseTask)
        def my_task(self, arg1, arg2):
            with self.get_db() as db:
                # Use db session
                result = db.query(Model).all()
            return result
    """

    abstract = True

    @contextmanager
    def get_db(self):
        """Yield a database session, rolling back on error and always closing.

        Yields:
            Session: SQLAlchemy database session

        Example:
            with self.get_db() as db:
                vendor = db.query(Vendor).filter(Vendor.id == vendor_id).first()
        """
        session = SessionLocal()
        try:
            yield session
        except Exception as exc:
            logger.error(f"Database error in task {self.name}: {exc}")
            session.rollback()
            raise
        finally:
            session.close()

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        """Log a failed task together with its arguments and traceback."""
        logger.error(
            f"Task {self.name}[{task_id}] failed: {exc}\n"
            f"Args: {args}\n"
            f"Kwargs: {kwargs}\n"
            f"Traceback: {einfo}"
        )

    def on_success(self, retval, task_id, args, kwargs):
        """Log successful completion of a task with its task ID."""
        logger.info(f"Task {self.name}[{task_id}] completed successfully")

    def on_retry(self, exc, task_id, args, kwargs, einfo):
        """Log a retry attempt with its cause and current retry count."""
        logger.warning(
            f"Task {self.name}[{task_id}] retrying due to: {exc}\n"
            f"Retry count: {self.request.retries}"
        )

View File

@@ -1,31 +0,0 @@
# app/tasks/celery_tasks/code_quality.py
"""
Celery tasks for code quality scans - LEGACY LOCATION
This file exists for backward compatibility.
The canonical location is now: app/modules/dev_tools/tasks/code_quality.py
All imports should use the new location:
from app.modules.dev_tools.tasks import execute_code_quality_scan
"""
# Re-export from canonical location for backward compatibility
from app.modules.dev_tools.tasks.code_quality import (
execute_code_quality_scan,
VALIDATOR_ARCHITECTURE,
VALIDATOR_SECURITY,
VALIDATOR_PERFORMANCE,
VALID_VALIDATOR_TYPES,
VALIDATOR_SCRIPTS,
VALIDATOR_NAMES,
)
__all__ = [
"execute_code_quality_scan",
"VALIDATOR_ARCHITECTURE",
"VALIDATOR_SECURITY",
"VALIDATOR_PERFORMANCE",
"VALID_VALIDATOR_TYPES",
"VALIDATOR_SCRIPTS",
"VALIDATOR_NAMES",
]

View File

@@ -1,24 +0,0 @@
# app/tasks/celery_tasks/export.py
"""
Legacy export tasks.
MIGRATED: All tasks have been migrated to app.modules.marketplace.tasks.
New locations:
- export_vendor_products_to_folder -> app.modules.marketplace.tasks.export_tasks
- export_marketplace_products -> app.modules.marketplace.tasks.export_tasks
Import from the new location:
from app.modules.marketplace.tasks import (
export_vendor_products_to_folder,
export_marketplace_products,
)
"""
# Re-export from new location for backward compatibility
from app.modules.marketplace.tasks.export_tasks import (
export_vendor_products_to_folder,
export_marketplace_products,
)
__all__ = ["export_vendor_products_to_folder", "export_marketplace_products"]

View File

@@ -1,22 +0,0 @@
# app/tasks/celery_tasks/letzshop.py
"""
Legacy Letzshop tasks.
MIGRATED: All tasks have been migrated to app.modules.marketplace.tasks.
New locations:
- process_historical_import -> app.modules.marketplace.tasks.import_tasks
- sync_vendor_directory -> app.modules.marketplace.tasks.sync_tasks
Import from the new location:
from app.modules.marketplace.tasks import (
process_historical_import,
sync_vendor_directory,
)
"""
# Re-export from new location for backward compatibility
from app.modules.marketplace.tasks.import_tasks import process_historical_import
from app.modules.marketplace.tasks.sync_tasks import sync_vendor_directory
__all__ = ["process_historical_import", "sync_vendor_directory"]

View File

@@ -1,17 +0,0 @@
# app/tasks/celery_tasks/marketplace.py
"""
Legacy marketplace tasks.
MIGRATED: All tasks have been migrated to app.modules.marketplace.tasks.
New locations:
- process_marketplace_import -> app.modules.marketplace.tasks.import_tasks
Import from the new location:
from app.modules.marketplace.tasks import process_marketplace_import
"""
# Re-export from new location for backward compatibility
from app.modules.marketplace.tasks.import_tasks import process_marketplace_import
__all__ = ["process_marketplace_import"]

View File

@@ -1,54 +0,0 @@
# app/tasks/celery_tasks/subscription.py
"""
Legacy subscription tasks.
MOSTLY MIGRATED: Most tasks have been migrated to app.modules.billing.tasks.
The following tasks now live in the billing module:
- reset_period_counters -> app.modules.billing.tasks.subscription
- check_trial_expirations -> app.modules.billing.tasks.subscription
- sync_stripe_status -> app.modules.billing.tasks.subscription
- cleanup_stale_subscriptions -> app.modules.billing.tasks.subscription
Remaining task (to be migrated to monitoring module):
- capture_capacity_snapshot
"""
import logging
from app.core.celery_config import celery_app
from app.tasks.celery_tasks.base import DatabaseTask
logger = logging.getLogger(__name__)
@celery_app.task(
    bind=True,
    base=DatabaseTask,
    name="app.tasks.celery_tasks.subscription.capture_capacity_snapshot",
)
def capture_capacity_snapshot(self):
    """Record the daily snapshot of platform capacity metrics.

    Scheduled to run daily at midnight. Persists vendor/product totals via
    the capacity forecast service and returns a small summary dict.

    TODO: Migrate to app.modules.monitoring.tasks
    """
    from app.services.capacity_forecast_service import capacity_forecast_service

    with self.get_db() as db:
        snap = capacity_forecast_service.capture_daily_snapshot(db)
        db.commit()
        logger.info(
            f"Captured capacity snapshot: {snap.total_vendors} vendors, "
            f"{snap.total_products} products"
        )
        summary = {
            "snapshot_id": snap.id,
            "snapshot_date": snap.snapshot_date.isoformat(),
            "total_vendors": snap.total_vendors,
            "total_products": snap.total_products,
        }
    return summary

View File

@@ -1,15 +0,0 @@
# app/tasks/celery_tasks/test_runner.py
"""
Celery tasks for test execution - LEGACY LOCATION
This file exists for backward compatibility.
The canonical location is now: app/modules/dev_tools/tasks/test_runner.py
All imports should use the new location:
from app.modules.dev_tools.tasks import execute_test_run
"""
# Re-export from canonical location for backward compatibility
from app.modules.dev_tools.tasks.test_runner import execute_test_run
__all__ = ["execute_test_run"]

View File

@@ -1,217 +0,0 @@
# app/tasks/code_quality_tasks.py
"""Background tasks for code quality scans."""
import json
import logging
import subprocess
from datetime import UTC, datetime
from app.core.database import SessionLocal
from app.services.admin_notification_service import admin_notification_service
from app.modules.dev_tools.models import ArchitectureScan, ArchitectureViolation
logger = logging.getLogger(__name__)
# Validator type constants — the three scan kinds the dev-tools UI can request
VALIDATOR_ARCHITECTURE = "architecture"
VALIDATOR_SECURITY = "security"
VALIDATOR_PERFORMANCE = "performance"
# Whitelist used to reject unknown validator_type values on a scan record
VALID_VALIDATOR_TYPES = [VALIDATOR_ARCHITECTURE, VALIDATOR_SECURITY, VALIDATOR_PERFORMANCE]
# Map validator types to the scripts executed (with --json) by the scan task
VALIDATOR_SCRIPTS = {
    VALIDATOR_ARCHITECTURE: "scripts/validate_architecture.py",
    VALIDATOR_SECURITY: "scripts/validate_security.py",
    VALIDATOR_PERFORMANCE: "scripts/validate_performance.py",
}
# Human-readable names for log messages and admin notifications
VALIDATOR_NAMES = {
    VALIDATOR_ARCHITECTURE: "Architecture",
    VALIDATOR_SECURITY: "Security",
    VALIDATOR_PERFORMANCE: "Performance",
}
def _get_git_commit_hash() -> str | None:
"""Get current git commit hash"""
try:
result = subprocess.run(
["git", "rev-parse", "HEAD"],
capture_output=True,
text=True,
timeout=5,
)
if result.returncode == 0:
return result.stdout.strip()[:40]
except Exception:
pass
return None
async def execute_code_quality_scan(scan_id: int):
    """
    Background task to execute a code quality scan.

    This task:
    1. Gets the scan record from DB
    2. Updates status to 'running'
    3. Runs the validator script as a subprocess with JSON output
    4. Parses JSON output and creates violation records
    5. Updates scan with results and status 'completed' or 'failed'

    The scan row doubles as a progress channel: `progress_message` is
    committed between phases so the frontend can poll for status.

    Args:
        scan_id: ID of the ArchitectureScan record
    """
    db = SessionLocal()
    scan = None  # kept in outer scope so the failure handler can update it
    try:
        # Get the scan record
        scan = db.query(ArchitectureScan).filter(ArchitectureScan.id == scan_id).first()
        if not scan:
            logger.error(f"Code quality scan {scan_id} not found")
            return
        validator_type = scan.validator_type
        if validator_type not in VALID_VALIDATOR_TYPES:
            raise ValueError(f"Invalid validator type: {validator_type}")
        script_path = VALIDATOR_SCRIPTS[validator_type]
        validator_name = VALIDATOR_NAMES[validator_type]
        # Update status to running and pin the commit being scanned
        scan.status = "running"
        scan.started_at = datetime.now(UTC)
        scan.progress_message = f"Running {validator_name} validator..."
        scan.git_commit_hash = _get_git_commit_hash()
        db.commit()
        logger.info(f"Starting {validator_name} scan (scan_id={scan_id})")
        # Run validator with JSON output
        start_time = datetime.now(UTC)
        try:
            result = subprocess.run(
                ["python", script_path, "--json"],
                capture_output=True,
                text=True,
                timeout=600,  # 10 minute timeout
            )
        except subprocess.TimeoutExpired:
            # A timed-out scan is terminal: mark failed and stop here
            logger.error(f"{validator_name} scan {scan_id} timed out after 10 minutes")
            scan.status = "failed"
            scan.error_message = "Scan timed out after 10 minutes"
            scan.completed_at = datetime.now(UTC)
            db.commit()
            return
        duration = (datetime.now(UTC) - start_time).total_seconds()
        # Update progress
        scan.progress_message = "Parsing results..."
        db.commit()
        # Parse JSON output (get only the JSON part, skip progress messages).
        # Validators may print human-readable progress lines before the JSON
        # document, so scan for the first line that starts with "{".
        try:
            lines = result.stdout.strip().split("\n")
            json_start = -1
            for i, line in enumerate(lines):
                if line.strip().startswith("{"):
                    json_start = i
                    break
            if json_start == -1:
                raise ValueError("No JSON output found in validator output")
            json_output = "\n".join(lines[json_start:])
            data = json.loads(json_output)
        except (json.JSONDecodeError, ValueError) as e:
            logger.error(f"Failed to parse {validator_name} validator output: {e}")
            logger.error(f"Stdout: {result.stdout[:1000]}")
            logger.error(f"Stderr: {result.stderr[:1000]}")
            scan.status = "failed"
            scan.error_message = f"Failed to parse validator output: {e}"
            scan.completed_at = datetime.now(UTC)
            scan.duration_seconds = duration
            db.commit()
            return
        # Update progress
        scan.progress_message = "Storing violations..."
        db.commit()
        # Create one violation record per reported violation, with defensive
        # defaults for any keys the validator omitted
        violations_data = data.get("violations", [])
        logger.info(f"Creating {len(violations_data)} {validator_name} violation records")
        for v in violations_data:
            violation = ArchitectureViolation(
                scan_id=scan.id,
                validator_type=validator_type,
                rule_id=v.get("rule_id", "UNKNOWN"),
                rule_name=v.get("rule_name", "Unknown Rule"),
                severity=v.get("severity", "warning"),
                file_path=v.get("file_path", ""),
                line_number=v.get("line_number", 0),
                message=v.get("message", ""),
                context=v.get("context", ""),
                suggestion=v.get("suggestion", ""),
                status="open",
            )
            db.add(violation)
        # Update scan with aggregate results
        scan.total_files = data.get("files_checked", 0)
        scan.total_violations = data.get("total_violations", len(violations_data))
        scan.errors = data.get("errors", 0)
        scan.warnings = data.get("warnings", 0)
        scan.duration_seconds = duration
        scan.completed_at = datetime.now(UTC)
        scan.progress_message = None
        # Set final status based on results
        if scan.errors > 0:
            scan.status = "completed_with_warnings"
        else:
            scan.status = "completed"
        db.commit()
        logger.info(
            f"{validator_name} scan {scan_id} completed: "
            f"files={scan.total_files}, violations={scan.total_violations}, "
            f"errors={scan.errors}, warnings={scan.warnings}, "
            f"duration={duration:.1f}s"
        )
    except Exception as e:
        logger.error(f"Code quality scan {scan_id} failed: {e}", exc_info=True)
        if scan is not None:
            try:
                scan.status = "failed"
                scan.error_message = str(e)[:500]  # Truncate long errors
                scan.completed_at = datetime.now(UTC)
                scan.progress_message = None
                # Create admin notification for scan failure
                admin_notification_service.create_notification(
                    db=db,
                    title="Code Quality Scan Failed",
                    message=f"{VALIDATOR_NAMES.get(scan.validator_type, 'Unknown')} scan failed: {str(e)[:200]}",
                    notification_type="error",
                    category="code_quality",
                    action_url="/admin/code-quality",
                )
                db.commit()
            except Exception as commit_error:
                logger.error(f"Failed to update scan status: {commit_error}")
                db.rollback()
    finally:
        # Defensive close: the hasattr guard suggests db may be replaced by a
        # stub without close() in some contexts — NOTE(review): confirm.
        if hasattr(db, "close") and callable(db.close):
            try:
                db.close()
            except Exception as close_error:
                logger.error(f"Error closing database session: {close_error}")

View File

@@ -1,20 +1,21 @@
# app/tasks/dispatcher.py
"""
Task dispatcher with feature flag for gradual Celery migration.
Task dispatcher for Celery background tasks.
This module provides a unified interface for dispatching background tasks.
Based on the USE_CELERY setting, tasks are either:
- Sent to Celery for persistent, reliable execution
- Run via FastAPI BackgroundTasks (fire-and-forget)
This module provides a unified interface for dispatching background tasks
to Celery workers. All tasks are dispatched to their canonical locations
in the respective modules.
This allows for gradual rollout and instant rollback.
Module task locations:
- Marketplace: app.modules.marketplace.tasks
- Billing: app.modules.billing.tasks
- Dev-Tools: app.modules.dev_tools.tasks
- Monitoring: app.modules.monitoring.tasks
"""
import logging
from typing import Any
from fastapi import BackgroundTasks
from app.core.config import settings
logger = logging.getLogger(__name__)
@@ -22,14 +23,13 @@ logger = logging.getLogger(__name__)
class TaskDispatcher:
"""
Dispatches tasks to either Celery or FastAPI BackgroundTasks.
Dispatches tasks to Celery workers.
Usage:
from app.tasks.dispatcher import task_dispatcher
# In an API endpoint:
task_id = task_dispatcher.dispatch_marketplace_import(
background_tasks=background_tasks,
job_id=job.id,
url=url,
marketplace=marketplace,
@@ -42,21 +42,27 @@ class TaskDispatcher:
"""Check if Celery is enabled."""
return settings.use_celery
def _require_celery(self, task_name: str) -> None:
"""Raise error if Celery is not enabled."""
if not self.use_celery:
raise RuntimeError(
f"Celery is required for {task_name}. "
f"Set USE_CELERY=true in environment."
)
def dispatch_marketplace_import(
self,
background_tasks: BackgroundTasks,
job_id: int,
url: str,
marketplace: str,
vendor_id: int,
batch_size: int = 1000,
language: str = "en",
) -> str | None:
) -> str:
"""
Dispatch marketplace import task.
Args:
background_tasks: FastAPI BackgroundTasks instance
job_id: ID of the MarketplaceImportJob record
url: URL to the CSV file
marketplace: Name of the marketplace
@@ -65,151 +71,100 @@ class TaskDispatcher:
language: Language code for translations
Returns:
str | None: Celery task ID if using Celery, None otherwise
str: Celery task ID
"""
if self.use_celery:
from app.tasks.celery_tasks.marketplace import process_marketplace_import
self._require_celery("marketplace import")
from app.modules.marketplace.tasks import process_marketplace_import
task = process_marketplace_import.delay(
job_id=job_id,
url=url,
marketplace=marketplace,
vendor_id=vendor_id,
batch_size=batch_size,
language=language,
)
logger.info(f"Dispatched marketplace import to Celery: task_id={task.id}")
return task.id
else:
from app.tasks.background_tasks import process_marketplace_import
background_tasks.add_task(
process_marketplace_import,
job_id=job_id,
url=url,
marketplace=marketplace,
vendor_id=vendor_id,
batch_size=batch_size,
language=language,
)
logger.info("Dispatched marketplace import to BackgroundTasks")
return None
task = process_marketplace_import.delay(
job_id=job_id,
url=url,
marketplace=marketplace,
vendor_id=vendor_id,
batch_size=batch_size,
language=language,
)
logger.info(f"Dispatched marketplace import to Celery: task_id={task.id}")
return task.id
def dispatch_historical_import(
self,
background_tasks: BackgroundTasks,
job_id: int,
vendor_id: int,
) -> str | None:
) -> str:
"""
Dispatch Letzshop historical import task.
Args:
background_tasks: FastAPI BackgroundTasks instance
job_id: ID of the LetzshopHistoricalImportJob record
vendor_id: ID of the vendor
Returns:
str | None: Celery task ID if using Celery, None otherwise
str: Celery task ID
"""
if self.use_celery:
from app.tasks.celery_tasks.letzshop import process_historical_import
self._require_celery("historical import")
from app.modules.marketplace.tasks import process_historical_import
task = process_historical_import.delay(job_id=job_id, vendor_id=vendor_id)
logger.info(f"Dispatched historical import to Celery: task_id={task.id}")
return task.id
else:
from app.tasks.letzshop_tasks import process_historical_import
background_tasks.add_task(
process_historical_import,
job_id=job_id,
vendor_id=vendor_id,
)
logger.info("Dispatched historical import to BackgroundTasks")
return None
task = process_historical_import.delay(job_id=job_id, vendor_id=vendor_id)
logger.info(f"Dispatched historical import to Celery: task_id={task.id}")
return task.id
def dispatch_code_quality_scan(
self,
background_tasks: BackgroundTasks,
scan_id: int,
) -> str | None:
) -> str:
"""
Dispatch code quality scan task.
Args:
background_tasks: FastAPI BackgroundTasks instance
scan_id: ID of the ArchitectureScan record
Returns:
str | None: Celery task ID if using Celery, None otherwise
str: Celery task ID
"""
if self.use_celery:
from app.tasks.celery_tasks.code_quality import execute_code_quality_scan
self._require_celery("code quality scan")
from app.modules.dev_tools.tasks import execute_code_quality_scan
task = execute_code_quality_scan.delay(scan_id=scan_id)
logger.info(f"Dispatched code quality scan to Celery: task_id={task.id}")
return task.id
else:
from app.tasks.code_quality_tasks import execute_code_quality_scan
background_tasks.add_task(execute_code_quality_scan, scan_id=scan_id)
logger.info("Dispatched code quality scan to BackgroundTasks")
return None
task = execute_code_quality_scan.delay(scan_id=scan_id)
logger.info(f"Dispatched code quality scan to Celery: task_id={task.id}")
return task.id
def dispatch_test_run(
self,
background_tasks: BackgroundTasks,
run_id: int,
test_path: str = "tests",
extra_args: list[str] | None = None,
) -> str | None:
) -> str:
"""
Dispatch test run task.
Args:
background_tasks: FastAPI BackgroundTasks instance
run_id: ID of the TestRun record
test_path: Path to tests
extra_args: Additional pytest arguments
Returns:
str | None: Celery task ID if using Celery, None otherwise
str: Celery task ID
"""
if self.use_celery:
from app.tasks.celery_tasks.test_runner import execute_test_run
self._require_celery("test run")
from app.modules.dev_tools.tasks import execute_test_run
task = execute_test_run.delay(
run_id=run_id,
test_path=test_path,
extra_args=extra_args,
)
logger.info(f"Dispatched test run to Celery: task_id={task.id}")
return task.id
else:
from app.tasks.test_runner_tasks import execute_test_run
background_tasks.add_task(
execute_test_run,
run_id=run_id,
test_path=test_path,
extra_args=extra_args,
)
logger.info("Dispatched test run to BackgroundTasks")
return None
task = execute_test_run.delay(
run_id=run_id,
test_path=test_path,
extra_args=extra_args,
)
logger.info(f"Dispatched test run to Celery: task_id={task.id}")
return task.id
def dispatch_product_export(
self,
vendor_id: int,
triggered_by: str,
include_inactive: bool = False,
) -> str | None:
) -> str:
"""
Dispatch product export task (Celery only).
This task is only available via Celery as it's designed for
asynchronous batch exports. For synchronous exports, use
the export service directly.
Dispatch product export task.
Args:
vendor_id: ID of the vendor to export
@@ -217,24 +172,32 @@ class TaskDispatcher:
include_inactive: Whether to include inactive products
Returns:
str | None: Celery task ID if using Celery, None otherwise
str: Celery task ID
"""
if self.use_celery:
from app.tasks.celery_tasks.export import export_vendor_products_to_folder
self._require_celery("product export")
from app.modules.marketplace.tasks import export_vendor_products_to_folder
task = export_vendor_products_to_folder.delay(
vendor_id=vendor_id,
triggered_by=triggered_by,
include_inactive=include_inactive,
)
logger.info(f"Dispatched product export to Celery: task_id={task.id}")
return task.id
else:
logger.warning(
"Product export task requires Celery. "
"Use letzshop_export_service directly for synchronous export."
)
return None
task = export_vendor_products_to_folder.delay(
vendor_id=vendor_id,
triggered_by=triggered_by,
include_inactive=include_inactive,
)
logger.info(f"Dispatched product export to Celery: task_id={task.id}")
return task.id
def dispatch_capacity_snapshot(self) -> str:
"""
Dispatch capacity snapshot capture task.
Returns:
str: Celery task ID
"""
self._require_celery("capacity snapshot")
from app.modules.monitoring.tasks import capture_capacity_snapshot
task = capture_capacity_snapshot.delay()
logger.info(f"Dispatched capacity snapshot to Celery: task_id={task.id}")
return task.id
def get_task_status(self, task_id: str) -> dict[str, Any]:
"""

View File

@@ -1,344 +0,0 @@
# app/tasks/letzshop_tasks.py
"""Background tasks for Letzshop integration."""
import logging
from datetime import UTC, datetime
from typing import Any, Callable
from app.core.database import SessionLocal
from app.services.admin_notification_service import admin_notification_service
from app.modules.marketplace.services.letzshop import (
LetzshopClientError,
LetzshopCredentialsService,
LetzshopOrderService,
LetzshopVendorSyncService,
)
from app.modules.marketplace.models import LetzshopHistoricalImportJob
logger = logging.getLogger(__name__)
def _get_credentials_service(db) -> LetzshopCredentialsService:
    """Create a Letzshop credentials service bound to the given database session."""
    return LetzshopCredentialsService(db)
def _get_order_service(db) -> LetzshopOrderService:
    """Create a Letzshop order service bound to the given database session."""
    return LetzshopOrderService(db)
def process_historical_import(job_id: int, vendor_id: int) -> None:
    """
    Background task for historical order import with progress tracking.

    Runs two sequential phases against the Letzshop API — confirmed
    shipments, then unconfirmed (pending) shipments — and persists progress
    into the LetzshopHistoricalImportJob row after each step so the frontend
    can poll it. On failure the job is marked "failed" and an admin
    notification is created; on success the vendor's credential sync status
    is set to "success".

    Args:
        job_id: ID of the LetzshopHistoricalImportJob record to update.
        vendor_id: ID of the vendor to import orders for.
    """
    db = SessionLocal()
    # Bound outside the try so the except handlers can still update the row.
    job = None
    try:
        # Get the import job
        job = (
            db.query(LetzshopHistoricalImportJob)
            .filter(LetzshopHistoricalImportJob.id == job_id)
            .first()
        )
        if not job:
            logger.error(f"Historical import job {job_id} not found")
            return
        # Mark as started
        job.status = "fetching"
        job.started_at = datetime.now(UTC)
        db.commit()
        creds_service = _get_credentials_service(db)
        order_service = _get_order_service(db)
        # Create progress callback for fetching; shared by both phases, it
        # commits after every page so pollers see fresh numbers.
        def fetch_progress_callback(page: int, total_fetched: int):
            """Update fetch progress in database."""
            job.current_page = page
            job.shipments_fetched = total_fetched
            db.commit()
        # Create progress callback for processing
        def create_processing_callback(
            phase: str,
        ) -> Callable[[int, int, int, int], None]:
            """Create a processing progress callback for a phase."""
            # NOTE(review): `phase` is not used inside the callback —
            # presumably reserved for future per-phase reporting; confirm.
            def callback(processed: int, imported: int, updated: int, skipped: int):
                job.orders_processed = processed
                job.orders_imported = imported
                job.orders_updated = updated
                job.orders_skipped = skipped
                db.commit()
            return callback
        with creds_service.create_client(vendor_id) as client:
            # ================================================================
            # Phase 1: Import confirmed orders
            # ================================================================
            job.current_phase = "confirmed"
            job.current_page = 0
            job.shipments_fetched = 0
            db.commit()
            logger.info(f"Job {job_id}: Fetching confirmed shipments for vendor {vendor_id}")
            confirmed_shipments = client.get_all_shipments_paginated(
                state="confirmed",
                page_size=50,
                progress_callback=fetch_progress_callback,
            )
            logger.info(f"Job {job_id}: Fetched {len(confirmed_shipments)} confirmed shipments")
            # Process confirmed shipments; counters are zeroed first so the
            # UI doesn't show leftovers from the fetch phase.
            job.status = "processing"
            job.orders_processed = 0
            job.orders_imported = 0
            job.orders_updated = 0
            job.orders_skipped = 0
            db.commit()
            confirmed_stats = order_service.import_historical_shipments(
                vendor_id=vendor_id,
                shipments=confirmed_shipments,
                match_products=True,
                progress_callback=create_processing_callback("confirmed"),
            )
            # Store confirmed stats on the job row for later display.
            job.confirmed_stats = {
                "total": confirmed_stats["total"],
                "imported": confirmed_stats["imported"],
                "updated": confirmed_stats["updated"],
                "skipped": confirmed_stats["skipped"],
                "products_matched": confirmed_stats["products_matched"],
                "products_not_found": confirmed_stats["products_not_found"],
            }
            job.products_matched = confirmed_stats["products_matched"]
            job.products_not_found = confirmed_stats["products_not_found"]
            db.commit()
            logger.info(
                f"Job {job_id}: Confirmed phase complete - "
                f"imported={confirmed_stats['imported']}, "
                f"updated={confirmed_stats['updated']}, "
                f"skipped={confirmed_stats['skipped']}"
            )
            # ================================================================
            # Phase 2: Import unconfirmed (pending) orders
            # Note: Letzshop API has no "declined" state. Declined items
            # are tracked at the inventory unit level, not shipment level.
            # Valid states: unconfirmed, confirmed, completed, accepted
            # ================================================================
            job.current_phase = "unconfirmed"
            job.status = "fetching"
            job.current_page = 0
            job.shipments_fetched = 0
            db.commit()
            logger.info(f"Job {job_id}: Fetching unconfirmed shipments for vendor {vendor_id}")
            unconfirmed_shipments = client.get_all_shipments_paginated(
                state="unconfirmed",
                page_size=50,
                progress_callback=fetch_progress_callback,
            )
            logger.info(f"Job {job_id}: Fetched {len(unconfirmed_shipments)} unconfirmed shipments")
            # Process unconfirmed shipments
            job.status = "processing"
            job.orders_processed = 0
            db.commit()
            unconfirmed_stats = order_service.import_historical_shipments(
                vendor_id=vendor_id,
                shipments=unconfirmed_shipments,
                match_products=True,
                progress_callback=create_processing_callback("unconfirmed"),
            )
            # Store unconfirmed stats (in declined_stats field for compatibility
            # with the older schema that tracked a "declined" phase).
            job.declined_stats = {
                "total": unconfirmed_stats["total"],
                "imported": unconfirmed_stats["imported"],
                "updated": unconfirmed_stats["updated"],
                "skipped": unconfirmed_stats["skipped"],
                "products_matched": unconfirmed_stats["products_matched"],
                "products_not_found": unconfirmed_stats["products_not_found"],
            }
            # Add to cumulative product matching stats (set in phase 1).
            job.products_matched += unconfirmed_stats["products_matched"]
            job.products_not_found += unconfirmed_stats["products_not_found"]
            logger.info(
                f"Job {job_id}: Unconfirmed phase complete - "
                f"imported={unconfirmed_stats['imported']}, "
                f"updated={unconfirmed_stats['updated']}, "
                f"skipped={unconfirmed_stats['skipped']}"
            )
            # ================================================================
            # Complete
            # ================================================================
            job.status = "completed"
            job.completed_at = datetime.now(UTC)
            db.commit()
            # Update credentials sync status
            creds_service.update_sync_status(vendor_id, "success", None)
            logger.info(f"Job {job_id}: Historical import completed successfully")
    except LetzshopClientError as e:
        # API-level failures: mark the job failed and alert admins, but never
        # let the bookkeeping itself raise out of the handler.
        logger.error(f"Job {job_id}: Letzshop API error: {e}")
        if job is not None:
            try:
                job.status = "failed"
                job.error_message = f"Letzshop API error: {e}"
                job.completed_at = datetime.now(UTC)
                # Get vendor name for notification
                order_service = _get_order_service(db)
                vendor = order_service.get_vendor(vendor_id)
                vendor_name = vendor.name if vendor else f"Vendor {vendor_id}"
                # Create admin notification for sync failure
                admin_notification_service.notify_order_sync_failure(
                    db=db,
                    vendor_name=vendor_name,
                    error_message=f"Historical import failed: {str(e)[:150]}",
                    vendor_id=vendor_id,
                )
                db.commit()
                creds_service = _get_credentials_service(db)
                creds_service.update_sync_status(vendor_id, "failed", str(e))
            except Exception as commit_error:
                logger.error(f"Job {job_id}: Failed to update job status: {commit_error}")
                db.rollback()
    except Exception as e:
        # Unexpected failures: same bookkeeping, but via the generic
        # critical-error notification channel.
        logger.error(f"Job {job_id}: Unexpected error: {e}", exc_info=True)
        if job is not None:
            try:
                job.status = "failed"
                job.error_message = str(e)
                job.completed_at = datetime.now(UTC)
                # Get vendor name for notification
                order_service = _get_order_service(db)
                vendor = order_service.get_vendor(vendor_id)
                vendor_name = vendor.name if vendor else f"Vendor {vendor_id}"
                # Create admin notification for critical error
                admin_notification_service.notify_critical_error(
                    db=db,
                    error_type="Historical Import",
                    error_message=f"Import job {job_id} failed for {vendor_name}: {str(e)[:150]}",
                    details={"job_id": job_id, "vendor_id": vendor_id, "vendor_name": vendor_name},
                )
                db.commit()
            except Exception as commit_error:
                logger.error(f"Job {job_id}: Failed to update job status: {commit_error}")
                db.rollback()
    finally:
        # Defensive close — presumably guards against SessionLocal being
        # swapped for a test double without close(); confirm.
        if hasattr(db, "close") and callable(db.close):
            try:
                db.close()
            except Exception as close_error:
                logger.error(f"Job {job_id}: Error closing database session: {close_error}")
# =============================================================================
# Vendor Directory Sync
# =============================================================================
def sync_letzshop_vendor_directory() -> dict[str, Any]:
    """
    Sync Letzshop vendor directory to local cache.

    This task fetches all vendors from Letzshop's public GraphQL API
    and updates the local letzshop_vendor_cache table.
    Should be run periodically (e.g., daily) via Celery beat.

    Returns:
        Dictionary with sync statistics (keys such as "created", "updated",
        "errors", "total_fetched" as produced by the sync service).

    Raises:
        Re-raises any exception from the sync service after a best-effort
        admin notification.
    """
    db = SessionLocal()
    stats: dict[str, Any] = {}
    try:
        logger.info("Starting Letzshop vendor directory sync task...")
        sync_service = LetzshopVendorSyncService(db)
        def progress_callback(page: int, fetched: int, total: int):
            """Log progress during sync."""
            logger.info(f"Vendor sync progress: page {page}, {fetched}/{total} vendors")
        stats = sync_service.sync_all_vendors(progress_callback=progress_callback)
        logger.info(
            f"Vendor directory sync completed: "
            f"{stats.get('created', 0)} created, "
            f"{stats.get('updated', 0)} updated, "
            f"{stats.get('errors', 0)} errors"
        )
        # Send admin notification if there were errors
        # NOTE(review): there is no db.commit() after this notification on the
        # success path — presumably the notification service commits
        # internally; confirm, otherwise the notification could be lost.
        if stats.get("errors", 0) > 0:
            admin_notification_service.notify_system_info(
                db=db,
                title="Letzshop Vendor Sync Completed with Errors",
                message=(
                    f"Synced {stats.get('total_fetched', 0)} vendors. "
                    f"Errors: {stats.get('errors', 0)}"
                ),
                details=stats,
            )
        return stats
    except Exception as e:
        logger.error(f"Vendor directory sync failed: {e}", exc_info=True)
        # Notify admins of failure; best-effort so the notification can never
        # mask the original exception re-raised below.
        try:
            admin_notification_service.notify_critical_error(
                db=db,
                error_type="Vendor Directory Sync",
                error_message=f"Failed to sync Letzshop vendor directory: {str(e)[:200]}",
                details={"error": str(e)},
            )
            db.commit()
        except Exception:
            pass
        raise
    finally:
        # Defensive close — presumably guards against test doubles lacking
        # close(); confirm.
        if hasattr(db, "close") and callable(db.close):
            try:
                db.close()
            except Exception as close_error:
                logger.error(f"Error closing database session: {close_error}")

# ============================================================================
# Next file in this dump: app/tasks/subscription_tasks.py
# ============================================================================
# app/tasks/subscription_tasks.py
"""
Background tasks for subscription management.
Provides scheduled tasks for:
- Resetting period counters at billing period end
- Expiring trials without payment methods
- Syncing subscription status with Stripe
- Capturing daily capacity snapshots
"""
import logging
from datetime import UTC, datetime, timedelta
from app.core.database import SessionLocal
from app.services.stripe_service import stripe_service
from app.modules.billing.models import SubscriptionStatus, VendorSubscription
logger = logging.getLogger(__name__)
async def reset_period_counters():
    """
    Reset order counters for subscriptions whose billing period has ended.

    Should run daily. For every active/trial subscription whose period_end
    has passed, zeroes orders_this_period, clears the limit-reached marker,
    and starts a new billing period from "now" (365 days for annual plans,
    30 days otherwise).

    Returns:
        dict with "reset_count": number of subscriptions rolled over.

    Raises:
        Re-raises any database error after rolling back.
    """
    db = SessionLocal()
    now = datetime.now(UTC)
    reset_count = 0
    try:
        # Find subscriptions where period has ended. Use the enum values
        # rather than raw string literals, consistent with the other tasks
        # in this module (see the Stripe status map, which ties ACTIVE/TRIAL
        # to these statuses).
        expired_periods = (
            db.query(VendorSubscription)
            .filter(
                VendorSubscription.period_end <= now,
                VendorSubscription.status.in_(
                    [
                        SubscriptionStatus.ACTIVE.value,
                        SubscriptionStatus.TRIAL.value,
                    ]
                ),
            )
            .all()
        )
        for subscription in expired_periods:
            old_period_end = subscription.period_end
            # Reset per-period usage counters.
            subscription.orders_this_period = 0
            subscription.orders_limit_reached_at = None
            # Start the next billing period; its length depends on whether
            # the plan is billed annually or monthly.
            period_days = 365 if subscription.is_annual else 30
            subscription.period_start = now
            subscription.period_end = now + timedelta(days=period_days)
            subscription.updated_at = now
            reset_count += 1
            logger.info(
                f"Reset period counters for vendor {subscription.vendor_id}: "
                f"old_period_end={old_period_end}, new_period_end={subscription.period_end}"
            )
        db.commit()
        logger.info(f"Reset period counters for {reset_count} subscriptions")
    except Exception as e:
        logger.error(f"Error resetting period counters: {e}")
        db.rollback()
        raise
    finally:
        db.close()
    return {"reset_count": reset_count}
async def check_trial_expirations():
    """
    Transition trials whose trial_ends_at timestamp has passed.

    A trial with a stored payment method is promoted to active; a trial
    without one is marked expired. Intended to run once per day.

    Returns:
        dict with "expired_count" and "activated_count".
    """
    session = SessionLocal()
    moment = datetime.now(UTC)
    expired_total = 0
    activated_total = 0
    try:
        # Trials whose end date is already in the past.
        ended_trials = (
            session.query(VendorSubscription)
            .filter(
                VendorSubscription.status == SubscriptionStatus.TRIAL.value,
                VendorSubscription.trial_ends_at <= moment,
            )
            .all()
        )
        for sub in ended_trials:
            if sub.stripe_payment_method_id:
                # Payment method on file: promote to a paid subscription.
                sub.status = SubscriptionStatus.ACTIVE.value
                activated_total += 1
                logger.info(
                    f"Activated subscription for vendor {sub.vendor_id} "
                    f"(trial ended with payment method)"
                )
            else:
                # Nothing to bill: the trial simply lapses.
                sub.status = SubscriptionStatus.EXPIRED.value
                expired_total += 1
                logger.info(
                    f"Expired trial for vendor {sub.vendor_id} (no payment method)"
                )
            sub.updated_at = moment
        session.commit()
        logger.info(
            f"Trial expiration check: {expired_total} expired, {activated_total} activated"
        )
    except Exception as e:
        logger.error(f"Error checking trial expirations: {e}")
        session.rollback()
        raise
    finally:
        session.close()
    return {"expired_count": expired_total, "activated_count": activated_total}
async def sync_stripe_status():
    """
    Sync subscription status with Stripe.

    Fetches current status from Stripe and updates local records.
    Handles cases where Stripe status differs from local status.
    Should run hourly.

    Returns:
        dict with "synced_count" and "error_count", or
        {"synced": 0, "skipped": True} when Stripe is not configured.

    Raises:
        Re-raises any task-level error after rolling back; per-subscription
        errors are only counted and logged.
    """
    if not stripe_service.is_configured:
        logger.warning("Stripe not configured, skipping sync")
        return {"synced": 0, "skipped": True}
    db = SessionLocal()
    synced_count = 0
    error_count = 0
    try:
        # Find subscriptions with Stripe IDs
        subscriptions = (
            db.query(VendorSubscription)
            .filter(VendorSubscription.stripe_subscription_id.isnot(None))
            .all()
        )
        for subscription in subscriptions:
            # Per-subscription try: one bad record must not abort the batch.
            try:
                # Fetch from Stripe
                stripe_sub = stripe_service.get_subscription(
                    subscription.stripe_subscription_id
                )
                if not stripe_sub:
                    logger.warning(
                        f"Stripe subscription {subscription.stripe_subscription_id} "
                        f"not found for vendor {subscription.vendor_id}"
                    )
                    continue
                # Map Stripe status to local status. "unpaid" is treated as
                # past-due; "incomplete" keeps the vendor in trial until the
                # first payment settles.
                status_map = {
                    "active": SubscriptionStatus.ACTIVE.value,
                    "trialing": SubscriptionStatus.TRIAL.value,
                    "past_due": SubscriptionStatus.PAST_DUE.value,
                    "canceled": SubscriptionStatus.CANCELLED.value,
                    "unpaid": SubscriptionStatus.PAST_DUE.value,
                    "incomplete": SubscriptionStatus.TRIAL.value,
                    "incomplete_expired": SubscriptionStatus.EXPIRED.value,
                }
                new_status = status_map.get(stripe_sub.status)
                if new_status and new_status != subscription.status:
                    old_status = subscription.status
                    subscription.status = new_status
                    subscription.updated_at = datetime.now(UTC)
                    logger.info(
                        f"Updated vendor {subscription.vendor_id} status: "
                        f"{old_status} -> {new_status} (from Stripe)"
                    )
                # Update period dates from Stripe (epoch seconds -> aware UTC).
                if stripe_sub.current_period_start:
                    subscription.period_start = datetime.fromtimestamp(
                        stripe_sub.current_period_start, tz=UTC
                    )
                if stripe_sub.current_period_end:
                    subscription.period_end = datetime.fromtimestamp(
                        stripe_sub.current_period_end, tz=UTC
                    )
                # Update payment method; Stripe may return either an ID
                # string or an expanded payment-method object.
                if stripe_sub.default_payment_method:
                    subscription.stripe_payment_method_id = (
                        stripe_sub.default_payment_method
                        if isinstance(stripe_sub.default_payment_method, str)
                        else stripe_sub.default_payment_method.id
                    )
                # Counts every subscription examined, not only changed ones.
                synced_count += 1
            except Exception as e:
                logger.error(
                    f"Error syncing subscription {subscription.stripe_subscription_id}: {e}"
                )
                error_count += 1
        db.commit()
        logger.info(f"Stripe sync complete: {synced_count} synced, {error_count} errors")
    except Exception as e:
        logger.error(f"Error in Stripe sync task: {e}")
        db.rollback()
        raise
    finally:
        db.close()
    return {"synced_count": synced_count, "error_count": error_count}
async def cleanup_stale_subscriptions():
    """
    Weekly housekeeping for subscriptions left in inconsistent states.

    Currently handles cancelled subscriptions whose billing period ended
    more than 30 days ago, promoting them to the terminal "expired" state.

    Returns:
        dict with "cleaned_count": number of subscriptions expired.
    """
    session = SessionLocal()
    now = datetime.now(UTC)
    cleaned = 0
    try:
        cutoff = now - timedelta(days=30)
        # Cancelled subscriptions whose period ended before the cutoff.
        leftovers = (
            session.query(VendorSubscription)
            .filter(
                VendorSubscription.status == SubscriptionStatus.CANCELLED.value,
                VendorSubscription.period_end < cutoff,
            )
            .all()
        )
        for sub in leftovers:
            # Fully terminate the subscription.
            sub.status = SubscriptionStatus.EXPIRED.value
            sub.updated_at = now
            cleaned += 1
            logger.info(
                f"Marked stale cancelled subscription as expired: "
                f"vendor {sub.vendor_id}"
            )
        session.commit()
        logger.info(f"Cleaned up {cleaned} stale subscriptions")
    except Exception as e:
        logger.error(f"Error cleaning up stale subscriptions: {e}")
        session.rollback()
        raise
    finally:
        session.close()
    return {"cleaned_count": cleaned}
async def capture_capacity_snapshot():
    """
    Record today's platform capacity metrics for trend analysis.

    Delegates snapshot creation to the capacity forecast service and commits
    the resulting row. Intended to run once per day (e.g., at midnight).

    Returns:
        dict with snapshot_id, snapshot_date (ISO string), total_vendors,
        and total_products.
    """
    # Imported lazily to avoid a module-level import cycle.
    from app.services.capacity_forecast_service import capacity_forecast_service
    session = SessionLocal()
    try:
        snap = capacity_forecast_service.capture_daily_snapshot(session)
        session.commit()
        logger.info(
            f"Captured capacity snapshot: {snap.total_vendors} vendors, "
            f"{snap.total_products} products"
        )
        return {
            "snapshot_id": snap.id,
            "snapshot_date": snap.snapshot_date.isoformat(),
            "total_vendors": snap.total_vendors,
            "total_products": snap.total_products,
        }
    except Exception as e:
        logger.error(f"Error capturing capacity snapshot: {e}")
        session.rollback()
        raise
    finally:
        session.close()

# ============================================================================
# Next file in this dump: app/tasks/test_runner_tasks.py
# ============================================================================
# app/tasks/test_runner_tasks.py
"""Background tasks for test runner."""
import logging
from app.core.database import SessionLocal
from app.services.test_runner_service import test_runner_service
from app.modules.dev_tools.models import TestRun
logger = logging.getLogger(__name__)
async def execute_test_run(
    run_id: int,
    test_path: str = "tests",
    extra_args: list[str] | None = None,
):
    """Background task to execute pytest tests.

    Loads the TestRun row, delegates execution to the test runner service,
    and commits the resulting status and counters. On failure the run is
    marked "error" (best-effort).

    Args:
        run_id: ID of the TestRun record
        test_path: Path to tests (relative to project root)
        extra_args: Additional pytest arguments
    """
    db = SessionLocal()
    # Bound outside the try so the except handler can still mark it "error".
    test_run = None
    try:
        # Get the test run record
        test_run = db.query(TestRun).filter(TestRun.id == run_id).first()
        if not test_run:
            logger.error(f"Test run {run_id} not found")
            return
        logger.info(f"Starting test execution: Run {run_id}, Path: {test_path}")
        # Execute the tests
        # NOTE(review): this reaches into a private method of the service;
        # consider exposing a public entry point on test_runner_service.
        test_runner_service._execute_tests(db, test_run, test_path, extra_args)
        db.commit()
        # NOTE(review): assumes _execute_tests always sets duration_seconds —
        # the :.1f format below raises TypeError if it is still None; confirm.
        logger.info(
            f"Test run {run_id} completed: "
            f"status={test_run.status}, passed={test_run.passed}, "
            f"failed={test_run.failed}, duration={test_run.duration_seconds:.1f}s"
        )
    except Exception as e:
        logger.error(f"Test run {run_id} failed: {e}", exc_info=True)
        if test_run is not None:
            try:
                test_run.status = "error"
                db.commit()
            except Exception as commit_error:
                logger.error(f"Failed to update test run status: {commit_error}")
                db.rollback()
    finally:
        # Defensive close — presumably guards against SessionLocal being
        # swapped for a test double without close(); confirm.
        if hasattr(db, "close") and callable(db.close):
            try:
                db.close()
            except Exception as close_error:
                logger.error(f"Error closing database session: {close_error}")