Some checks failed
Move all auth schemas (UserContext, UserLogin, LoginResponse, etc.) from legacy models/schema/auth.py to app/modules/tenancy/schemas/auth.py per MOD-019. Update 84 import sites across 14 modules. Legacy file now re-exports for backwards compatibility. Add missing tenancy service methods for cross-module consumers: - merchant_service.get_merchant_by_owner_id() - merchant_service.get_merchant_count_for_owner() - admin_service.get_user_by_id() (public, was private-only) - platform_service.get_active_store_count() Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
261 lines
8.8 KiB
Python
261 lines
8.8 KiB
Python
# app/modules/monitoring/routes/api/admin_tasks.py
|
|
"""
|
|
Background Tasks Monitoring API
|
|
Provides unified view of all background tasks across the system
|
|
"""
|
|
|
|
from datetime import UTC, datetime
|
|
|
|
from fastapi import APIRouter, Depends, Query
|
|
from pydantic import BaseModel
|
|
from sqlalchemy.orm import Session
|
|
|
|
from app.api.deps import get_current_admin_api
|
|
from app.core.database import get_db
|
|
from app.modules.monitoring.services.background_tasks_service import (
|
|
background_tasks_service,
|
|
)
|
|
from app.modules.tenancy.schemas.auth import UserContext
|
|
|
|
# Router mounted under /tasks; every endpoint below requires an admin
# user via the get_current_admin_api dependency.
admin_tasks_router = APIRouter(prefix="/tasks")
class BackgroundTaskResponse(BaseModel):
    """Unified background task response"""

    # Primary key of the underlying job/run/scan row.
    id: int
    task_type: str  # 'import', 'test_run', or 'code_quality_scan'
    # Raw status string from the source table (e.g. "processing", "running").
    status: str
    # ISO-8601 timestamps; None when the task has not started / finished.
    started_at: str | None
    completed_at: str | None
    # Elapsed seconds; computed against the wall clock for in-flight tasks.
    duration_seconds: float | None
    # Human-readable one-line summary of the task.
    description: str
    # Who started the task, if recorded (username for imports).
    triggered_by: str | None
    error_message: str | None
    # Type-specific counters/metadata — keys differ per task_type.
    details: dict | None
    celery_task_id: str | None = None  # Celery task ID for Flower linking
class BackgroundTasksStatsResponse(BaseModel):
    """Statistics for background tasks"""

    # Aggregate counters summed across all three task categories.
    total_tasks: int
    running: int
    completed: int
    failed: int
    tasks_today: int
    # Mean duration in seconds; currently sourced from test-run stats only.
    avg_duration_seconds: float | None

    # By type
    # Raw per-category stat dicts as returned by the monitoring service.
    import_jobs: dict
    test_runs: dict
    code_quality_scans: dict
def _convert_import_to_response(job) -> BackgroundTaskResponse:
    """Map a MarketplaceImportJob row onto the unified task response."""
    # Elapsed time: finished jobs use their recorded span; a job still
    # "processing" is measured live against the current wall clock.
    elapsed = None
    if job.started_at:
        if job.completed_at:
            elapsed = (job.completed_at - job.started_at).total_seconds()
        elif job.status == "processing":
            elapsed = (datetime.now(UTC) - job.started_at).total_seconds()

    # Truncate long source URLs so the description stays readable.
    source = job.source_url
    if len(source) > 50:
        source = f"{source[:50]}..."

    return BackgroundTaskResponse(
        id=job.id,
        task_type="import",
        status=job.status,
        started_at=job.started_at.isoformat() if job.started_at else None,
        completed_at=job.completed_at.isoformat() if job.completed_at else None,
        duration_seconds=elapsed,
        description=f"Import from {job.marketplace}: {source}",
        triggered_by=job.user.username if job.user else None,
        error_message=job.error_message,
        details={
            "marketplace": job.marketplace,
            "store_id": job.store_id,
            "imported": job.imported_count,
            "updated": job.updated_count,
            "errors": job.error_count,
            "total_processed": job.total_processed,
        },
        celery_task_id=getattr(job, "celery_task_id", None),
    )
def _convert_test_run_to_response(run) -> BackgroundTaskResponse:
    """Map a TestRun row onto the unified task response."""
    # Finished runs carry their own duration; a run still "running" is
    # measured live against the current wall clock.
    elapsed = run.duration_seconds
    if run.status == "running" and run.timestamp:
        elapsed = (datetime.now(UTC) - run.timestamp).total_seconds()

    started = run.timestamp.isoformat() if run.timestamp else None

    return BackgroundTaskResponse(
        id=run.id,
        task_type="test_run",
        status=run.status,
        started_at=started,
        # Only a start timestamp is available for test runs, so no
        # completion time is reported.
        completed_at=None,
        duration_seconds=elapsed,
        description=f"Test run: {run.test_path}",
        triggered_by=run.triggered_by,
        error_message=None,
        details={
            "test_path": run.test_path,
            "total_tests": run.total_tests,
            "passed": run.passed,
            "failed": run.failed,
            "errors": run.errors,
            "pass_rate": run.pass_rate,
            "git_branch": run.git_branch,
        },
        celery_task_id=getattr(run, "celery_task_id", None),
    )
def _convert_scan_to_response(scan) -> BackgroundTaskResponse:
    """Map an ArchitectureScan row onto the unified task response."""
    elapsed = scan.duration_seconds
    if scan.started_at and scan.status in ("pending", "running"):
        # Scan has not finished yet: measure live against the wall clock.
        elapsed = (datetime.now(UTC) - scan.started_at).total_seconds()

    # Human-readable label; unknown validator types fall back to the
    # raw validator_type string.
    labels = {
        "architecture": "Architecture",
        "security": "Security",
        "performance": "Performance",
    }
    label = labels.get(scan.validator_type, scan.validator_type)

    return BackgroundTaskResponse(
        id=scan.id,
        task_type="code_quality_scan",
        status=scan.status,
        started_at=scan.started_at.isoformat() if scan.started_at else None,
        completed_at=scan.completed_at.isoformat() if scan.completed_at else None,
        duration_seconds=elapsed,
        description=f"{label} code quality scan",
        triggered_by=scan.triggered_by,
        error_message=scan.error_message,
        details={
            "validator_type": scan.validator_type,
            "total_files": scan.total_files,
            "total_violations": scan.total_violations,
            "errors": scan.errors,
            "warnings": scan.warnings,
            "git_commit_hash": scan.git_commit_hash,
            "progress_message": scan.progress_message,
        },
        celery_task_id=getattr(scan, "celery_task_id", None),
    )
@admin_tasks_router.get("", response_model=list[BackgroundTaskResponse])
|
|
async def list_background_tasks(
|
|
status: str | None = Query(None, description="Filter by status"),
|
|
task_type: str | None = Query(
|
|
None, description="Filter by type (import, test_run, code_quality_scan)"
|
|
),
|
|
limit: int = Query(50, ge=1, le=200),
|
|
db: Session = Depends(get_db),
|
|
current_user: UserContext = Depends(get_current_admin_api),
|
|
):
|
|
"""
|
|
List all background tasks across the system
|
|
|
|
Returns a unified view of import jobs, test runs, and code quality scans.
|
|
"""
|
|
tasks = []
|
|
|
|
# Get import jobs
|
|
if task_type is None or task_type == "import":
|
|
import_jobs = background_tasks_service.get_import_jobs(
|
|
db, status=status, limit=limit
|
|
)
|
|
tasks.extend([_convert_import_to_response(job) for job in import_jobs])
|
|
|
|
# Get test runs
|
|
if task_type is None or task_type == "test_run":
|
|
test_runs = background_tasks_service.get_test_runs(
|
|
db, status=status, limit=limit
|
|
)
|
|
tasks.extend([_convert_test_run_to_response(run) for run in test_runs])
|
|
|
|
# Get code quality scans
|
|
if task_type is None or task_type == "code_quality_scan":
|
|
scans = background_tasks_service.get_code_quality_scans(
|
|
db, status=status, limit=limit
|
|
)
|
|
tasks.extend([_convert_scan_to_response(scan) for scan in scans])
|
|
|
|
# Sort by start time (most recent first)
|
|
tasks.sort(
|
|
key=lambda t: t.started_at or "1970-01-01T00:00:00",
|
|
reverse=True,
|
|
)
|
|
|
|
return tasks[:limit]
|
|
|
|
|
|
@admin_tasks_router.get("/stats", response_model=BackgroundTasksStatsResponse)
|
|
async def get_background_tasks_stats(
|
|
db: Session = Depends(get_db),
|
|
current_user: UserContext = Depends(get_current_admin_api),
|
|
):
|
|
"""
|
|
Get statistics for background tasks
|
|
"""
|
|
import_stats = background_tasks_service.get_import_stats(db)
|
|
test_stats = background_tasks_service.get_test_run_stats(db)
|
|
scan_stats = background_tasks_service.get_scan_stats(db)
|
|
|
|
# Combined stats
|
|
total_running = (
|
|
import_stats["running"] + test_stats["running"] + scan_stats["running"]
|
|
)
|
|
total_completed = (
|
|
import_stats["completed"] + test_stats["completed"] + scan_stats["completed"]
|
|
)
|
|
total_failed = (
|
|
import_stats["failed"] + test_stats["failed"] + scan_stats["failed"]
|
|
)
|
|
total_tasks = import_stats["total"] + test_stats["total"] + scan_stats["total"]
|
|
tasks_today = import_stats["today"] + test_stats["today"] + scan_stats["today"]
|
|
|
|
return BackgroundTasksStatsResponse(
|
|
total_tasks=total_tasks,
|
|
running=total_running,
|
|
completed=total_completed,
|
|
failed=total_failed,
|
|
tasks_today=tasks_today,
|
|
avg_duration_seconds=test_stats.get("avg_duration"),
|
|
import_jobs=import_stats,
|
|
test_runs=test_stats,
|
|
code_quality_scans=scan_stats,
|
|
)
|
|
|
|
|
|
@admin_tasks_router.get("/running", response_model=list[BackgroundTaskResponse])
|
|
async def list_running_tasks(
|
|
db: Session = Depends(get_db),
|
|
current_user: UserContext = Depends(get_current_admin_api),
|
|
):
|
|
"""
|
|
List currently running background tasks
|
|
"""
|
|
tasks = []
|
|
|
|
# Running imports
|
|
running_imports = background_tasks_service.get_running_imports(db)
|
|
tasks.extend([_convert_import_to_response(job) for job in running_imports])
|
|
|
|
# Running test runs
|
|
running_tests = background_tasks_service.get_running_test_runs(db)
|
|
tasks.extend([_convert_test_run_to_response(run) for run in running_tests])
|
|
|
|
# Running code quality scans
|
|
running_scans = background_tasks_service.get_running_scans(db)
|
|
tasks.extend([_convert_scan_to_response(scan) for scan in running_scans])
|
|
|
|
return tasks
|