feat: implement background task architecture for code quality scans
- Add status fields to ArchitectureScan model (status, started_at, completed_at, error_message, progress_message) - Create database migration for new status fields - Create background task function execute_code_quality_scan() - Update API to return 202 with job IDs and support polling - Add code quality scans to unified BackgroundTasksService - Integrate scans into background tasks API and page - Implement frontend polling with 3-second interval - Add progress banner showing scan status - Users can navigate away while scans run in background - Document the implementation in architecture docs 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -22,7 +22,7 @@ class BackgroundTaskResponse(BaseModel):
|
||||
"""Unified background task response"""
|
||||
|
||||
id: int
|
||||
task_type: str # 'import' or 'test_run'
|
||||
task_type: str # 'import', 'test_run', or 'code_quality_scan'
|
||||
status: str
|
||||
started_at: str | None
|
||||
completed_at: str | None
|
||||
@@ -46,6 +46,7 @@ class BackgroundTasksStatsResponse(BaseModel):
|
||||
# By type
|
||||
import_jobs: dict
|
||||
test_runs: dict
|
||||
code_quality_scans: dict
|
||||
|
||||
|
||||
def _convert_import_to_response(job) -> BackgroundTaskResponse:
|
||||
@@ -107,11 +108,47 @@ def _convert_test_run_to_response(run) -> BackgroundTaskResponse:
|
||||
)
|
||||
|
||||
|
||||
def _convert_scan_to_response(scan) -> BackgroundTaskResponse:
    """Convert ArchitectureScan to BackgroundTaskResponse"""
    # For in-flight scans report a live elapsed time; otherwise use the
    # duration stored on the row.
    if scan.status in ("pending", "running") and scan.started_at:
        duration = (datetime.now(UTC) - scan.started_at).total_seconds()
    else:
        duration = scan.duration_seconds

    # Map validator type to human-readable name (fall back to the raw type)
    display_names = {
        "architecture": "Architecture",
        "security": "Security",
        "performance": "Performance",
    }
    label = display_names.get(scan.validator_type, scan.validator_type)

    started = scan.started_at.isoformat() if scan.started_at else None
    completed = scan.completed_at.isoformat() if scan.completed_at else None

    return BackgroundTaskResponse(
        id=scan.id,
        task_type="code_quality_scan",
        status=scan.status,
        started_at=started,
        completed_at=completed,
        duration_seconds=duration,
        description=f"{label} code quality scan",
        triggered_by=scan.triggered_by,
        error_message=scan.error_message,
        details={
            "validator_type": scan.validator_type,
            "total_files": scan.total_files,
            "total_violations": scan.total_violations,
            "errors": scan.errors,
            "warnings": scan.warnings,
            "git_commit_hash": scan.git_commit_hash,
            "progress_message": scan.progress_message,
        },
    )
|
||||
|
||||
|
||||
@router.get("/tasks", response_model=list[BackgroundTaskResponse])
|
||||
async def list_background_tasks(
|
||||
status: str | None = Query(None, description="Filter by status"),
|
||||
task_type: str | None = Query(
|
||||
None, description="Filter by type (import, test_run)"
|
||||
None, description="Filter by type (import, test_run, code_quality_scan)"
|
||||
),
|
||||
limit: int = Query(50, ge=1, le=200),
|
||||
db: Session = Depends(get_db),
|
||||
@@ -120,7 +157,7 @@ async def list_background_tasks(
|
||||
"""
|
||||
List all background tasks across the system
|
||||
|
||||
Returns a unified view of import jobs and test runs.
|
||||
Returns a unified view of import jobs, test runs, and code quality scans.
|
||||
"""
|
||||
tasks = []
|
||||
|
||||
@@ -138,6 +175,13 @@ async def list_background_tasks(
|
||||
)
|
||||
tasks.extend([_convert_test_run_to_response(run) for run in test_runs])
|
||||
|
||||
# Get code quality scans
|
||||
if task_type is None or task_type == "code_quality_scan":
|
||||
scans = background_tasks_service.get_code_quality_scans(
|
||||
db, status=status, limit=limit
|
||||
)
|
||||
tasks.extend([_convert_scan_to_response(scan) for scan in scans])
|
||||
|
||||
# Sort by start time (most recent first)
|
||||
tasks.sort(
|
||||
key=lambda t: t.started_at or "1970-01-01T00:00:00",
|
||||
@@ -157,22 +201,31 @@ async def get_background_tasks_stats(
|
||||
"""
|
||||
import_stats = background_tasks_service.get_import_stats(db)
|
||||
test_stats = background_tasks_service.get_test_run_stats(db)
|
||||
scan_stats = background_tasks_service.get_scan_stats(db)
|
||||
|
||||
# Combined stats
|
||||
total_running = import_stats["running"] + test_stats["running"]
|
||||
total_completed = import_stats["completed"] + test_stats["completed"]
|
||||
total_failed = import_stats["failed"] + test_stats["failed"]
|
||||
total_tasks = import_stats["total"] + test_stats["total"]
|
||||
total_running = (
|
||||
import_stats["running"] + test_stats["running"] + scan_stats["running"]
|
||||
)
|
||||
total_completed = (
|
||||
import_stats["completed"] + test_stats["completed"] + scan_stats["completed"]
|
||||
)
|
||||
total_failed = (
|
||||
import_stats["failed"] + test_stats["failed"] + scan_stats["failed"]
|
||||
)
|
||||
total_tasks = import_stats["total"] + test_stats["total"] + scan_stats["total"]
|
||||
tasks_today = import_stats["today"] + test_stats["today"] + scan_stats["today"]
|
||||
|
||||
return BackgroundTasksStatsResponse(
|
||||
total_tasks=total_tasks,
|
||||
running=total_running,
|
||||
completed=total_completed,
|
||||
failed=total_failed,
|
||||
tasks_today=import_stats["today"] + test_stats["today"],
|
||||
tasks_today=tasks_today,
|
||||
avg_duration_seconds=test_stats.get("avg_duration"),
|
||||
import_jobs=import_stats,
|
||||
test_runs=test_stats,
|
||||
code_quality_scans=scan_stats,
|
||||
)
|
||||
|
||||
|
||||
@@ -194,4 +247,8 @@ async def list_running_tasks(
|
||||
running_tests = background_tasks_service.get_running_test_runs(db)
|
||||
tasks.extend([_convert_test_run_to_response(run) for run in running_tests])
|
||||
|
||||
# Running code quality scans
|
||||
running_scans = background_tasks_service.get_running_scans(db)
|
||||
tasks.extend([_convert_scan_to_response(scan) for scan in running_scans])
|
||||
|
||||
return tasks
|
||||
|
||||
@@ -1,24 +1,42 @@
|
||||
"""
|
||||
Code Quality API Endpoints
|
||||
RESTful API for architecture validation and violation management
|
||||
RESTful API for code quality validation and violation management
|
||||
Supports multiple validator types: architecture, security, performance
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from datetime import UTC, datetime
|
||||
from enum import Enum
|
||||
|
||||
from fastapi import APIRouter, Depends, Query
|
||||
from fastapi import APIRouter, BackgroundTasks, Depends, Query
|
||||
from pydantic import BaseModel, Field
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.api.deps import get_current_admin_api
|
||||
from app.core.database import get_db
|
||||
from app.exceptions import ViolationNotFoundException
|
||||
from app.services.code_quality_service import code_quality_service
|
||||
from app.exceptions import ScanNotFoundException, ViolationNotFoundException
|
||||
from app.services.code_quality_service import (
|
||||
VALID_VALIDATOR_TYPES,
|
||||
code_quality_service,
|
||||
)
|
||||
from app.tasks.code_quality_tasks import execute_code_quality_scan
|
||||
from models.database.architecture_scan import ArchitectureScan
|
||||
from models.database.user import User
|
||||
from models.schema.stats import CodeQualityDashboardStatsResponse
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
# Enums and Constants
|
||||
|
||||
|
||||
class ValidatorType(str, Enum):
    """Supported validator types.

    Mixes in str so members behave as their plain string values when
    compared or serialized (used as query parameters and in JSON).
    """

    ARCHITECTURE = "architecture"
    SECURITY = "security"
    PERFORMANCE = "performance"
|
||||
|
||||
|
||||
# Pydantic Models for API
|
||||
|
||||
|
||||
@@ -27,23 +45,65 @@ class ScanResponse(BaseModel):
|
||||
|
||||
id: int
|
||||
timestamp: str
|
||||
validator_type: str
|
||||
status: str
|
||||
started_at: str | None
|
||||
completed_at: str | None
|
||||
progress_message: str | None
|
||||
total_files: int
|
||||
total_violations: int
|
||||
errors: int
|
||||
warnings: int
|
||||
duration_seconds: float
|
||||
triggered_by: str
|
||||
triggered_by: str | None
|
||||
git_commit_hash: str | None
|
||||
error_message: str | None = None
|
||||
|
||||
class Config:
|
||||
from_attributes = True
|
||||
|
||||
|
||||
class ScanRequest(BaseModel):
    """Request model for triggering scans.

    validator_types defaults to all validators; clients may narrow it to a
    subset.
    """

    validator_types: list[ValidatorType] = Field(
        # default_factory gives each model instance a fresh list; a plain
        # mutable default can be aliased between instances (pydantic v1 does
        # not copy defaults).
        default_factory=lambda: [
            ValidatorType.ARCHITECTURE,
            ValidatorType.SECURITY,
            ValidatorType.PERFORMANCE,
        ],
        description="Validator types to run",
    )
|
||||
|
||||
|
||||
class ScanJobResponse(BaseModel):
    """Response model for a queued scan job"""

    id: int  # ArchitectureScan row id; poll /scans/{id}/status with it
    validator_type: str  # e.g. "architecture", "security", "performance"
    status: str  # "pending" at queue time; the background task advances it
    message: str  # human-readable confirmation, e.g. "Security scan queued"
|
||||
|
||||
|
||||
class MultiScanJobResponse(BaseModel):
    """Response model for multiple queued scans (background task pattern)"""

    scans: list[ScanJobResponse]  # one entry per queued validator
    message: str  # summary line, e.g. "Scans queued for: architecture, security"
    status_url: str  # page/endpoint the client can poll for progress
|
||||
|
||||
|
||||
class MultiScanResponse(BaseModel):
    """Response model for completed scans (legacy sync pattern)"""

    scans: list[ScanResponse]  # fully-populated scan results
    total_violations: int  # presumably aggregated across scans — verify against caller
    total_errors: int
    total_warnings: int
|
||||
|
||||
|
||||
class ViolationResponse(BaseModel):
|
||||
"""Response model for a violation"""
|
||||
|
||||
id: int
|
||||
scan_id: int
|
||||
validator_type: str
|
||||
rule_id: str
|
||||
rule_name: str
|
||||
severity: str
|
||||
@@ -111,37 +171,124 @@ class AddCommentRequest(BaseModel):
|
||||
# API Endpoints
|
||||
|
||||
|
||||
@router.post("/scan", response_model=ScanResponse)
|
||||
async def trigger_scan(
|
||||
db: Session = Depends(get_db), current_user: User = Depends(get_current_admin_api)
|
||||
):
|
||||
"""
|
||||
Trigger a new architecture scan
|
||||
|
||||
Requires authentication. Runs the validator script and stores results.
|
||||
Domain exceptions (ScanTimeoutException, ScanParseException) bubble up to global handler.
|
||||
"""
|
||||
scan = code_quality_service.run_scan(
|
||||
db, triggered_by=f"manual:{current_user.username}"
|
||||
)
|
||||
db.commit()
|
||||
|
||||
def _scan_to_response(scan: ArchitectureScan) -> ScanResponse:
    """Convert an ArchitectureScan ORM row to a ScanResponse.

    Nullable columns are normalized so pending scans (which have no results
    yet) serialize cleanly: datetimes become ISO-8601 strings or None, and
    numeric counters default to 0.
    """
    return ScanResponse(
        id=scan.id,
        # NOTE(review): may be None for rows without a timestamp — confirm
        # ScanResponse.timestamp allows None.
        timestamp=scan.timestamp.isoformat() if scan.timestamp else None,
        validator_type=scan.validator_type,
        status=scan.status,
        started_at=scan.started_at.isoformat() if scan.started_at else None,
        completed_at=scan.completed_at.isoformat() if scan.completed_at else None,
        progress_message=scan.progress_message,
        total_files=scan.total_files or 0,
        total_violations=scan.total_violations or 0,
        errors=scan.errors or 0,
        warnings=scan.warnings or 0,
        duration_seconds=scan.duration_seconds or 0.0,
        triggered_by=scan.triggered_by,
        git_commit_hash=scan.git_commit_hash,
        error_message=scan.error_message,
    )
|
||||
|
||||
|
||||
@router.post("/scan", response_model=MultiScanJobResponse, status_code=202)
async def trigger_scan(
    request: ScanRequest | None = None,
    background_tasks: BackgroundTasks = None,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """
    Trigger code quality scan(s) as background tasks.

    By default runs all validators. Specify validator_types to run specific validators.
    Returns immediately (202) with job IDs. Poll /scans/{scan_id}/status for progress.

    Scans run asynchronously - users can browse other pages while scans execute.

    Args:
        request: Optional body selecting validators to run (defaults to all).
        background_tasks: FastAPI-injected task queue; tasks run after the response.
        db: Database session.
        current_user: Authenticated admin, recorded as the trigger source.
    """
    # Body is optional: an empty POST means "run every validator".
    if request is None:
        request = ScanRequest()

    scan_jobs = []
    triggered_by = f"manual:{current_user.username}"

    for vtype in request.validator_types:
        # Create scan record with pending status
        scan = ArchitectureScan(
            timestamp=datetime.now(UTC),
            validator_type=vtype.value,
            status="pending",
            triggered_by=triggered_by,
        )
        db.add(scan)
        db.flush()  # Assigns scan.id without committing yet

        # Queue background task (executes after the response is sent)
        background_tasks.add_task(execute_code_quality_scan, scan.id)

        scan_jobs.append(
            ScanJobResponse(
                id=scan.id,
                validator_type=vtype.value,
                status="pending",
                message=f"{vtype.value.capitalize()} scan queued",
            )
        )

    # Persist all pending rows before the tasks start reading them.
    db.commit()

    validator_names = ", ".join(vtype.value for vtype in request.validator_types)
    return MultiScanJobResponse(
        scans=scan_jobs,
        message=f"Scans queued for: {validator_names}",
        status_url="/admin/code-quality/scans/running",
    )
|
||||
|
||||
|
||||
@router.get("/scans/{scan_id}/status", response_model=ScanResponse)
async def get_scan_status(
    scan_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """
    Get status of a specific scan.

    Intended for polling until the scan completes.
    """
    record = (
        db.query(ArchitectureScan)
        .filter(ArchitectureScan.id == scan_id)
        .first()
    )
    if record is None:
        raise ScanNotFoundException(scan_id)
    return _scan_to_response(record)
|
||||
|
||||
|
||||
@router.get("/scans/running", response_model=list[ScanResponse])
async def get_running_scans(
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """
    Get all currently running scans.

    A scan counts as running while its status is 'pending' or 'running'.
    """
    active_statuses = ["pending", "running"]
    query = (
        db.query(ArchitectureScan)
        .filter(ArchitectureScan.status.in_(active_statuses))
        .order_by(ArchitectureScan.timestamp.desc())
    )
    return [_scan_to_response(record) for record in query.all()]
|
||||
|
||||
|
||||
@router.get("/scans", response_model=list[ScanResponse])
async def list_scans(
    limit: int = Query(30, ge=1, le=100, description="Number of scans to return"),
    validator_type: ValidatorType | None = Query(
        None, description="Filter by validator type"
    ),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """
    Get scan history.

    Returns recent scans for trend analysis.
    Optionally filter by validator type.
    """
    scans = code_quality_service.get_scan_history(
        db, limit=limit, validator_type=validator_type.value if validator_type else None
    )
    # Shared converter keeps serialization consistent with the status endpoints.
    return [_scan_to_response(scan) for scan in scans]
|
||||
|
||||
|
||||
@router.get("/violations", response_model=ViolationListResponse)
|
||||
@@ -173,8 +310,11 @@ async def list_violations(
|
||||
scan_id: int | None = Query(
|
||||
None, description="Filter by scan ID (defaults to latest)"
|
||||
),
|
||||
validator_type: ValidatorType | None = Query(
|
||||
None, description="Filter by validator type"
|
||||
),
|
||||
severity: str | None = Query(
|
||||
None, description="Filter by severity (error, warning)"
|
||||
None, description="Filter by severity (error, warning, info)"
|
||||
),
|
||||
status: str | None = Query(
|
||||
None, description="Filter by status (open, assigned, resolved, ignored)"
|
||||
@@ -191,13 +331,15 @@ async def list_violations(
|
||||
"""
|
||||
Get violations with filtering and pagination
|
||||
|
||||
Returns violations from latest scan by default.
|
||||
Returns violations from latest scan(s) by default.
|
||||
Filter by validator_type to get violations from a specific validator.
|
||||
"""
|
||||
offset = (page - 1) * page_size
|
||||
|
||||
violations, total = code_quality_service.get_violations(
|
||||
db,
|
||||
scan_id=scan_id,
|
||||
validator_type=validator_type.value if validator_type else None,
|
||||
severity=severity,
|
||||
status=status,
|
||||
rule_id=rule_id,
|
||||
@@ -213,6 +355,7 @@ async def list_violations(
|
||||
ViolationResponse(
|
||||
id=v.id,
|
||||
scan_id=v.scan_id,
|
||||
validator_type=v.validator_type,
|
||||
rule_id=v.rule_id,
|
||||
rule_name=v.rule_name,
|
||||
severity=v.severity,
|
||||
@@ -280,6 +423,7 @@ async def get_violation(
|
||||
return ViolationDetailResponse(
|
||||
id=violation.id,
|
||||
scan_id=violation.scan_id,
|
||||
validator_type=violation.validator_type,
|
||||
rule_id=violation.rule_id,
|
||||
rule_name=violation.rule_name,
|
||||
severity=violation.severity,
|
||||
@@ -429,7 +573,11 @@ async def add_comment(
|
||||
|
||||
@router.get("/stats", response_model=CodeQualityDashboardStatsResponse)
|
||||
async def get_dashboard_stats(
|
||||
db: Session = Depends(get_db), current_user: User = Depends(get_current_admin_api)
|
||||
validator_type: ValidatorType | None = Query(
|
||||
None, description="Filter by validator type (returns combined stats if not specified)"
|
||||
),
|
||||
db: Session = Depends(get_db),
|
||||
current_user: User = Depends(get_current_admin_api),
|
||||
):
|
||||
"""
|
||||
Get dashboard statistics
|
||||
@@ -440,7 +588,32 @@ async def get_dashboard_stats(
|
||||
- Trend data (last 7 scans)
|
||||
- Top violating files
|
||||
- Violations by rule and module
|
||||
- Per-validator breakdown
|
||||
|
||||
When validator_type is specified, returns stats for that type only.
|
||||
When not specified, returns combined stats across all validators.
|
||||
"""
|
||||
stats = code_quality_service.get_dashboard_stats(db)
|
||||
stats = code_quality_service.get_dashboard_stats(
|
||||
db, validator_type=validator_type.value if validator_type else None
|
||||
)
|
||||
|
||||
return CodeQualityDashboardStatsResponse(**stats)
|
||||
|
||||
|
||||
@router.get("/validator-types")
async def get_validator_types(
    current_user: User = Depends(get_current_admin_api),
):
    """
    Get list of available validator types

    Returns the supported validator types for filtering.
    """
    descriptions = {
        "architecture": "Architectural patterns and code organization rules",
        "security": "Security vulnerabilities and best practices",
        "performance": "Performance issues and optimizations",
    }
    return {
        "validator_types": VALID_VALIDATOR_TYPES,
        "descriptions": descriptions,
    }
|
||||
|
||||
@@ -9,6 +9,7 @@ from datetime import UTC, datetime
|
||||
from sqlalchemy import case, desc, func
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from models.database.architecture_scan import ArchitectureScan
|
||||
from models.database.marketplace_import_job import MarketplaceImportJob
|
||||
from models.database.test_run import TestRun
|
||||
|
||||
@@ -124,6 +125,69 @@ class BackgroundTasksService:
|
||||
"avg_duration": round(stats.avg_duration or 0, 1),
|
||||
}
|
||||
|
||||
def get_code_quality_scans(
    self, db: Session, status: str | None = None, limit: int = 50
) -> list[ArchitectureScan]:
    """Get code quality scans with optional status filter"""
    scans_query = db.query(ArchitectureScan)
    if status:
        scans_query = scans_query.filter(ArchitectureScan.status == status)
    # Newest first, capped at `limit` rows.
    scans_query = scans_query.order_by(desc(ArchitectureScan.timestamp)).limit(limit)
    return scans_query.all()
|
||||
|
||||
def get_running_scans(self, db: Session) -> list[ArchitectureScan]:
    """Get currently running code quality scans"""
    # Queued ("pending") scans count as running for dashboard purposes.
    in_flight = ["pending", "running"]
    query = db.query(ArchitectureScan).filter(
        ArchitectureScan.status.in_(in_flight)
    )
    return query.all()
|
||||
|
||||
def get_scan_stats(self, db: Session) -> dict:
    """Get code quality scan statistics.

    Returns a dict with keys: total, running, completed, failed, today,
    avg_duration. Status buckets are computed in one aggregate query via
    conditional sums over ArchitectureScan.status.
    """
    # Midnight (UTC) today — lower bound for the "today" count.
    today_start = datetime.now(UTC).replace(
        hour=0, minute=0, second=0, microsecond=0
    )

    # Single aggregate row: total count plus one conditional SUM per bucket.
    stats = db.query(
        func.count(ArchitectureScan.id).label("total"),
        # "running" bucket also includes queued ("pending") scans.
        func.sum(
            case(
                (ArchitectureScan.status.in_(["pending", "running"]), 1), else_=0
            )
        ).label("running"),
        func.sum(
            case(
                (
                    ArchitectureScan.status.in_(
                        ["completed", "completed_with_warnings"]
                    ),
                    1,
                ),
                else_=0,
            )
        ).label("completed"),
        func.sum(
            case((ArchitectureScan.status == "failed", 1), else_=0)
        ).label("failed"),
        # NOTE(review): averaged over all rows regardless of status; SQL AVG
        # skips NULL durations — confirm this matches dashboard expectations.
        func.avg(ArchitectureScan.duration_seconds).label("avg_duration"),
    ).first()

    # Separate count of scans whose timestamp falls on or after midnight.
    today_count = (
        db.query(func.count(ArchitectureScan.id))
        .filter(ArchitectureScan.timestamp >= today_start)
        .scalar()
        or 0
    )

    # SUM/AVG yield None on an empty table — coalesce to zeros.
    return {
        "total": stats.total or 0,
        "running": stats.running or 0,
        "completed": stats.completed or 0,
        "failed": stats.failed or 0,
        "today": today_count,
        "avg_duration": round(stats.avg_duration or 0, 1),
    }
|
||||
|
||||
|
||||
# Singleton instance
|
||||
background_tasks_service = BackgroundTasksService()
|
||||
|
||||
217
app/tasks/code_quality_tasks.py
Normal file
217
app/tasks/code_quality_tasks.py
Normal file
@@ -0,0 +1,217 @@
|
||||
# app/tasks/code_quality_tasks.py
|
||||
"""Background tasks for code quality scans."""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import subprocess
|
||||
from datetime import UTC, datetime
|
||||
|
||||
from app.core.database import SessionLocal
|
||||
from app.services.admin_notification_service import admin_notification_service
|
||||
from models.database.architecture_scan import ArchitectureScan, ArchitectureViolation
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Validator type constants
VALIDATOR_ARCHITECTURE = "architecture"
VALIDATOR_SECURITY = "security"
VALIDATOR_PERFORMANCE = "performance"

# Map validator types to their scripts
VALIDATOR_SCRIPTS = {
    VALIDATOR_ARCHITECTURE: "scripts/validate_architecture.py",
    VALIDATOR_SECURITY: "scripts/validate_security.py",
    VALIDATOR_PERFORMANCE: "scripts/validate_performance.py",
}

# Human-readable names
VALIDATOR_NAMES = {
    VALIDATOR_ARCHITECTURE: "Architecture",
    VALIDATOR_SECURITY: "Security",
    VALIDATOR_PERFORMANCE: "Performance",
}

# Derived from VALIDATOR_SCRIPTS so the accepted-type list can never drift
# out of sync with the script mapping (dicts preserve insertion order).
VALID_VALIDATOR_TYPES = list(VALIDATOR_SCRIPTS)
|
||||
|
||||
|
||||
def _get_git_commit_hash() -> str | None:
|
||||
"""Get current git commit hash"""
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["git", "rev-parse", "HEAD"],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=5,
|
||||
)
|
||||
if result.returncode == 0:
|
||||
return result.stdout.strip()[:40]
|
||||
except Exception:
|
||||
pass
|
||||
return None
|
||||
|
||||
|
||||
async def execute_code_quality_scan(scan_id: int):
    """
    Background task to execute a code quality scan.

    This task:
    1. Gets the scan record from DB
    2. Updates status to 'running'
    3. Runs the validator script
    4. Parses JSON output and creates violation records
    5. Updates scan with results and status 'completed' or 'failed'

    Opens its own session (runs outside the request lifecycle) and commits
    intermediate progress so pollers see status updates while the scan runs.

    Args:
        scan_id: ID of the ArchitectureScan record
    """
    # Own session: this runs after the triggering request has finished.
    db = SessionLocal()
    scan = None

    try:
        # Get the scan record
        scan = db.query(ArchitectureScan).filter(ArchitectureScan.id == scan_id).first()
        if not scan:
            logger.error(f"Code quality scan {scan_id} not found")
            return

        validator_type = scan.validator_type
        if validator_type not in VALID_VALIDATOR_TYPES:
            raise ValueError(f"Invalid validator type: {validator_type}")

        script_path = VALIDATOR_SCRIPTS[validator_type]
        validator_name = VALIDATOR_NAMES[validator_type]

        # Update status to running (committed immediately so pollers see it)
        scan.status = "running"
        scan.started_at = datetime.now(UTC)
        scan.progress_message = f"Running {validator_name} validator..."
        scan.git_commit_hash = _get_git_commit_hash()
        db.commit()

        logger.info(f"Starting {validator_name} scan (scan_id={scan_id})")

        # Run validator with JSON output
        # NOTE(review): bare "python" resolves via PATH — consider
        # sys.executable to guarantee the current interpreter/venv.
        start_time = datetime.now(UTC)
        try:
            result = subprocess.run(
                ["python", script_path, "--json"],
                capture_output=True,
                text=True,
                timeout=600,  # 10 minute timeout
            )
        except subprocess.TimeoutExpired:
            logger.error(f"{validator_name} scan {scan_id} timed out after 10 minutes")
            scan.status = "failed"
            scan.error_message = "Scan timed out after 10 minutes"
            scan.completed_at = datetime.now(UTC)
            db.commit()
            return

        duration = (datetime.now(UTC) - start_time).total_seconds()

        # Update progress
        scan.progress_message = "Parsing results..."
        db.commit()

        # Parse JSON output (get only the JSON part, skip progress messages)
        # Validators may print human-readable lines before the JSON payload;
        # scan forward to the first line starting with "{".
        try:
            lines = result.stdout.strip().split("\n")
            json_start = -1
            for i, line in enumerate(lines):
                if line.strip().startswith("{"):
                    json_start = i
                    break

            if json_start == -1:
                raise ValueError("No JSON output found in validator output")

            json_output = "\n".join(lines[json_start:])
            data = json.loads(json_output)
        except (json.JSONDecodeError, ValueError) as e:
            # Parse failure is terminal for this scan: record it and stop.
            logger.error(f"Failed to parse {validator_name} validator output: {e}")
            logger.error(f"Stdout: {result.stdout[:1000]}")
            logger.error(f"Stderr: {result.stderr[:1000]}")
            scan.status = "failed"
            scan.error_message = f"Failed to parse validator output: {e}"
            scan.completed_at = datetime.now(UTC)
            scan.duration_seconds = duration
            db.commit()
            return

        # Update progress
        scan.progress_message = "Storing violations..."
        db.commit()

        # Create violation records (one row per reported violation; missing
        # fields fall back to neutral defaults)
        violations_data = data.get("violations", [])
        logger.info(f"Creating {len(violations_data)} {validator_name} violation records")

        for v in violations_data:
            violation = ArchitectureViolation(
                scan_id=scan.id,
                validator_type=validator_type,
                rule_id=v.get("rule_id", "UNKNOWN"),
                rule_name=v.get("rule_name", "Unknown Rule"),
                severity=v.get("severity", "warning"),
                file_path=v.get("file_path", ""),
                line_number=v.get("line_number", 0),
                message=v.get("message", ""),
                context=v.get("context", ""),
                suggestion=v.get("suggestion", ""),
                status="open",
            )
            db.add(violation)

        # Update scan with results
        scan.total_files = data.get("files_checked", 0)
        scan.total_violations = data.get("total_violations", len(violations_data))
        scan.errors = data.get("errors", 0)
        scan.warnings = data.get("warnings", 0)
        scan.duration_seconds = duration
        scan.completed_at = datetime.now(UTC)
        scan.progress_message = None

        # Set final status based on results
        # NOTE(review): error-count > 0 maps to "completed_with_warnings" —
        # confirm this naming is intentional (stats service counts both
        # "completed" and "completed_with_warnings" as completed).
        if scan.errors > 0:
            scan.status = "completed_with_warnings"
        else:
            scan.status = "completed"

        db.commit()

        logger.info(
            f"{validator_name} scan {scan_id} completed: "
            f"files={scan.total_files}, violations={scan.total_violations}, "
            f"errors={scan.errors}, warnings={scan.warnings}, "
            f"duration={duration:.1f}s"
        )

    except Exception as e:
        # Catch-all boundary for a detached background task: mark the scan
        # failed and surface the failure to admins rather than dying silently.
        logger.error(f"Code quality scan {scan_id} failed: {e}", exc_info=True)
        if scan is not None:
            try:
                scan.status = "failed"
                scan.error_message = str(e)[:500]  # Truncate long errors
                scan.completed_at = datetime.now(UTC)
                scan.progress_message = None

                # Create admin notification for scan failure
                admin_notification_service.create_notification(
                    db=db,
                    title="Code Quality Scan Failed",
                    message=f"{VALIDATOR_NAMES.get(scan.validator_type, 'Unknown')} scan failed: {str(e)[:200]}",
                    notification_type="error",
                    category="code_quality",
                    action_url="/admin/code-quality",
                )

                db.commit()
            except Exception as commit_error:
                # Even the failure bookkeeping failed — roll back and log.
                logger.error(f"Failed to update scan status: {commit_error}")
                db.rollback()
    finally:
        # NOTE(review): the hasattr/callable guard looks redundant — a
        # SessionLocal() session always exposes close(); confirm and simplify.
        if hasattr(db, "close") and callable(db.close):
            try:
                db.close()
            except Exception as close_error:
                logger.error(f"Error closing database session: {close_error}")
|
||||
@@ -1,7 +1,7 @@
|
||||
{# app/templates/admin/code-quality-dashboard.html #}
|
||||
{% extends "admin/base.html" %}
|
||||
{% from 'shared/macros/alerts.html' import loading_state, error_state, alert_dynamic %}
|
||||
{% from 'shared/macros/headers.html' import page_header_flex, refresh_button, action_button %}
|
||||
{% from 'shared/macros/headers.html' import page_header_flex, refresh_button %}
|
||||
|
||||
{% block title %}Code Quality Dashboard{% endblock %}
|
||||
|
||||
@@ -12,9 +12,46 @@
|
||||
{% endblock %}
|
||||
|
||||
{% block content %}
|
||||
{% call page_header_flex(title='Code Quality Dashboard', subtitle='Architecture validation and technical debt tracking') %}
|
||||
{% call page_header_flex(title='Code Quality Dashboard', subtitle='Unified code quality tracking: architecture, security, and performance') %}
|
||||
{{ refresh_button(variant='secondary') }}
|
||||
{{ action_button('Run Scan', 'Scanning...', 'scanning', 'runScan()', icon='search') }}
|
||||
<!-- Scan Dropdown -->
|
||||
<div x-data="{ scanDropdownOpen: false }" class="relative">
|
||||
<button @click="scanDropdownOpen = !scanDropdownOpen"
|
||||
:disabled="scanning"
|
||||
class="flex items-center px-4 py-2 text-sm font-medium leading-5 text-white transition-colors duration-150 bg-purple-600 border border-transparent rounded-lg hover:bg-purple-700 focus:outline-none focus:shadow-outline-purple disabled:opacity-50">
|
||||
<template x-if="!scanning">
|
||||
<span class="flex items-center">
|
||||
<span x-html="$icon('search', 'w-4 h-4 mr-2')"></span>
|
||||
Run Scan
|
||||
<span x-html="$icon('chevron-down', 'w-4 h-4 ml-1')"></span>
|
||||
</span>
|
||||
</template>
|
||||
<template x-if="scanning">
|
||||
<span>Scanning...</span>
|
||||
</template>
|
||||
</button>
|
||||
<div x-show="scanDropdownOpen"
|
||||
@click.away="scanDropdownOpen = false"
|
||||
x-transition
|
||||
class="absolute right-0 mt-2 w-48 bg-white dark:bg-gray-800 rounded-lg shadow-lg z-10 border border-gray-200 dark:border-gray-700">
|
||||
<button @click="runScan('all'); scanDropdownOpen = false"
|
||||
class="block w-full px-4 py-2 text-sm text-left text-gray-700 dark:text-gray-300 hover:bg-gray-100 dark:hover:bg-gray-700 rounded-t-lg">
|
||||
Run All Validators
|
||||
</button>
|
||||
<button @click="runScan('architecture'); scanDropdownOpen = false"
|
||||
class="block w-full px-4 py-2 text-sm text-left text-gray-700 dark:text-gray-300 hover:bg-gray-100 dark:hover:bg-gray-700">
|
||||
Architecture Only
|
||||
</button>
|
||||
<button @click="runScan('security'); scanDropdownOpen = false"
|
||||
class="block w-full px-4 py-2 text-sm text-left text-gray-700 dark:text-gray-300 hover:bg-gray-100 dark:hover:bg-gray-700">
|
||||
Security Only
|
||||
</button>
|
||||
<button @click="runScan('performance'); scanDropdownOpen = false"
|
||||
class="block w-full px-4 py-2 text-sm text-left text-gray-700 dark:text-gray-300 hover:bg-gray-100 dark:hover:bg-gray-700 rounded-b-lg">
|
||||
Performance Only
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
{% endcall %}
|
||||
|
||||
{{ loading_state('Loading dashboard...') }}
|
||||
@@ -23,8 +60,79 @@
|
||||
|
||||
{{ alert_dynamic(type='success', message_var='successMessage', show_condition='successMessage') }}
|
||||
|
||||
<!-- Scan Progress Alert -->
|
||||
<div x-show="scanning && scanProgress"
|
||||
x-transition
|
||||
class="flex items-center p-4 mb-4 text-sm text-blue-800 rounded-lg bg-blue-50 dark:bg-gray-800 dark:text-blue-400"
|
||||
role="alert">
|
||||
<svg class="animate-spin -ml-1 mr-3 h-5 w-5 text-blue-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24">
|
||||
<circle class="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" stroke-width="4"></circle>
|
||||
<path class="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path>
|
||||
</svg>
|
||||
<span x-text="scanProgress">Running scan...</span>
|
||||
<span class="ml-2 text-xs text-gray-500 dark:text-gray-400">(You can navigate away - scan runs in background)</span>
|
||||
</div>
|
||||
|
||||
<!-- Dashboard Content -->
|
||||
<div x-show="!loading && !error">
|
||||
<!-- Validator Type Tabs -->
|
||||
<div class="mb-6">
|
||||
<div class="flex flex-wrap space-x-1 bg-gray-100 dark:bg-gray-700 rounded-lg p-1 inline-flex">
|
||||
<button @click="selectValidator('all')"
|
||||
:class="selectedValidator === 'all' ? 'bg-white dark:bg-gray-800 text-purple-600 dark:text-purple-400 shadow-sm' : 'text-gray-600 dark:text-gray-400 hover:text-gray-800 dark:hover:text-gray-200'"
|
||||
class="px-4 py-2 rounded-md text-sm font-medium transition-colors duration-150">
|
||||
All
|
||||
</button>
|
||||
<button @click="selectValidator('architecture')"
|
||||
:class="selectedValidator === 'architecture' ? 'bg-white dark:bg-gray-800 text-purple-600 dark:text-purple-400 shadow-sm' : 'text-gray-600 dark:text-gray-400 hover:text-gray-800 dark:hover:text-gray-200'"
|
||||
class="px-4 py-2 rounded-md text-sm font-medium transition-colors duration-150">
|
||||
Architecture
|
||||
</button>
|
||||
<button @click="selectValidator('security')"
|
||||
:class="selectedValidator === 'security' ? 'bg-white dark:bg-gray-800 text-red-600 dark:text-red-400 shadow-sm' : 'text-gray-600 dark:text-gray-400 hover:text-gray-800 dark:hover:text-gray-200'"
|
||||
class="px-4 py-2 rounded-md text-sm font-medium transition-colors duration-150">
|
||||
Security
|
||||
</button>
|
||||
<button @click="selectValidator('performance')"
|
||||
:class="selectedValidator === 'performance' ? 'bg-white dark:bg-gray-800 text-yellow-600 dark:text-yellow-400 shadow-sm' : 'text-gray-600 dark:text-gray-400 hover:text-gray-800 dark:hover:text-gray-200'"
|
||||
class="px-4 py-2 rounded-md text-sm font-medium transition-colors duration-150">
|
||||
Performance
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Per-Validator Summary (shown when "All" is selected) -->
|
||||
<div x-show="selectedValidator === 'all' && stats.by_validator && Object.keys(stats.by_validator).length > 0" class="grid gap-4 mb-6 md:grid-cols-3">
|
||||
<template x-for="vtype in ['architecture', 'security', 'performance']" :key="vtype">
|
||||
<div class="p-4 bg-white rounded-lg shadow-xs dark:bg-gray-800 cursor-pointer hover:ring-2 hover:ring-purple-500"
|
||||
@click="selectValidator(vtype)">
|
||||
<div class="flex items-center justify-between">
|
||||
<div>
|
||||
<p class="text-sm font-medium text-gray-600 dark:text-gray-400 capitalize" x-text="vtype"></p>
|
||||
<p class="text-xl font-semibold text-gray-700 dark:text-gray-200"
|
||||
x-text="stats.by_validator[vtype]?.total_violations || 0"></p>
|
||||
</div>
|
||||
<div class="p-2 rounded-full"
|
||||
:class="{
|
||||
'bg-purple-100 text-purple-600 dark:bg-purple-900 dark:text-purple-400': vtype === 'architecture',
|
||||
'bg-red-100 text-red-600 dark:bg-red-900 dark:text-red-400': vtype === 'security',
|
||||
'bg-yellow-100 text-yellow-600 dark:bg-yellow-900 dark:text-yellow-400': vtype === 'performance'
|
||||
}">
|
||||
<span x-html="vtype === 'architecture' ? $icon('cube', 'w-5 h-5') : (vtype === 'security' ? $icon('shield-check', 'w-5 h-5') : $icon('lightning-bolt', 'w-5 h-5'))"></span>
|
||||
</div>
|
||||
</div>
|
||||
<div class="mt-2 flex space-x-3 text-xs">
|
||||
<span class="text-red-600 dark:text-red-400">
|
||||
<span x-text="stats.by_validator[vtype]?.errors || 0"></span> errors
|
||||
</span>
|
||||
<span class="text-yellow-600 dark:text-yellow-400">
|
||||
<span x-text="stats.by_validator[vtype]?.warnings || 0"></span> warnings
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
</div>
|
||||
|
||||
<!-- Stats Cards -->
|
||||
<div class="grid gap-6 mb-8 md:grid-cols-2 xl:grid-cols-4">
|
||||
<!-- Card: Total Violations -->
|
||||
@@ -192,7 +300,15 @@
|
||||
<template x-if="stats.by_rule && Object.keys(stats.by_rule).length > 0">
|
||||
<template x-for="[rule_id, count] in Object.entries(stats.by_rule)" :key="rule_id">
|
||||
<div class="flex justify-between items-center text-sm">
|
||||
<span class="text-gray-700 dark:text-gray-300" x-text="rule_id"></span>
|
||||
<span class="text-gray-700 dark:text-gray-300 flex items-center">
|
||||
<span class="inline-block w-2 h-2 rounded-full mr-2"
|
||||
:class="{
|
||||
'bg-purple-500': rule_id.startsWith('API') || rule_id.startsWith('SVC') || rule_id.startsWith('FE'),
|
||||
'bg-red-500': rule_id.startsWith('SEC'),
|
||||
'bg-yellow-500': rule_id.startsWith('PERF')
|
||||
}"></span>
|
||||
<span x-text="rule_id"></span>
|
||||
</span>
|
||||
<span class="font-semibold text-gray-900 dark:text-gray-100" x-text="count"></span>
|
||||
</div>
|
||||
</template>
|
||||
@@ -231,17 +347,17 @@
|
||||
Quick Actions
|
||||
</h4>
|
||||
<div class="flex flex-wrap gap-3">
|
||||
<a href="/admin/code-quality/violations"
|
||||
<a :href="'/admin/code-quality/violations' + (selectedValidator !== 'all' ? '?validator_type=' + selectedValidator : '')"
|
||||
class="flex items-center px-4 py-2 text-sm font-medium leading-5 text-white transition-colors duration-150 bg-purple-600 border border-transparent rounded-lg hover:bg-purple-700 focus:outline-none focus:shadow-outline-purple">
|
||||
<span x-html="$icon('clipboard-list', 'w-4 h-4 mr-2')"></span>
|
||||
View All Violations
|
||||
</a>
|
||||
<a href="/admin/code-quality/violations?status=open"
|
||||
<a :href="'/admin/code-quality/violations?status=open' + (selectedValidator !== 'all' ? '&validator_type=' + selectedValidator : '')"
|
||||
class="flex items-center px-4 py-2 text-sm font-medium leading-5 text-gray-700 dark:text-gray-300 transition-colors duration-150 bg-white dark:bg-gray-700 border border-gray-300 dark:border-gray-600 rounded-lg hover:bg-gray-50 dark:hover:bg-gray-600 focus:outline-none focus:shadow-outline-gray">
|
||||
<span x-html="$icon('folder-open', 'w-4 h-4 mr-2')"></span>
|
||||
Open Violations
|
||||
</a>
|
||||
<a href="/admin/code-quality/violations?severity=error"
|
||||
<a :href="'/admin/code-quality/violations?severity=error' + (selectedValidator !== 'all' ? '&validator_type=' + selectedValidator : '')"
|
||||
class="flex items-center px-4 py-2 text-sm font-medium leading-5 text-gray-700 dark:text-gray-300 transition-colors duration-150 bg-white dark:bg-gray-700 border border-gray-300 dark:border-gray-600 rounded-lg hover:bg-gray-50 dark:hover:bg-gray-600 focus:outline-none focus:shadow-outline-gray">
|
||||
<span x-html="$icon('exclamation', 'w-4 h-4 mr-2')"></span>
|
||||
Errors Only
|
||||
@@ -253,6 +369,9 @@
|
||||
<!-- Last Scan Info -->
|
||||
<div x-show="stats.last_scan" class="text-sm text-gray-600 dark:text-gray-400 text-center">
|
||||
Last scan: <span x-text="stats.last_scan ? new Date(stats.last_scan).toLocaleString() : 'Never'"></span>
|
||||
<template x-if="selectedValidator !== 'all'">
|
||||
<span class="ml-2">(<span class="capitalize" x-text="selectedValidator"></span> validator)</span>
|
||||
</template>
|
||||
</div>
|
||||
</div>
|
||||
{% endblock %}
|
||||
|
||||
Reference in New Issue
Block a user