refactor: convert legacy services/tasks to re-exports
Legacy service and task files now re-export from module locations:

app/services/:
- stats_service.py -> app.modules.analytics.services
- usage_service.py -> app.modules.analytics.services

app/tasks/celery_tasks/:
- code_quality.py -> app.modules.dev_tools.tasks
- test_runner.py -> app.modules.dev_tools.tasks

Maintains backwards compatibility while actual code lives in self-contained modules.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
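The point of the re-export pattern is that existing imports keep resolving to the very same objects as the canonical ones. A minimal sketch of that guarantee, assuming the dev_tools package __init__ re-exports the task as the docstrings below suggest (this check is illustrative, not part of the commit):

    # Hypothetical sanity check: legacy and canonical paths should yield the same task object.
    from app.modules.dev_tools.tasks import execute_code_quality_scan as canonical
    from app.tasks.celery_tasks.code_quality import execute_code_quality_scan as legacy

    assert legacy is canonical  # the legacy module is now a thin re-export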
@@ -1,236 +1,31 @@
 # app/tasks/celery_tasks/code_quality.py
 """
-Celery tasks for code quality scans.
-
-Wraps the existing execute_code_quality_scan function for Celery execution.
+Celery tasks for code quality scans - LEGACY LOCATION
+
+This file exists for backward compatibility.
+The canonical location is now: app/modules/dev_tools/tasks/code_quality.py
+
+All imports should use the new location:
+    from app.modules.dev_tools.tasks import execute_code_quality_scan
 """

-import json
-import logging
-import subprocess
-from datetime import UTC, datetime
-
-from app.core.celery_config import celery_app
-from app.services.admin_notification_service import admin_notification_service
-from app.tasks.celery_tasks.base import DatabaseTask
-from models.database.architecture_scan import ArchitectureScan, ArchitectureViolation
-
-logger = logging.getLogger(__name__)
-
-# Validator type constants
-VALIDATOR_ARCHITECTURE = "architecture"
-VALIDATOR_SECURITY = "security"
-VALIDATOR_PERFORMANCE = "performance"
-
-VALID_VALIDATOR_TYPES = [VALIDATOR_ARCHITECTURE, VALIDATOR_SECURITY, VALIDATOR_PERFORMANCE]
-
-# Map validator types to their scripts
-VALIDATOR_SCRIPTS = {
-    VALIDATOR_ARCHITECTURE: "scripts/validate_architecture.py",
-    VALIDATOR_SECURITY: "scripts/validate_security.py",
-    VALIDATOR_PERFORMANCE: "scripts/validate_performance.py",
-}
-
-# Human-readable names
-VALIDATOR_NAMES = {
-    VALIDATOR_ARCHITECTURE: "Architecture",
-    VALIDATOR_SECURITY: "Security",
-    VALIDATOR_PERFORMANCE: "Performance",
-}
-
-
-def _get_git_commit_hash() -> str | None:
-    """Get current git commit hash."""
-    try:
-        result = subprocess.run(
-            ["git", "rev-parse", "HEAD"],
-            capture_output=True,
-            text=True,
-            timeout=5,
-        )
-        if result.returncode == 0:
-            return result.stdout.strip()[:40]
-    except Exception:
-        pass
-    return None
-
-
-@celery_app.task(
-    bind=True,
-    base=DatabaseTask,
-    name="app.tasks.celery_tasks.code_quality.execute_code_quality_scan",
-    max_retries=1,
-    time_limit=700,  # 11+ minutes hard limit
-    soft_time_limit=600,  # 10 minutes soft limit
-)
-def execute_code_quality_scan(self, scan_id: int):
-    """
-    Celery task to execute a code quality scan.
-
-    This task:
-    1. Gets the scan record from DB
-    2. Updates status to 'running'
-    3. Runs the validator script
-    4. Parses JSON output and creates violation records
-    5. Updates scan with results and status 'completed' or 'failed'
-
-    Args:
-        scan_id: ID of the ArchitectureScan record
-
-    Returns:
-        dict: Scan results summary
-    """
-    with self.get_db() as db:
-        # Get the scan record
-        scan = db.query(ArchitectureScan).filter(ArchitectureScan.id == scan_id).first()
-        if not scan:
-            logger.error(f"Code quality scan {scan_id} not found")
-            return {"error": f"Scan {scan_id} not found"}
-
-        # Store Celery task ID
-        scan.celery_task_id = self.request.id
-
-        validator_type = scan.validator_type
-        if validator_type not in VALID_VALIDATOR_TYPES:
-            scan.status = "failed"
-            scan.error_message = f"Invalid validator type: {validator_type}"
-            db.commit()
-            return {"error": f"Invalid validator type: {validator_type}"}
-
-        script_path = VALIDATOR_SCRIPTS[validator_type]
-        validator_name = VALIDATOR_NAMES[validator_type]
-
-        try:
-            # Update status to running
-            scan.status = "running"
-            scan.started_at = datetime.now(UTC)
-            scan.progress_message = f"Running {validator_name} validator..."
-            scan.git_commit_hash = _get_git_commit_hash()
-            db.commit()
-
-            logger.info(f"Starting {validator_name} scan (scan_id={scan_id})")
-
-            # Run validator with JSON output
-            start_time = datetime.now(UTC)
-            try:
-                result = subprocess.run(
-                    ["python", script_path, "--json"],
-                    capture_output=True,
-                    text=True,
-                    timeout=600,  # 10 minute timeout
-                )
-            except subprocess.TimeoutExpired:
-                logger.error(f"{validator_name} scan {scan_id} timed out after 10 minutes")
-                scan.status = "failed"
-                scan.error_message = "Scan timed out after 10 minutes"
-                scan.completed_at = datetime.now(UTC)
-                db.commit()
-                return {"error": "Scan timed out"}
-
-            duration = (datetime.now(UTC) - start_time).total_seconds()
-
-            # Update progress
-            scan.progress_message = "Parsing results..."
-            db.commit()
-
-            # Parse JSON output
-            try:
-                lines = result.stdout.strip().split("\n")
-                json_start = -1
-                for i, line in enumerate(lines):
-                    if line.strip().startswith("{"):
-                        json_start = i
-                        break
-
-                if json_start == -1:
-                    raise ValueError("No JSON output found in validator output")
-
-                json_output = "\n".join(lines[json_start:])
-                data = json.loads(json_output)
-            except (json.JSONDecodeError, ValueError) as e:
-                logger.error(f"Failed to parse {validator_name} validator output: {e}")
-                scan.status = "failed"
-                scan.error_message = f"Failed to parse validator output: {e}"
-                scan.completed_at = datetime.now(UTC)
-                scan.duration_seconds = duration
-                db.commit()
-                return {"error": str(e)}
-
-            # Update progress
-            scan.progress_message = "Storing violations..."
-            db.commit()
-
-            # Create violation records
-            violations_data = data.get("violations", [])
-            logger.info(f"Creating {len(violations_data)} {validator_name} violation records")
-
-            for v in violations_data:
-                violation = ArchitectureViolation(
-                    scan_id=scan.id,
-                    validator_type=validator_type,
-                    rule_id=v.get("rule_id", "UNKNOWN"),
-                    rule_name=v.get("rule_name", "Unknown Rule"),
-                    severity=v.get("severity", "warning"),
-                    file_path=v.get("file_path", ""),
-                    line_number=v.get("line_number", 0),
-                    message=v.get("message", ""),
-                    context=v.get("context", ""),
-                    suggestion=v.get("suggestion", ""),
-                    status="open",
-                )
-                db.add(violation)
-
-            # Update scan with results
-            scan.total_files = data.get("files_checked", 0)
-            scan.total_violations = data.get("total_violations", len(violations_data))
-            scan.errors = data.get("errors", 0)
-            scan.warnings = data.get("warnings", 0)
-            scan.duration_seconds = duration
-            scan.completed_at = datetime.now(UTC)
-            scan.progress_message = None
-
-            # Set final status based on results
-            if scan.errors > 0:
-                scan.status = "completed_with_warnings"
-            else:
-                scan.status = "completed"
-
-            db.commit()
-
-            logger.info(
-                f"{validator_name} scan {scan_id} completed: "
-                f"files={scan.total_files}, violations={scan.total_violations}, "
-                f"errors={scan.errors}, warnings={scan.warnings}, "
-                f"duration={duration:.1f}s"
-            )
-
-            return {
-                "scan_id": scan_id,
-                "validator_type": validator_type,
-                "status": scan.status,
-                "total_files": scan.total_files,
-                "total_violations": scan.total_violations,
-                "errors": scan.errors,
-                "warnings": scan.warnings,
-                "duration_seconds": duration,
-            }
-
-        except Exception as e:
-            logger.error(f"Code quality scan {scan_id} failed: {e}", exc_info=True)
-            scan.status = "failed"
-            scan.error_message = str(e)[:500]
-            scan.completed_at = datetime.now(UTC)
-            scan.progress_message = None
-
-            # Create admin notification for scan failure
-            admin_notification_service.create_notification(
-                db=db,
-                title="Code Quality Scan Failed",
-                message=f"{VALIDATOR_NAMES.get(scan.validator_type, 'Unknown')} scan failed: {str(e)[:200]}",
-                notification_type="error",
-                category="code_quality",
-                action_url="/admin/code-quality",
-            )
-
-            db.commit()
-            raise  # Re-raise for Celery
+# Re-export from canonical location for backward compatibility
+from app.modules.dev_tools.tasks.code_quality import (
+    execute_code_quality_scan,
+    VALIDATOR_ARCHITECTURE,
+    VALIDATOR_SECURITY,
+    VALIDATOR_PERFORMANCE,
+    VALID_VALIDATOR_TYPES,
+    VALIDATOR_SCRIPTS,
+    VALIDATOR_NAMES,
+)
+
+__all__ = [
+    "execute_code_quality_scan",
+    "VALIDATOR_ARCHITECTURE",
+    "VALIDATOR_SECURITY",
+    "VALIDATOR_PERFORMANCE",
+    "VALID_VALIDATOR_TYPES",
+    "VALIDATOR_SCRIPTS",
+    "VALIDATOR_NAMES",
+]
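Per the new docstring, callers should enqueue the scan through the canonical module. A hedged sketch of that usage (the scan id value is a placeholder; it must reference an existing ArchitectureScan row):

    from app.modules.dev_tools.tasks import execute_code_quality_scan

    # Queue a scan asynchronously; 123 is a placeholder ArchitectureScan id.
    async_result = execute_code_quality_scan.delay(123)
    print(async_result.id)  # Celery task id, which the task stores on the scan record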
@@ -1,83 +1,15 @@
 # app/tasks/celery_tasks/test_runner.py
 """
-Celery tasks for test execution.
-
-Wraps the existing execute_test_run function for Celery execution.
+Celery tasks for test execution - LEGACY LOCATION
+
+This file exists for backward compatibility.
+The canonical location is now: app/modules/dev_tools/tasks/test_runner.py
+
+All imports should use the new location:
+    from app.modules.dev_tools.tasks import execute_test_run
 """

-import logging
-
-from app.core.celery_config import celery_app
-from app.services.test_runner_service import test_runner_service
-from app.tasks.celery_tasks.base import DatabaseTask
-from models.database.test_run import TestRun
-
-logger = logging.getLogger(__name__)
-
-
-@celery_app.task(
-    bind=True,
-    base=DatabaseTask,
-    name="app.tasks.celery_tasks.test_runner.execute_test_run",
-    max_retries=1,
-    time_limit=3600,  # 1 hour hard limit
-    soft_time_limit=3300,  # 55 minutes soft limit
-)
-def execute_test_run(
-    self,
-    run_id: int,
-    test_path: str = "tests",
-    extra_args: list[str] | None = None,
-):
-    """
-    Celery task to execute pytest tests.
-
-    Args:
-        run_id: ID of the TestRun record
-        test_path: Path to tests (relative to project root)
-        extra_args: Additional pytest arguments
-
-    Returns:
-        dict: Test run results summary
-    """
-    with self.get_db() as db:
-        # Get the test run record
-        test_run = db.query(TestRun).filter(TestRun.id == run_id).first()
-        if not test_run:
-            logger.error(f"Test run {run_id} not found")
-            return {"error": f"Test run {run_id} not found"}
-
-        # Store Celery task ID
-        test_run.celery_task_id = self.request.id
-        db.commit()
-
-        try:
-            logger.info(f"Starting test execution: Run {run_id}, Path: {test_path}")
-
-            # Execute the tests
-            test_runner_service._execute_tests(db, test_run, test_path, extra_args)
-            db.commit()
-
-            logger.info(
-                f"Test run {run_id} completed: "
-                f"status={test_run.status}, passed={test_run.passed}, "
-                f"failed={test_run.failed}, duration={test_run.duration_seconds:.1f}s"
-            )
-
-            return {
-                "run_id": run_id,
-                "status": test_run.status,
-                "total_tests": test_run.total_tests,
-                "passed": test_run.passed,
-                "failed": test_run.failed,
-                "errors": test_run.errors,
-                "skipped": test_run.skipped,
-                "coverage_percent": test_run.coverage_percent,
-                "duration_seconds": test_run.duration_seconds,
-            }
-
-        except Exception as e:
-            logger.error(f"Test run {run_id} failed: {e}", exc_info=True)
-            test_run.status = "error"
-            db.commit()
-            raise  # Re-raise for Celery
+# Re-export from canonical location for backward compatibility
+from app.modules.dev_tools.tasks.test_runner import execute_test_run
+
+__all__ = ["execute_test_run"]
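For test runs, existing callers that still import from the legacy path should keep working, since the legacy module now simply re-exports the task. A sketch under that assumption (the run id and pytest arguments are placeholders):

    # Legacy import path, kept alive by the re-export above.
    from app.tasks.celery_tasks.test_runner import execute_test_run

    execute_test_run.delay(42, test_path="tests", extra_args=["-x"])  # 42 is a placeholder TestRun id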