feat: complete dev_tools module self-containment

Migrate dev_tools module to self-contained structure:

- routes/api/ - API endpoints
- models/architecture_scan.py - Architecture scan models
- models/test_run.py - Test run models
- schemas/ - Pydantic schemas
- services/ - Business logic services
- tasks/ - Celery background tasks
- exceptions.py - Module exceptions

Updated definition.py with self-contained paths.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-01-28 22:21:59 +01:00
parent 705d336e19
commit d987274e2c
14 changed files with 1028 additions and 19 deletions

View File

@@ -2,20 +2,45 @@
"""
Dev-Tools Module - Developer tools and utilities.
This is a self-contained internal module providing:
- Code quality scanning (architecture, security, performance validators)
- Violation tracking and assignment
- Test execution and results management
- Component library browser
- Icon browser
- Development utilities
Module Structure:
- models/ - Database models (ArchitectureScan, TestRun, etc.)
- services/ - Business logic (CodeQualityService, TestRunnerService)
- schemas/ - Pydantic DTOs
- tasks/ - Celery background tasks
- routes/ - API and page routes
- exceptions.py - Module-specific exceptions
Routes:
- Admin: Code quality API, test runner API, component/icon pages
- Vendor: None (internal module)
Menu Items:
- Admin: components, icons, code-quality, tests
- Vendor: None
"""
# Use lazy imports to avoid circular import issues when models are loaded early.
# The definition imports from app.modules.base which eventually imports models,
# so an eager top-level import here could create an import cycle.


def __getattr__(name: str):
    """Lazily resolve module attributes (PEP 562) to avoid circular imports.

    Supported attributes:
        dev_tools_module: The dev-tools ModuleDefinition instance.
        get_dev_tools_module_with_routers: Factory that attaches routers.

    Raises:
        AttributeError: For any other attribute name.
    """
    if name == "dev_tools_module":
        from app.modules.dev_tools.definition import dev_tools_module
        return dev_tools_module
    if name == "get_dev_tools_module_with_routers":
        from app.modules.dev_tools.definition import get_dev_tools_module_with_routers
        return get_dev_tools_module_with_routers
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


__all__ = ["dev_tools_module", "get_dev_tools_module_with_routers"]

View File

@@ -3,35 +3,93 @@
Dev-Tools module definition.
Defines the dev-tools module including its features, menu items,
route configurations, and task definitions.
Dev-Tools is an internal module providing:
- Code quality scanning (architecture, security, performance validators)
- Test execution and results management
- Component library browser
- Icon browser
"""
from app.modules.base import ModuleDefinition
from models.database.admin_menu_config import FrontendType
def _get_admin_router():
    """Import and return the admin API router on demand.

    Deferred (function-scope) import so that loading this definition module
    does not trigger the route imports at startup (circular-import guard).
    """
    from app.modules.dev_tools.routes.api.admin import admin_router as router

    return router
# Dev-Tools module definition (shared singleton instance; routers are
# attached later by get_dev_tools_module_with_routers()).
dev_tools_module = ModuleDefinition(
    code="dev-tools",
    name="Developer Tools",
    description=(
        "Internal development tools including code quality scanning, "
        "test execution, component library, and icon browser."
    ),
    version="1.0.0",
    features=[
        "component_library",  # UI component browser
        "icon_browser",  # Icon library browser
        "code_quality",  # Code quality scanning
        "architecture_validation",  # Architecture validator
        "security_validation",  # Security validator
        "performance_validation",  # Performance validator
        "test_runner",  # Test execution
        "violation_management",  # Violation tracking and assignment
    ],
    menu_items={
        FrontendType.ADMIN: [
            "components",  # Component library page
            "icons",  # Icon browser page
            "code-quality",  # Code quality dashboard
            "tests",  # Test runner dashboard
        ],
        FrontendType.VENDOR: [],  # No vendor menu items - internal module
    },
    is_core=False,
    is_internal=True,  # Internal module - admin-only, not customer-facing
    # =========================================================================
    # Self-Contained Module Configuration
    # =========================================================================
    is_self_contained=True,
    services_path="app.modules.dev_tools.services",
    models_path="app.modules.dev_tools.models",
    schemas_path="app.modules.dev_tools.schemas",
    exceptions_path="app.modules.dev_tools.exceptions",
    tasks_path="app.modules.dev_tools.tasks",
    # =========================================================================
    # Scheduled Tasks
    # =========================================================================
    # Note: Code quality and test tasks are on-demand, not scheduled.
    # If scheduled scans are desired, they can be added here:
    # scheduled_tasks=[
    #     ScheduledTask(
    #         name="dev_tools.nightly_code_scan",
    #         task="app.modules.dev_tools.tasks.code_quality.execute_code_quality_scan",
    #         schedule="0 2 * * *",  # Daily at 02:00
    #         options={"queue": "long_running"},
    #     ),
    # ],
    scheduled_tasks=[],
)
def get_dev_tools_module_with_routers() -> ModuleDefinition:
    """
    Get dev-tools module with routers attached.

    This function attaches the routers lazily to avoid circular imports
    during module initialization.

    Returns:
        The shared ``dev_tools_module`` instance with ``admin_router`` set
        and ``vendor_router`` explicitly cleared (internal, admin-only module).
    """
    dev_tools_module.admin_router = _get_admin_router()
    # No vendor router for internal modules
    dev_tools_module.vendor_router = None
    return dev_tools_module


__all__ = ["dev_tools_module", "get_dev_tools_module_with_routers"]

View File

@@ -0,0 +1,79 @@
# app/modules/dev_tools/exceptions.py
"""
Dev-Tools Module Exceptions
Module-specific exceptions for code quality and test runner functionality.
Re-exports code quality exceptions from the legacy location and adds
test runner specific exceptions.
"""
from app.exceptions.base import (
ExternalServiceException,
ResourceNotFoundException,
)
# Re-export code quality exceptions from legacy location
# This avoids circular imports since app/exceptions/__init__.py imports code_quality.py
from app.exceptions.code_quality import (
ViolationNotFoundException,
ScanNotFoundException,
ScanExecutionException,
ScanTimeoutException,
ScanParseException,
ViolationOperationException,
InvalidViolationStatusException,
)
# =============================================================================
# Test Runner Exceptions (defined here as they don't exist in legacy location)
# =============================================================================
class TestRunNotFoundException(ResourceNotFoundException):
    """Raised when a TestRun record cannot be located by id."""

    def __init__(self, run_id: int):
        super().__init__(
            error_code="TEST_RUN_NOT_FOUND",
            resource_type="TestRun",
            identifier=str(run_id),
        )
class TestExecutionException(ExternalServiceException):
    """Raised when a pytest run fails to execute."""

    def __init__(self, reason: str):
        detail = f"Test execution failed: {reason}"
        super().__init__(
            error_code="TEST_EXECUTION_FAILED",
            service_name="TestRunner",
            message=detail,
        )
class TestTimeoutException(ExternalServiceException):
    """Raised when a pytest run exceeds its time limit."""

    def __init__(self, timeout_seconds: int = 3600):
        detail = f"Test execution timed out after {timeout_seconds} seconds"
        super().__init__(
            error_code="TEST_TIMEOUT",
            service_name="TestRunner",
            message=detail,
        )
# Public API of this module: legacy code-quality re-exports plus the
# test runner exceptions defined above.
__all__ = [
    # Code quality exceptions (re-exported)
    "ViolationNotFoundException",
    "ScanNotFoundException",
    "ScanExecutionException",
    "ScanTimeoutException",
    "ScanParseException",
    "ViolationOperationException",
    "InvalidViolationStatusException",
    # Test runner exceptions (defined here)
    "TestRunNotFoundException",
    "TestExecutionException",
    "TestTimeoutException",
]

View File

@@ -0,0 +1,45 @@
# app/modules/dev_tools/models/__init__.py
"""
Dev-Tools module database models.
This is the canonical location for dev-tools models. Module models are automatically
discovered and registered with SQLAlchemy's Base.metadata at startup.
Usage:
from app.modules.dev_tools.models import (
ArchitectureScan,
ArchitectureViolation,
ArchitectureRule,
ViolationAssignment,
ViolationComment,
TestRun,
TestResult,
TestCollection,
)
"""
from app.modules.dev_tools.models.architecture_scan import (
ArchitectureScan,
ArchitectureViolation,
ArchitectureRule,
ViolationAssignment,
ViolationComment,
)
from app.modules.dev_tools.models.test_run import (
TestRun,
TestResult,
TestCollection,
)
__all__ = [
# Architecture scan models
"ArchitectureScan",
"ArchitectureViolation",
"ArchitectureRule",
"ViolationAssignment",
"ViolationComment",
# Test run models
"TestRun",
"TestResult",
"TestCollection",
]

View File

@@ -0,0 +1,206 @@
# app/modules/dev_tools/models/architecture_scan.py
"""
Architecture Scan Models
Database models for tracking code quality scans and violations.
This is the canonical location - models are re-exported from the legacy location
for backward compatibility.
"""
from sqlalchemy import (
JSON,
Boolean,
Column,
DateTime,
Float,
ForeignKey,
Integer,
String,
Text,
)
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
from app.core.database import Base
class ArchitectureScan(Base):
    """Represents a single run of a code quality validator.

    One row per scan; individual findings are stored as child
    ArchitectureViolation rows (see the ``violations`` relationship).
    """

    __tablename__ = "architecture_scans"

    id = Column(Integer, primary_key=True, index=True)
    # When the scan record was created (DB server time, timezone-aware).
    timestamp = Column(
        DateTime(timezone=True), server_default=func.now(), nullable=False, index=True
    )
    validator_type = Column(
        String(20), nullable=False, index=True, default="architecture"
    )  # 'architecture', 'security', 'performance'

    # Background task status fields (harmonized architecture)
    # Lifecycle: 'pending' -> 'running' -> 'completed' / 'failed' /
    # 'completed_with_warnings'.
    status = Column(
        String(30), nullable=False, default="pending", index=True
    )  # 'pending', 'running', 'completed', 'failed', 'completed_with_warnings'
    started_at = Column(DateTime(timezone=True), nullable=True)
    completed_at = Column(DateTime(timezone=True), nullable=True)
    error_message = Column(Text, nullable=True)
    progress_message = Column(String(255), nullable=True)  # Current step description

    # Scan results
    total_files = Column(Integer, default=0)
    total_violations = Column(Integer, default=0)
    errors = Column(Integer, default=0)  # Count of 'error'-severity findings
    warnings = Column(Integer, default=0)  # Count of 'warning'-severity findings
    duration_seconds = Column(Float, default=0.0)
    triggered_by = Column(String(100))  # 'manual:username', 'scheduled', 'ci/cd'
    git_commit_hash = Column(String(40))  # HEAD hash at scan time, if available

    # Celery task tracking (optional - for USE_CELERY=true)
    celery_task_id = Column(String(255), nullable=True, index=True)

    # Relationship to violations; deleting a scan deletes its violations.
    violations = relationship(
        "ArchitectureViolation", back_populates="scan", cascade="all, delete-orphan"
    )

    def __repr__(self):
        return f"<ArchitectureScan(id={self.id}, violations={self.total_violations}, errors={self.errors})>"
class ArchitectureViolation(Base):
    """Represents a single code quality violation found during a scan.

    Each row belongs to one ArchitectureScan and carries its own triage
    state ('open' -> 'assigned' -> 'resolved' / 'ignored' / 'technical_debt').
    """

    __tablename__ = "architecture_violations"

    id = Column(Integer, primary_key=True, index=True)
    scan_id = Column(
        Integer, ForeignKey("architecture_scans.id"), nullable=False, index=True
    )
    # Denormalized from the parent scan so violations can be filtered
    # by validator without joining scans.
    validator_type = Column(
        String(20), nullable=False, index=True, default="architecture"
    )  # 'architecture', 'security', 'performance'
    rule_id = Column(String(20), nullable=False, index=True)  # e.g., 'API-001', 'SEC-001', 'PERF-001'
    rule_name = Column(String(200), nullable=False)
    severity = Column(
        String(10), nullable=False, index=True
    )  # 'error', 'warning', 'info'
    file_path = Column(String(500), nullable=False, index=True)
    line_number = Column(Integer, nullable=False)
    message = Column(Text, nullable=False)
    context = Column(Text)  # Code snippet around the offending line
    suggestion = Column(Text)  # Optional remediation hint from the validator

    # Triage state
    status = Column(
        String(20), default="open", index=True
    )  # 'open', 'assigned', 'resolved', 'ignored', 'technical_debt'
    assigned_to = Column(Integer, ForeignKey("users.id"))
    resolved_at = Column(DateTime(timezone=True))
    resolved_by = Column(Integer, ForeignKey("users.id"))
    resolution_note = Column(Text)
    created_at = Column(
        DateTime(timezone=True), server_default=func.now(), nullable=False
    )

    # Relationships
    scan = relationship("ArchitectureScan", back_populates="violations")
    assigned_user = relationship(
        "User", foreign_keys=[assigned_to], backref="assigned_violations"
    )
    resolver = relationship(
        "User", foreign_keys=[resolved_by], backref="resolved_violations"
    )
    assignments = relationship(
        "ViolationAssignment", back_populates="violation", cascade="all, delete-orphan"
    )
    comments = relationship(
        "ViolationComment", back_populates="violation", cascade="all, delete-orphan"
    )

    def __repr__(self):
        return f"<ArchitectureViolation(id={self.id}, rule={self.rule_id}, file={self.file_path}:{self.line_number})>"
class ArchitectureRule(Base):
    """Code quality rules configuration (from YAML with database overrides).

    ``severity``, ``enabled`` and ``custom_config`` allow per-deployment
    overrides of the YAML defaults.
    """

    __tablename__ = "architecture_rules"

    id = Column(Integer, primary_key=True, index=True)
    rule_id = Column(
        String(20), unique=True, nullable=False, index=True
    )  # e.g., 'API-001', 'SEC-001', 'PERF-001'
    validator_type = Column(
        String(20), nullable=False, index=True, default="architecture"
    )  # 'architecture', 'security', 'performance'
    category = Column(
        String(50), nullable=False
    )  # 'api_endpoint', 'service_layer', 'authentication', 'database', etc.
    name = Column(String(200), nullable=False)
    description = Column(Text)
    severity = Column(String(10), nullable=False)  # Can override default from YAML
    enabled = Column(Boolean, default=True, nullable=False)
    custom_config = Column(JSON)  # For rule-specific settings
    created_at = Column(
        DateTime(timezone=True), server_default=func.now(), nullable=False
    )
    # Refreshed automatically on every UPDATE via onupdate=func.now().
    updated_at = Column(
        DateTime(timezone=True),
        server_default=func.now(),
        onupdate=func.now(),
        nullable=False,
    )

    def __repr__(self):
        return f"<ArchitectureRule(id={self.rule_id}, name={self.name}, enabled={self.enabled})>"
class ViolationAssignment(Base):
    """Tracks assignment of violations to developers.

    A violation may accumulate multiple assignment rows over time
    (see ArchitectureViolation.assignments).
    """

    __tablename__ = "violation_assignments"

    id = Column(Integer, primary_key=True, index=True)
    violation_id = Column(
        Integer, ForeignKey("architecture_violations.id"), nullable=False, index=True
    )
    user_id = Column(Integer, ForeignKey("users.id"), nullable=False)  # Assignee
    assigned_at = Column(
        DateTime(timezone=True), server_default=func.now(), nullable=False
    )
    assigned_by = Column(Integer, ForeignKey("users.id"))  # Who made the assignment
    due_date = Column(DateTime(timezone=True))
    priority = Column(
        String(10), default="medium"
    )  # 'low', 'medium', 'high', 'critical'

    # Relationships
    violation = relationship("ArchitectureViolation", back_populates="assignments")
    user = relationship("User", foreign_keys=[user_id], backref="violation_assignments")
    assigner = relationship(
        "User", foreign_keys=[assigned_by], backref="assigned_by_me"
    )

    def __repr__(self):
        return f"<ViolationAssignment(id={self.id}, violation_id={self.violation_id}, user_id={self.user_id})>"
class ViolationComment(Base):
    """Comments on violations for collaboration."""

    __tablename__ = "violation_comments"

    id = Column(Integer, primary_key=True, index=True)
    violation_id = Column(
        Integer, ForeignKey("architecture_violations.id"), nullable=False, index=True
    )
    user_id = Column(Integer, ForeignKey("users.id"), nullable=False)  # Comment author
    comment = Column(Text, nullable=False)
    created_at = Column(
        DateTime(timezone=True), server_default=func.now(), nullable=False
    )

    # Relationships
    violation = relationship("ArchitectureViolation", back_populates="comments")
    user = relationship("User", backref="violation_comments")

    def __repr__(self):
        return f"<ViolationComment(id={self.id}, violation_id={self.violation_id}, user_id={self.user_id})>"

View File

@@ -0,0 +1,151 @@
# app/modules/dev_tools/models/test_run.py
"""
Test Run Models
Database models for tracking pytest test runs and results.
This is the canonical location - models are re-exported from the legacy location
for backward compatibility.
"""
from sqlalchemy import (
JSON,
Column,
DateTime,
Float,
ForeignKey,
Integer,
String,
Text,
)
from sqlalchemy.orm import relationship
from sqlalchemy.sql import func
from app.core.database import Base
class TestRun(Base):
    """Represents a single pytest run.

    Aggregated counts live on the run; per-test outcomes are stored in
    child TestResult rows (see the ``results`` relationship).
    """

    __tablename__ = "test_runs"

    id = Column(Integer, primary_key=True, index=True)
    timestamp = Column(
        DateTime(timezone=True), server_default=func.now(), nullable=False, index=True
    )

    # Test counts
    total_tests = Column(Integer, default=0)
    passed = Column(Integer, default=0)
    failed = Column(Integer, default=0)
    errors = Column(Integer, default=0)
    skipped = Column(Integer, default=0)
    xfailed = Column(Integer, default=0)  # Expected failures
    xpassed = Column(Integer, default=0)  # Unexpected passes

    # Coverage info (optional)
    coverage_percent = Column(Float, nullable=True)

    # Timing
    duration_seconds = Column(Float, default=0.0)

    # Run metadata
    triggered_by = Column(String(100))  # 'manual', 'scheduled', 'ci/cd'
    git_commit_hash = Column(String(40))
    git_branch = Column(String(100))
    test_path = Column(String(500))  # Which tests were run (e.g., 'tests/unit')
    pytest_args = Column(String(500))  # Command line arguments used

    # Status
    status = Column(
        String(20), default="running", index=True
    )  # 'running', 'passed', 'failed', 'error'

    # Celery task tracking (optional - for USE_CELERY=true)
    celery_task_id = Column(String(255), nullable=True, index=True)

    # Relationship to test results; deleting a run deletes its results.
    results = relationship(
        "TestResult", back_populates="run", cascade="all, delete-orphan"
    )

    def __repr__(self):
        return f"<TestRun(id={self.id}, total={self.total_tests}, passed={self.passed}, failed={self.failed})>"

    @property
    def pass_rate(self) -> float:
        """Calculate pass rate as percentage (0.0 when no tests ran)."""
        if self.total_tests == 0:
            return 0.0
        return (self.passed / self.total_tests) * 100
class TestResult(Base):
    """Represents a single test result from a pytest run."""

    __tablename__ = "test_results"

    id = Column(Integer, primary_key=True, index=True)
    run_id = Column(Integer, ForeignKey("test_runs.id"), nullable=False, index=True)

    # Test identification
    node_id = Column(
        String(500), nullable=False, index=True
    )  # e.g., 'tests/unit/test_foo.py::test_bar'
    test_name = Column(String(200), nullable=False)  # e.g., 'test_bar'
    test_file = Column(String(300), nullable=False)  # e.g., 'tests/unit/test_foo.py'
    test_class = Column(String(200))  # e.g., 'TestFooClass' (optional)

    # Result
    outcome = Column(
        String(20), nullable=False, index=True
    )  # 'passed', 'failed', 'error', 'skipped', 'xfailed', 'xpassed'
    duration_seconds = Column(Float, default=0.0)

    # Failure details (if applicable)
    error_message = Column(Text)
    traceback = Column(Text)

    # Test metadata
    markers = Column(JSON)  # List of pytest markers
    parameters = Column(JSON)  # Parametrized test params

    # Timestamps
    created_at = Column(
        DateTime(timezone=True), server_default=func.now(), nullable=False
    )

    # Relationships
    run = relationship("TestRun", back_populates="results")

    def __repr__(self):
        return f"<TestResult(id={self.id}, node_id={self.node_id}, outcome={self.outcome})>"
class TestCollection(Base):
    """Cached test collection info for quick stats.

    Snapshot rows produced by test discovery; not linked to TestRun.
    """

    __tablename__ = "test_collections"

    id = Column(Integer, primary_key=True, index=True)

    # Collection stats
    total_tests = Column(Integer, default=0)
    total_files = Column(Integer, default=0)
    total_classes = Column(Integer, default=0)

    # By category (presumably derived from test file paths --
    # TODO(review): confirm against the collector implementation)
    unit_tests = Column(Integer, default=0)
    integration_tests = Column(Integer, default=0)
    performance_tests = Column(Integer, default=0)
    system_tests = Column(Integer, default=0)

    # Collection data
    test_files = Column(JSON)  # List of test files with counts

    # Timestamps
    collected_at = Column(
        DateTime(timezone=True), server_default=func.now(), nullable=False
    )

    def __repr__(self):
        return f"<TestCollection(id={self.id}, total={self.total_tests})>"

View File

@@ -2,14 +2,15 @@
"""
Dev-Tools module route registration.
This module provides dev-tools routes with module-based access control.
Structure:
- routes/api/ - REST API endpoints (code quality, tests)
- routes/pages/ - HTML page rendering (component library, icons)
Note: Dev-tools is an internal module (admin-only), so there is no vendor router.
"""
# Re-export the admin API router so the module loader can mount it.
from app.modules.dev_tools.routes.api import admin_router

__all__ = ["admin_router"]

View File

@@ -0,0 +1,14 @@
# app/modules/dev_tools/routes/api/__init__.py
"""
Dev-Tools module API routes.
Provides REST API endpoints for code quality and test running:
- Admin API: Code quality scans, violations, test execution
Note: Dev-tools is an internal module, so there are no vendor routes.
Currently re-exports routes from the legacy location.
"""
from app.modules.dev_tools.routes.api.admin import admin_router
__all__ = ["admin_router"]

View File

@@ -0,0 +1,35 @@
# app/modules/dev_tools/routes/api/admin.py
"""
Dev-Tools Admin API Routes.
Provides admin-only API endpoints for:
- Code quality scanning (architecture, security, performance)
- Violation management
- Test execution
Note: This currently re-exports routes from legacy locations.
In future cleanup phases, the route implementations may be moved here.
"""
from fastapi import APIRouter

# Import the existing routers from legacy locations.
# TODO(review): move the route implementations here in a later cleanup phase.
from app.api.v1.admin.code_quality import router as code_quality_router
from app.api.v1.admin.tests import router as tests_router

# Combined admin router for the dev-tools module; all endpoints are mounted
# under the /dev-tools prefix.
admin_router = APIRouter(prefix="/dev-tools", tags=["dev-tools"])

# Include sub-routers
admin_router.include_router(
    code_quality_router,
    prefix="/code-quality",
    tags=["code-quality"],
)
admin_router.include_router(
    tests_router,
    prefix="/tests",
    tags=["tests"],
)

__all__ = ["admin_router"]

View File

@@ -0,0 +1,12 @@
# app/modules/dev_tools/schemas/__init__.py
"""
Dev-Tools module Pydantic schemas.
Schemas for API request/response serialization.
Currently re-exports from central location for backward compatibility.
"""
# Note: Dev-tools schemas are mostly inline in the API routes
# If dedicated schema files exist, they would be re-exported here
__all__ = []

View File

@@ -0,0 +1,42 @@
# app/modules/dev_tools/services/__init__.py
"""
Dev-Tools module services.
This module re-exports services from their current locations.
In future cleanup phases, the actual service implementations
may be moved here.
Services:
- code_quality_service: Code quality scanning and violation management
- test_runner_service: Test execution and results management
"""
from app.services.code_quality_service import (
code_quality_service,
CodeQualityService,
VALIDATOR_ARCHITECTURE,
VALIDATOR_SECURITY,
VALIDATOR_PERFORMANCE,
VALID_VALIDATOR_TYPES,
VALIDATOR_SCRIPTS,
VALIDATOR_NAMES,
)
from app.services.test_runner_service import (
test_runner_service,
TestRunnerService,
)
__all__ = [
# Code quality
"code_quality_service",
"CodeQualityService",
"VALIDATOR_ARCHITECTURE",
"VALIDATOR_SECURITY",
"VALIDATOR_PERFORMANCE",
"VALID_VALIDATOR_TYPES",
"VALIDATOR_SCRIPTS",
"VALIDATOR_NAMES",
# Test runner
"test_runner_service",
"TestRunnerService",
]

View File

@@ -0,0 +1,18 @@
# app/modules/dev_tools/tasks/__init__.py
"""
Dev-Tools module Celery tasks.
On-demand tasks for:
- Code quality scans (architecture, security, performance)
- Test execution
Note: These tasks are triggered on-demand, not scheduled.
"""
from app.modules.dev_tools.tasks.code_quality import execute_code_quality_scan
from app.modules.dev_tools.tasks.test_runner import execute_test_run
__all__ = [
"execute_code_quality_scan",
"execute_test_run",
]

View File

@@ -0,0 +1,238 @@
# app/modules/dev_tools/tasks/code_quality.py
"""
Celery tasks for code quality scans.
Wraps the existing execute_code_quality_scan function for Celery execution.
This is the canonical location - task is re-exported from the legacy location
for backward compatibility.
"""
import json
import logging
import subprocess
from datetime import UTC, datetime
from app.core.celery_config import celery_app
from app.services.admin_notification_service import admin_notification_service
from app.tasks.celery_tasks.base import DatabaseTask
from app.modules.dev_tools.models import ArchitectureScan, ArchitectureViolation
logger = logging.getLogger(__name__)

# Validator type constants (stored in ArchitectureScan.validator_type).
VALIDATOR_ARCHITECTURE = "architecture"
VALIDATOR_SECURITY = "security"
VALIDATOR_PERFORMANCE = "performance"
VALID_VALIDATOR_TYPES = [VALIDATOR_ARCHITECTURE, VALIDATOR_SECURITY, VALIDATOR_PERFORMANCE]

# Map validator types to their scripts (paths relative to the project root).
VALIDATOR_SCRIPTS = {
    VALIDATOR_ARCHITECTURE: "scripts/validate_architecture.py",
    VALIDATOR_SECURITY: "scripts/validate_security.py",
    VALIDATOR_PERFORMANCE: "scripts/validate_performance.py",
}

# Human-readable names for log and notification messages.
VALIDATOR_NAMES = {
    VALIDATOR_ARCHITECTURE: "Architecture",
    VALIDATOR_SECURITY: "Security",
    VALIDATOR_PERFORMANCE: "Performance",
}
def _get_git_commit_hash() -> str | None:
"""Get current git commit hash."""
try:
result = subprocess.run(
["git", "rev-parse", "HEAD"],
capture_output=True,
text=True,
timeout=5,
)
if result.returncode == 0:
return result.stdout.strip()[:40]
except Exception:
pass
return None
@celery_app.task(
    bind=True,
    base=DatabaseTask,
    name="app.modules.dev_tools.tasks.code_quality.execute_code_quality_scan",
    max_retries=1,
    time_limit=700,  # 11+ minutes hard limit
    soft_time_limit=600,  # 10 minutes soft limit
)
def execute_code_quality_scan(self, scan_id: int):
    """
    Celery task to execute a code quality scan.

    This task:
    1. Gets the scan record from DB
    2. Updates status to 'running'
    3. Runs the validator script
    4. Parses JSON output and creates violation records
    5. Updates scan with results and status 'completed' or 'failed'

    Args:
        scan_id: ID of the ArchitectureScan record

    Returns:
        dict: Scan results summary, or an {"error": ...} dict on the
        failure paths that do not re-raise (missing scan, bad validator
        type, timeout, unparseable output).

    Raises:
        Exception: Unexpected errors are recorded on the scan, an admin
        notification is created, and the exception is re-raised so Celery
        marks the task as failed.
    """
    with self.get_db() as db:
        # Get the scan record
        scan = db.query(ArchitectureScan).filter(ArchitectureScan.id == scan_id).first()
        if not scan:
            logger.error(f"Code quality scan {scan_id} not found")
            return {"error": f"Scan {scan_id} not found"}

        # Store Celery task ID so the API layer can poll/revoke this task.
        scan.celery_task_id = self.request.id

        validator_type = scan.validator_type
        if validator_type not in VALID_VALIDATOR_TYPES:
            scan.status = "failed"
            scan.error_message = f"Invalid validator type: {validator_type}"
            db.commit()
            return {"error": f"Invalid validator type: {validator_type}"}

        script_path = VALIDATOR_SCRIPTS[validator_type]
        validator_name = VALIDATOR_NAMES[validator_type]

        try:
            # Update status to running
            scan.status = "running"
            scan.started_at = datetime.now(UTC)
            scan.progress_message = f"Running {validator_name} validator..."
            scan.git_commit_hash = _get_git_commit_hash()
            db.commit()

            logger.info(f"Starting {validator_name} scan (scan_id={scan_id})")

            # Run validator with JSON output
            start_time = datetime.now(UTC)
            try:
                result = subprocess.run(
                    ["python", script_path, "--json"],
                    capture_output=True,
                    text=True,
                    timeout=600,  # 10 minute timeout
                )
            except subprocess.TimeoutExpired:
                logger.error(f"{validator_name} scan {scan_id} timed out after 10 minutes")
                scan.status = "failed"
                scan.error_message = "Scan timed out after 10 minutes"
                scan.completed_at = datetime.now(UTC)
                db.commit()
                return {"error": "Scan timed out"}

            duration = (datetime.now(UTC) - start_time).total_seconds()

            # Update progress
            scan.progress_message = "Parsing results..."
            db.commit()

            # Parse JSON output. The validator may print human-readable
            # text before the JSON document, so skip ahead to the first
            # line that opens a JSON object.
            try:
                lines = result.stdout.strip().split("\n")
                json_start = -1
                for i, line in enumerate(lines):
                    if line.strip().startswith("{"):
                        json_start = i
                        break
                if json_start == -1:
                    raise ValueError("No JSON output found in validator output")
                json_output = "\n".join(lines[json_start:])
                data = json.loads(json_output)
            except (json.JSONDecodeError, ValueError) as e:
                logger.error(f"Failed to parse {validator_name} validator output: {e}")
                scan.status = "failed"
                scan.error_message = f"Failed to parse validator output: {e}"
                scan.completed_at = datetime.now(UTC)
                scan.duration_seconds = duration
                db.commit()
                return {"error": str(e)}

            # Update progress
            scan.progress_message = "Storing violations..."
            db.commit()

            # Create violation records (one row per finding; missing fields
            # fall back to safe defaults).
            violations_data = data.get("violations", [])
            logger.info(f"Creating {len(violations_data)} {validator_name} violation records")
            for v in violations_data:
                violation = ArchitectureViolation(
                    scan_id=scan.id,
                    validator_type=validator_type,
                    rule_id=v.get("rule_id", "UNKNOWN"),
                    rule_name=v.get("rule_name", "Unknown Rule"),
                    severity=v.get("severity", "warning"),
                    file_path=v.get("file_path", ""),
                    line_number=v.get("line_number", 0),
                    message=v.get("message", ""),
                    context=v.get("context", ""),
                    suggestion=v.get("suggestion", ""),
                    status="open",
                )
                db.add(violation)

            # Update scan with results
            scan.total_files = data.get("files_checked", 0)
            scan.total_violations = data.get("total_violations", len(violations_data))
            scan.errors = data.get("errors", 0)
            scan.warnings = data.get("warnings", 0)
            scan.duration_seconds = duration
            scan.completed_at = datetime.now(UTC)
            scan.progress_message = None

            # Set final status based on results
            if scan.errors > 0:
                scan.status = "completed_with_warnings"
            else:
                scan.status = "completed"

            db.commit()

            logger.info(
                f"{validator_name} scan {scan_id} completed: "
                f"files={scan.total_files}, violations={scan.total_violations}, "
                f"errors={scan.errors}, warnings={scan.warnings}, "
                f"duration={duration:.1f}s"
            )

            return {
                "scan_id": scan_id,
                "validator_type": validator_type,
                "status": scan.status,
                "total_files": scan.total_files,
                "total_violations": scan.total_violations,
                "errors": scan.errors,
                "warnings": scan.warnings,
                "duration_seconds": duration,
            }

        except Exception as e:
            logger.error(f"Code quality scan {scan_id} failed: {e}", exc_info=True)
            scan.status = "failed"
            scan.error_message = str(e)[:500]
            scan.completed_at = datetime.now(UTC)
            scan.progress_message = None

            # Create admin notification for scan failure
            admin_notification_service.create_notification(
                db=db,
                title="Code Quality Scan Failed",
                message=f"{VALIDATOR_NAMES.get(scan.validator_type, 'Unknown')} scan failed: {str(e)[:200]}",
                notification_type="error",
                category="code_quality",
                action_url="/admin/code-quality",
            )
            db.commit()
            raise  # Re-raise for Celery

View File

@@ -0,0 +1,85 @@
# app/modules/dev_tools/tasks/test_runner.py
"""
Celery tasks for test execution.
Wraps the existing execute_test_run function for Celery execution.
This is the canonical location - task is re-exported from the legacy location
for backward compatibility.
"""
import logging
from app.core.celery_config import celery_app
from app.services.test_runner_service import test_runner_service
from app.tasks.celery_tasks.base import DatabaseTask
from app.modules.dev_tools.models import TestRun
logger = logging.getLogger(__name__)
@celery_app.task(
    bind=True,
    base=DatabaseTask,
    name="app.modules.dev_tools.tasks.test_runner.execute_test_run",
    max_retries=1,
    time_limit=3600,  # 1 hour hard limit
    soft_time_limit=3300,  # 55 minutes soft limit
)
def execute_test_run(
    self,
    run_id: int,
    test_path: str = "tests",
    extra_args: list[str] | None = None,
):
    """
    Celery task to execute pytest tests.

    Args:
        run_id: ID of the TestRun record
        test_path: Path to tests (relative to project root)
        extra_args: Additional pytest arguments

    Returns:
        dict: Test run results summary, or {"error": ...} when the TestRun
        record does not exist.

    Raises:
        Exception: Execution failures mark the run as 'error' and are
        re-raised so Celery records the task failure.
    """
    with self.get_db() as db:
        # Get the test run record
        test_run = db.query(TestRun).filter(TestRun.id == run_id).first()
        if not test_run:
            logger.error(f"Test run {run_id} not found")
            return {"error": f"Test run {run_id} not found"}

        # Store Celery task ID so the API layer can poll/revoke this task.
        test_run.celery_task_id = self.request.id
        db.commit()

        try:
            logger.info(f"Starting test execution: Run {run_id}, Path: {test_path}")

            # Execute the tests.
            # NOTE(review): calls a private method of test_runner_service;
            # consider exposing a public entry point for task use.
            test_runner_service._execute_tests(db, test_run, test_path, extra_args)
            db.commit()

            logger.info(
                f"Test run {run_id} completed: "
                f"status={test_run.status}, passed={test_run.passed}, "
                f"failed={test_run.failed}, duration={test_run.duration_seconds:.1f}s"
            )

            return {
                "run_id": run_id,
                "status": test_run.status,
                "total_tests": test_run.total_tests,
                "passed": test_run.passed,
                "failed": test_run.failed,
                "errors": test_run.errors,
                "skipped": test_run.skipped,
                "coverage_percent": test_run.coverage_percent,
                "duration_seconds": test_run.duration_seconds,
            }

        except Exception as e:
            logger.error(f"Test run {run_id} failed: {e}", exc_info=True)
            test_run.status = "error"
            db.commit()
            raise  # Re-raise for Celery