feat: add pytest testing dashboard with run history and statistics
Add a new Testing Dashboard page that replaces the old Testing Hub with pytest integration:

- Database models for test runs, results, and collections (TestRun, TestResult, TestCollection)
- Test runner service that executes pytest with JSON reporting and stores results in the database
- REST API endpoints for running tests, viewing history, and statistics
- Dashboard UI showing pass rates, trends, tests by category, and top failing tests
- Alembic migration for the new test_* tables

The dashboard allows admins to:

- Run pytest directly from the UI
- View test run history with pass/fail statistics
- See trend data across recent runs
- Identify frequently failing tests
- Collect test information without running

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -37,6 +37,7 @@ from . import (
|
||||
notifications,
|
||||
products,
|
||||
settings,
|
||||
tests,
|
||||
users,
|
||||
vendor_domains,
|
||||
vendor_products,
|
||||
@@ -142,5 +143,10 @@ router.include_router(
|
||||
code_quality.router, prefix="/code-quality", tags=["admin-code-quality"]
|
||||
)
|
||||
|
||||
# Include test runner endpoints
|
||||
router.include_router(
|
||||
tests.router, prefix="/tests", tags=["admin-tests"]
|
||||
)
|
||||
|
||||
# Export the router
|
||||
__all__ = ["router"]
|
||||
|
||||
309
app/api/v1/admin/tests.py
Normal file
309
app/api/v1/admin/tests.py
Normal file
@@ -0,0 +1,309 @@
|
||||
"""
|
||||
Test Runner API Endpoints
|
||||
RESTful API for running pytest and viewing test results
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, Query
from pydantic import BaseModel, ConfigDict, Field
from sqlalchemy.orm import Session

from app.api.deps import get_current_admin_api
from app.core.database import get_db
from app.services.test_runner_service import test_runner_service
from models.database.user import User
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
# Pydantic Models for API
|
||||
|
||||
|
||||
class TestRunResponse(BaseModel):
    """Serialized view of a single pytest run.

    Mirrors the TestRun ORM model's scalar columns. ``timestamp`` is an
    ISO-8601 string (not a datetime) so the payload is JSON-ready; the
    endpoints below serialize it explicitly via ``isoformat()``.
    """

    id: int
    timestamp: str
    total_tests: int
    passed: int
    failed: int
    errors: int
    skipped: int
    # xfail/xpass: tests expected to fail that did / did not fail.
    xfailed: int
    xpassed: int
    pass_rate: float
    duration_seconds: float
    coverage_percent: float | None
    # e.g. "manual:<username>" when triggered from the /run endpoint.
    triggered_by: str | None
    git_commit_hash: str | None
    git_branch: str | None
    test_path: str | None
    status: str

    # Pydantic v2 style: ConfigDict replaces the deprecated class-based
    # `class Config` while keeping the same from_attributes behavior.
    model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
|
||||
class TestResultResponse(BaseModel):
    """Serialized view of one test case's outcome within a run.

    Mirrors the TestResult ORM model; ``node_id`` is the pytest node id
    (file::class::test), ``outcome`` one of passed/failed/error/skipped.
    """

    id: int
    node_id: str
    test_name: str
    test_file: str
    # None for module-level (non-class) tests.
    test_class: str | None
    outcome: str
    duration_seconds: float
    # Populated only for failing/erroring tests.
    error_message: str | None
    traceback: str | None

    # Pydantic v2 style: ConfigDict replaces the deprecated class-based
    # `class Config` while keeping the same from_attributes behavior.
    model_config = ConfigDict(from_attributes=True)
|
||||
|
||||
|
||||
class RunTestsRequest(BaseModel):
    """Request model for running tests"""

    # Relative path passed to pytest; defaults to the whole test suite.
    # The Field descriptions surface in the generated OpenAPI schema.
    test_path: str = Field("tests", description="Path to tests to run")
    # Extra CLI flags forwarded verbatim to pytest (e.g. ["-k", "expr"]).
    extra_args: list[str] | None = Field(None, description="Additional pytest arguments")
|
||||
|
||||
|
||||
class TestDashboardStatsResponse(BaseModel):
    """Response model for dashboard statistics"""

    # Current run stats — counts/rates taken from the most recent run.
    total_tests: int
    passed: int
    failed: int
    errors: int
    skipped: int
    pass_rate: float
    duration_seconds: float
    coverage_percent: float | None
    # ISO timestamp and status of the latest run; None if no run yet.
    last_run: str | None
    last_run_status: str | None

    # Collection stats
    total_test_files: int

    # Trend and breakdown data — shapes are defined by
    # test_runner_service.get_dashboard_stats (loosely typed here).
    trend: list[dict]
    by_category: dict
    top_failing: list[dict]
|
||||
|
||||
|
||||
# API Endpoints
|
||||
|
||||
|
||||
@router.post("/run", response_model=TestRunResponse)
async def run_tests(
    request: RunTestsRequest | None = None,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """Run pytest and persist the results.

    Admin-only. Executes pytest against the requested path (defaulting
    to the whole suite when no body is supplied) and returns the stored
    run record.
    """
    # Body is optional; fall back to the same defaults RunTestsRequest uses.
    if request is not None:
        target_path = request.test_path
        pytest_args = request.extra_args
    else:
        target_path = "tests"
        pytest_args = None

    run = test_runner_service.run_tests(
        db,
        test_path=target_path,
        triggered_by=f"manual:{current_user.username}",
        extra_args=pytest_args,
    )
    db.commit()

    # Copy scalar columns straight across; timestamp needs explicit
    # ISO-8601 serialization since the response field is a plain str.
    scalar_fields = (
        "id", "total_tests", "passed", "failed", "errors", "skipped",
        "xfailed", "xpassed", "pass_rate", "duration_seconds",
        "coverage_percent", "triggered_by", "git_commit_hash",
        "git_branch", "test_path", "status",
    )
    payload = {name: getattr(run, name) for name in scalar_fields}
    return TestRunResponse(timestamp=run.timestamp.isoformat(), **payload)
|
||||
|
||||
|
||||
@router.get("/runs", response_model=list[TestRunResponse])
async def list_runs(
    limit: int = Query(20, ge=1, le=100, description="Number of runs to return"),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """Return recent test runs (newest first, per the service) for trend
    analysis. Admin-only; `limit` is capped at 100."""
    scalar_fields = (
        "id", "total_tests", "passed", "failed", "errors", "skipped",
        "xfailed", "xpassed", "pass_rate", "duration_seconds",
        "coverage_percent", "triggered_by", "git_commit_hash",
        "git_branch", "test_path", "status",
    )

    def serialize(record) -> TestRunResponse:
        # timestamp is a datetime on the ORM model but a str on the
        # response model, so it is converted explicitly.
        data = {name: getattr(record, name) for name in scalar_fields}
        data["timestamp"] = record.timestamp.isoformat()
        return TestRunResponse(**data)

    history = test_runner_service.get_run_history(db, limit=limit)
    return [serialize(record) for record in history]
|
||||
|
||||
|
||||
@router.get("/runs/{run_id}", response_model=TestRunResponse)
async def get_run(
    run_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """Fetch a single test run by id. Raises ResourceNotFoundException
    (404 semantics) when the id does not exist."""
    record = test_runner_service.get_run_by_id(db, run_id)

    if record is None:
        # Imported lazily — presumably to avoid a circular import; confirm
        # before hoisting to module level.
        from app.exceptions.base import ResourceNotFoundException

        raise ResourceNotFoundException("TestRun", str(run_id))

    scalar_fields = (
        "id", "total_tests", "passed", "failed", "errors", "skipped",
        "xfailed", "xpassed", "pass_rate", "duration_seconds",
        "coverage_percent", "triggered_by", "git_commit_hash",
        "git_branch", "test_path", "status",
    )
    data = {name: getattr(record, name) for name in scalar_fields}
    # Response model carries the timestamp as an ISO-8601 string.
    data["timestamp"] = record.timestamp.isoformat()
    return TestRunResponse(**data)
|
||||
|
||||
|
||||
@router.get("/runs/{run_id}/results", response_model=list[TestResultResponse])
async def get_run_results(
    run_id: int,
    outcome: str | None = Query(None, description="Filter by outcome (passed, failed, error, skipped)"),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """List per-test results for one run, optionally filtered by outcome.
    Admin-only."""
    # All response fields map 1:1 onto ORM attributes, so a simple
    # attribute copy is sufficient here (no datetime conversion needed).
    attrs = (
        "id", "node_id", "test_name", "test_file", "test_class",
        "outcome", "duration_seconds", "error_message", "traceback",
    )
    rows = test_runner_service.get_run_results(db, run_id, outcome=outcome)
    return [
        TestResultResponse(**{name: getattr(row, name) for name in attrs})
        for row in rows
    ]
|
||||
|
||||
|
||||
@router.get("/runs/{run_id}/failures", response_model=list[TestResultResponse])
async def get_run_failures(
    run_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """List only the failing tests of one run (as selected by
    test_runner_service.get_failed_tests). Admin-only."""
    # Same 1:1 attribute mapping as the full-results endpoint.
    attrs = (
        "id", "node_id", "test_name", "test_file", "test_class",
        "outcome", "duration_seconds", "error_message", "traceback",
    )
    failing = test_runner_service.get_failed_tests(db, run_id)
    return [
        TestResultResponse(**{name: getattr(row, name) for name in attrs})
        for row in failing
    ]
|
||||
|
||||
|
||||
@router.get("/stats", response_model=TestDashboardStatsResponse)
async def get_dashboard_stats(
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """Aggregate statistics for the testing dashboard.

    Includes outcome counts, pass rate, trend data, per-category
    breakdown, and the most frequently failing tests. The service
    returns a dict whose keys match TestDashboardStatsResponse exactly.
    """
    return TestDashboardStatsResponse(**test_runner_service.get_dashboard_stats(db))
|
||||
|
||||
|
||||
@router.post("/collect")
async def collect_tests(
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """Refresh the test-collection cache without executing any tests.

    Runs pytest collection via the service, commits the updated counts,
    and returns a summary of what was collected.
    """
    collection = test_runner_service.collect_tests(db)
    db.commit()

    # Counts copy straight across; collected_at is serialized to an
    # ISO-8601 string and appended last to keep the original key order.
    count_keys = (
        "total_tests", "total_files", "unit_tests",
        "integration_tests", "performance_tests", "system_tests",
    )
    summary = {key: getattr(collection, key) for key in count_keys}
    summary["collected_at"] = collection.collected_at.isoformat()
    return summary
|
||||
Reference in New Issue
Block a user