feat: add pytest testing dashboard with run history and statistics
Add a new Testing Dashboard page that replaces the old Testing Hub with pytest integration: - Database models for test runs, results, and collections (TestRun, TestResult, TestCollection) - Test runner service that executes pytest with JSON reporting and stores results in the database - REST API endpoints for running tests, viewing history, and statistics - Dashboard UI showing pass rates, trends, tests by category, and top failing tests - Alembic migration for the new test_* tables The dashboard allows admins to: - Run pytest directly from the UI - View test run history with pass/fail statistics - See trend data across recent runs - Identify frequently failing tests - Collect test information without running 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
103
alembic/versions/82ea1b4a3ccb_add_test_run_tables.py
Normal file
103
alembic/versions/82ea1b4a3ccb_add_test_run_tables.py
Normal file
@@ -0,0 +1,103 @@
|
||||
"""add_test_run_tables
|
||||
|
||||
Revision ID: 82ea1b4a3ccb
|
||||
Revises: b4c5d6e7f8a9
|
||||
Create Date: 2025-12-12 22:48:09.501172
|
||||
|
||||
"""
|
||||
from typing import Sequence, Union
|
||||
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision: str = '82ea1b4a3ccb'
|
||||
down_revision: Union[str, None] = 'b4c5d6e7f8a9'
|
||||
branch_labels: Union[str, Sequence[str], None] = None
|
||||
depends_on: Union[str, Sequence[str], None] = None
|
||||
|
||||
|
||||
def upgrade() -> None:
    """Create the three pytest-dashboard tables.

    Order matters: test_results carries a foreign key to test_runs, so
    test_runs must exist before test_results is created.

    NOTE(review): server_default '(CURRENT_TIMESTAMP)' with parentheses is
    SQLite-flavoured — confirm it renders correctly on the production dialect.
    """
    # Create test_collections table — cached output of `pytest --collect-only`
    # (counts per category plus a JSON per-file breakdown).
    op.create_table('test_collections',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('total_tests', sa.Integer(), nullable=True),
        sa.Column('total_files', sa.Integer(), nullable=True),
        sa.Column('total_classes', sa.Integer(), nullable=True),
        sa.Column('unit_tests', sa.Integer(), nullable=True),
        sa.Column('integration_tests', sa.Integer(), nullable=True),
        sa.Column('performance_tests', sa.Integer(), nullable=True),
        sa.Column('system_tests', sa.Integer(), nullable=True),
        sa.Column('test_files', sa.JSON(), nullable=True),
        sa.Column('collected_at', sa.DateTime(timezone=True), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=False),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_test_collections_id'), 'test_collections', ['id'], unique=False)

    # Create test_runs table — one row per pytest invocation with aggregate
    # outcome counts, timing, git context and final status.
    op.create_table('test_runs',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('timestamp', sa.DateTime(timezone=True), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=False),
        sa.Column('total_tests', sa.Integer(), nullable=True),
        sa.Column('passed', sa.Integer(), nullable=True),
        sa.Column('failed', sa.Integer(), nullable=True),
        sa.Column('errors', sa.Integer(), nullable=True),
        sa.Column('skipped', sa.Integer(), nullable=True),
        sa.Column('xfailed', sa.Integer(), nullable=True),
        sa.Column('xpassed', sa.Integer(), nullable=True),
        sa.Column('coverage_percent', sa.Float(), nullable=True),
        sa.Column('duration_seconds', sa.Float(), nullable=True),
        sa.Column('triggered_by', sa.String(length=100), nullable=True),
        sa.Column('git_commit_hash', sa.String(length=40), nullable=True),
        sa.Column('git_branch', sa.String(length=100), nullable=True),
        sa.Column('test_path', sa.String(length=500), nullable=True),
        sa.Column('pytest_args', sa.String(length=500), nullable=True),
        sa.Column('status', sa.String(length=20), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_test_runs_id'), 'test_runs', ['id'], unique=False)
    # status and timestamp are indexed because the dashboard filters on
    # status != 'running' and orders by timestamp desc.
    op.create_index(op.f('ix_test_runs_status'), 'test_runs', ['status'], unique=False)
    op.create_index(op.f('ix_test_runs_timestamp'), 'test_runs', ['timestamp'], unique=False)

    # Create test_results table — one row per individual test in a run,
    # linked to test_runs via run_id.
    op.create_table('test_results',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('run_id', sa.Integer(), nullable=False),
        sa.Column('node_id', sa.String(length=500), nullable=False),
        sa.Column('test_name', sa.String(length=200), nullable=False),
        sa.Column('test_file', sa.String(length=300), nullable=False),
        sa.Column('test_class', sa.String(length=200), nullable=True),
        sa.Column('outcome', sa.String(length=20), nullable=False),
        sa.Column('duration_seconds', sa.Float(), nullable=True),
        sa.Column('error_message', sa.Text(), nullable=True),
        sa.Column('traceback', sa.Text(), nullable=True),
        sa.Column('markers', sa.JSON(), nullable=True),
        sa.Column('parameters', sa.JSON(), nullable=True),
        sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('(CURRENT_TIMESTAMP)'), nullable=False),
        sa.ForeignKeyConstraint(['run_id'], ['test_runs.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_test_results_id'), 'test_results', ['id'], unique=False)
    op.create_index(op.f('ix_test_results_node_id'), 'test_results', ['node_id'], unique=False)
    op.create_index(op.f('ix_test_results_outcome'), 'test_results', ['outcome'], unique=False)
    op.create_index(op.f('ix_test_results_run_id'), 'test_results', ['run_id'], unique=False)
|
||||
|
||||
|
||||
def downgrade() -> None:
    """Drop the pytest-dashboard tables, reversing upgrade().

    Tables are dropped child-first (test_results references test_runs) so
    the foreign key never dangles; indexes are dropped before their table.
    """
    # Drop test_results table first (has foreign key to test_runs)
    op.drop_index(op.f('ix_test_results_run_id'), table_name='test_results')
    op.drop_index(op.f('ix_test_results_outcome'), table_name='test_results')
    op.drop_index(op.f('ix_test_results_node_id'), table_name='test_results')
    op.drop_index(op.f('ix_test_results_id'), table_name='test_results')
    op.drop_table('test_results')

    # Drop test_runs table
    op.drop_index(op.f('ix_test_runs_timestamp'), table_name='test_runs')
    op.drop_index(op.f('ix_test_runs_status'), table_name='test_runs')
    op.drop_index(op.f('ix_test_runs_id'), table_name='test_runs')
    op.drop_table('test_runs')

    # Drop test_collections table
    op.drop_index(op.f('ix_test_collections_id'), table_name='test_collections')
    op.drop_table('test_collections')
|
||||
@@ -37,6 +37,7 @@ from . import (
|
||||
notifications,
|
||||
products,
|
||||
settings,
|
||||
tests,
|
||||
users,
|
||||
vendor_domains,
|
||||
vendor_products,
|
||||
@@ -142,5 +143,10 @@ router.include_router(
|
||||
code_quality.router, prefix="/code-quality", tags=["admin-code-quality"]
|
||||
)
|
||||
|
||||
# Include test runner endpoints
|
||||
router.include_router(
|
||||
tests.router, prefix="/tests", tags=["admin-tests"]
|
||||
)
|
||||
|
||||
# Export the router
|
||||
__all__ = ["router"]
|
||||
|
||||
309
app/api/v1/admin/tests.py
Normal file
309
app/api/v1/admin/tests.py
Normal file
@@ -0,0 +1,309 @@
|
||||
"""
|
||||
Test Runner API Endpoints
|
||||
RESTful API for running pytest and viewing test results
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, Query
|
||||
from pydantic import BaseModel, Field
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.api.deps import get_current_admin_api
|
||||
from app.core.database import get_db
|
||||
from app.services.test_runner_service import test_runner_service
|
||||
from models.database.user import User
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
# Pydantic Models for API
|
||||
|
||||
|
||||
class TestRunResponse(BaseModel):
    """Response model for a test run.

    Mirrors a TestRun ORM row. The endpoints construct this explicitly and
    call ``run.timestamp.isoformat()``, so ``timestamp`` is a plain string
    here rather than a datetime.
    """

    id: int
    timestamp: str  # ISO-8601 string, produced by the endpoints
    total_tests: int
    passed: int
    failed: int
    errors: int
    skipped: int
    xfailed: int
    xpassed: int
    # pass_rate is read off the ORM object — presumably a computed property
    # on TestRun; TODO confirm against the model definition.
    pass_rate: float
    duration_seconds: float
    coverage_percent: float | None
    triggered_by: str | None  # e.g. "manual:<username>" (see run_tests endpoint)
    git_commit_hash: str | None
    git_branch: str | None
    test_path: str | None
    status: str  # "running" | "passed" | "failed" | "error" per TestRunnerService

    class Config:
        # Allow construction from ORM attribute access (Pydantic v2 name).
        from_attributes = True
|
||||
|
||||
|
||||
class TestResultResponse(BaseModel):
    """Response model for a single test result (one TestResult ORM row)."""

    id: int
    node_id: str  # full pytest node id, e.g. "tests/unit/test_x.py::TestC::test_m"
    test_name: str  # bare function name, parametrization suffix stripped
    test_file: str
    test_class: str | None  # None for module-level test functions
    outcome: str  # pytest outcome: passed / failed / error / skipped / ...
    duration_seconds: float
    error_message: str | None  # last traceback line, truncated to 500 chars
    traceback: str | None

    class Config:
        # Allow construction from ORM attribute access (Pydantic v2 name).
        from_attributes = True
|
||||
|
||||
|
||||
class RunTestsRequest(BaseModel):
    """Request model for running tests.

    The whole body is optional on the /run endpoint; defaults run the full
    "tests" tree with no extra pytest arguments.
    """

    test_path: str = Field("tests", description="Path to tests to run")
    extra_args: list[str] | None = Field(None, description="Additional pytest arguments")
|
||||
|
||||
|
||||
class TestDashboardStatsResponse(BaseModel):
    """Response model for dashboard statistics.

    Field names must match the keys of the dict returned by
    TestRunnerService.get_dashboard_stats, since the endpoint builds this
    model via ``TestDashboardStatsResponse(**stats)``.
    """

    # Current (latest non-running) run stats; zeros/None when no run exists.
    total_tests: int
    passed: int
    failed: int
    errors: int
    skipped: int
    pass_rate: float
    duration_seconds: float
    coverage_percent: float | None
    last_run: str | None  # ISO-8601 timestamp of the latest run
    last_run_status: str | None

    # Collection stats (from the latest `pytest --collect-only` snapshot)
    total_test_files: int

    # Trend and breakdown data
    trend: list[dict]  # last 10 runs, oldest first
    by_category: dict  # {"Unit Tests": {"total", "passed", "failed"}, ...}
    top_failing: list[dict]  # {"test_name", "test_file", "failure_count"}
|
||||
|
||||
|
||||
# API Endpoints
|
||||
|
||||
|
||||
@router.post("/run", response_model=TestRunResponse)
async def run_tests(
    request: RunTestsRequest | None = None,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """
    Run pytest and store results.

    Requires admin authentication. Executes pytest on the requested path
    (default "tests"), persists the run, and returns its summary.
    """
    # Body is optional; fall back to the defaults when it is absent.
    if request is None:
        path, args = "tests", None
    else:
        path, args = request.test_path, request.extra_args

    run = test_runner_service.run_tests(
        db,
        test_path=path,
        triggered_by=f"manual:{current_user.username}",
        extra_args=args,
    )
    db.commit()

    payload = {
        "id": run.id,
        "timestamp": run.timestamp.isoformat(),
        "total_tests": run.total_tests,
        "passed": run.passed,
        "failed": run.failed,
        "errors": run.errors,
        "skipped": run.skipped,
        "xfailed": run.xfailed,
        "xpassed": run.xpassed,
        "pass_rate": run.pass_rate,
        "duration_seconds": run.duration_seconds,
        "coverage_percent": run.coverage_percent,
        "triggered_by": run.triggered_by,
        "git_commit_hash": run.git_commit_hash,
        "git_branch": run.git_branch,
        "test_path": run.test_path,
        "status": run.status,
    }
    return TestRunResponse(**payload)
|
||||
|
||||
|
||||
@router.get("/runs", response_model=list[TestRunResponse])
async def list_runs(
    limit: int = Query(20, ge=1, le=100, description="Number of runs to return"),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """
    Get test run history.

    Returns up to `limit` recent test runs, newest first, for trend analysis.
    """
    history = test_runner_service.get_run_history(db, limit=limit)

    responses: list[TestRunResponse] = []
    for run in history:
        responses.append(
            TestRunResponse(
                id=run.id,
                timestamp=run.timestamp.isoformat(),
                total_tests=run.total_tests,
                passed=run.passed,
                failed=run.failed,
                errors=run.errors,
                skipped=run.skipped,
                xfailed=run.xfailed,
                xpassed=run.xpassed,
                pass_rate=run.pass_rate,
                duration_seconds=run.duration_seconds,
                coverage_percent=run.coverage_percent,
                triggered_by=run.triggered_by,
                git_commit_hash=run.git_commit_hash,
                git_branch=run.git_branch,
                test_path=run.test_path,
                status=run.status,
            )
        )
    return responses
|
||||
|
||||
|
||||
@router.get("/runs/{run_id}", response_model=TestRunResponse)
async def get_run(
    run_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """
    Get a specific test run by id; 404-style error when it does not exist.
    """
    run = test_runner_service.get_run_by_id(db, run_id)

    # Guard clause: surface a not-found error before building the response.
    if run is None:
        from app.exceptions.base import ResourceNotFoundException
        raise ResourceNotFoundException("TestRun", str(run_id))

    fields = {
        "id": run.id,
        "timestamp": run.timestamp.isoformat(),
        "total_tests": run.total_tests,
        "passed": run.passed,
        "failed": run.failed,
        "errors": run.errors,
        "skipped": run.skipped,
        "xfailed": run.xfailed,
        "xpassed": run.xpassed,
        "pass_rate": run.pass_rate,
        "duration_seconds": run.duration_seconds,
        "coverage_percent": run.coverage_percent,
        "triggered_by": run.triggered_by,
        "git_commit_hash": run.git_commit_hash,
        "git_branch": run.git_branch,
        "test_path": run.test_path,
        "status": run.status,
    }
    return TestRunResponse(**fields)
|
||||
|
||||
|
||||
@router.get("/runs/{run_id}/results", response_model=list[TestResultResponse])
async def get_run_results(
    run_id: int,
    outcome: str | None = Query(None, description="Filter by outcome (passed, failed, error, skipped)"),
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """
    Get test results for a specific run, optionally filtered by outcome.
    """
    rows = test_runner_service.get_run_results(db, run_id, outcome=outcome)

    out: list[TestResultResponse] = []
    for row in rows:
        out.append(
            TestResultResponse(
                id=row.id,
                node_id=row.node_id,
                test_name=row.test_name,
                test_file=row.test_file,
                test_class=row.test_class,
                outcome=row.outcome,
                duration_seconds=row.duration_seconds,
                error_message=row.error_message,
                traceback=row.traceback,
            )
        )
    return out
|
||||
|
||||
|
||||
@router.get("/runs/{run_id}/failures", response_model=list[TestResultResponse])
async def get_run_failures(
    run_id: int,
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """
    Get failed (failed/error outcome) tests from a specific run.
    """
    failures = test_runner_service.get_failed_tests(db, run_id)

    # Local serializer keeps the mapping in one place.
    def _to_response(row):
        return TestResultResponse(
            id=row.id,
            node_id=row.node_id,
            test_name=row.test_name,
            test_file=row.test_file,
            test_class=row.test_class,
            outcome=row.outcome,
            duration_seconds=row.duration_seconds,
            error_message=row.error_message,
            traceback=row.traceback,
        )

    return [_to_response(row) for row in failures]
|
||||
|
||||
|
||||
@router.get("/stats", response_model=TestDashboardStatsResponse)
async def get_dashboard_stats(
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """
    Get dashboard statistics

    Returns comprehensive stats for the testing dashboard including:
    - Total counts by outcome
    - Pass rate
    - Trend data
    - Tests by category
    - Top failing tests
    """
    # The service returns a plain dict whose keys must exactly match the
    # fields of TestDashboardStatsResponse (it is splatted directly below).
    stats = test_runner_service.get_dashboard_stats(db)
    return TestDashboardStatsResponse(**stats)
|
||||
|
||||
|
||||
@router.post("/collect")
async def collect_tests(
    db: Session = Depends(get_db),
    current_user: User = Depends(get_current_admin_api),
):
    """
    Collect test information without running tests.

    Refreshes the test collection cache and returns the new counts.
    """
    snapshot = test_runner_service.collect_tests(db)
    db.commit()

    summary = {
        "total_tests": snapshot.total_tests,
        "total_files": snapshot.total_files,
        "unit_tests": snapshot.unit_tests,
        "integration_tests": snapshot.integration_tests,
        "performance_tests": snapshot.performance_tests,
        "system_tests": snapshot.system_tests,
        "collected_at": snapshot.collected_at.isoformat(),
    }
    return summary
|
||||
@@ -789,14 +789,33 @@ async def admin_icons_page(
|
||||
|
||||
|
||||
@router.get("/testing", response_class=HTMLResponse, include_in_schema=False)
async def admin_testing_dashboard(
    request: Request,
    current_user: User = Depends(get_current_admin_from_cookie_or_header),
    db: Session = Depends(get_db),  # NOTE(review): unused here — kept, presumably for auth-dependency parity; confirm before removing
):
    """
    Render testing dashboard page.
    pytest results and test coverage overview.

    Data is loaded client-side (the template's JS hits the /admin/tests API),
    so this handler only passes the request and the authenticated user.
    """
    return templates.TemplateResponse(
        "admin/testing-dashboard.html",
        {
            "request": request,
            "user": current_user,
        },
    )
|
||||
|
||||
|
||||
@router.get("/testing-hub", response_class=HTMLResponse, include_in_schema=False)
|
||||
async def admin_testing_hub(
|
||||
request: Request,
|
||||
current_user: User = Depends(get_current_admin_from_cookie_or_header),
|
||||
db: Session = Depends(get_db),
|
||||
):
|
||||
"""
|
||||
Render testing hub page.
|
||||
Central hub for all test suites and QA tools.
|
||||
Render manual testing hub page.
|
||||
Central hub for all manual test suites and QA tools.
|
||||
"""
|
||||
return templates.TemplateResponse(
|
||||
"admin/testing-hub.html",
|
||||
|
||||
444
app/services/test_runner_service.py
Normal file
444
app/services/test_runner_service.py
Normal file
@@ -0,0 +1,444 @@
|
||||
"""
|
||||
Test Runner Service
|
||||
Service for running pytest and storing results
|
||||
"""
|
||||
|
||||
import json
import logging
import re
import subprocess
import sys
import tempfile
from datetime import UTC, datetime
from pathlib import Path

from sqlalchemy import desc, func
from sqlalchemy.orm import Session

from models.database.test_run import TestCollection, TestResult, TestRun
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TestRunnerService:
    """Service for running pytest, parsing its output and persisting results.

    All methods take an explicit SQLAlchemy session; this service only adds
    and flushes objects — committing is the caller's responsibility.
    """

    def __init__(self):
        # app/services/test_runner_service.py -> three parents up = repo root.
        self.project_root = Path(__file__).parent.parent.parent

    def run_tests(
        self,
        db: Session,
        test_path: str = "tests",
        triggered_by: str = "manual",
        extra_args: list[str] | None = None,
    ) -> TestRun:
        """
        Run pytest and store results in the database.

        Args:
            db: Database session (caller commits).
            test_path: Path to tests, relative to the project root.
            triggered_by: Who triggered the run (stored verbatim).
            extra_args: Additional pytest arguments.

        Returns:
            TestRun with counts filled in and status set to "passed",
            "failed" or "error".
        """
        # Create the run record up front so even a crash leaves a row.
        test_run = TestRun(
            timestamp=datetime.now(UTC),
            triggered_by=triggered_by,
            test_path=test_path,
            status="running",
        )
        db.add(test_run)
        db.flush()  # assigns test_run.id, needed for TestResult foreign keys

        try:
            # Git context is best-effort; both helpers return None on failure.
            test_run.git_commit_hash = self._get_git_commit()
            test_run.git_branch = self._get_git_branch()

            # pytest-json-report will write its machine-readable report here.
            with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
                json_report_path = f.name

            pytest_args = [
                # sys.executable guarantees the same interpreter/venv as the
                # app, unlike a bare "python" resolved from PATH.
                sys.executable, "-m", "pytest",
                test_path,
                "--json-report",
                f"--json-report-file={json_report_path}",
                "-v",
                "--tb=short",
            ]

            if extra_args:
                pytest_args.extend(extra_args)

            test_run.pytest_args = " ".join(pytest_args)

            # Run pytest
            start_time = datetime.now(UTC)
            result = subprocess.run(
                pytest_args,
                cwd=str(self.project_root),
                capture_output=True,
                text=True,
                timeout=600,  # 10 minute timeout
            )
            end_time = datetime.now(UTC)

            test_run.duration_seconds = (end_time - start_time).total_seconds()

            # Prefer the JSON report; fall back to scraping stdout when the
            # plugin did not produce a file (e.g. pytest-json-report missing).
            try:
                with open(json_report_path, 'r') as f:
                    report = json.load(f)
                self._process_json_report(db, test_run, report)
            except FileNotFoundError:
                self._parse_pytest_output(test_run, result.stdout, result.stderr)
            finally:
                # Clean up the temp report file; narrow except so we never
                # swallow KeyboardInterrupt/SystemExit.
                try:
                    Path(json_report_path).unlink()
                except OSError:
                    pass

            # Counts can still be None if both parsers found nothing, so
            # coalesce before comparing (None > 0 would raise TypeError).
            if (test_run.failed or 0) > 0 or (test_run.errors or 0) > 0:
                test_run.status = "failed"
            else:
                test_run.status = "passed"

        except subprocess.TimeoutExpired:
            test_run.status = "error"
            logger.error("Pytest run timed out")
        except Exception:
            test_run.status = "error"
            logger.exception("Error running tests")

        return test_run

    def _process_json_report(self, db: Session, test_run: TestRun, report: dict):
        """Copy summary counts and per-test rows out of a pytest-json-report dict."""
        summary = report.get("summary", {})

        test_run.total_tests = summary.get("total", 0)
        test_run.passed = summary.get("passed", 0)
        test_run.failed = summary.get("failed", 0)
        test_run.errors = summary.get("error", 0)
        test_run.skipped = summary.get("skipped", 0)
        test_run.xfailed = summary.get("xfailed", 0)
        test_run.xpassed = summary.get("xpassed", 0)

        # Process individual test results
        for test in report.get("tests", []):
            node_id = test.get("nodeid", "")
            outcome = test.get("outcome", "unknown")

            # Parse node_id to get file, class, function
            test_file, test_class, test_name = self._parse_node_id(node_id)

            # Failure details: longrepr of the "call" phase; the last line of
            # the traceback doubles as a short error message.
            error_message = None
            traceback = None
            if outcome in ("failed", "error"):
                call_info = test.get("call", {})
                if "longrepr" in call_info:
                    traceback = call_info["longrepr"]
                    if isinstance(traceback, str):
                        lines = traceback.strip().split('\n')
                        if lines:
                            error_message = lines[-1][:500]  # last line, limited length

            db.add(TestResult(
                run_id=test_run.id,
                node_id=node_id,
                test_name=test_name,
                test_file=test_file,
                test_class=test_class,
                outcome=outcome,
                duration_seconds=test.get("duration", 0.0),
                error_message=error_message,
                traceback=traceback,
                markers=test.get("keywords", []),
            ))

    def _parse_node_id(self, node_id: str) -> tuple[str, str | None, str]:
        """Split a pytest node id into (file, class-or-None, bare test name).

        Formats: ``path/test_foo.py::TestClass::test_method`` or
        ``path/test_foo.py::test_function``. A parametrization suffix
        (``[...]``) is stripped from the test name.
        """
        parts = node_id.split("::")

        test_file = parts[0] if parts else ""
        test_class = None
        test_name = parts[-1] if parts else ""

        if len(parts) == 3:
            test_class = parts[1]
        elif len(parts) == 2 and parts[1].startswith("Test"):
            # Heuristic: treat a Test*-named second segment as a class name.
            # A module-level function literally named "Test..." would be
            # misclassified, but pytest's default naming makes that rare.
            test_class = parts[1]

        # Handle parametrized tests
        if "[" in test_name:
            test_name = test_name.split("[")[0]

        return test_file, test_class, test_name

    def _parse_pytest_output(self, test_run: TestRun, stdout: str, stderr: str):
        """Fallback parser for pytest's text summary (e.g. "10 passed, 2 failed")."""
        # Initialise every counter so totals are well-defined even when a
        # status never appears in the output (previously they stayed None
        # and the sum below raised TypeError).
        test_run.passed = test_run.failed = test_run.errors = 0
        test_run.skipped = test_run.xfailed = test_run.xpassed = 0

        summary_pattern = r"(\d+)\s+(passed|failed|error|skipped|xfailed|xpassed)"

        for match in re.finditer(summary_pattern, stdout):
            count = int(match.group(1))
            status = match.group(2)

            if status == "passed":
                test_run.passed = count
            elif status == "failed":
                test_run.failed = count
            elif status == "error":
                test_run.errors = count
            elif status == "skipped":
                test_run.skipped = count
            elif status == "xfailed":
                test_run.xfailed = count
            elif status == "xpassed":
                test_run.xpassed = count

        test_run.total_tests = (
            test_run.passed + test_run.failed + test_run.errors +
            test_run.skipped + test_run.xfailed + test_run.xpassed
        )

    def _get_git_commit(self) -> str | None:
        """Return the current HEAD commit hash, or None if git is unavailable."""
        try:
            result = subprocess.run(
                ["git", "rev-parse", "HEAD"],
                cwd=str(self.project_root),
                capture_output=True,
                text=True,
                timeout=5,
            )
        except (subprocess.SubprocessError, OSError):
            # Narrowed from a bare except: git missing, timeout, bad cwd.
            return None
        return result.stdout.strip()[:40] if result.returncode == 0 else None

    def _get_git_branch(self) -> str | None:
        """Return the current git branch name, or None if git is unavailable."""
        try:
            result = subprocess.run(
                ["git", "rev-parse", "--abbrev-ref", "HEAD"],
                cwd=str(self.project_root),
                capture_output=True,
                text=True,
                timeout=5,
            )
        except (subprocess.SubprocessError, OSError):
            return None
        return result.stdout.strip() if result.returncode == 0 else None

    def get_run_history(self, db: Session, limit: int = 20) -> list[TestRun]:
        """Return the most recent test runs, newest first."""
        return (
            db.query(TestRun)
            .order_by(desc(TestRun.timestamp))
            .limit(limit)
            .all()
        )

    def get_run_by_id(self, db: Session, run_id: int) -> TestRun | None:
        """Return a specific test run, or None when it does not exist."""
        return db.query(TestRun).filter(TestRun.id == run_id).first()

    def get_failed_tests(self, db: Session, run_id: int) -> list[TestResult]:
        """Return the failed/error results of a run."""
        return (
            db.query(TestResult)
            .filter(
                TestResult.run_id == run_id,
                TestResult.outcome.in_(["failed", "error"])
            )
            .all()
        )

    def get_run_results(
        self, db: Session, run_id: int, outcome: str | None = None
    ) -> list[TestResult]:
        """Return results for a run, optionally filtered by outcome."""
        query = db.query(TestResult).filter(TestResult.run_id == run_id)

        if outcome:
            query = query.filter(TestResult.outcome == outcome)

        return query.all()

    def get_dashboard_stats(self, db: Session) -> dict:
        """Build the stats dict for the dashboard.

        Keys must match TestDashboardStatsResponse's fields exactly — the
        endpoint splats this dict straight into the model.
        """
        # Latest completed run (exclude any still marked "running").
        latest_run = (
            db.query(TestRun)
            .filter(TestRun.status != "running")
            .order_by(desc(TestRun.timestamp))
            .first()
        )

        # Most recent collection snapshot, if one exists.
        collection = db.query(TestCollection).order_by(desc(TestCollection.collected_at)).first()

        # Trend data: last 10 completed runs (reversed below to oldest-first).
        trend_runs = (
            db.query(TestRun)
            .filter(TestRun.status != "running")
            .order_by(desc(TestRun.timestamp))
            .limit(10)
            .all()
        )

        # Per-category breakdown of the latest run, keyed off the test file
        # path (substring match, so "unit" anywhere in the path counts).
        by_category = {}
        if latest_run:
            results = db.query(TestResult).filter(TestResult.run_id == latest_run.id).all()
            for result in results:
                if "unit" in result.test_file:
                    category = "Unit Tests"
                elif "integration" in result.test_file:
                    category = "Integration Tests"
                elif "performance" in result.test_file:
                    category = "Performance Tests"
                elif "system" in result.test_file:
                    category = "System Tests"
                else:
                    category = "Other"

                if category not in by_category:
                    by_category[category] = {"total": 0, "passed": 0, "failed": 0}
                by_category[category]["total"] += 1
                if result.outcome == "passed":
                    by_category[category]["passed"] += 1
                elif result.outcome in ("failed", "error"):
                    by_category[category]["failed"] += 1

        # Top failing tests across ALL stored runs (no time window).
        top_failing = (
            db.query(
                TestResult.test_name,
                TestResult.test_file,
                func.count(TestResult.id).label("failure_count")
            )
            .filter(TestResult.outcome.in_(["failed", "error"]))
            .group_by(TestResult.test_name, TestResult.test_file)
            .order_by(desc("failure_count"))
            .limit(10)
            .all()
        )

        return {
            # Current run stats
            "total_tests": latest_run.total_tests if latest_run else 0,
            "passed": latest_run.passed if latest_run else 0,
            "failed": latest_run.failed if latest_run else 0,
            "errors": latest_run.errors if latest_run else 0,
            "skipped": latest_run.skipped if latest_run else 0,
            # pass_rate/duration are read off the ORM object — presumably
            # computed there; TODO confirm against the TestRun model.
            "pass_rate": round(latest_run.pass_rate, 1) if latest_run else 0,
            "duration_seconds": round(latest_run.duration_seconds, 2) if latest_run else 0,
            "coverage_percent": latest_run.coverage_percent if latest_run else None,
            "last_run": latest_run.timestamp.isoformat() if latest_run else None,
            "last_run_status": latest_run.status if latest_run else None,

            # Collection stats
            "total_test_files": collection.total_files if collection else 0,

            # Trend data (oldest first for charting)
            "trend": [
                {
                    "timestamp": run.timestamp.isoformat(),
                    "total": run.total_tests,
                    "passed": run.passed,
                    "failed": run.failed,
                    "pass_rate": round(run.pass_rate, 1),
                    "duration": round(run.duration_seconds, 1),
                }
                for run in reversed(trend_runs)
            ],

            # By category
            "by_category": by_category,

            # Top failing tests
            "top_failing": [
                {
                    "test_name": t.test_name,
                    "test_file": t.test_file,
                    "failure_count": t.failure_count,
                }
                for t in top_failing
            ],
        }

    def collect_tests(self, db: Session) -> TestCollection:
        """Count tests via ``pytest --collect-only`` without running them."""
        collection = TestCollection(
            collected_at=datetime.now(UTC),
        )
        # Initialise counters explicitly so the "+=" accumulation below never
        # hits a None column default.
        collection.total_tests = 0
        collection.total_files = 0
        collection.unit_tests = 0
        collection.integration_tests = 0
        collection.performance_tests = 0
        collection.system_tests = 0

        try:
            # -q prints one "file::...::name" line per collected test.
            result = subprocess.run(
                [sys.executable, "-m", "pytest", "--collect-only", "-q", "tests"],
                cwd=str(self.project_root),
                capture_output=True,
                text=True,
                timeout=60,
            )

            # Count collected tests per file.
            test_files: dict[str, int] = {}
            for line in result.stdout.strip().split('\n'):
                if "::" in line:
                    file_path = line.split("::")[0]
                    test_files[file_path] = test_files.get(file_path, 0) + 1

            # Aggregate totals and per-category counts (substring match on path).
            for file_path, count in test_files.items():
                collection.total_tests += count
                collection.total_files += 1

                if "unit" in file_path:
                    collection.unit_tests += count
                elif "integration" in file_path:
                    collection.integration_tests += count
                elif "performance" in file_path:
                    collection.performance_tests += count
                elif "system" in file_path:
                    collection.system_tests += count

            # Per-file counts, largest first, stored as JSON for the UI.
            collection.test_files = [
                {"file": f, "count": c}
                for f, c in sorted(test_files.items(), key=lambda x: -x[1])
            ]

        except Exception:
            logger.exception("Error collecting tests")

        db.add(collection)
        return collection
|
||||
|
||||
|
||||
# Module-level singleton shared by the API endpoints. The service holds no
# mutable state beyond project_root, so sharing one instance is safe.
test_runner_service = TestRunnerService()
|
||||
348
app/templates/admin/testing-dashboard.html
Normal file
348
app/templates/admin/testing-dashboard.html
Normal file
@@ -0,0 +1,348 @@
|
||||
{# app/templates/admin/testing-dashboard.html #}
{# Admin pytest dashboard. All dynamic data/actions come from the Alpine
   component `testingDashboard()` in /static/admin/js/testing-dashboard.js. #}
{% extends "admin/base.html" %}
{% from 'shared/macros/alerts.html' import loading_state, error_state, alert_dynamic %}
{% from 'shared/macros/headers.html' import page_header_flex, refresh_button, action_button %}

{% block title %}Testing Dashboard{% endblock %}

{# Alpine.js root data component for this page #}
{% block alpine_data %}testingDashboard(){% endblock %}

{% block extra_scripts %}
<script src="/static/admin/js/testing-dashboard.js"></script>
{% endblock %}

{% block content %}
{% call page_header_flex(title='Testing Dashboard', subtitle='pytest results and test coverage') %}
    {{ refresh_button(variant='secondary') }}
    {{ action_button('Run Tests', 'Running...', 'running', 'runTests()', icon='play') }}
{% endcall %}

{# Page-level loading/error/success states driven by the Alpine component #}
{{ loading_state('Loading test results...') }}

{{ error_state('Error loading test results') }}

{{ alert_dynamic(type='success', message_var='successMessage', show_condition='successMessage') }}
|
||||
|
||||
<!-- Dashboard Content -->
<div x-show="!loading && !error">
    <!-- Stats Cards Row 1 - Main Metrics -->
    <div class="grid gap-6 mb-8 md:grid-cols-2 xl:grid-cols-4">
        <!-- Card: Total Tests -->
        <div class="flex items-center p-4 bg-white rounded-lg shadow-xs dark:bg-gray-800">
            <div class="p-3 mr-4 text-blue-500 bg-blue-100 rounded-full dark:text-blue-100 dark:bg-blue-500">
                <span x-html="$icon('beaker', 'w-5 h-5')"></span>
            </div>
            <div>
                <p class="mb-2 text-sm font-medium text-gray-600 dark:text-gray-400">
                    Total Tests
                </p>
                <p class="text-lg font-semibold text-gray-700 dark:text-gray-200" x-text="stats.total_tests">
                    0
                </p>
            </div>
        </div>

        <!-- Card: Passed -->
        <div class="flex items-center p-4 bg-white rounded-lg shadow-xs dark:bg-gray-800">
            <div class="p-3 mr-4 text-green-500 bg-green-100 rounded-full dark:text-green-100 dark:bg-green-500">
                <span x-html="$icon('check-circle', 'w-5 h-5')"></span>
            </div>
            <div>
                <p class="mb-2 text-sm font-medium text-gray-600 dark:text-gray-400">
                    Passed
                </p>
                <p class="text-lg font-semibold text-gray-700 dark:text-gray-200" x-text="stats.passed">
                    0
                </p>
            </div>
        </div>

        <!-- Card: Failed (failures + errors combined) -->
        <div class="flex items-center p-4 bg-white rounded-lg shadow-xs dark:bg-gray-800">
            <div class="p-3 mr-4 text-red-500 bg-red-100 rounded-full dark:text-red-100 dark:bg-red-500">
                <span x-html="$icon('x-circle', 'w-5 h-5')"></span>
            </div>
            <div>
                <p class="mb-2 text-sm font-medium text-gray-600 dark:text-gray-400">
                    Failed
                </p>
                <p class="text-lg font-semibold text-gray-700 dark:text-gray-200" x-text="stats.failed + stats.errors">
                    0
                </p>
            </div>
        </div>

        <!-- Card: Pass Rate (icon color keyed to 90%/70% thresholds) -->
        <div class="flex items-center p-4 bg-white rounded-lg shadow-xs dark:bg-gray-800">
            <div class="p-3 mr-4 rounded-full"
                 :class="{
                     'text-green-500 bg-green-100 dark:text-green-100 dark:bg-green-500': stats.pass_rate >= 90,
                     'text-yellow-500 bg-yellow-100 dark:text-yellow-100 dark:bg-yellow-500': stats.pass_rate >= 70 && stats.pass_rate < 90,
                     'text-red-500 bg-red-100 dark:text-red-100 dark:bg-red-500': stats.pass_rate < 70
                 }">
                <span x-html="$icon('chart-bar', 'w-5 h-5')"></span>
            </div>
            <div>
                <p class="mb-2 text-sm font-medium text-gray-600 dark:text-gray-400">
                    Pass Rate
                </p>
                <p class="text-lg font-semibold text-gray-700 dark:text-gray-200" x-text="stats.pass_rate + '%'">
                    0%
                </p>
            </div>
        </div>
    </div>

    <!-- Stats Cards Row 2 - Secondary Metrics -->
    <div class="grid gap-6 mb-8 md:grid-cols-2 xl:grid-cols-4">
        <!-- Skipped -->
        <div class="p-4 bg-white rounded-lg shadow-xs dark:bg-gray-800">
            <p class="text-sm font-medium text-gray-600 dark:text-gray-400 mb-1">Skipped</p>
            <p class="text-2xl font-semibold text-yellow-600 dark:text-yellow-400" x-text="stats.skipped">0</p>
        </div>

        <!-- Duration -->
        <div class="p-4 bg-white rounded-lg shadow-xs dark:bg-gray-800">
            <p class="text-sm font-medium text-gray-600 dark:text-gray-400 mb-1">Duration</p>
            <p class="text-2xl font-semibold text-gray-700 dark:text-gray-200" x-text="formatDuration(stats.duration_seconds)">0s</p>
        </div>

        <!-- Coverage (optional; 'N/A' when the run had no coverage data) -->
        <div class="p-4 bg-white rounded-lg shadow-xs dark:bg-gray-800">
            <p class="text-sm font-medium text-gray-600 dark:text-gray-400 mb-1">Coverage</p>
            <p class="text-2xl font-semibold text-gray-700 dark:text-gray-200" x-text="stats.coverage_percent ? stats.coverage_percent + '%' : 'N/A'">N/A</p>
        </div>

        <!-- Last Run Status -->
        <div class="p-4 bg-white rounded-lg shadow-xs dark:bg-gray-800">
            <p class="text-sm font-medium text-gray-600 dark:text-gray-400 mb-1">Status</p>
            <p class="text-2xl font-semibold"
               :class="{
                   'text-green-600 dark:text-green-400': stats.last_run_status === 'passed',
                   'text-red-600 dark:text-red-400': stats.last_run_status === 'failed',
                   'text-yellow-600 dark:text-yellow-400': stats.last_run_status === 'running',
                   'text-gray-600 dark:text-gray-400': !stats.last_run_status
               }"
               x-text="stats.last_run_status ? stats.last_run_status.toUpperCase() : 'NO RUNS'">
                NO RUNS
            </p>
        </div>
    </div>
||||
|
||||
    <!-- Trend Chart and Tests by Category -->
    <div class="grid gap-6 mb-8 md:grid-cols-2">
        <!-- Trend Chart: horizontal bar per run, colored by pass-rate threshold -->
        <div class="p-6 bg-white rounded-lg shadow-xs dark:bg-gray-800">
            <h4 class="mb-4 text-lg font-semibold text-gray-700 dark:text-gray-200">
                Pass Rate Trend (Last 10 Runs)
            </h4>
            <div class="h-64 flex items-center justify-center text-gray-500 dark:text-gray-400">
                <template x-if="stats.trend && stats.trend.length > 0">
                    <div class="w-full">
                        <template x-for="(run, idx) in stats.trend" :key="idx">
                            <div class="mb-2">
                                <div class="flex justify-between text-sm mb-1">
                                    <span x-text="new Date(run.timestamp).toLocaleDateString()"></span>
                                    <span>
                                        <span x-text="run.passed"></span>/<span x-text="run.total"></span>
                                        (<span x-text="run.pass_rate"></span>%)
                                    </span>
                                </div>
                                <div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2">
                                    <div class="h-2 rounded-full transition-all duration-300"
                                         :class="{
                                             'bg-green-500': run.pass_rate >= 90,
                                             'bg-yellow-500': run.pass_rate >= 70 && run.pass_rate < 90,
                                             'bg-red-500': run.pass_rate < 70
                                         }"
                                         :style="'width: ' + run.pass_rate + '%'">
                                    </div>
                                </div>
                            </div>
                        </template>
                    </div>
                </template>
                <!-- Empty state when there is no run history yet -->
                <template x-if="!stats.trend || stats.trend.length === 0">
                    <div class="text-center">
                        <span x-html="$icon('beaker', 'w-12 h-12 mx-auto mb-2 text-gray-400')"></span>
                        <p>No test runs yet</p>
                        <p class="text-sm">Run tests to see trend data</p>
                    </div>
                </template>
            </div>
        </div>

        <!-- Tests by Category: passed/total bar per category -->
        <div class="p-6 bg-white rounded-lg shadow-xs dark:bg-gray-800">
            <h4 class="mb-4 text-lg font-semibold text-gray-700 dark:text-gray-200">
                Tests by Category
            </h4>
            <div class="space-y-3">
                <template x-if="stats.by_category && Object.keys(stats.by_category).length > 0">
                    <template x-for="[category, data] in Object.entries(stats.by_category)" :key="category">
                        <div>
                            <div class="flex justify-between text-sm mb-1">
                                <span class="text-gray-700 dark:text-gray-300" x-text="category"></span>
                                <span class="font-semibold">
                                    <span class="text-green-600" x-text="data.passed"></span>
                                    /
                                    <span x-text="data.total"></span>
                                </span>
                            </div>
                            <div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2">
                                <div class="bg-green-500 h-2 rounded-full"
                                     :style="'width: ' + (data.total > 0 ? (data.passed / data.total * 100) : 0) + '%'">
                                </div>
                            </div>
                        </div>
                    </template>
                </template>
                <template x-if="!stats.by_category || Object.keys(stats.by_category).length === 0">
                    <p class="text-sm text-gray-500 dark:text-gray-400">No category data available</p>
                </template>
            </div>
        </div>
    </div>
|
||||
|
||||
    <!-- Top Failing Tests: table of tests with the highest failure counts -->
    <div class="mb-8">
        <div class="p-6 bg-white rounded-lg shadow-xs dark:bg-gray-800">
            <h4 class="mb-4 text-lg font-semibold text-gray-700 dark:text-gray-200">
                Top Failing Tests
            </h4>
            <template x-if="stats.top_failing && stats.top_failing.length > 0">
                <div class="overflow-x-auto">
                    <table class="w-full whitespace-nowrap">
                        <thead>
                            <tr class="text-xs font-semibold tracking-wide text-left text-gray-500 uppercase border-b dark:border-gray-700 bg-gray-50 dark:text-gray-400 dark:bg-gray-800">
                                <th class="px-4 py-3">Test Name</th>
                                <th class="px-4 py-3">File</th>
                                <th class="px-4 py-3 text-right">Failures</th>
                            </tr>
                        </thead>
                        <tbody class="bg-white divide-y dark:divide-gray-700 dark:bg-gray-800">
                            <!-- Key combines name+file since test names repeat across files -->
                            <template x-for="test in stats.top_failing" :key="test.test_name + test.test_file">
                                <tr class="text-gray-700 dark:text-gray-400">
                                    <td class="px-4 py-3">
                                        <span class="font-semibold" x-text="test.test_name"></span>
                                    </td>
                                    <td class="px-4 py-3 text-sm text-gray-500 dark:text-gray-400 truncate max-w-xs" x-text="test.test_file"></td>
                                    <td class="px-4 py-3 text-right">
                                        <span class="px-2 py-1 text-xs font-semibold text-red-700 bg-red-100 rounded-full dark:bg-red-700 dark:text-red-100"
                                              x-text="test.failure_count">
                                        </span>
                                    </td>
                                </tr>
                            </template>
                        </tbody>
                    </table>
                </div>
            </template>
            <!-- Celebratory empty state when nothing is failing -->
            <template x-if="!stats.top_failing || stats.top_failing.length === 0">
                <div class="text-center py-8 text-gray-500 dark:text-gray-400">
                    <span x-html="$icon('check-circle', 'w-12 h-12 mx-auto mb-2 text-green-500')"></span>
                    <p class="font-medium">No failing tests!</p>
                    <p class="text-sm">All tests are passing</p>
                </div>
            </template>
        </div>
    </div>
|
||||
|
||||
    <!-- Quick Actions: scoped test runs, collection, and link to manual testing -->
    <div class="mb-8">
        <div class="p-6 bg-white rounded-lg shadow-xs dark:bg-gray-800">
            <h4 class="mb-4 text-lg font-semibold text-gray-700 dark:text-gray-200">
                Quick Actions
            </h4>
            <div class="flex flex-wrap gap-3">
                <button @click="runTests('tests/unit')"
                        :disabled="running"
                        class="flex items-center px-4 py-2 text-sm font-medium leading-5 text-white transition-colors duration-150 bg-purple-600 border border-transparent rounded-lg hover:bg-purple-700 focus:outline-none focus:shadow-outline-purple disabled:opacity-50 disabled:cursor-not-allowed">
                    <span x-html="$icon('beaker', 'w-4 h-4 mr-2')"></span>
                    Run Unit Tests
                </button>
                <button @click="runTests('tests/integration')"
                        :disabled="running"
                        class="flex items-center px-4 py-2 text-sm font-medium leading-5 text-white transition-colors duration-150 bg-blue-600 border border-transparent rounded-lg hover:bg-blue-700 focus:outline-none disabled:opacity-50 disabled:cursor-not-allowed">
                    <span x-html="$icon('server', 'w-4 h-4 mr-2')"></span>
                    Run Integration Tests
                </button>
                <!-- Collect only: counts tests via --collect-only without executing them -->
                <button @click="collectTests()"
                        :disabled="collecting"
                        class="flex items-center px-4 py-2 text-sm font-medium leading-5 text-gray-700 dark:text-gray-300 transition-colors duration-150 bg-white dark:bg-gray-700 border border-gray-300 dark:border-gray-600 rounded-lg hover:bg-gray-50 dark:hover:bg-gray-600 focus:outline-none disabled:opacity-50 disabled:cursor-not-allowed">
                    <span x-html="$icon('collection', 'w-4 h-4 mr-2')"></span>
                    <span x-text="collecting ? 'Collecting...' : 'Collect Tests'"></span>
                </button>
                <a href="/admin/testing-hub"
                   class="flex items-center px-4 py-2 text-sm font-medium leading-5 text-gray-700 dark:text-gray-300 transition-colors duration-150 bg-white dark:bg-gray-700 border border-gray-300 dark:border-gray-600 rounded-lg hover:bg-gray-50 dark:hover:bg-gray-600 focus:outline-none">
                    <span x-html="$icon('clipboard-list', 'w-4 h-4 mr-2')"></span>
                    Manual Testing
                </a>
            </div>
        </div>
    </div>
|
||||
|
||||
    <!-- Recent Runs: history table, hidden until at least one run exists -->
    <div class="mb-8" x-show="runs.length > 0">
        <div class="p-6 bg-white rounded-lg shadow-xs dark:bg-gray-800">
            <h4 class="mb-4 text-lg font-semibold text-gray-700 dark:text-gray-200">
                Recent Test Runs
            </h4>
            <div class="overflow-x-auto">
                <table class="w-full whitespace-nowrap">
                    <thead>
                        <tr class="text-xs font-semibold tracking-wide text-left text-gray-500 uppercase border-b dark:border-gray-700 bg-gray-50 dark:text-gray-400 dark:bg-gray-800">
                            <th class="px-4 py-3">Time</th>
                            <th class="px-4 py-3">Path</th>
                            <th class="px-4 py-3 text-center">Total</th>
                            <th class="px-4 py-3 text-center">Passed</th>
                            <th class="px-4 py-3 text-center">Failed</th>
                            <th class="px-4 py-3 text-center">Pass Rate</th>
                            <th class="px-4 py-3">Duration</th>
                            <th class="px-4 py-3">Status</th>
                        </tr>
                    </thead>
                    <tbody class="bg-white divide-y dark:divide-gray-700 dark:bg-gray-800">
                        <template x-for="run in runs" :key="run.id">
                            <tr class="text-gray-700 dark:text-gray-400 hover:bg-gray-50 dark:hover:bg-gray-700">
                                <td class="px-4 py-3 text-sm" x-text="new Date(run.timestamp).toLocaleString()"></td>
                                <td class="px-4 py-3 text-sm" x-text="run.test_path || 'tests'"></td>
                                <td class="px-4 py-3 text-center" x-text="run.total_tests"></td>
                                <td class="px-4 py-3 text-center text-green-600" x-text="run.passed"></td>
                                <td class="px-4 py-3 text-center text-red-600" x-text="run.failed + run.errors"></td>
                                <td class="px-4 py-3 text-center">
                                    <!-- Pass-rate badge uses the same 90%/70% thresholds as the cards -->
                                    <span class="px-2 py-1 text-xs font-semibold rounded-full"
                                          :class="{
                                              'text-green-700 bg-green-100 dark:bg-green-700 dark:text-green-100': run.pass_rate >= 90,
                                              'text-yellow-700 bg-yellow-100 dark:bg-yellow-700 dark:text-yellow-100': run.pass_rate >= 70 && run.pass_rate < 90,
                                              'text-red-700 bg-red-100 dark:bg-red-700 dark:text-red-100': run.pass_rate < 70
                                          }"
                                          x-text="run.pass_rate.toFixed(1) + '%'">
                                    </span>
                                </td>
                                <td class="px-4 py-3 text-sm" x-text="formatDuration(run.duration_seconds)"></td>
                                <td class="px-4 py-3">
                                    <span class="px-2 py-1 text-xs font-semibold rounded-full"
                                          :class="{
                                              'text-green-700 bg-green-100 dark:bg-green-700 dark:text-green-100': run.status === 'passed',
                                              'text-red-700 bg-red-100 dark:bg-red-700 dark:text-red-100': run.status === 'failed',
                                              'text-yellow-700 bg-yellow-100 dark:bg-yellow-700 dark:text-yellow-100': run.status === 'running',
                                              'text-gray-700 bg-gray-100 dark:bg-gray-700 dark:text-gray-100': run.status === 'error'
                                          }"
                                          x-text="run.status">
                                    </span>
                                </td>
                            </tr>
                        </template>
                    </tbody>
                </table>
            </div>
        </div>
    </div>

    <!-- Last Run Info -->
    <div x-show="stats.last_run" class="text-sm text-gray-600 dark:text-gray-400 text-center">
        Last run: <span x-text="stats.last_run ? new Date(stats.last_run).toLocaleString() : 'Never'"></span>
    </div>
</div>
{% endblock %}
|
||||
@@ -14,6 +14,7 @@ from .architecture_scan import (
|
||||
ViolationAssignment,
|
||||
ViolationComment,
|
||||
)
|
||||
from .test_run import TestCollection, TestResult, TestRun
|
||||
from .base import Base
|
||||
from .company import Company
|
||||
from .content_page import ContentPage
|
||||
@@ -46,6 +47,10 @@ __all__ = [
|
||||
"ArchitectureViolation",
|
||||
"ViolationAssignment",
|
||||
"ViolationComment",
|
||||
# Test Runs
|
||||
"TestRun",
|
||||
"TestResult",
|
||||
"TestCollection",
|
||||
# Base
|
||||
"Base",
|
||||
# User & Auth
|
||||
|
||||
141
models/database/test_run.py
Normal file
141
models/database/test_run.py
Normal file
@@ -0,0 +1,141 @@
|
||||
"""
|
||||
Test Run Models
|
||||
Database models for tracking pytest test runs and results
|
||||
"""
|
||||
|
||||
from sqlalchemy import (
|
||||
JSON,
|
||||
Boolean,
|
||||
Column,
|
||||
DateTime,
|
||||
Float,
|
||||
ForeignKey,
|
||||
Integer,
|
||||
String,
|
||||
Text,
|
||||
)
|
||||
from sqlalchemy.orm import relationship
|
||||
from sqlalchemy.sql import func
|
||||
|
||||
from app.core.database import Base
|
||||
|
||||
|
||||
class TestRun(Base):
    """A single pytest invocation with aggregate counts and run metadata.

    One row per run; per-test outcomes live in the related TestResult rows
    (deleted with the run via the delete-orphan cascade).
    """

    __tablename__ = "test_runs"

    id = Column(Integer, primary_key=True, index=True)
    # When the run was recorded (DB server clock, timezone-aware).
    timestamp = Column(
        DateTime(timezone=True), server_default=func.now(), nullable=False, index=True
    )

    # Test counts
    total_tests = Column(Integer, default=0)
    passed = Column(Integer, default=0)
    failed = Column(Integer, default=0)
    errors = Column(Integer, default=0)
    skipped = Column(Integer, default=0)
    xfailed = Column(Integer, default=0)  # Expected failures
    xpassed = Column(Integer, default=0)  # Unexpected passes

    # Coverage info (optional; None when the run collected no coverage)
    coverage_percent = Column(Float, nullable=True)

    # Timing
    duration_seconds = Column(Float, default=0.0)

    # Run metadata
    triggered_by = Column(String(100))  # 'manual', 'scheduled', 'ci/cd'
    git_commit_hash = Column(String(40))
    git_branch = Column(String(100))
    test_path = Column(String(500))  # Which tests were run (e.g., 'tests/unit')
    pytest_args = Column(String(500))  # Command line arguments used

    # Status
    status = Column(String(20), default="running", index=True)  # 'running', 'passed', 'failed', 'error'

    # Relationship to test results
    results = relationship(
        "TestResult", back_populates="run", cascade="all, delete-orphan"
    )

    def __repr__(self):
        return f"<TestRun(id={self.id}, total={self.total_tests}, passed={self.passed}, failed={self.failed})>"

    @property
    def pass_rate(self) -> float:
        """Return the pass rate as a percentage (0.0-100.0).

        Returns 0.0 when there are no tests.  Guards against None: on a
        freshly constructed, unflushed instance `total_tests`/`passed` are
        None because Column(default=0) is only applied at INSERT time, and
        `None == 0` is False so the original check let the division raise
        TypeError.
        """
        if not self.total_tests:
            return 0.0
        return ((self.passed or 0) / self.total_tests) * 100
|
||||
|
||||
|
||||
class TestResult(Base):
    """A single test outcome belonging to one pytest run.

    One row per executed test node.  Failure details (error_message,
    traceback) are only meaningful for failed/errored outcomes.
    """

    __tablename__ = "test_results"

    id = Column(Integer, primary_key=True, index=True)
    # Owning run; rows are removed with the run via the delete-orphan
    # cascade declared on TestRun.results.
    run_id = Column(
        Integer, ForeignKey("test_runs.id"), nullable=False, index=True
    )

    # Test identification
    node_id = Column(String(500), nullable=False, index=True)  # e.g., 'tests/unit/test_foo.py::test_bar'
    test_name = Column(String(200), nullable=False)  # e.g., 'test_bar'
    test_file = Column(String(300), nullable=False)  # e.g., 'tests/unit/test_foo.py'
    test_class = Column(String(200))  # e.g., 'TestFooClass' (optional)

    # Result
    outcome = Column(String(20), nullable=False, index=True)  # 'passed', 'failed', 'error', 'skipped', 'xfailed', 'xpassed'
    duration_seconds = Column(Float, default=0.0)

    # Failure details (if applicable)
    error_message = Column(Text)
    traceback = Column(Text)

    # Test metadata
    markers = Column(JSON)  # List of pytest markers
    parameters = Column(JSON)  # Parametrized test params

    # Timestamps (DB server clock, timezone-aware)
    created_at = Column(
        DateTime(timezone=True), server_default=func.now(), nullable=False
    )

    # Relationships
    run = relationship("TestRun", back_populates="results")

    def __repr__(self):
        return f"<TestResult(id={self.id}, node_id={self.node_id}, outcome={self.outcome})>"
|
||||
|
||||
|
||||
class TestCollection(Base):
    """Cached test collection info for quick stats.

    Snapshot of a `pytest --collect-only` pass so the dashboard can show
    test counts without executing the suite.

    NOTE(review): Column(default=0) is applied only at INSERT time — on a
    freshly constructed, unflushed instance these counter attributes are
    None, so in-Python arithmetic on them must not assume 0.
    """

    __tablename__ = "test_collections"

    id = Column(Integer, primary_key=True, index=True)

    # Collection stats
    total_tests = Column(Integer, default=0)
    total_files = Column(Integer, default=0)
    total_classes = Column(Integer, default=0)

    # By category — presumably bucketed by substring of the test file path
    # (e.g. 'tests/unit/...'); verify against the collector that fills these.
    unit_tests = Column(Integer, default=0)
    integration_tests = Column(Integer, default=0)
    performance_tests = Column(Integer, default=0)
    system_tests = Column(Integer, default=0)

    # Collection data
    test_files = Column(JSON)  # List of test files with counts

    # Timestamps (DB server clock, timezone-aware)
    collected_at = Column(
        DateTime(timezone=True), server_default=func.now(), nullable=False
    )

    def __repr__(self):
        return f"<TestCollection(id={self.id}, total={self.total_tests})>"
|
||||
162
static/admin/js/testing-dashboard.js
Normal file
162
static/admin/js/testing-dashboard.js
Normal file
@@ -0,0 +1,162 @@
|
||||
/**
|
||||
* Testing Dashboard Component
|
||||
* Manages the pytest testing dashboard page
|
||||
*/
|
||||
|
||||
// Use centralized logger
const testingDashboardLog = window.LogConfig.createLogger('TESTING-DASHBOARD');

/**
 * Alpine.js component backing /admin/testing-dashboard.
 *
 * Talks to the admin test API:
 *   GET  /admin/tests/stats   - aggregate stats for cards/trend/tables
 *   GET  /admin/tests/runs    - recent run history
 *   POST /admin/tests/run     - execute pytest (awaits completion)
 *   POST /admin/tests/collect - collect tests without running them
 *
 * Relies on globals provided elsewhere in the admin bundle: data() (base
 * component state), apiClient, Utils, and window.LogConfig.
 */
function testingDashboard() {
    return {
        // Extend base data
        ...data(),

        // Set current page for navigation
        currentPage: 'testing',

        // Dashboard-specific UI state flags (bound by the template)
        loading: false,       // true while stats are being fetched
        running: false,       // true while a pytest run is in flight
        collecting: false,    // true while --collect-only is in flight
        error: null,          // message shown by the error_state macro
        successMessage: null, // transient success banner text

        // Statistics — shape mirrors the GET /admin/tests/stats response
        stats: {
            total_tests: 0,
            passed: 0,
            failed: 0,
            errors: 0,
            skipped: 0,
            pass_rate: 0,
            duration_seconds: 0,
            coverage_percent: null,
            last_run: null,
            last_run_status: null,
            total_test_files: 0,
            trend: [],
            by_category: {},
            top_failing: []
        },

        // Recent runs (GET /admin/tests/runs)
        runs: [],

        // Entry point called by Alpine on component mount.
        async init() {
            testingDashboardLog.info('Initializing testing dashboard');
            await this.loadStats();
            await this.loadRuns();
        },

        // Fetch aggregate stats; toggles `loading` and surfaces errors.
        async loadStats() {
            this.loading = true;
            this.error = null;

            try {
                const stats = await apiClient.get('/admin/tests/stats');
                this.stats = stats;
                testingDashboardLog.info('Stats loaded:', stats);
            } catch (err) {
                testingDashboardLog.error('Failed to load stats:', err);
                this.error = err.message;

                // Redirect to login if unauthorized
                if (err.message.includes('Unauthorized')) {
                    window.location.href = '/admin/login';
                }
            } finally {
                this.loading = false;
            }
        },

        // Fetch the 10 most recent runs; failures are logged but non-fatal.
        async loadRuns() {
            try {
                const runs = await apiClient.get('/admin/tests/runs?limit=10');
                this.runs = runs;
                testingDashboardLog.info('Runs loaded:', runs.length);
            } catch (err) {
                testingDashboardLog.error('Failed to load runs:', err);
                // Don't set error - stats are more important
            }
        },

        /**
         * Run pytest for the given path (default: whole suite) and refresh
         * the dashboard.  The POST blocks until the run finishes server-side.
         */
        async runTests(testPath = 'tests') {
            this.running = true;
            this.error = null;
            this.successMessage = null;

            testingDashboardLog.info('Running tests:', testPath);

            try {
                const result = await apiClient.post('/admin/tests/run', {
                    test_path: testPath
                });

                testingDashboardLog.info('Test run completed:', result);

                // Format success message
                const status = result.status === 'passed' ? 'All tests passed!' : 'Tests completed with failures.';
                this.successMessage = `${status} ${result.passed}/${result.total_tests} passed (${result.pass_rate.toFixed(1)}%) in ${this.formatDuration(result.duration_seconds)}`;

                // Reload stats and runs
                await this.loadStats();
                await this.loadRuns();

                // Show toast notification
                Utils.showToast(this.successMessage, result.status === 'passed' ? 'success' : 'warning');

                // Clear success message after 10 seconds
                setTimeout(() => {
                    this.successMessage = null;
                }, 10000);
            } catch (err) {
                testingDashboardLog.error('Failed to run tests:', err);
                this.error = err.message;
                Utils.showToast('Failed to run tests: ' + err.message, 'error');

                // Redirect to login if unauthorized
                if (err.message.includes('Unauthorized')) {
                    window.location.href = '/admin/login';
                }
            } finally {
                this.running = false;
            }
        },

        // Trigger pytest --collect-only server-side and refresh stats.
        async collectTests() {
            this.collecting = true;
            this.error = null;

            testingDashboardLog.info('Collecting tests');

            try {
                const result = await apiClient.post('/admin/tests/collect');
                testingDashboardLog.info('Collection completed:', result);

                Utils.showToast(`Collected ${result.total_tests} tests from ${result.total_files} files`, 'success');

                // Reload stats
                await this.loadStats();
            } catch (err) {
                testingDashboardLog.error('Failed to collect tests:', err);
                Utils.showToast('Failed to collect tests: ' + err.message, 'error');
            } finally {
                this.collecting = false;
            }
        },

        // Refresh handler wired to the header refresh button.
        async refresh() {
            await this.loadStats();
            await this.loadRuns();
        },

        /**
         * Human-readable duration: ms under 1s, one-decimal seconds under
         * a minute, otherwise "Xm Ys".  Null/undefined renders as 'N/A'.
         */
        formatDuration(seconds) {
            if (seconds === null || seconds === undefined) return 'N/A';
            if (seconds < 1) return `${Math.round(seconds * 1000)}ms`;
            if (seconds < 60) return `${seconds.toFixed(1)}s`;
            const minutes = Math.floor(seconds / 60);
            const secs = Math.round(seconds % 60);
            return `${minutes}m ${secs}s`;
        }
    };
}
|
||||
Reference in New Issue
Block a user