feat: run tests in background with progress polling

Improve the testing dashboard to run pytest in the background:

- Add background task execution using FastAPI's BackgroundTasks
- Create test_runner_tasks.py following existing background task pattern
- API now returns immediately after starting the test run
- Frontend polls for status every 2 seconds until completion
- Show running indicator with elapsed time counter
- Resume polling if user navigates away and returns while tests running
- Tests continue running even if user closes the page

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2025-12-12 23:20:26 +01:00
parent 6e3ae4bebb
commit 0e6c9e3eea
5 changed files with 228 additions and 41 deletions

View File

@@ -3,13 +3,14 @@ Test Runner API Endpoints
RESTful API for running pytest and viewing test results
"""
from fastapi import APIRouter, Depends, Query
from fastapi import APIRouter, BackgroundTasks, Depends, Query
from pydantic import BaseModel, Field
from sqlalchemy.orm import Session
from app.api.deps import get_current_admin_api
from app.core.database import get_db
from app.services.test_runner_service import test_runner_service
from app.tasks.test_runner_tasks import execute_test_run
from models.database.user import User
router = APIRouter()
@@ -96,27 +97,37 @@ class TestDashboardStatsResponse(BaseModel):
@router.post("/run", response_model=TestRunResponse)
async def run_tests(
background_tasks: BackgroundTasks,
request: RunTestsRequest | None = None,
db: Session = Depends(get_db),
current_user: User = Depends(get_current_admin_api),
):
"""
Run pytest and store results
Start a pytest run in the background
Requires admin authentication. Runs pytest on the specified path
and stores results in the database.
Requires admin authentication. Creates a test run record and starts
pytest execution in the background. Returns immediately with the run ID.
Poll GET /runs/{run_id} to check status.
"""
test_path = request.test_path if request else "tests"
extra_args = request.extra_args if request else None
run = test_runner_service.run_tests(
# Create the test run record
run = test_runner_service.create_test_run(
db,
test_path=test_path,
triggered_by=f"manual:{current_user.username}",
extra_args=extra_args,
)
db.commit()
# Start background execution
background_tasks.add_task(
execute_test_run,
run.id,
test_path,
extra_args,
)
return TestRunResponse(
id=run.id,
timestamp=run.timestamp.isoformat(),

View File

@@ -25,6 +25,25 @@ class TestRunnerService:
def __init__(self):
self.project_root = Path(__file__).parent.parent.parent
def create_test_run(
    self,
    db: Session,
    test_path: str = "tests",
    triggered_by: str = "manual",
    extra_args: list[str] | None = None,
) -> TestRun:
    """Create a TestRun record in "running" state without executing any tests.

    Used by the background-execution flow: the API endpoint creates the
    record first so it can return the run ID immediately, then hands
    execution off to a background task.

    Args:
        db: Database session. The caller is responsible for committing.
        test_path: Path to the tests, relative to the project root.
        triggered_by: Free-form label describing what started the run.
        extra_args: Extra pytest arguments. Accepted so the API caller can
            pass them uniformly (it forwards them separately to the
            background task); they are not persisted on the record here.

    Returns:
        The new TestRun, flushed so its primary key is populated.
    """
    test_run = TestRun(
        timestamp=datetime.now(UTC),
        triggered_by=triggered_by,
        test_path=test_path,
        status="running",
        git_commit_hash=self._get_git_commit(),
        git_branch=self._get_git_branch(),
    )
    db.add(test_run)
    db.flush()  # assign the primary key without committing
    return test_run
def run_tests(
self,
db: Session,
@@ -33,7 +52,7 @@ class TestRunnerService:
extra_args: list[str] | None = None,
) -> TestRun:
"""
Run pytest and store results in database
Run pytest synchronously and store results in database
Args:
db: Database session
@@ -44,21 +63,19 @@ class TestRunnerService:
Returns:
TestRun object with results
"""
# Create test run record
test_run = TestRun(
timestamp=datetime.now(UTC),
triggered_by=triggered_by,
test_path=test_path,
status="running",
)
db.add(test_run)
db.flush() # Get the ID
test_run = self.create_test_run(db, test_path, triggered_by)
self._execute_tests(db, test_run, test_path, extra_args)
return test_run
def _execute_tests(
self,
db: Session,
test_run: TestRun,
test_path: str,
extra_args: list[str] | None,
) -> None:
"""Execute pytest and update the test run record"""
try:
# Get git info
test_run.git_commit_hash = self._get_git_commit()
test_run.git_branch = self._get_git_branch()
# Build pytest command with JSON output
with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
json_report_path = f.name
@@ -66,7 +83,7 @@ class TestRunnerService:
pytest_args = [
"python", "-m", "pytest",
test_path,
f"--json-report",
"--json-report",
f"--json-report-file={json_report_path}",
"-v",
"--tb=short",
@@ -120,8 +137,6 @@ class TestRunnerService:
test_run.status = "error"
logger.error(f"Error running tests: {e}")
return test_run
def _process_json_report(self, db: Session, test_run: TestRun, report: dict):
"""Process pytest-json-report output"""
summary = report.get("summary", {})

View File

@@ -0,0 +1,62 @@
# app/tasks/test_runner_tasks.py
"""Background tasks for test runner."""
import asyncio
import logging
from datetime import UTC, datetime

from app.core.database import SessionLocal
from app.services.test_runner_service import test_runner_service
from models.database.test_run import TestRun
logger = logging.getLogger(__name__)
async def execute_test_run(
    run_id: int,
    test_path: str = "tests",
    extra_args: list[str] | None = None,
) -> None:
    """Background task that executes pytest for an existing TestRun record.

    Scheduled via FastAPI's BackgroundTasks after the API has created the
    TestRun row and committed it. Loads the row, runs pytest, and commits
    the results; on failure the run is marked ``status="error"``.

    Args:
        run_id: Primary key of the TestRun record to execute.
        test_path: Path to tests, relative to the project root.
        extra_args: Additional pytest arguments, or None.
    """
    db = SessionLocal()
    try:
        test_run = db.query(TestRun).filter(TestRun.id == run_id).first()
        if test_run is None:
            logger.error("Test run %s not found", run_id)
            return

        logger.info("Starting test execution: run=%s path=%s", run_id, test_path)

        # _execute_tests runs pytest synchronously via a subprocess. Off-load
        # it to a worker thread so this coroutine does not block the event
        # loop for the entire duration of the test run.
        await asyncio.to_thread(
            test_runner_service._execute_tests, db, test_run, test_path, extra_args
        )
        db.commit()

        # Lazy %s formatting also tolerates fields that are still None
        # (e.g. duration on an aborted run), unlike an f-string with ":.1f".
        logger.info(
            "Test run %s completed: status=%s, passed=%s, failed=%s, duration=%ss",
            run_id,
            test_run.status,
            test_run.passed,
            test_run.failed,
            test_run.duration_seconds,
        )
    except Exception:
        logger.exception("Test run %s failed", run_id)
        try:
            # Roll back first: after an exception the session may be in a
            # failed state and refuse further work until it is cleared.
            db.rollback()
            failed_run = db.query(TestRun).filter(TestRun.id == run_id).first()
            if failed_run is not None:
                failed_run.status = "error"
                db.commit()
        except Exception:
            logger.exception("Failed to mark test run %s as errored", run_id)
            db.rollback()
    finally:
        # SessionLocal always yields a closeable session; best-effort close.
        try:
            db.close()
        except Exception:
            logger.exception("Error closing database session for run %s", run_id)

View File

@@ -23,6 +23,33 @@
{{ alert_dynamic(type='success', message_var='successMessage', show_condition='successMessage') }}
<!-- Running Indicator -->
<div x-show="running" x-cloak class="mb-6">
<div class="p-4 bg-purple-100 dark:bg-purple-900 rounded-lg shadow-xs">
<div class="flex items-center justify-between">
<div class="flex items-center">
<svg class="animate-spin h-5 w-5 text-purple-600 dark:text-purple-400 mr-3" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24">
<circle class="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" stroke-width="4"></circle>
<path class="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path>
</svg>
<div>
<p class="font-semibold text-purple-800 dark:text-purple-200">Running tests...</p>
<p class="text-sm text-purple-600 dark:text-purple-300">Tests are executing in the background. You can leave this page and come back.</p>
</div>
</div>
<div class="text-right">
<p class="text-2xl font-bold text-purple-700 dark:text-purple-300" x-text="formatDuration(elapsedTime)">0s</p>
<p class="text-xs text-purple-600 dark:text-purple-400">elapsed</p>
</div>
</div>
<div class="mt-3">
<div class="w-full bg-purple-200 dark:bg-purple-800 rounded-full h-1.5 overflow-hidden">
<div class="bg-purple-600 dark:bg-purple-400 h-1.5 rounded-full animate-pulse" style="width: 100%"></div>
</div>
</div>
</div>
</div>
<!-- Dashboard Content -->
<div x-show="!loading && !error">
<!-- Stats Cards Row 1 - Main Metrics -->

View File

@@ -20,6 +20,10 @@ function testingDashboard() {
collecting: false,
error: null,
successMessage: null,
activeRunId: null,
pollInterval: null,
elapsedTime: 0,
elapsedTimer: null,
// Statistics
stats: {
@@ -46,6 +50,30 @@ function testingDashboard() {
testingDashboardLog.info('Initializing testing dashboard');
await this.loadStats();
await this.loadRuns();
// Check if there's a running test and resume polling
await this.checkForRunningTests();
},
async checkForRunningTests() {
// Check if there's already a test running
const runningRun = this.runs.find(r => r.status === 'running');
if (runningRun) {
testingDashboardLog.info('Found running test:', runningRun.id);
this.running = true;
this.activeRunId = runningRun.id;
// Calculate elapsed time from when the run started
const startTime = new Date(runningRun.timestamp);
this.elapsedTime = Math.floor((Date.now() - startTime.getTime()) / 1000);
// Start elapsed time counter
this.elapsedTimer = setInterval(() => {
this.elapsedTime++;
}, 1000);
// Start polling for status
this.pollInterval = setInterval(() => this.pollRunStatus(), 2000);
}
},
async loadStats() {
@@ -84,45 +112,89 @@ function testingDashboard() {
this.running = true;
this.error = null;
this.successMessage = null;
this.elapsedTime = 0;
testingDashboardLog.info('Running tests:', testPath);
testingDashboardLog.info('Starting tests:', testPath);
try {
// Start the test run (returns immediately)
const result = await apiClient.post('/admin/tests/run', {
test_path: testPath
});
testingDashboardLog.info('Test run completed:', result);
testingDashboardLog.info('Test run started:', result);
this.activeRunId = result.id;
// Format success message
const status = result.status === 'passed' ? 'All tests passed!' : 'Tests completed with failures.';
this.successMessage = `${status} ${result.passed}/${result.total_tests} passed (${result.pass_rate.toFixed(1)}%) in ${this.formatDuration(result.duration_seconds)}`;
// Start elapsed time counter
this.elapsedTimer = setInterval(() => {
this.elapsedTime++;
}, 1000);
// Reload stats and runs
await this.loadStats();
await this.loadRuns();
// Start polling for status
this.pollInterval = setInterval(() => this.pollRunStatus(), 2000);
// Show toast notification
Utils.showToast(this.successMessage, result.status === 'passed' ? 'success' : 'warning');
Utils.showToast('Test run started...', 'info');
// Clear success message after 10 seconds
setTimeout(() => {
this.successMessage = null;
}, 10000);
} catch (err) {
testingDashboardLog.error('Failed to run tests:', err);
testingDashboardLog.error('Failed to start tests:', err);
this.error = err.message;
Utils.showToast('Failed to run tests: ' + err.message, 'error');
this.running = false;
Utils.showToast('Failed to start tests: ' + err.message, 'error');
// Redirect to login if unauthorized
if (err.message.includes('Unauthorized')) {
window.location.href = '/admin/login';
}
} finally {
this.running = false;
}
},
async pollRunStatus() {
if (!this.activeRunId) return;
try {
const run = await apiClient.get(`/admin/tests/runs/${this.activeRunId}`);
if (run.status !== 'running') {
// Test run completed
this.stopPolling();
testingDashboardLog.info('Test run completed:', run);
// Format success message
const status = run.status === 'passed' ? 'All tests passed!' : 'Tests completed with failures.';
this.successMessage = `${status} ${run.passed}/${run.total_tests} passed (${run.pass_rate.toFixed(1)}%) in ${this.formatDuration(run.duration_seconds)}`;
// Reload stats and runs
await this.loadStats();
await this.loadRuns();
// Show toast notification
Utils.showToast(this.successMessage, run.status === 'passed' ? 'success' : 'warning');
// Clear success message after 10 seconds
setTimeout(() => {
this.successMessage = null;
}, 10000);
}
} catch (err) {
testingDashboardLog.error('Failed to poll run status:', err);
// Don't stop polling on error, might be transient
}
},
stopPolling() {
if (this.pollInterval) {
clearInterval(this.pollInterval);
this.pollInterval = null;
}
if (this.elapsedTimer) {
clearInterval(this.elapsedTimer);
this.elapsedTimer = null;
}
this.running = false;
this.activeRunId = null;
},
async collectTests() {
this.collecting = true;
this.error = null;