feat: add collection stats display and fix test collection

- Fix collect_tests to use JSON report parsing (was returning 0 tests)
- Add Test Collection panel to testing dashboard showing total tests,
  unit/integration/performance breakdown, and file count
- Reorganize sidebar: create Platform Health section with Testing Hub,
  Code Quality, and Background Tasks
- Keep Developer Tools for Components and Icons only
- Platform Monitoring now contains Import Jobs and Application Logs

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2025-12-12 23:53:23 +01:00
parent 8443cc3e80
commit c60243ceff
6 changed files with 128 additions and 32 deletions

View File

@@ -85,6 +85,12 @@ class TestDashboardStatsResponse(BaseModel):
# Collection stats # Collection stats
total_test_files: int total_test_files: int
collected_tests: int
unit_tests: int
integration_tests: int
performance_tests: int
system_tests: int
last_collected: str | None
# Trend and breakdown data # Trend and breakdown data
trend: list[dict] trend: list[dict]

View File

@@ -375,6 +375,12 @@ class TestRunnerService:
# Collection stats # Collection stats
"total_test_files": collection.total_files if collection else 0, "total_test_files": collection.total_files if collection else 0,
"collected_tests": collection.total_tests if collection else 0,
"unit_tests": collection.unit_tests if collection else 0,
"integration_tests": collection.integration_tests if collection else 0,
"performance_tests": collection.performance_tests if collection else 0,
"system_tests": collection.system_tests if collection else 0,
"last_collected": collection.collected_at.isoformat() if collection else None,
# Trend data # Trend data
"trend": [ "trend": [
@@ -410,47 +416,70 @@ class TestRunnerService:
) )
try: try:
# Run pytest --collect-only # Run pytest --collect-only with JSON report
with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f:
json_report_path = f.name
result = subprocess.run( result = subprocess.run(
["python", "-m", "pytest", "--collect-only", "-q", "tests"], [
"python", "-m", "pytest",
"--collect-only",
"--json-report",
f"--json-report-file={json_report_path}",
"tests"
],
cwd=str(self.project_root), cwd=str(self.project_root),
capture_output=True, capture_output=True,
text=True, text=True,
timeout=60, timeout=120,
) )
# Parse output # Parse JSON report
lines = result.stdout.strip().split('\n') json_path = Path(json_report_path)
test_files = {} if json_path.exists():
with open(json_path) as f:
report = json.load(f)
for line in lines: # Get total from summary
if "::" in line: collection.total_tests = report.get("summary", {}).get("collected", 0)
file_path = line.split("::")[0]
if file_path not in test_files:
test_files[file_path] = 0
test_files[file_path] += 1
# Count by category # Parse collectors to get test files and counts
for file_path, count in test_files.items(): test_files = {}
collection.total_tests += count for collector in report.get("collectors", []):
collection.total_files += 1 for item in collector.get("result", []):
if item.get("type") == "Function":
node_id = item.get("nodeid", "")
if "::" in node_id:
file_path = node_id.split("::")[0]
if file_path not in test_files:
test_files[file_path] = 0
test_files[file_path] += 1
if "unit" in file_path: # Count files and categorize
collection.unit_tests += count for file_path, count in test_files.items():
elif "integration" in file_path: collection.total_files += 1
collection.integration_tests += count
elif "performance" in file_path:
collection.performance_tests += count
elif "system" in file_path:
collection.system_tests += count
collection.test_files = [ if "/unit/" in file_path or file_path.startswith("tests/unit"):
{"file": f, "count": c} collection.unit_tests += count
for f, c in sorted(test_files.items(), key=lambda x: -x[1]) elif "/integration/" in file_path or file_path.startswith("tests/integration"):
] collection.integration_tests += count
elif "/performance/" in file_path or file_path.startswith("tests/performance"):
collection.performance_tests += count
elif "/system/" in file_path or file_path.startswith("tests/system"):
collection.system_tests += count
collection.test_files = [
{"file": f, "count": c}
for f, c in sorted(test_files.items(), key=lambda x: -x[1])
]
# Cleanup
json_path.unlink(missing_ok=True)
logger.info(f"Collected {collection.total_tests} tests from {collection.total_files} files")
except Exception as e: except Exception as e:
logger.error(f"Error collecting tests: {e}") logger.error(f"Error collecting tests: {e}", exc_info=True)
db.add(collection) db.add(collection)
return collection return collection

View File

@@ -96,14 +96,19 @@
{% call section_content('devTools') %} {% call section_content('devTools') %}
{{ menu_item('components', '/admin/components', 'view-grid', 'Components') }} {{ menu_item('components', '/admin/components', 'view-grid', 'Components') }}
{{ menu_item('icons', '/admin/icons', 'photograph', 'Icons') }} {{ menu_item('icons', '/admin/icons', 'photograph', 'Icons') }}
{% endcall %}
<!-- Platform Health Section -->
{{ section_header('Platform Health', 'platformHealth') }}
{% call section_content('platformHealth') %}
{{ menu_item('testing', '/admin/testing', 'beaker', 'Testing Hub') }} {{ menu_item('testing', '/admin/testing', 'beaker', 'Testing Hub') }}
{{ menu_item('code-quality', '/admin/code-quality', 'shield-check', 'Code Quality') }} {{ menu_item('code-quality', '/admin/code-quality', 'shield-check', 'Code Quality') }}
{{ menu_item('background-tasks', '/admin/background-tasks', 'collection', 'Background Tasks') }}
{% endcall %} {% endcall %}
<!-- Platform Monitoring Section --> <!-- Platform Monitoring Section -->
{{ section_header('Platform Monitoring', 'monitoring') }} {{ section_header('Platform Monitoring', 'monitoring') }}
{% call section_content('monitoring') %} {% call section_content('monitoring') %}
{{ menu_item('background-tasks', '/admin/background-tasks', 'collection', 'Background Tasks') }}
{{ menu_item('imports', '/admin/imports', 'cube', 'Import Jobs') }} {{ menu_item('imports', '/admin/imports', 'cube', 'Import Jobs') }}
{{ menu_item('logs', '/admin/logs', 'document-text', 'Application Logs') }} {{ menu_item('logs', '/admin/logs', 'document-text', 'Application Logs') }}
{% endcall %} {% endcall %}

View File

@@ -153,6 +153,53 @@
</div> </div>
</div> </div>
<!-- Test Collection Stats -->
<div class="mb-8 p-6 bg-white rounded-lg shadow-xs dark:bg-gray-800">
<div class="flex items-center justify-between mb-4">
<h4 class="text-lg font-semibold text-gray-700 dark:text-gray-200">
Test Collection
</h4>
<span x-show="stats.last_collected" class="text-xs text-gray-500 dark:text-gray-400">
Last collected: <span x-text="stats.last_collected ? new Date(stats.last_collected).toLocaleString() : 'Never'"></span>
</span>
</div>
<template x-if="stats.collected_tests > 0">
<div class="grid gap-4 md:grid-cols-5">
<!-- Total Collected -->
<div class="text-center p-3 bg-gray-50 dark:bg-gray-700 rounded-lg">
<p class="text-2xl font-bold text-gray-700 dark:text-gray-200" x-text="stats.collected_tests">0</p>
<p class="text-xs text-gray-500 dark:text-gray-400">Total Tests</p>
</div>
<!-- Unit Tests -->
<div class="text-center p-3 bg-blue-50 dark:bg-blue-900/30 rounded-lg">
<p class="text-2xl font-bold text-blue-600 dark:text-blue-400" x-text="stats.unit_tests">0</p>
<p class="text-xs text-gray-500 dark:text-gray-400">Unit</p>
</div>
<!-- Integration Tests -->
<div class="text-center p-3 bg-purple-50 dark:bg-purple-900/30 rounded-lg">
<p class="text-2xl font-bold text-purple-600 dark:text-purple-400" x-text="stats.integration_tests">0</p>
<p class="text-xs text-gray-500 dark:text-gray-400">Integration</p>
</div>
<!-- Performance Tests -->
<div class="text-center p-3 bg-orange-50 dark:bg-orange-900/30 rounded-lg">
<p class="text-2xl font-bold text-orange-600 dark:text-orange-400" x-text="stats.performance_tests">0</p>
<p class="text-xs text-gray-500 dark:text-gray-400">Performance</p>
</div>
<!-- Test Files -->
<div class="text-center p-3 bg-green-50 dark:bg-green-900/30 rounded-lg">
<p class="text-2xl font-bold text-green-600 dark:text-green-400" x-text="stats.total_test_files">0</p>
<p class="text-xs text-gray-500 dark:text-gray-400">Files</p>
</div>
</div>
</template>
<template x-if="stats.collected_tests === 0">
<div class="text-center py-4 text-gray-500 dark:text-gray-400">
<span x-html="$icon('collection', 'w-8 h-8 mx-auto mb-2')"></span>
<p class="text-sm">No collection data. Click "Collect Tests" to discover available tests.</p>
</div>
</template>
</div>
<!-- Trend Chart and Tests by Category --> <!-- Trend Chart and Tests by Category -->
<div class="grid gap-6 mb-8 md:grid-cols-2"> <div class="grid gap-6 mb-8 md:grid-cols-2">
<!-- Trend Chart --> <!-- Trend Chart -->

View File

@@ -31,6 +31,7 @@ function data() {
productCatalog: false, productCatalog: false,
contentMgmt: false, contentMgmt: false,
devTools: false, devTools: false,
platformHealth: false,
monitoring: false, monitoring: false,
settingsSection: false settingsSection: false
}; };
@@ -73,8 +74,10 @@ function data() {
// Developer Tools // Developer Tools
components: 'devTools', components: 'devTools',
icons: 'devTools', icons: 'devTools',
testing: 'devTools', // Platform Health
'code-quality': 'devTools', testing: 'platformHealth',
'code-quality': 'platformHealth',
'background-tasks': 'platformHealth',
// Platform Monitoring // Platform Monitoring
imports: 'monitoring', imports: 'monitoring',
logs: 'monitoring', logs: 'monitoring',

View File

@@ -38,6 +38,12 @@ function testingDashboard() {
last_run: null, last_run: null,
last_run_status: null, last_run_status: null,
total_test_files: 0, total_test_files: 0,
collected_tests: 0,
unit_tests: 0,
integration_tests: 0,
performance_tests: 0,
system_tests: 0,
last_collected: null,
trend: [], trend: [],
by_category: {}, by_category: {},
top_failing: [] top_failing: []