refactor: migrate templates and static files to self-contained modules
Templates Migration: - Migrate admin templates to modules (tenancy, billing, monitoring, marketplace, etc.) - Migrate vendor templates to modules (tenancy, billing, orders, messaging, etc.) - Migrate storefront templates to modules (catalog, customers, orders, cart, checkout, cms) - Migrate public templates to modules (billing, marketplace, cms) - Keep shared templates in app/templates/ (base.html, errors/, partials/, macros/) - Migrate letzshop partials to marketplace module Static Files Migration: - Migrate admin JS to modules: tenancy (23 files), core (5 files), monitoring (1 file) - Migrate vendor JS to modules: tenancy (4 files), core (2 files) - Migrate shared JS: vendor-selector.js to core, media-picker.js to cms - Migrate storefront JS: storefront-layout.js to core - Keep framework JS in static/ (api-client, utils, money, icons, log-config, lib/) - Update all template references to use module_static paths Naming Consistency: - Rename static/platform/ to static/public/ - Rename app/templates/platform/ to app/templates/public/ - Update all extends and static references Documentation: - Update module-system.md with shared templates documentation - Update frontend-structure.md with new module JS organization Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -2,14 +2,41 @@
|
||||
"""
|
||||
Monitoring module exceptions.
|
||||
|
||||
Module-specific exceptions for monitoring functionality.
|
||||
This module provides exception classes for monitoring operations including:
|
||||
- Background task tracking
|
||||
- Capacity snapshot management
|
||||
- Code quality/architecture scans
|
||||
"""
|
||||
|
||||
from app.exceptions.base import (
|
||||
BusinessLogicException,
|
||||
ExternalServiceException,
|
||||
ResourceNotFoundException,
|
||||
ValidationException,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
# Task exceptions
|
||||
"TaskNotFoundException",
|
||||
# Capacity exceptions
|
||||
"CapacitySnapshotNotFoundException",
|
||||
# General monitoring exceptions
|
||||
"MonitoringServiceException",
|
||||
# Code quality exceptions
|
||||
"ViolationNotFoundException",
|
||||
"ScanNotFoundException",
|
||||
"ScanExecutionException",
|
||||
"ScanTimeoutException",
|
||||
"ScanParseException",
|
||||
"ViolationOperationException",
|
||||
"InvalidViolationStatusException",
|
||||
]
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Task Exceptions
|
||||
# =============================================================================
|
||||
|
||||
|
||||
class TaskNotFoundException(ResourceNotFoundException):
|
||||
"""Raised when a background task is not found."""
|
||||
@@ -22,6 +49,11 @@ class TaskNotFoundException(ResourceNotFoundException):
|
||||
)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Capacity Exceptions
|
||||
# =============================================================================
|
||||
|
||||
|
||||
class CapacitySnapshotNotFoundException(ResourceNotFoundException):
|
||||
"""Raised when a capacity snapshot is not found."""
|
||||
|
||||
@@ -33,6 +65,11 @@ class CapacitySnapshotNotFoundException(ResourceNotFoundException):
|
||||
)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# General Monitoring Exceptions
|
||||
# =============================================================================
|
||||
|
||||
|
||||
class MonitoringServiceException(BusinessLogicException):
|
||||
"""Raised when a monitoring operation fails."""
|
||||
|
||||
@@ -40,11 +77,97 @@ class MonitoringServiceException(BusinessLogicException):
|
||||
super().__init__(
|
||||
message=f"Monitoring operation '{operation}' failed: {reason}",
|
||||
error_code="MONITORING_OPERATION_FAILED",
|
||||
details={"operation": operation, "reason": reason},
|
||||
)
|
||||
|
||||
|
||||
__all__ = [
|
||||
"TaskNotFoundException",
|
||||
"CapacitySnapshotNotFoundException",
|
||||
"MonitoringServiceException",
|
||||
]
|
||||
# =============================================================================
|
||||
# Code Quality Exceptions
|
||||
# =============================================================================
|
||||
|
||||
|
||||
class ViolationNotFoundException(ResourceNotFoundException):
|
||||
"""Raised when a violation is not found."""
|
||||
|
||||
def __init__(self, violation_id: int):
|
||||
super().__init__(
|
||||
resource_type="Violation",
|
||||
identifier=str(violation_id),
|
||||
error_code="VIOLATION_NOT_FOUND",
|
||||
)
|
||||
|
||||
|
||||
class ScanNotFoundException(ResourceNotFoundException):
|
||||
"""Raised when a scan is not found."""
|
||||
|
||||
def __init__(self, scan_id: int):
|
||||
super().__init__(
|
||||
resource_type="Scan",
|
||||
identifier=str(scan_id),
|
||||
error_code="SCAN_NOT_FOUND",
|
||||
)
|
||||
|
||||
|
||||
class ScanExecutionException(ExternalServiceException):
|
||||
"""Raised when architecture scan execution fails."""
|
||||
|
||||
def __init__(self, reason: str):
|
||||
super().__init__(
|
||||
service_name="ArchitectureValidator",
|
||||
message=f"Scan execution failed: {reason}",
|
||||
error_code="SCAN_EXECUTION_FAILED",
|
||||
)
|
||||
|
||||
|
||||
class ScanTimeoutException(ExternalServiceException):
|
||||
"""Raised when architecture scan times out."""
|
||||
|
||||
def __init__(self, timeout_seconds: int = 300):
|
||||
super().__init__(
|
||||
service_name="ArchitectureValidator",
|
||||
message=f"Scan timed out after {timeout_seconds} seconds",
|
||||
error_code="SCAN_TIMEOUT",
|
||||
details={"timeout_seconds": timeout_seconds},
|
||||
)
|
||||
|
||||
|
||||
class ScanParseException(BusinessLogicException):
|
||||
"""Raised when scan results cannot be parsed."""
|
||||
|
||||
def __init__(self, reason: str):
|
||||
super().__init__(
|
||||
message=f"Failed to parse scan results: {reason}",
|
||||
error_code="SCAN_PARSE_FAILED",
|
||||
details={"reason": reason},
|
||||
)
|
||||
|
||||
|
||||
class ViolationOperationException(BusinessLogicException):
|
||||
"""Raised when a violation operation fails."""
|
||||
|
||||
def __init__(self, operation: str, violation_id: int, reason: str):
|
||||
super().__init__(
|
||||
message=f"Failed to {operation} violation {violation_id}: {reason}",
|
||||
error_code="VIOLATION_OPERATION_FAILED",
|
||||
details={
|
||||
"operation": operation,
|
||||
"violation_id": violation_id,
|
||||
"reason": reason,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
class InvalidViolationStatusException(ValidationException):
|
||||
"""Raised when a violation status transition is invalid."""
|
||||
|
||||
def __init__(self, violation_id: int, current_status: str, target_status: str):
|
||||
super().__init__(
|
||||
message=f"Cannot change violation {violation_id} from '{current_status}' to '{target_status}'",
|
||||
field="status",
|
||||
details={
|
||||
"violation_id": violation_id,
|
||||
"current_status": current_status,
|
||||
"target_status": target_status,
|
||||
},
|
||||
)
|
||||
self.error_code = "INVALID_VIOLATION_STATUS"
|
||||
|
||||
@@ -1,53 +0,0 @@
|
||||
# app/modules/monitoring/routes/admin.py
|
||||
"""
|
||||
Monitoring module admin routes.
|
||||
|
||||
This module wraps the existing admin monitoring routes and adds
|
||||
module-based access control. Routes are re-exported from the
|
||||
original location with the module access dependency.
|
||||
|
||||
Includes:
|
||||
- /logs/* - Application logs
|
||||
- /background-tasks/* - Background task monitoring
|
||||
- /tests/* - Test runner
|
||||
- /code-quality/* - Code quality tools
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends
|
||||
|
||||
from app.api.deps import require_module_access
|
||||
|
||||
# Import original routers (direct import to avoid circular dependency)
|
||||
from app.api.v1.admin.logs import router as logs_original_router
|
||||
from app.api.v1.admin.background_tasks import router as tasks_original_router
|
||||
from app.api.v1.admin.tests import router as tests_original_router
|
||||
from app.api.v1.admin.code_quality import router as code_quality_original_router
|
||||
|
||||
# Create module-aware router for logs
|
||||
admin_router = APIRouter(
|
||||
prefix="/monitoring",
|
||||
dependencies=[Depends(require_module_access("monitoring"))],
|
||||
)
|
||||
|
||||
# Create sub-routers for each component
|
||||
logs_router = APIRouter(prefix="/logs")
|
||||
for route in logs_original_router.routes:
|
||||
logs_router.routes.append(route)
|
||||
|
||||
tasks_router = APIRouter(prefix="/background-tasks")
|
||||
for route in tasks_original_router.routes:
|
||||
tasks_router.routes.append(route)
|
||||
|
||||
tests_router = APIRouter(prefix="/tests")
|
||||
for route in tests_original_router.routes:
|
||||
tests_router.routes.append(route)
|
||||
|
||||
code_quality_router = APIRouter(prefix="/code-quality")
|
||||
for route in code_quality_original_router.routes:
|
||||
code_quality_router.routes.append(route)
|
||||
|
||||
# Include all sub-routers
|
||||
admin_router.include_router(logs_router)
|
||||
admin_router.include_router(tasks_router)
|
||||
admin_router.include_router(tests_router)
|
||||
admin_router.include_router(code_quality_router)
|
||||
@@ -1,4 +1,14 @@
|
||||
# Routes will be migrated here from legacy locations
|
||||
# TODO: Move actual route implementations from app/api/v1/
|
||||
# app/modules/monitoring/routes/api/__init__.py
|
||||
"""
|
||||
Monitoring module API routes.
|
||||
|
||||
__all__ = []
|
||||
Admin routes:
|
||||
- /logs/* - Application log management
|
||||
- /tasks/* - Background tasks monitoring
|
||||
- /tests/* - Test runner
|
||||
- /code-quality/* - Code quality tools
|
||||
"""
|
||||
|
||||
from app.modules.monitoring.routes.api.admin import admin_router
|
||||
|
||||
__all__ = ["admin_router"]
|
||||
|
||||
35
app/modules/monitoring/routes/api/admin.py
Normal file
35
app/modules/monitoring/routes/api/admin.py
Normal file
@@ -0,0 +1,35 @@
|
||||
# app/modules/monitoring/routes/api/admin.py
|
||||
"""
|
||||
Monitoring module admin API routes.
|
||||
|
||||
Aggregates all admin monitoring routes:
|
||||
- /logs/* - Application log management
|
||||
- /tasks/* - Background tasks monitoring
|
||||
- /tests/* - Test runner
|
||||
- /code-quality/* - Code quality tools
|
||||
- /audit/* - Admin audit logging
|
||||
- /platform/* - Platform health and capacity
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends
|
||||
|
||||
from app.api.deps import require_module_access
|
||||
|
||||
from .admin_logs import admin_logs_router
|
||||
from .admin_tasks import admin_tasks_router
|
||||
from .admin_tests import admin_tests_router
|
||||
from .admin_code_quality import admin_code_quality_router
|
||||
from .admin_audit import admin_audit_router
|
||||
from .admin_platform_health import admin_platform_health_router
|
||||
|
||||
admin_router = APIRouter(
|
||||
dependencies=[Depends(require_module_access("monitoring"))],
|
||||
)
|
||||
|
||||
# Aggregate all monitoring admin routes
|
||||
admin_router.include_router(admin_logs_router, tags=["admin-logs"])
|
||||
admin_router.include_router(admin_tasks_router, tags=["admin-tasks"])
|
||||
admin_router.include_router(admin_tests_router, tags=["admin-tests"])
|
||||
admin_router.include_router(admin_code_quality_router, tags=["admin-code-quality"])
|
||||
admin_router.include_router(admin_audit_router, tags=["admin-audit"])
|
||||
admin_router.include_router(admin_platform_health_router, tags=["admin-platform-health"])
|
||||
105
app/modules/monitoring/routes/api/admin_audit.py
Normal file
105
app/modules/monitoring/routes/api/admin_audit.py
Normal file
@@ -0,0 +1,105 @@
|
||||
# app/modules/monitoring/routes/api/admin_audit.py
|
||||
"""
|
||||
Admin audit log endpoints.
|
||||
|
||||
Provides endpoints for:
|
||||
- Viewing audit logs with filtering
|
||||
- Tracking admin actions
|
||||
- Generating audit reports
|
||||
"""
|
||||
|
||||
import logging
|
||||
from datetime import datetime
|
||||
|
||||
from fastapi import APIRouter, Depends, Query
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.api.deps import get_current_admin_api
|
||||
from app.core.database import get_db
|
||||
from app.modules.monitoring.services.admin_audit_service import admin_audit_service
|
||||
from models.schema.auth import UserContext
|
||||
from models.schema.admin import (
|
||||
AdminAuditLogFilters,
|
||||
AdminAuditLogListResponse,
|
||||
AdminAuditLogResponse,
|
||||
)
|
||||
|
||||
admin_audit_router = APIRouter(prefix="/audit")
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@admin_audit_router.get("/logs", response_model=AdminAuditLogListResponse)
|
||||
def get_audit_logs(
|
||||
admin_user_id: int | None = Query(None, description="Filter by admin user"),
|
||||
action: str | None = Query(None, description="Filter by action type"),
|
||||
target_type: str | None = Query(None, description="Filter by target type"),
|
||||
date_from: datetime | None = Query(None, description="Filter from date"),
|
||||
date_to: datetime | None = Query(None, description="Filter to date"),
|
||||
skip: int = Query(0, ge=0, description="Number of records to skip"),
|
||||
limit: int = Query(100, ge=1, le=1000, description="Maximum records to return"),
|
||||
db: Session = Depends(get_db),
|
||||
current_admin: UserContext = Depends(get_current_admin_api),
|
||||
):
|
||||
"""
|
||||
Get filtered admin audit logs.
|
||||
|
||||
Returns paginated list of all admin actions with filtering options.
|
||||
Useful for compliance, security audits, and tracking admin activities.
|
||||
"""
|
||||
filters = AdminAuditLogFilters(
|
||||
admin_user_id=admin_user_id,
|
||||
action=action,
|
||||
target_type=target_type,
|
||||
date_from=date_from,
|
||||
date_to=date_to,
|
||||
skip=skip,
|
||||
limit=limit,
|
||||
)
|
||||
|
||||
logs = admin_audit_service.get_audit_logs(db, filters)
|
||||
total = admin_audit_service.get_audit_logs_count(db, filters)
|
||||
|
||||
logger.info(f"Admin {current_admin.username} retrieved {len(logs)} audit logs")
|
||||
|
||||
return AdminAuditLogListResponse(logs=logs, total=total, skip=skip, limit=limit)
|
||||
|
||||
|
||||
@admin_audit_router.get("/logs/recent", response_model=list[AdminAuditLogResponse])
|
||||
def get_recent_audit_logs(
|
||||
limit: int = Query(20, ge=1, le=100),
|
||||
db: Session = Depends(get_db),
|
||||
current_admin: UserContext = Depends(get_current_admin_api),
|
||||
):
|
||||
"""Get recent audit logs (last 20 by default)."""
|
||||
filters = AdminAuditLogFilters(limit=limit)
|
||||
return admin_audit_service.get_audit_logs(db, filters)
|
||||
|
||||
|
||||
@admin_audit_router.get("/logs/my-actions", response_model=list[AdminAuditLogResponse])
|
||||
def get_my_actions(
|
||||
limit: int = Query(50, ge=1, le=100),
|
||||
db: Session = Depends(get_db),
|
||||
current_admin: UserContext = Depends(get_current_admin_api),
|
||||
):
|
||||
"""Get audit logs for current admin's actions."""
|
||||
return admin_audit_service.get_recent_actions_by_admin(
|
||||
db=db, admin_user_id=current_admin.id, limit=limit
|
||||
)
|
||||
|
||||
|
||||
@admin_audit_router.get("/logs/target/{target_type}/{target_id}")
|
||||
def get_actions_by_target(
|
||||
target_type: str,
|
||||
target_id: str,
|
||||
limit: int = Query(50, ge=1, le=100),
|
||||
db: Session = Depends(get_db),
|
||||
current_admin: UserContext = Depends(get_current_admin_api),
|
||||
):
|
||||
"""
|
||||
Get all actions performed on a specific target.
|
||||
|
||||
Useful for tracking the history of a specific vendor, user, or entity.
|
||||
"""
|
||||
return admin_audit_service.get_actions_by_target(
|
||||
db=db, target_type=target_type, target_id=target_id, limit=limit
|
||||
)
|
||||
619
app/modules/monitoring/routes/api/admin_code_quality.py
Normal file
619
app/modules/monitoring/routes/api/admin_code_quality.py
Normal file
@@ -0,0 +1,619 @@
|
||||
# app/modules/monitoring/routes/api/admin_code_quality.py
|
||||
"""
|
||||
Code Quality API Endpoints
|
||||
RESTful API for code quality validation and violation management
|
||||
Supports multiple validator types: architecture, security, performance
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
|
||||
from fastapi import APIRouter, BackgroundTasks, Depends, Query
|
||||
from pydantic import BaseModel, Field
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.api.deps import get_current_admin_api
|
||||
from app.core.database import get_db
|
||||
from app.modules.monitoring.exceptions import ScanNotFoundException, ViolationNotFoundException
|
||||
from app.modules.dev_tools.services.code_quality_service import (
|
||||
VALID_VALIDATOR_TYPES,
|
||||
code_quality_service,
|
||||
)
|
||||
from app.modules.dev_tools.models import ArchitectureScan
|
||||
from models.schema.auth import UserContext
|
||||
from app.modules.analytics.schemas import CodeQualityDashboardStatsResponse
|
||||
|
||||
admin_code_quality_router = APIRouter(prefix="/code-quality")
|
||||
|
||||
|
||||
# Enums and Constants
|
||||
|
||||
|
||||
class ValidatorType(str, Enum):
|
||||
"""Supported validator types"""
|
||||
|
||||
ARCHITECTURE = "architecture"
|
||||
SECURITY = "security"
|
||||
PERFORMANCE = "performance"
|
||||
|
||||
|
||||
# Pydantic Models for API
|
||||
|
||||
|
||||
class ScanResponse(BaseModel):
|
||||
"""Response model for a scan"""
|
||||
|
||||
id: int
|
||||
timestamp: str
|
||||
validator_type: str
|
||||
status: str
|
||||
started_at: str | None
|
||||
completed_at: str | None
|
||||
progress_message: str | None
|
||||
total_files: int
|
||||
total_violations: int
|
||||
errors: int
|
||||
warnings: int
|
||||
duration_seconds: float
|
||||
triggered_by: str | None
|
||||
git_commit_hash: str | None
|
||||
error_message: str | None = None
|
||||
|
||||
class Config:
|
||||
from_attributes = True
|
||||
|
||||
|
||||
class ScanRequest(BaseModel):
|
||||
"""Request model for triggering scans"""
|
||||
|
||||
validator_types: list[ValidatorType] = Field(
|
||||
default=[ValidatorType.ARCHITECTURE, ValidatorType.SECURITY, ValidatorType.PERFORMANCE],
|
||||
description="Validator types to run",
|
||||
)
|
||||
|
||||
|
||||
class ScanJobResponse(BaseModel):
|
||||
"""Response model for a queued scan job"""
|
||||
|
||||
id: int
|
||||
validator_type: str
|
||||
status: str
|
||||
message: str
|
||||
|
||||
|
||||
class MultiScanJobResponse(BaseModel):
|
||||
"""Response model for multiple queued scans (background task pattern)"""
|
||||
|
||||
scans: list[ScanJobResponse]
|
||||
message: str
|
||||
status_url: str
|
||||
|
||||
|
||||
class MultiScanResponse(BaseModel):
|
||||
"""Response model for completed scans (legacy sync pattern)"""
|
||||
|
||||
scans: list[ScanResponse]
|
||||
total_violations: int
|
||||
total_errors: int
|
||||
total_warnings: int
|
||||
|
||||
|
||||
class ViolationResponse(BaseModel):
|
||||
"""Response model for a violation"""
|
||||
|
||||
id: int
|
||||
scan_id: int
|
||||
validator_type: str
|
||||
rule_id: str
|
||||
rule_name: str
|
||||
severity: str
|
||||
file_path: str
|
||||
line_number: int
|
||||
message: str
|
||||
context: str | None
|
||||
suggestion: str | None
|
||||
status: str
|
||||
assigned_to: int | None
|
||||
resolved_at: str | None
|
||||
resolved_by: int | None
|
||||
resolution_note: str | None
|
||||
created_at: str
|
||||
|
||||
class Config:
|
||||
from_attributes = True
|
||||
|
||||
|
||||
class ViolationListResponse(BaseModel):
|
||||
"""Response model for paginated violations list"""
|
||||
|
||||
violations: list[ViolationResponse]
|
||||
total: int
|
||||
page: int
|
||||
page_size: int
|
||||
total_pages: int
|
||||
|
||||
|
||||
class ViolationDetailResponse(ViolationResponse):
|
||||
"""Response model for single violation with relationships"""
|
||||
|
||||
assignments: list = []
|
||||
comments: list = []
|
||||
|
||||
|
||||
class AssignViolationRequest(BaseModel):
|
||||
"""Request model for assigning a violation"""
|
||||
|
||||
user_id: int = Field(..., description="User ID to assign to")
|
||||
due_date: datetime | None = Field(None, description="Due date for resolution")
|
||||
priority: str = Field(
|
||||
"medium", description="Priority level (low, medium, high, critical)"
|
||||
)
|
||||
|
||||
|
||||
class ResolveViolationRequest(BaseModel):
|
||||
"""Request model for resolving a violation"""
|
||||
|
||||
resolution_note: str = Field(..., description="Note about the resolution")
|
||||
|
||||
|
||||
class IgnoreViolationRequest(BaseModel):
|
||||
"""Request model for ignoring a violation"""
|
||||
|
||||
reason: str = Field(..., description="Reason for ignoring")
|
||||
|
||||
|
||||
class AddCommentRequest(BaseModel):
|
||||
"""Request model for adding a comment"""
|
||||
|
||||
comment: str = Field(..., min_length=1, description="Comment text")
|
||||
|
||||
|
||||
# API Endpoints
|
||||
|
||||
|
||||
def _scan_to_response(scan: ArchitectureScan) -> ScanResponse:
|
||||
"""Convert ArchitectureScan to ScanResponse."""
|
||||
return ScanResponse(
|
||||
id=scan.id,
|
||||
timestamp=scan.timestamp.isoformat() if scan.timestamp else None,
|
||||
validator_type=scan.validator_type,
|
||||
status=scan.status,
|
||||
started_at=scan.started_at.isoformat() if scan.started_at else None,
|
||||
completed_at=scan.completed_at.isoformat() if scan.completed_at else None,
|
||||
progress_message=scan.progress_message,
|
||||
total_files=scan.total_files or 0,
|
||||
total_violations=scan.total_violations or 0,
|
||||
errors=scan.errors or 0,
|
||||
warnings=scan.warnings or 0,
|
||||
duration_seconds=scan.duration_seconds or 0.0,
|
||||
triggered_by=scan.triggered_by,
|
||||
git_commit_hash=scan.git_commit_hash,
|
||||
error_message=scan.error_message,
|
||||
)
|
||||
|
||||
|
||||
@admin_code_quality_router.post("/scan", response_model=MultiScanJobResponse, status_code=202)
|
||||
async def trigger_scan(
|
||||
request: ScanRequest = None,
|
||||
background_tasks: BackgroundTasks = None,
|
||||
db: Session = Depends(get_db),
|
||||
current_user: UserContext = Depends(get_current_admin_api),
|
||||
):
|
||||
"""
|
||||
Trigger code quality scan(s) as background tasks.
|
||||
|
||||
By default runs all validators. Specify validator_types to run specific validators.
|
||||
Returns immediately with job IDs. Poll /scan/{scan_id}/status for progress.
|
||||
|
||||
Scans run asynchronously - users can browse other pages while scans execute.
|
||||
"""
|
||||
if request is None:
|
||||
request = ScanRequest()
|
||||
|
||||
scan_jobs = []
|
||||
triggered_by = f"manual:{current_user.username}"
|
||||
|
||||
# Import dispatcher for Celery support
|
||||
from app.tasks.dispatcher import task_dispatcher
|
||||
|
||||
for vtype in request.validator_types:
|
||||
# Create scan record with pending status via service
|
||||
scan = code_quality_service.create_pending_scan(
|
||||
db, validator_type=vtype.value, triggered_by=triggered_by
|
||||
)
|
||||
|
||||
# Dispatch via task dispatcher (supports Celery or BackgroundTasks)
|
||||
celery_task_id = task_dispatcher.dispatch_code_quality_scan(
|
||||
background_tasks=background_tasks,
|
||||
scan_id=scan.id,
|
||||
)
|
||||
|
||||
# Store Celery task ID if using Celery
|
||||
if celery_task_id:
|
||||
scan.celery_task_id = celery_task_id
|
||||
|
||||
scan_jobs.append(
|
||||
ScanJobResponse(
|
||||
id=scan.id,
|
||||
validator_type=vtype.value,
|
||||
status="pending",
|
||||
message=f"{vtype.value.capitalize()} scan queued",
|
||||
)
|
||||
)
|
||||
|
||||
db.commit()
|
||||
|
||||
validator_names = ", ".join(vtype.value for vtype in request.validator_types)
|
||||
return MultiScanJobResponse(
|
||||
scans=scan_jobs,
|
||||
message=f"Scans queued for: {validator_names}",
|
||||
status_url="/admin/code-quality/scans/running",
|
||||
)
|
||||
|
||||
|
||||
@admin_code_quality_router.get("/scans/{scan_id}/status", response_model=ScanResponse)
|
||||
async def get_scan_status(
|
||||
scan_id: int,
|
||||
db: Session = Depends(get_db),
|
||||
current_user: UserContext = Depends(get_current_admin_api),
|
||||
):
|
||||
"""
|
||||
Get status of a specific scan.
|
||||
|
||||
Use this endpoint to poll for scan completion.
|
||||
"""
|
||||
scan = code_quality_service.get_scan_by_id(db, scan_id)
|
||||
if not scan:
|
||||
raise ScanNotFoundException(scan_id)
|
||||
|
||||
return _scan_to_response(scan)
|
||||
|
||||
|
||||
@admin_code_quality_router.get("/scans/running", response_model=list[ScanResponse])
|
||||
async def get_running_scans(
|
||||
db: Session = Depends(get_db),
|
||||
current_user: UserContext = Depends(get_current_admin_api),
|
||||
):
|
||||
"""
|
||||
Get all currently running scans.
|
||||
|
||||
Returns scans with status 'pending' or 'running'.
|
||||
"""
|
||||
scans = code_quality_service.get_running_scans(db)
|
||||
return [_scan_to_response(scan) for scan in scans]
|
||||
|
||||
|
||||
@admin_code_quality_router.get("/scans", response_model=list[ScanResponse])
|
||||
async def list_scans(
|
||||
limit: int = Query(30, ge=1, le=100, description="Number of scans to return"),
|
||||
validator_type: ValidatorType | None = Query(
|
||||
None, description="Filter by validator type"
|
||||
),
|
||||
db: Session = Depends(get_db),
|
||||
current_user: UserContext = Depends(get_current_admin_api),
|
||||
):
|
||||
"""
|
||||
Get scan history
|
||||
|
||||
Returns recent scans for trend analysis.
|
||||
Optionally filter by validator type.
|
||||
"""
|
||||
scans = code_quality_service.get_scan_history(
|
||||
db, limit=limit, validator_type=validator_type.value if validator_type else None
|
||||
)
|
||||
|
||||
return [_scan_to_response(scan) for scan in scans]
|
||||
|
||||
|
||||
@admin_code_quality_router.get("/violations", response_model=ViolationListResponse)
|
||||
async def list_violations(
|
||||
scan_id: int | None = Query(
|
||||
None, description="Filter by scan ID (defaults to latest)"
|
||||
),
|
||||
validator_type: ValidatorType | None = Query(
|
||||
None, description="Filter by validator type"
|
||||
),
|
||||
severity: str | None = Query(
|
||||
None, description="Filter by severity (error, warning, info)"
|
||||
),
|
||||
status: str | None = Query(
|
||||
None, description="Filter by status (open, assigned, resolved, ignored)"
|
||||
),
|
||||
rule_id: str | None = Query(None, description="Filter by rule ID"),
|
||||
file_path: str | None = Query(
|
||||
None, description="Filter by file path (partial match)"
|
||||
),
|
||||
page: int = Query(1, ge=1, description="Page number"),
|
||||
page_size: int = Query(50, ge=1, le=200, description="Items per page"),
|
||||
db: Session = Depends(get_db),
|
||||
current_user: UserContext = Depends(get_current_admin_api),
|
||||
):
|
||||
"""
|
||||
Get violations with filtering and pagination
|
||||
|
||||
Returns violations from latest scan(s) by default.
|
||||
Filter by validator_type to get violations from a specific validator.
|
||||
"""
|
||||
offset = (page - 1) * page_size
|
||||
|
||||
violations, total = code_quality_service.get_violations(
|
||||
db,
|
||||
scan_id=scan_id,
|
||||
validator_type=validator_type.value if validator_type else None,
|
||||
severity=severity,
|
||||
status=status,
|
||||
rule_id=rule_id,
|
||||
file_path=file_path,
|
||||
limit=page_size,
|
||||
offset=offset,
|
||||
)
|
||||
|
||||
total_pages = (total + page_size - 1) // page_size
|
||||
|
||||
return ViolationListResponse(
|
||||
violations=[
|
||||
ViolationResponse(
|
||||
id=v.id,
|
||||
scan_id=v.scan_id,
|
||||
validator_type=v.validator_type,
|
||||
rule_id=v.rule_id,
|
||||
rule_name=v.rule_name,
|
||||
severity=v.severity,
|
||||
file_path=v.file_path,
|
||||
line_number=v.line_number,
|
||||
message=v.message,
|
||||
context=v.context,
|
||||
suggestion=v.suggestion,
|
||||
status=v.status,
|
||||
assigned_to=v.assigned_to,
|
||||
resolved_at=v.resolved_at.isoformat() if v.resolved_at else None,
|
||||
resolved_by=v.resolved_by,
|
||||
resolution_note=v.resolution_note,
|
||||
created_at=v.created_at.isoformat(),
|
||||
)
|
||||
for v in violations
|
||||
],
|
||||
total=total,
|
||||
page=page,
|
||||
page_size=page_size,
|
||||
total_pages=total_pages,
|
||||
)
|
||||
|
||||
|
||||
@admin_code_quality_router.get("/violations/{violation_id}", response_model=ViolationDetailResponse)
|
||||
async def get_violation(
|
||||
violation_id: int,
|
||||
db: Session = Depends(get_db),
|
||||
current_user: UserContext = Depends(get_current_admin_api),
|
||||
):
|
||||
"""
|
||||
Get single violation with details
|
||||
|
||||
Includes assignments and comments.
|
||||
"""
|
||||
violation = code_quality_service.get_violation_by_id(db, violation_id)
|
||||
|
||||
if not violation:
|
||||
raise ViolationNotFoundException(violation_id)
|
||||
|
||||
# Format assignments
|
||||
assignments = [
|
||||
{
|
||||
"id": a.id,
|
||||
"user_id": a.user_id,
|
||||
"assigned_at": a.assigned_at.isoformat(),
|
||||
"assigned_by": a.assigned_by,
|
||||
"due_date": a.due_date.isoformat() if a.due_date else None,
|
||||
"priority": a.priority,
|
||||
}
|
||||
for a in violation.assignments
|
||||
]
|
||||
|
||||
# Format comments
|
||||
comments = [
|
||||
{
|
||||
"id": c.id,
|
||||
"user_id": c.user_id,
|
||||
"comment": c.comment,
|
||||
"created_at": c.created_at.isoformat(),
|
||||
}
|
||||
for c in violation.comments
|
||||
]
|
||||
|
||||
return ViolationDetailResponse(
|
||||
id=violation.id,
|
||||
scan_id=violation.scan_id,
|
||||
validator_type=violation.validator_type,
|
||||
rule_id=violation.rule_id,
|
||||
rule_name=violation.rule_name,
|
||||
severity=violation.severity,
|
||||
file_path=violation.file_path,
|
||||
line_number=violation.line_number,
|
||||
message=violation.message,
|
||||
context=violation.context,
|
||||
suggestion=violation.suggestion,
|
||||
status=violation.status,
|
||||
assigned_to=violation.assigned_to,
|
||||
resolved_at=(
|
||||
violation.resolved_at.isoformat() if violation.resolved_at else None
|
||||
),
|
||||
resolved_by=violation.resolved_by,
|
||||
resolution_note=violation.resolution_note,
|
||||
created_at=violation.created_at.isoformat(),
|
||||
assignments=assignments,
|
||||
comments=comments,
|
||||
)
|
||||
|
||||
|
||||
@admin_code_quality_router.post("/violations/{violation_id}/assign")
|
||||
async def assign_violation(
|
||||
violation_id: int,
|
||||
request: AssignViolationRequest,
|
||||
db: Session = Depends(get_db),
|
||||
current_user: UserContext = Depends(get_current_admin_api),
|
||||
):
|
||||
"""
|
||||
Assign violation to a developer
|
||||
|
||||
Updates violation status to 'assigned'.
|
||||
"""
|
||||
assignment = code_quality_service.assign_violation(
|
||||
db,
|
||||
violation_id=violation_id,
|
||||
user_id=request.user_id,
|
||||
assigned_by=current_user.id,
|
||||
due_date=request.due_date,
|
||||
priority=request.priority,
|
||||
)
|
||||
db.commit()
|
||||
|
||||
return {
|
||||
"id": assignment.id,
|
||||
"violation_id": assignment.violation_id,
|
||||
"user_id": assignment.user_id,
|
||||
"assigned_at": assignment.assigned_at.isoformat(),
|
||||
"assigned_by": assignment.assigned_by,
|
||||
"due_date": (assignment.due_date.isoformat() if assignment.due_date else None),
|
||||
"priority": assignment.priority,
|
||||
}
|
||||
|
||||
|
||||
@admin_code_quality_router.post("/violations/{violation_id}/resolve")
async def resolve_violation(
    violation_id: int,
    request: ResolveViolationRequest,
    db: Session = Depends(get_db),
    current_user: UserContext = Depends(get_current_admin_api),
):
    """
    Mark violation as resolved

    Records resolution timestamp and user.
    ViolationNotFoundException bubbles up if violation doesn't exist.
    """
    resolved = code_quality_service.resolve_violation(
        db,
        violation_id=violation_id,
        resolved_by=current_user.id,
        resolution_note=request.resolution_note,
    )
    db.commit()

    when = resolved.resolved_at
    return {
        "id": resolved.id,
        "status": resolved.status,
        "resolved_at": when.isoformat() if when else None,
        "resolved_by": resolved.resolved_by,
        "resolution_note": resolved.resolution_note,
    }
|
||||
|
||||
|
||||
@admin_code_quality_router.post("/violations/{violation_id}/ignore")
async def ignore_violation(
    violation_id: int,
    request: IgnoreViolationRequest,
    db: Session = Depends(get_db),
    current_user: UserContext = Depends(get_current_admin_api),
):
    """
    Mark violation as ignored (won't fix)

    Records reason for ignoring.
    ViolationNotFoundException bubbles up if violation doesn't exist.
    """
    ignored = code_quality_service.ignore_violation(
        db,
        violation_id=violation_id,
        ignored_by=current_user.id,
        reason=request.reason,
    )
    db.commit()

    # NOTE(review): the response exposes the violation's resolved_* columns;
    # presumably the service populates them for the 'ignored' state too —
    # confirm against ignore_violation() in the service layer.
    when = ignored.resolved_at
    return {
        "id": ignored.id,
        "status": ignored.status,
        "resolved_at": when.isoformat() if when else None,
        "resolved_by": ignored.resolved_by,
        "resolution_note": ignored.resolution_note,
    }
|
||||
|
||||
|
||||
@admin_code_quality_router.post("/violations/{violation_id}/comments")
async def add_comment(
    violation_id: int,
    request: AddCommentRequest,
    db: Session = Depends(get_db),
    current_user: UserContext = Depends(get_current_admin_api),
):
    """
    Add comment to violation

    For team collaboration and discussion.
    """
    new_comment = code_quality_service.add_comment(
        db,
        violation_id=violation_id,
        user_id=current_user.id,
        comment=request.comment,
    )
    db.commit()

    return {
        "id": new_comment.id,
        "violation_id": new_comment.violation_id,
        "user_id": new_comment.user_id,
        "comment": new_comment.comment,
        "created_at": new_comment.created_at.isoformat(),
    }
|
||||
|
||||
|
||||
@admin_code_quality_router.get("/stats", response_model=CodeQualityDashboardStatsResponse)
async def get_dashboard_stats(
    validator_type: ValidatorType | None = Query(
        None, description="Filter by validator type (returns combined stats if not specified)"
    ),
    db: Session = Depends(get_db),
    current_user: UserContext = Depends(get_current_admin_api),
):
    """
    Get dashboard statistics

    Returns comprehensive stats for the dashboard: counts by severity and
    status, technical debt score, trend data (last 7 scans), top violating
    files, violations by rule and module, and a per-validator breakdown.

    When validator_type is specified, returns stats for that type only;
    otherwise returns combined stats across all validators.
    """
    # Translate the enum (if provided) into its raw value for the service.
    selected = validator_type.value if validator_type else None
    stats = code_quality_service.get_dashboard_stats(db, validator_type=selected)
    return CodeQualityDashboardStatsResponse(**stats)
|
||||
|
||||
|
||||
@admin_code_quality_router.get("/validator-types")
async def get_validator_types(
    current_user: UserContext = Depends(get_current_admin_api),
):
    """
    Get list of available validator types

    Returns the supported validator types for filtering.
    """
    descriptions = {
        "architecture": "Architectural patterns and code organization rules",
        "security": "Security vulnerabilities and best practices",
        "performance": "Performance issues and optimizations",
    }
    return {"validator_types": VALID_VALIDATOR_TYPES, "descriptions": descriptions}
|
||||
343
app/modules/monitoring/routes/api/admin_logs.py
Normal file
343
app/modules/monitoring/routes/api/admin_logs.py
Normal file
@@ -0,0 +1,343 @@
|
||||
# app/modules/monitoring/routes/api/admin_logs.py
|
||||
"""
|
||||
Log management endpoints for admin.
|
||||
|
||||
Provides endpoints for:
|
||||
- Viewing database logs with filters
|
||||
- Reading file logs
|
||||
- Log statistics
|
||||
- Log settings management
|
||||
- Log cleanup operations
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
from fastapi import APIRouter, Depends, Query
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.api.deps import get_current_admin_api
|
||||
from app.core.database import get_db
|
||||
from app.core.logging import reload_log_level
|
||||
from app.exceptions import ResourceNotFoundException
|
||||
from app.modules.tenancy.exceptions import ConfirmationRequiredException
|
||||
from app.modules.monitoring.services.admin_audit_service import admin_audit_service
|
||||
from app.modules.core.services.admin_settings_service import admin_settings_service
|
||||
from app.modules.monitoring.services.log_service import log_service
|
||||
from models.schema.auth import UserContext
|
||||
from models.schema.admin import (
|
||||
ApplicationLogFilters,
|
||||
ApplicationLogListResponse,
|
||||
FileLogResponse,
|
||||
LogCleanupResponse,
|
||||
LogDeleteResponse,
|
||||
LogFileListResponse,
|
||||
LogSettingsResponse,
|
||||
LogSettingsUpdate,
|
||||
LogSettingsUpdateResponse,
|
||||
LogStatistics,
|
||||
)
|
||||
|
||||
# Router for all admin log-management endpoints; mounted under /logs.
admin_logs_router = APIRouter(prefix="/logs")
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# DATABASE LOGS ENDPOINTS
|
||||
# ============================================================================
|
||||
|
||||
|
||||
@admin_logs_router.get("/database", response_model=ApplicationLogListResponse)
def get_database_logs(
    level: str | None = Query(None, description="Filter by log level"),
    logger_name: str | None = Query(None, description="Filter by logger name"),
    module: str | None = Query(None, description="Filter by module"),
    user_id: int | None = Query(None, description="Filter by user ID"),
    vendor_id: int | None = Query(None, description="Filter by vendor ID"),
    search: str | None = Query(None, description="Search in message"),
    skip: int = Query(0, ge=0),
    limit: int = Query(100, ge=1, le=1000),
    db: Session = Depends(get_db),
    current_admin: UserContext = Depends(get_current_admin_api),
):
    """
    Get logs from database with filtering.

    All filters are combined; results are paginated via skip/limit.
    """
    return log_service.get_database_logs(
        db,
        ApplicationLogFilters(
            level=level,
            logger_name=logger_name,
            module=module,
            user_id=user_id,
            vendor_id=vendor_id,
            search=search,
            skip=skip,
            limit=limit,
        ),
    )
|
||||
|
||||
|
||||
@admin_logs_router.get("/statistics", response_model=LogStatistics)
def get_log_statistics(
    days: int = Query(7, ge=1, le=90, description="Number of days to analyze"),
    db: Session = Depends(get_db),
    current_admin: UserContext = Depends(get_current_admin_api),
):
    """
    Get log statistics for the last N days.

    Returns counts by level, module, and recent critical errors.
    """
    stats = log_service.get_log_statistics(db, days)
    return stats
|
||||
|
||||
|
||||
@admin_logs_router.delete("/database/cleanup", response_model=LogCleanupResponse)
def cleanup_old_logs(
    retention_days: int = Query(30, ge=1, le=365),
    confirm: bool = Query(False, description="Must be true to confirm cleanup"),
    db: Session = Depends(get_db),
    current_admin: UserContext = Depends(get_current_admin_api),
):
    """
    Delete logs older than retention period.

    Requires confirmation parameter.
    """
    if not confirm:
        raise ConfirmationRequiredException(operation="cleanup_logs")

    removed = log_service.cleanup_old_logs(db, retention_days)

    # Record the bulk deletion in the admin audit trail.
    admin_audit_service.log_action(
        db=db,
        admin_user_id=current_admin.id,
        action="cleanup_logs",
        target_type="application_logs",
        target_id="bulk",
        details={"retention_days": retention_days, "deleted_count": removed},
    )

    summary = f"Deleted {removed} log entries older than {retention_days} days"
    return LogCleanupResponse(message=summary, deleted_count=removed)
|
||||
|
||||
|
||||
@admin_logs_router.delete("/database/{log_id}", response_model=LogDeleteResponse)
def delete_log(
    log_id: int,
    db: Session = Depends(get_db),
    current_admin: UserContext = Depends(get_current_admin_api),
):
    """Delete a specific log entry and audit the deletion."""
    result_message = log_service.delete_log(db, log_id)

    # Record the single-row deletion in the admin audit trail.
    admin_audit_service.log_action(
        db=db,
        admin_user_id=current_admin.id,
        action="delete_log",
        target_type="application_log",
        target_id=str(log_id),
        details={},
    )

    return LogDeleteResponse(message=result_message)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# FILE LOGS ENDPOINTS
|
||||
# ============================================================================
|
||||
|
||||
|
||||
@admin_logs_router.get("/files", response_model=LogFileListResponse)
def list_log_files(
    current_admin: UserContext = Depends(get_current_admin_api),
):
    """
    List all available log files.

    Returns list of log files with size and modification date.
    """
    available = log_service.list_log_files()
    return LogFileListResponse(files=available)
|
||||
|
||||
|
||||
# FIX: the route path previously contained the literal text "(unknown)" where
# the {filename} path parameter belongs, so FastAPI could never bind `filename`.
@admin_logs_router.get("/files/{filename}", response_model=FileLogResponse)
def get_file_log(
    filename: str,
    lines: int = Query(500, ge=1, le=10000, description="Number of lines to read"),
    current_admin: UserContext = Depends(get_current_admin_api),
):
    """
    Read log file content.

    Returns the last N lines from the specified log file.
    """
    return log_service.get_file_logs(filename, lines)
|
||||
|
||||
|
||||
# FIX: the route path and the Content-Disposition header both contained the
# literal text "(unknown)" where the {filename} placeholder belongs.
@admin_logs_router.get("/files/{filename}/download")
def download_log_file(
    filename: str,
    current_admin: UserContext = Depends(get_current_admin_api),
):
    """
    Download log file.

    Returns the entire log file as a text attachment.

    Raises:
        ResourceNotFoundException: if the file does not exist (also used to
            mask requests whose filename contains path separators).
    """
    from pathlib import Path

    from fastapi.responses import FileResponse

    from app.core.config import settings

    # Reject any filename that is not a bare file name (e.g. "../secrets")
    # before touching the filesystem — the value comes from the URL.
    if Path(filename).name != filename:
        raise ResourceNotFoundException(resource_type="LogFile", identifier=filename)

    # Determine log file path: directory of the configured log file, or ./logs.
    log_file_path = settings.log_file
    if log_file_path:
        log_file = Path(log_file_path).parent / filename
    else:
        log_file = Path("logs") / filename

    if not log_file.exists():
        raise ResourceNotFoundException(resource_type="LogFile", identifier=filename)

    # Audit the download with a short-lived session.  Closing the *generator*
    # (not just the session) lets get_db()'s own cleanup run.
    from app.core.database import get_db

    db_gen = get_db()
    db = next(db_gen)
    try:
        admin_audit_service.log_action(
            db=db,
            admin_user_id=current_admin.id,
            action="download_log_file",
            target_type="log_file",
            target_id=filename,
            details={"size_bytes": log_file.stat().st_size},
        )
    finally:
        db_gen.close()

    return FileResponse(
        log_file,
        media_type="text/plain",
        filename=filename,
        headers={"Content-Disposition": f'attachment; filename="{filename}"'},
    )
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# LOG SETTINGS ENDPOINTS
|
||||
# ============================================================================
|
||||
|
||||
|
||||
@admin_logs_router.get("/settings", response_model=LogSettingsResponse)
def get_log_settings(
    db: Session = Depends(get_db),
    current_admin: UserContext = Depends(get_current_admin_api),
):
    """Get current log configuration settings."""
    # Local alias keeps the six lookups readable.
    fetch = admin_settings_service.get_setting_value

    log_level = fetch(db, "log_level", "INFO")
    max_size_mb = fetch(db, "log_file_max_size_mb", 10)
    backup_count = fetch(db, "log_file_backup_count", 5)
    retention_days = fetch(db, "db_log_retention_days", 30)
    file_enabled = fetch(db, "file_logging_enabled", "true")
    db_enabled = fetch(db, "db_logging_enabled", "true")

    # Settings are stored loosely typed; normalize to the response schema.
    return LogSettingsResponse(
        log_level=str(log_level),
        log_file_max_size_mb=int(max_size_mb),
        log_file_backup_count=int(backup_count),
        db_log_retention_days=int(retention_days),
        file_logging_enabled=str(file_enabled).lower() == "true",
        db_logging_enabled=str(db_enabled).lower() == "true",
    )
|
||||
|
||||
|
||||
@admin_logs_router.put("/settings", response_model=LogSettingsUpdateResponse)
def update_log_settings(
    settings_update: LogSettingsUpdate,
    db: Session = Depends(get_db),
    current_admin: UserContext = Depends(get_current_admin_api),
):
    """
    Update log configuration settings.

    Changes are applied immediately without restart (for log level).
    File rotation settings require restart.

    Only fields present (non-None) in the payload are persisted.  Previously
    three of the four fields used truthiness checks while one used
    ``is not None``; all four now use ``is not None`` consistently.
    """
    from models.schema.admin import AdminSettingUpdate

    updated: list[str] = []

    def _apply(key: str, raw_value) -> None:
        # Persist one setting (stored as a string) and record the field name.
        admin_settings_service.update_setting(
            db,
            key,
            AdminSettingUpdate(value=str(raw_value)),
            current_admin.id,
        )
        updated.append(key)

    if settings_update.log_level is not None:
        _apply("log_level", settings_update.log_level)
        # Apply the new level to live loggers immediately (no restart needed).
        reload_log_level()

    if settings_update.log_file_max_size_mb is not None:
        _apply("log_file_max_size_mb", settings_update.log_file_max_size_mb)

    if settings_update.log_file_backup_count is not None:
        _apply("log_file_backup_count", settings_update.log_file_backup_count)

    if settings_update.db_log_retention_days is not None:
        _apply("db_log_retention_days", settings_update.db_log_retention_days)

    # Record the change set in the admin audit trail.
    admin_audit_service.log_action(
        db=db,
        admin_user_id=current_admin.id,
        action="update_log_settings",
        target_type="settings",
        target_id="logging",
        details={"updated_fields": updated},
    )

    return LogSettingsUpdateResponse(
        message="Log settings updated successfully",
        updated_fields=updated,
        note="Log level changes are applied immediately. File rotation settings require restart.",
    )
|
||||
214
app/modules/monitoring/routes/api/admin_platform_health.py
Normal file
214
app/modules/monitoring/routes/api/admin_platform_health.py
Normal file
@@ -0,0 +1,214 @@
|
||||
# app/modules/monitoring/routes/api/admin_platform_health.py
|
||||
"""
|
||||
Platform health and capacity monitoring endpoints.
|
||||
|
||||
Provides:
|
||||
- Overall platform health status
|
||||
- Capacity metrics and thresholds
|
||||
- Scaling recommendations
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
from fastapi import APIRouter, Depends
|
||||
from pydantic import BaseModel
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.api.deps import get_current_admin_api
|
||||
from app.core.database import get_db
|
||||
from app.modules.monitoring.services.platform_health_service import platform_health_service
|
||||
from models.schema.auth import UserContext
|
||||
|
||||
# Router for platform health/capacity endpoints; mounted under /platform.
admin_platform_health_router = APIRouter(prefix="/platform")
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Schemas
|
||||
# ============================================================================
|
||||
|
||||
|
||||
class SystemMetrics(BaseModel):
    """Host-level system resource metrics (CPU, memory, disk)."""

    cpu_percent: float  # CPU utilization percentage
    memory_percent: float  # RAM utilization percentage
    memory_used_gb: float
    memory_total_gb: float
    disk_percent: float  # disk utilization percentage
    disk_used_gb: float
    disk_total_gb: float
|
||||
|
||||
|
||||
class DatabaseMetrics(BaseModel):
    """Database size and key row counts."""

    size_mb: float  # total database size in megabytes
    products_count: int
    orders_count: int
    vendors_count: int
    inventory_count: int
|
||||
|
||||
|
||||
class ImageStorageMetrics(BaseModel):
    """Image storage footprint metrics."""

    total_files: int
    total_size_mb: float
    total_size_gb: float
    max_files_per_dir: int  # largest file count observed in a single directory
    products_estimated: int  # estimated number of products backed by these files
|
||||
|
||||
|
||||
class CapacityThreshold(BaseModel):
    """One capacity dimension with its warning/critical thresholds."""

    name: str
    current: float  # current measured value
    warning: float  # value at which status becomes "warning"
    critical: float  # value at which status becomes "critical"
    limit: float  # hard capacity limit for this dimension
    status: str  # ok, warning, critical
    percent_used: float  # current / limit expressed as a percentage
|
||||
|
||||
|
||||
class ScalingRecommendation(BaseModel):
    """A single scaling recommendation produced by the health service."""

    priority: str  # info, warning, critical
    title: str
    description: str
    action: str | None = None  # suggested remediation, when one exists
|
||||
|
||||
|
||||
class PlatformHealthResponse(BaseModel):
    """Complete platform health response returned by GET /platform/health."""

    timestamp: str  # ISO-8601 report time
    overall_status: str  # healthy, degraded, critical
    system: SystemMetrics
    database: DatabaseMetrics
    image_storage: ImageStorageMetrics
    thresholds: list[CapacityThreshold]
    recommendations: list[ScalingRecommendation]
    infrastructure_tier: str  # current infrastructure tier name
    next_tier_trigger: str | None = None  # condition that would trigger the next tier
|
||||
|
||||
|
||||
class CapacityMetricsResponse(BaseModel):
    """Capacity-focused metrics for planning (GET /platform/capacity)."""

    products_total: int
    products_by_vendor: dict[str, int]  # vendor identifier -> product count
    images_total: int
    storage_used_gb: float
    database_size_mb: float
    orders_this_month: int
    active_vendors: int
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Endpoints
|
||||
# ============================================================================
|
||||
|
||||
|
||||
@admin_platform_health_router.get("/health", response_model=PlatformHealthResponse)
async def get_platform_health(
    db: Session = Depends(get_db),
    current_admin: UserContext = Depends(get_current_admin_api),
):
    """Get comprehensive platform health status.

    Returns system metrics, database stats, storage info, and recommendations.
    """
    report = platform_health_service.get_full_health_report(db)

    # Lift the nested dicts from the service into typed response models.
    return PlatformHealthResponse(
        timestamp=report["timestamp"],
        overall_status=report["overall_status"],
        system=SystemMetrics(**report["system"]),
        database=DatabaseMetrics(**report["database"]),
        image_storage=ImageStorageMetrics(**report["image_storage"]),
        thresholds=[CapacityThreshold(**item) for item in report["thresholds"]],
        recommendations=[
            ScalingRecommendation(**item) for item in report["recommendations"]
        ],
        infrastructure_tier=report["infrastructure_tier"],
        next_tier_trigger=report["next_tier_trigger"],
    )
|
||||
|
||||
|
||||
@admin_platform_health_router.get("/capacity", response_model=CapacityMetricsResponse)
async def get_capacity_metrics(
    db: Session = Depends(get_db),
    current_admin: UserContext = Depends(get_current_admin_api),
):
    """Get capacity-focused metrics for planning."""
    raw = platform_health_service.get_capacity_metrics(db)
    return CapacityMetricsResponse(**raw)
|
||||
|
||||
|
||||
@admin_platform_health_router.get("/subscription-capacity")
async def get_subscription_capacity(
    db: Session = Depends(get_db),
    current_admin: UserContext = Depends(get_current_admin_api),
):
    """
    Get subscription-based capacity metrics.

    Shows theoretical vs actual capacity based on all vendor subscriptions.
    """
    capacity = platform_health_service.get_subscription_capacity(db)
    return capacity
|
||||
|
||||
|
||||
@admin_platform_health_router.get("/trends")
async def get_growth_trends(
    days: int = 30,
    db: Session = Depends(get_db),
    current_admin: UserContext = Depends(get_current_admin_api),
):
    """
    Get growth trends over the specified period.

    Returns growth rates and projections for key metrics.
    """
    # Imported lazily: avoids a module-level dependency on the billing module.
    from app.modules.billing.services.capacity_forecast_service import (
        capacity_forecast_service,
    )

    return capacity_forecast_service.get_growth_trends(db, days=days)
|
||||
|
||||
|
||||
@admin_platform_health_router.get("/recommendations")
async def get_scaling_recommendations(
    db: Session = Depends(get_db),
    current_admin: UserContext = Depends(get_current_admin_api),
):
    """
    Get scaling recommendations based on current capacity and growth.

    Returns prioritized list of recommendations.
    """
    # Imported lazily: avoids a module-level dependency on the billing module.
    from app.modules.billing.services.capacity_forecast_service import (
        capacity_forecast_service,
    )

    return capacity_forecast_service.get_scaling_recommendations(db)
|
||||
|
||||
|
||||
@admin_platform_health_router.post("/snapshot")
async def capture_snapshot(
    db: Session = Depends(get_db),
    current_admin: UserContext = Depends(get_current_admin_api),
):
    """
    Manually capture a capacity snapshot.

    Normally run automatically by daily background job.
    """
    # Imported lazily: avoids a module-level dependency on the billing module.
    from app.modules.billing.services.capacity_forecast_service import (
        capacity_forecast_service,
    )

    snap = capacity_forecast_service.capture_daily_snapshot(db)
    db.commit()

    return {
        "id": snap.id,
        "snapshot_date": snap.snapshot_date.isoformat(),
        "total_vendors": snap.total_vendors,
        "total_products": snap.total_products,
        "message": "Snapshot captured successfully",
    }
|
||||
258
app/modules/monitoring/routes/api/admin_tasks.py
Normal file
258
app/modules/monitoring/routes/api/admin_tasks.py
Normal file
@@ -0,0 +1,258 @@
|
||||
# app/modules/monitoring/routes/api/admin_tasks.py
|
||||
"""
|
||||
Background Tasks Monitoring API
|
||||
Provides unified view of all background tasks across the system
|
||||
"""
|
||||
|
||||
from datetime import UTC, datetime
|
||||
|
||||
from fastapi import APIRouter, Depends, Query
|
||||
from pydantic import BaseModel
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.api.deps import get_current_admin_api
|
||||
from app.core.database import get_db
|
||||
from app.modules.monitoring.services.background_tasks_service import background_tasks_service
|
||||
from models.schema.auth import UserContext
|
||||
|
||||
# Router for the unified background-task monitoring endpoints (/tasks).
admin_tasks_router = APIRouter(prefix="/tasks")
|
||||
|
||||
|
||||
class BackgroundTaskResponse(BaseModel):
    """Unified background task response covering all task sources."""

    id: int
    task_type: str  # 'import', 'test_run', or 'code_quality_scan'
    status: str
    started_at: str | None  # ISO-8601, None if not started
    completed_at: str | None  # ISO-8601, None if still running / unknown
    duration_seconds: float | None  # elapsed time; live for running tasks
    description: str  # human-readable one-line summary
    triggered_by: str | None
    error_message: str | None
    details: dict | None  # task-type-specific payload
    celery_task_id: str | None = None  # Celery task ID for Flower linking
|
||||
|
||||
|
||||
class BackgroundTasksStatsResponse(BaseModel):
    """Aggregate statistics for background tasks across all sources."""

    total_tasks: int
    running: int
    completed: int
    failed: int
    tasks_today: int
    avg_duration_seconds: float | None

    # Per-source breakdowns (raw stats dicts from the service)
    import_jobs: dict
    test_runs: dict
    code_quality_scans: dict
|
||||
|
||||
|
||||
def _convert_import_to_response(job) -> BackgroundTaskResponse:
    """Map a MarketplaceImportJob row onto the unified task response."""
    elapsed = None
    if job.started_at and job.completed_at:
        elapsed = (job.completed_at - job.started_at).total_seconds()
    elif job.started_at and job.status == "processing":
        # Still running: report wall-clock time since start.
        elapsed = (datetime.now(UTC) - job.started_at).total_seconds()

    url = job.source_url
    if len(url) > 50:
        description = f"Import from {job.marketplace}: {url[:50]}..."
    else:
        description = f"Import from {job.marketplace}: {url}"

    return BackgroundTaskResponse(
        id=job.id,
        task_type="import",
        status=job.status,
        started_at=job.started_at.isoformat() if job.started_at else None,
        completed_at=job.completed_at.isoformat() if job.completed_at else None,
        duration_seconds=elapsed,
        description=description,
        triggered_by=job.user.username if job.user else None,
        error_message=job.error_message,
        details={
            "marketplace": job.marketplace,
            "vendor_id": job.vendor_id,
            "imported": job.imported_count,
            "updated": job.updated_count,
            "errors": job.error_count,
            "total_processed": job.total_processed,
        },
        celery_task_id=getattr(job, "celery_task_id", None),
    )
|
||||
|
||||
|
||||
def _convert_test_run_to_response(run) -> BackgroundTaskResponse:
    """Map a TestRun row onto the unified task response."""
    elapsed = run.duration_seconds
    if run.status == "running" and run.timestamp:
        # Still running: report wall-clock time since the run's timestamp.
        elapsed = (datetime.now(UTC) - run.timestamp).total_seconds()

    return BackgroundTaskResponse(
        id=run.id,
        task_type="test_run",
        status=run.status,
        started_at=run.timestamp.isoformat() if run.timestamp else None,
        completed_at=None,  # TestRun does not track a completion timestamp
        duration_seconds=elapsed,
        description=f"Test run: {run.test_path}",
        triggered_by=run.triggered_by,
        error_message=None,
        details={
            "test_path": run.test_path,
            "total_tests": run.total_tests,
            "passed": run.passed,
            "failed": run.failed,
            "errors": run.errors,
            "pass_rate": run.pass_rate,
            "git_branch": run.git_branch,
        },
        celery_task_id=getattr(run, "celery_task_id", None),
    )
|
||||
|
||||
|
||||
def _convert_scan_to_response(scan) -> BackgroundTaskResponse:
    """Map an ArchitectureScan row onto the unified task response."""
    elapsed = scan.duration_seconds
    if scan.status in ["pending", "running"] and scan.started_at:
        # Not finished yet: report wall-clock time since start.
        elapsed = (datetime.now(UTC) - scan.started_at).total_seconds()

    # Human-readable label; falls back to the raw validator type.
    display_names = {
        "architecture": "Architecture",
        "security": "Security",
        "performance": "Performance",
    }
    label = display_names.get(scan.validator_type, scan.validator_type)

    return BackgroundTaskResponse(
        id=scan.id,
        task_type="code_quality_scan",
        status=scan.status,
        started_at=scan.started_at.isoformat() if scan.started_at else None,
        completed_at=scan.completed_at.isoformat() if scan.completed_at else None,
        duration_seconds=elapsed,
        description=f"{label} code quality scan",
        triggered_by=scan.triggered_by,
        error_message=scan.error_message,
        details={
            "validator_type": scan.validator_type,
            "total_files": scan.total_files,
            "total_violations": scan.total_violations,
            "errors": scan.errors,
            "warnings": scan.warnings,
            "git_commit_hash": scan.git_commit_hash,
            "progress_message": scan.progress_message,
        },
        celery_task_id=getattr(scan, "celery_task_id", None),
    )
|
||||
|
||||
|
||||
@admin_tasks_router.get("", response_model=list[BackgroundTaskResponse])
|
||||
async def list_background_tasks(
|
||||
status: str | None = Query(None, description="Filter by status"),
|
||||
task_type: str | None = Query(
|
||||
None, description="Filter by type (import, test_run, code_quality_scan)"
|
||||
),
|
||||
limit: int = Query(50, ge=1, le=200),
|
||||
db: Session = Depends(get_db),
|
||||
current_user: UserContext = Depends(get_current_admin_api),
|
||||
):
|
||||
"""
|
||||
List all background tasks across the system
|
||||
|
||||
Returns a unified view of import jobs, test runs, and code quality scans.
|
||||
"""
|
||||
tasks = []
|
||||
|
||||
# Get import jobs
|
||||
if task_type is None or task_type == "import":
|
||||
import_jobs = background_tasks_service.get_import_jobs(
|
||||
db, status=status, limit=limit
|
||||
)
|
||||
tasks.extend([_convert_import_to_response(job) for job in import_jobs])
|
||||
|
||||
# Get test runs
|
||||
if task_type is None or task_type == "test_run":
|
||||
test_runs = background_tasks_service.get_test_runs(
|
||||
db, status=status, limit=limit
|
||||
)
|
||||
tasks.extend([_convert_test_run_to_response(run) for run in test_runs])
|
||||
|
||||
# Get code quality scans
|
||||
if task_type is None or task_type == "code_quality_scan":
|
||||
scans = background_tasks_service.get_code_quality_scans(
|
||||
db, status=status, limit=limit
|
||||
)
|
||||
tasks.extend([_convert_scan_to_response(scan) for scan in scans])
|
||||
|
||||
# Sort by start time (most recent first)
|
||||
tasks.sort(
|
||||
key=lambda t: t.started_at or "1970-01-01T00:00:00",
|
||||
reverse=True,
|
||||
)
|
||||
|
||||
return tasks[:limit]
|
||||
|
||||
|
||||
@admin_tasks_router.get("/stats", response_model=BackgroundTasksStatsResponse)
async def get_background_tasks_stats(
    db: Session = Depends(get_db),
    current_user: UserContext = Depends(get_current_admin_api),
):
    """
    Get statistics for background tasks
    """
    import_stats = background_tasks_service.get_import_stats(db)
    test_stats = background_tasks_service.get_test_run_stats(db)
    scan_stats = background_tasks_service.get_scan_stats(db)

    def _combined(key: str) -> int:
        # Sum one counter across all three task sources.
        return import_stats[key] + test_stats[key] + scan_stats[key]

    return BackgroundTasksStatsResponse(
        total_tasks=_combined("total"),
        running=_combined("running"),
        completed=_combined("completed"),
        failed=_combined("failed"),
        tasks_today=_combined("today"),
        avg_duration_seconds=test_stats.get("avg_duration"),
        import_jobs=import_stats,
        test_runs=test_stats,
        code_quality_scans=scan_stats,
    )
|
||||
|
||||
|
||||
@admin_tasks_router.get("/running", response_model=list[BackgroundTaskResponse])
async def list_running_tasks(
    db: Session = Depends(get_db),
    current_user: UserContext = Depends(get_current_admin_api),
):
    """
    List currently running background tasks.

    Merges in-flight import jobs, test runs, and code quality scans into
    one unified task list.
    """
    running: list[BackgroundTaskResponse] = []

    running += [
        _convert_import_to_response(job)
        for job in background_tasks_service.get_running_imports(db)
    ]
    running += [
        _convert_test_run_to_response(run)
        for run in background_tasks_service.get_running_test_runs(db)
    ]
    running += [
        _convert_scan_to_response(scan)
        for scan in background_tasks_service.get_running_scans(db)
    ]

    return running
|
||||
338
app/modules/monitoring/routes/api/admin_tests.py
Normal file
338
app/modules/monitoring/routes/api/admin_tests.py
Normal file
@@ -0,0 +1,338 @@
|
||||
# app/modules/monitoring/routes/api/admin_tests.py
|
||||
"""
|
||||
Test Runner API Endpoints
|
||||
RESTful API for running pytest and viewing test results
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, BackgroundTasks, Depends, Query
|
||||
from pydantic import BaseModel, Field
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.api.deps import get_current_admin_api
|
||||
from app.core.database import get_db
|
||||
from app.modules.dev_tools.services.test_runner_service import test_runner_service
|
||||
from models.schema.auth import UserContext
|
||||
|
||||
admin_tests_router = APIRouter(prefix="/tests")
|
||||
|
||||
|
||||
# Pydantic Models for API
|
||||
|
||||
|
||||
class TestRunResponse(BaseModel):
    """Response model for a single pytest run (summary-level)."""

    id: int
    # ISO-8601 string (serialized via the run's datetime .isoformat()).
    timestamp: str
    total_tests: int
    passed: int
    failed: int
    errors: int
    skipped: int
    # pytest xfail/xpass outcome counts.
    xfailed: int
    xpassed: int
    pass_rate: float
    duration_seconds: float
    # Coverage percentage when coverage was collected, else None.
    coverage_percent: float | None
    # Who started the run, e.g. "manual:<username>".
    triggered_by: str | None
    git_commit_hash: str | None
    git_branch: str | None
    # Path that was passed to pytest (e.g. "tests").
    test_path: str | None
    status: str

    class Config:
        # Allow construction directly from ORM objects.
        from_attributes = True
|
||||
|
||||
|
||||
class TestResultResponse(BaseModel):
    """Response model for a single test result within a run."""

    id: int
    # Full pytest node id, e.g. "tests/test_x.py::TestY::test_z".
    node_id: str
    test_name: str
    test_file: str
    # Containing test class, if any.
    test_class: str | None
    # pytest outcome (passed, failed, error, skipped).
    outcome: str
    duration_seconds: float
    # Failure details; presumably populated only for non-passing
    # outcomes — confirm against the test runner service.
    error_message: str | None
    traceback: str | None

    class Config:
        # Allow construction directly from ORM objects.
        from_attributes = True
|
||||
|
||||
|
||||
class RunTestsRequest(BaseModel):
    """Request model for starting a background test run."""

    # Path handed to pytest; defaults to the whole test suite.
    test_path: str = Field("tests", description="Path to tests to run")
    extra_args: list[str] | None = Field(
        None, description="Additional pytest arguments"
    )
|
||||
|
||||
|
||||
class TestDashboardStatsResponse(BaseModel):
    """Response model for dashboard statistics (see GET /tests/stats)."""

    # Current run stats
    total_tests: int
    passed: int
    failed: int
    errors: int
    skipped: int
    pass_rate: float
    duration_seconds: float
    coverage_percent: float | None
    # Timestamp and status of the most recent run, if any.
    last_run: str | None
    last_run_status: str | None

    # Collection stats (from the test-collection cache; see POST /tests/collect)
    total_test_files: int
    collected_tests: int
    unit_tests: int
    integration_tests: int
    performance_tests: int
    system_tests: int
    last_collected: str | None

    # Trend and breakdown data — element shapes are defined by
    # test_runner_service.get_dashboard_stats().
    trend: list[dict]
    by_category: dict
    top_failing: list[dict]
|
||||
|
||||
|
||||
# API Endpoints
|
||||
|
||||
|
||||
@admin_tests_router.post("/run", response_model=TestRunResponse)
async def run_tests(
    background_tasks: BackgroundTasks,
    request: RunTestsRequest | None = None,
    db: Session = Depends(get_db),
    current_user: UserContext = Depends(get_current_admin_api),
):
    """
    Start a pytest run in the background.

    Requires admin authentication. Persists a test run record, then hands
    execution off to the task dispatcher (Celery when configured, otherwise
    FastAPI BackgroundTasks). Returns immediately with the new run record;
    poll GET /runs/{run_id} to check status.
    """
    path = request.test_path if request else "tests"
    args = request.extra_args if request else None

    # Persist the run record first so the dispatcher has an id to report on.
    run = test_runner_service.create_test_run(
        db,
        test_path=path,
        triggered_by=f"manual:{current_user.username}",
    )
    db.commit()

    # Dispatch via task dispatcher (supports Celery or BackgroundTasks)
    from app.tasks.dispatcher import task_dispatcher

    celery_task_id = task_dispatcher.dispatch_test_run(
        background_tasks=background_tasks,
        run_id=run.id,
        test_path=path,
        extra_args=args,
    )

    # When Celery handled the dispatch, remember its task id for correlation.
    if celery_task_id:
        run.celery_task_id = celery_task_id
        db.commit()

    copied = (
        "id", "total_tests", "passed", "failed", "errors", "skipped",
        "xfailed", "xpassed", "pass_rate", "duration_seconds",
        "coverage_percent", "triggered_by", "git_commit_hash",
        "git_branch", "test_path", "status",
    )
    return TestRunResponse(
        timestamp=run.timestamp.isoformat(),
        **{field: getattr(run, field) for field in copied},
    )
|
||||
|
||||
|
||||
@admin_tests_router.get("/runs", response_model=list[TestRunResponse])
async def list_runs(
    limit: int = Query(20, ge=1, le=100, description="Number of runs to return"),
    db: Session = Depends(get_db),
    current_user: UserContext = Depends(get_current_admin_api),
):
    """
    Get test run history.

    Returns recent test runs for trend analysis.
    """
    copied = (
        "id", "total_tests", "passed", "failed", "errors", "skipped",
        "xfailed", "xpassed", "pass_rate", "duration_seconds",
        "coverage_percent", "triggered_by", "git_commit_hash",
        "git_branch", "test_path", "status",
    )
    return [
        TestRunResponse(
            timestamp=run.timestamp.isoformat(),
            **{field: getattr(run, field) for field in copied},
        )
        for run in test_runner_service.get_run_history(db, limit=limit)
    ]
|
||||
|
||||
|
||||
@admin_tests_router.get("/runs/{run_id}", response_model=TestRunResponse)
async def get_run(
    run_id: int,
    db: Session = Depends(get_db),
    current_user: UserContext = Depends(get_current_admin_api),
):
    """
    Get a single test run by id.

    Raises ResourceNotFoundException when the run does not exist.
    """
    run = test_runner_service.get_run_by_id(db, run_id)

    if not run:
        from app.exceptions.base import ResourceNotFoundException

        raise ResourceNotFoundException("TestRun", str(run_id))

    copied = (
        "id", "total_tests", "passed", "failed", "errors", "skipped",
        "xfailed", "xpassed", "pass_rate", "duration_seconds",
        "coverage_percent", "triggered_by", "git_commit_hash",
        "git_branch", "test_path", "status",
    )
    return TestRunResponse(
        timestamp=run.timestamp.isoformat(),
        **{field: getattr(run, field) for field in copied},
    )
|
||||
|
||||
|
||||
@admin_tests_router.get("/runs/{run_id}/results", response_model=list[TestResultResponse])
async def get_run_results(
    run_id: int,
    outcome: str | None = Query(
        None, description="Filter by outcome (passed, failed, error, skipped)"
    ),
    db: Session = Depends(get_db),
    current_user: UserContext = Depends(get_current_admin_api),
):
    """
    Get individual test results for a specific run, optionally
    filtered by outcome.
    """
    copied = (
        "id", "node_id", "test_name", "test_file", "test_class",
        "outcome", "duration_seconds", "error_message", "traceback",
    )
    return [
        TestResultResponse(**{field: getattr(item, field) for field in copied})
        for item in test_runner_service.get_run_results(db, run_id, outcome=outcome)
    ]
|
||||
|
||||
|
||||
@admin_tests_router.get("/runs/{run_id}/failures", response_model=list[TestResultResponse])
async def get_run_failures(
    run_id: int,
    db: Session = Depends(get_db),
    current_user: UserContext = Depends(get_current_admin_api),
):
    """
    Get the failed tests from a specific run.
    """
    copied = (
        "id", "node_id", "test_name", "test_file", "test_class",
        "outcome", "duration_seconds", "error_message", "traceback",
    )
    return [
        TestResultResponse(**{field: getattr(item, field) for field in copied})
        for item in test_runner_service.get_failed_tests(db, run_id)
    ]
|
||||
|
||||
|
||||
@admin_tests_router.get("/stats", response_model=TestDashboardStatsResponse)
async def get_dashboard_stats(
    db: Session = Depends(get_db),
    current_user: UserContext = Depends(get_current_admin_api),
):
    """
    Get dashboard statistics.

    Returns comprehensive stats for the testing dashboard: totals by
    outcome, pass rate, trend data, tests by category, and top failing
    tests.
    """
    return TestDashboardStatsResponse(**test_runner_service.get_dashboard_stats(db))
|
||||
|
||||
|
||||
@admin_tests_router.post("/collect")
async def collect_tests(
    db: Session = Depends(get_db),
    current_user: UserContext = Depends(get_current_admin_api),
):
    """
    Collect test information without running tests.

    Refreshes the test collection cache and returns the updated counts.
    """
    collection = test_runner_service.collect_tests(db)
    db.commit()

    counters = (
        "total_tests", "total_files", "unit_tests",
        "integration_tests", "performance_tests", "system_tests",
    )
    payload = {name: getattr(collection, name) for name in counters}
    payload["collected_at"] = collection.collected_at.isoformat()
    return payload
|
||||
@@ -1,4 +1,2 @@
|
||||
# Page routes will be added here
|
||||
# TODO: Add HTML page routes for admin/vendor dashboards
|
||||
|
||||
__all__ = []
|
||||
# app/modules/monitoring/routes/pages/__init__.py
|
||||
"""Monitoring module page routes."""
|
||||
|
||||
59
app/modules/monitoring/routes/pages/admin.py
Normal file
59
app/modules/monitoring/routes/pages/admin.py
Normal file
@@ -0,0 +1,59 @@
|
||||
# app/modules/monitoring/routes/pages/admin.py
|
||||
"""
|
||||
Monitoring Admin Page Routes (HTML rendering).
|
||||
|
||||
Admin pages for platform monitoring:
|
||||
- Logs viewer
|
||||
- Platform health
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, Request
|
||||
from fastapi.responses import HTMLResponse
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.api.deps import get_db, require_menu_access
|
||||
from app.modules.core.utils.page_context import get_admin_context
|
||||
from app.templates_config import templates
|
||||
from models.database.admin_menu_config import FrontendType
|
||||
from models.database.user import User
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# LOGS & MONITORING ROUTES
|
||||
# ============================================================================
|
||||
|
||||
|
||||
@router.get("/logs", response_class=HTMLResponse, include_in_schema=False)
async def admin_logs_page(
    request: Request,
    current_user: User = Depends(require_menu_access("logs", FrontendType.ADMIN)),
    db: Session = Depends(get_db),
):
    """
    Render the admin logs viewer page.

    Lets admins view database and file logs with filtering and search.
    """
    context = get_admin_context(request, current_user)
    return templates.TemplateResponse("monitoring/admin/logs.html", context)
|
||||
|
||||
|
||||
@router.get("/platform-health", response_class=HTMLResponse, include_in_schema=False)
async def admin_platform_health(
    request: Request,
    current_user: User = Depends(
        require_menu_access("platform-health", FrontendType.ADMIN)
    ),
    db: Session = Depends(get_db),
):
    """
    Render the platform health monitoring page.

    Shows system metrics, capacity thresholds, and scaling recommendations.
    """
    context = get_admin_context(request, current_user)
    return templates.TemplateResponse(
        "monitoring/admin/platform-health.html", context
    )
|
||||
@@ -5,12 +5,30 @@ Monitoring module services.
|
||||
This module contains the canonical implementations of monitoring-related services.
|
||||
"""
|
||||
|
||||
from app.modules.monitoring.services.admin_audit_service import (
|
||||
admin_audit_service,
|
||||
AdminAuditService,
|
||||
)
|
||||
from app.modules.monitoring.services.background_tasks_service import (
|
||||
background_tasks_service,
|
||||
BackgroundTasksService,
|
||||
)
|
||||
from app.modules.monitoring.services.log_service import (
|
||||
log_service,
|
||||
LogService,
|
||||
)
|
||||
from app.modules.monitoring.services.platform_health_service import (
|
||||
platform_health_service,
|
||||
PlatformHealthService,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"admin_audit_service",
|
||||
"AdminAuditService",
|
||||
"background_tasks_service",
|
||||
"BackgroundTasksService",
|
||||
"log_service",
|
||||
"LogService",
|
||||
"platform_health_service",
|
||||
"PlatformHealthService",
|
||||
]
|
||||
|
||||
234
app/modules/monitoring/services/admin_audit_service.py
Normal file
234
app/modules/monitoring/services/admin_audit_service.py
Normal file
@@ -0,0 +1,234 @@
|
||||
# app/modules/monitoring/services/admin_audit_service.py
|
||||
"""
|
||||
Admin audit service for tracking admin actions.
|
||||
|
||||
This module provides functions for:
|
||||
- Logging admin actions
|
||||
- Querying audit logs
|
||||
- Generating audit reports
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from sqlalchemy import and_
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.modules.tenancy.exceptions import AdminOperationException
|
||||
from models.database.admin import AdminAuditLog
|
||||
from models.database.user import User
|
||||
from models.schema.admin import AdminAuditLogFilters, AdminAuditLogResponse
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AdminAuditService:
    """Service for recording and querying admin audit-log entries."""

    @staticmethod
    def _filter_conditions(filters: AdminAuditLogFilters) -> list:
        """
        Translate filter criteria into SQLAlchemy conditions.

        Shared by get_audit_logs and get_audit_logs_count so the list and
        count queries can never drift apart (previously duplicated inline).
        """
        conditions = []

        if filters.admin_user_id:
            conditions.append(AdminAuditLog.admin_user_id == filters.admin_user_id)

        if filters.action:
            # Case-insensitive substring match on the action name.
            conditions.append(AdminAuditLog.action.ilike(f"%{filters.action}%"))

        if filters.target_type:
            conditions.append(AdminAuditLog.target_type == filters.target_type)

        if filters.date_from:
            conditions.append(AdminAuditLog.created_at >= filters.date_from)

        if filters.date_to:
            conditions.append(AdminAuditLog.created_at <= filters.date_to)

        return conditions

    @staticmethod
    def _to_response(log: AdminAuditLog) -> AdminAuditLogResponse:
        """Map an AdminAuditLog ORM row to its API response model."""
        return AdminAuditLogResponse(
            id=log.id,
            admin_user_id=log.admin_user_id,
            admin_username=log.admin_user.username if log.admin_user else None,
            action=log.action,
            target_type=log.target_type,
            target_id=log.target_id,
            details=log.details,
            ip_address=log.ip_address,
            user_agent=log.user_agent,
            request_id=log.request_id,
            created_at=log.created_at,
        )

    def log_action(
        self,
        db: Session,
        admin_user_id: int,
        action: str,
        target_type: str,
        target_id: str,
        details: dict[str, Any] | None = None,
        ip_address: str | None = None,
        user_agent: str | None = None,
        request_id: str | None = None,
    ) -> AdminAuditLog | None:
        """
        Log an admin action to the audit trail.

        Args:
            db: Database session
            admin_user_id: ID of the admin performing the action
            action: Action performed (e.g., 'create_vendor', 'delete_user')
            target_type: Type of target (e.g., 'vendor', 'user')
            target_id: ID of the target entity
            details: Additional context about the action
            ip_address: IP address of the admin
            user_agent: User agent string
            request_id: Request ID for correlation

        Returns:
            Created AdminAuditLog instance, or None when logging fails —
            audit logging must never break the operation being audited.
        """
        try:
            audit_log = AdminAuditLog(
                admin_user_id=admin_user_id,
                action=action,
                target_type=target_type,
                target_id=str(target_id),
                details=details or {},
                ip_address=ip_address,
                user_agent=user_agent,
                request_id=request_id,
            )

            db.add(audit_log)
            # Flush (not commit) so the row gets its id while leaving the
            # enclosing transaction under the caller's control.
            db.flush()
            db.refresh(audit_log)

            logger.info(
                f"Admin action logged: {action} on {target_type}:{target_id} "
                f"by admin {admin_user_id}"
            )

            return audit_log

        except Exception as e:
            logger.error(f"Failed to log admin action: {str(e)}")
            # Don't raise - audit logging should not break operations.
            return None

    def get_audit_logs(
        self, db: Session, filters: AdminAuditLogFilters
    ) -> list[AdminAuditLogResponse]:
        """
        Get filtered admin audit logs with pagination, newest first.

        Args:
            db: Database session
            filters: Filter criteria for audit logs

        Returns:
            List of audit log responses.

        Raises:
            AdminOperationException: If the database query fails.
        """
        try:
            query = db.query(AdminAuditLog).join(
                User, AdminAuditLog.admin_user_id == User.id
            )

            conditions = self._filter_conditions(filters)
            if conditions:
                query = query.filter(and_(*conditions))

            logs = (
                query.order_by(AdminAuditLog.created_at.desc())
                .offset(filters.skip)
                .limit(filters.limit)
                .all()
            )

            return [self._to_response(log) for log in logs]

        except Exception as e:
            logger.error(f"Failed to retrieve audit logs: {str(e)}")
            raise AdminOperationException(
                operation="get_audit_logs", reason="Database query failed"
            )

    def get_audit_logs_count(self, db: Session, filters: AdminAuditLogFilters) -> int:
        """Get total count of audit logs matching filters (0 on query failure)."""
        try:
            query = db.query(AdminAuditLog)

            conditions = self._filter_conditions(filters)
            if conditions:
                query = query.filter(and_(*conditions))

            return query.count()

        except Exception as e:
            logger.error(f"Failed to count audit logs: {str(e)}")
            return 0

    def get_recent_actions_by_admin(
        self, db: Session, admin_user_id: int, limit: int = 10
    ) -> list[AdminAuditLogResponse]:
        """Get recent actions by a specific admin."""
        filters = AdminAuditLogFilters(admin_user_id=admin_user_id, limit=limit)
        return self.get_audit_logs(db, filters)

    def get_actions_by_target(
        self, db: Session, target_type: str, target_id: str, limit: int = 50
    ) -> list[AdminAuditLogResponse]:
        """
        Get all actions performed on a specific target, newest first.

        Returns an empty list on query failure.
        """
        try:
            logs = (
                db.query(AdminAuditLog)
                .filter(
                    and_(
                        AdminAuditLog.target_type == target_type,
                        AdminAuditLog.target_id == str(target_id),
                    )
                )
                .order_by(AdminAuditLog.created_at.desc())
                .limit(limit)
                .all()
            )

            # NOTE: the shared mapper now also fills user_agent/request_id
            # here; the previous inline mapping left them unset for target
            # lookups, inconsistent with get_audit_logs.
            return [self._to_response(log) for log in logs]

        except Exception as e:
            logger.error(f"Failed to get actions by target: {str(e)}")
            return []
|
||||
|
||||
|
||||
# Create service instance
|
||||
admin_audit_service = AdminAuditService()
|
||||
387
app/modules/monitoring/services/log_service.py
Normal file
387
app/modules/monitoring/services/log_service.py
Normal file
@@ -0,0 +1,387 @@
|
||||
# app/modules/monitoring/services/log_service.py
|
||||
"""
|
||||
Log management service for viewing and managing application logs.
|
||||
|
||||
This module provides functions for:
|
||||
- Querying database logs with filters
|
||||
- Reading file logs
|
||||
- Log statistics and analytics
|
||||
- Log retention and cleanup
|
||||
- Downloading log files
|
||||
"""
|
||||
|
||||
import logging
|
||||
from datetime import UTC, datetime, timedelta
|
||||
from pathlib import Path
|
||||
|
||||
from sqlalchemy import and_, func, or_
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.core.config import settings
|
||||
from app.exceptions import ResourceNotFoundException
|
||||
from app.modules.tenancy.exceptions import AdminOperationException
|
||||
from models.database.admin import ApplicationLog
|
||||
from models.schema.admin import (
|
||||
ApplicationLogFilters,
|
||||
ApplicationLogListResponse,
|
||||
ApplicationLogResponse,
|
||||
FileLogResponse,
|
||||
LogStatistics,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LogService:
|
||||
"""Service for managing application logs."""
|
||||
|
||||
def get_database_logs(
|
||||
self, db: Session, filters: ApplicationLogFilters
|
||||
) -> ApplicationLogListResponse:
|
||||
"""
|
||||
Get logs from database with filtering and pagination.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
filters: Filter criteria
|
||||
|
||||
Returns:
|
||||
Paginated list of logs
|
||||
"""
|
||||
try:
|
||||
query = db.query(ApplicationLog)
|
||||
|
||||
# Apply filters
|
||||
conditions = []
|
||||
|
||||
if filters.level:
|
||||
conditions.append(ApplicationLog.level == filters.level.upper())
|
||||
|
||||
if filters.logger_name:
|
||||
conditions.append(
|
||||
ApplicationLog.logger_name.like(f"%{filters.logger_name}%")
|
||||
)
|
||||
|
||||
if filters.module:
|
||||
conditions.append(ApplicationLog.module.like(f"%{filters.module}%"))
|
||||
|
||||
if filters.user_id:
|
||||
conditions.append(ApplicationLog.user_id == filters.user_id)
|
||||
|
||||
if filters.vendor_id:
|
||||
conditions.append(ApplicationLog.vendor_id == filters.vendor_id)
|
||||
|
||||
if filters.date_from:
|
||||
conditions.append(ApplicationLog.timestamp >= filters.date_from)
|
||||
|
||||
if filters.date_to:
|
||||
conditions.append(ApplicationLog.timestamp <= filters.date_to)
|
||||
|
||||
if filters.search:
|
||||
search_pattern = f"%{filters.search}%"
|
||||
conditions.append(
|
||||
or_(
|
||||
ApplicationLog.message.like(search_pattern),
|
||||
ApplicationLog.exception_message.like(search_pattern),
|
||||
)
|
||||
)
|
||||
|
||||
if conditions:
|
||||
query = query.filter(and_(*conditions))
|
||||
|
||||
# Get total count
|
||||
total = query.count()
|
||||
|
||||
# Apply pagination and sorting
|
||||
logs = (
|
||||
query.order_by(ApplicationLog.timestamp.desc())
|
||||
.offset(filters.skip)
|
||||
.limit(filters.limit)
|
||||
.all()
|
||||
)
|
||||
|
||||
return ApplicationLogListResponse(
|
||||
logs=[ApplicationLogResponse.model_validate(log) for log in logs],
|
||||
total=total,
|
||||
skip=filters.skip,
|
||||
limit=filters.limit,
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get database logs: {e}")
|
||||
raise AdminOperationException(
|
||||
operation="get_database_logs", reason=f"Database query failed: {str(e)}"
|
||||
)
|
||||
|
||||
def get_log_statistics(self, db: Session, days: int = 7) -> LogStatistics:
|
||||
"""
|
||||
Get statistics about logs from the last N days.
|
||||
|
||||
Args:
|
||||
db: Database session
|
||||
days: Number of days to analyze
|
||||
|
||||
Returns:
|
||||
Log statistics
|
||||
"""
|
||||
try:
|
||||
cutoff_date = datetime.now(UTC) - timedelta(days=days)
|
||||
|
||||
# Total counts
|
||||
total_count = (
|
||||
db.query(func.count(ApplicationLog.id))
|
||||
.filter(ApplicationLog.timestamp >= cutoff_date)
|
||||
.scalar()
|
||||
)
|
||||
|
||||
warning_count = (
|
||||
db.query(func.count(ApplicationLog.id))
|
||||
.filter(
|
||||
and_(
|
||||
ApplicationLog.timestamp >= cutoff_date,
|
||||
ApplicationLog.level == "WARNING",
|
||||
)
|
||||
)
|
||||
.scalar()
|
||||
)
|
||||
|
||||
error_count = (
|
||||
db.query(func.count(ApplicationLog.id))
|
||||
.filter(
|
||||
and_(
|
||||
ApplicationLog.timestamp >= cutoff_date,
|
||||
ApplicationLog.level == "ERROR",
|
||||
)
|
||||
)
|
||||
.scalar()
|
||||
)
|
||||
|
||||
critical_count = (
|
||||
db.query(func.count(ApplicationLog.id))
|
||||
.filter(
|
||||
and_(
|
||||
ApplicationLog.timestamp >= cutoff_date,
|
||||
ApplicationLog.level == "CRITICAL",
|
||||
)
|
||||
)
|
||||
.scalar()
|
||||
)
|
||||
|
||||
# Count by level
|
||||
by_level_raw = (
|
||||
db.query(ApplicationLog.level, func.count(ApplicationLog.id))
|
||||
.filter(ApplicationLog.timestamp >= cutoff_date)
|
||||
.group_by(ApplicationLog.level)
|
||||
.all()
|
||||
)
|
||||
by_level = {level: count for level, count in by_level_raw}
|
||||
|
||||
# Count by module (top 10)
|
||||
by_module_raw = (
|
||||
db.query(ApplicationLog.module, func.count(ApplicationLog.id))
|
||||
.filter(ApplicationLog.timestamp >= cutoff_date)
|
||||
.filter(ApplicationLog.module.isnot(None))
|
||||
.group_by(ApplicationLog.module)
|
||||
.order_by(func.count(ApplicationLog.id).desc())
|
||||
.limit(10)
|
||||
.all()
|
||||
)
|
||||
by_module = {module: count for module, count in by_module_raw}
|
||||
|
||||
# Recent errors (last 5)
|
||||
recent_errors = (
|
||||
db.query(ApplicationLog)
|
||||
.filter(
|
||||
and_(
|
||||
ApplicationLog.timestamp >= cutoff_date,
|
||||
ApplicationLog.level.in_(["ERROR", "CRITICAL"]),
|
||||
)
|
||||
)
|
||||
.order_by(ApplicationLog.timestamp.desc())
|
||||
.limit(5)
|
||||
.all()
|
||||
)
|
||||
|
||||
return LogStatistics(
|
||||
total_count=total_count or 0,
|
||||
warning_count=warning_count or 0,
|
||||
error_count=error_count or 0,
|
||||
critical_count=critical_count or 0,
|
||||
by_level=by_level,
|
||||
by_module=by_module,
|
||||
recent_errors=[
|
||||
ApplicationLogResponse.model_validate(log) for log in recent_errors
|
||||
],
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to get log statistics: {e}")
|
||||
raise AdminOperationException(
|
||||
operation="get_log_statistics",
|
||||
reason=f"Database query failed: {str(e)}",
|
||||
)
|
||||
|
||||
def get_file_logs(
|
||||
self, filename: str = "app.log", lines: int = 500
|
||||
) -> FileLogResponse:
|
||||
"""
|
||||
Read logs from file.
|
||||
|
||||
Args:
|
||||
filename: Log filename (default: app.log)
|
||||
lines: Number of lines to return from end of file
|
||||
|
||||
Returns:
|
||||
File log content
|
||||
"""
|
||||
try:
|
||||
# Determine log file path
|
||||
log_file_path = settings.log_file
|
||||
if log_file_path:
|
||||
log_file = Path(log_file_path)
|
||||
else:
|
||||
log_file = Path("logs") / "app.log"
|
||||
|
||||
# Allow reading backup files
|
||||
if filename != "app.log":
|
||||
log_file = log_file.parent / filename
|
||||
|
||||
if not log_file.exists():
|
||||
raise ResourceNotFoundException(
|
||||
resource_type="log_file", identifier=str(log_file)
|
||||
)
|
||||
|
||||
# Get file stats
|
||||
stat = log_file.stat()
|
||||
|
||||
# Read last N lines efficiently
|
||||
with open(log_file, encoding="utf-8", errors="replace") as f:
|
||||
# For large files, seek to end and read backwards
|
||||
all_lines = f.readlines()
|
||||
log_lines = all_lines[-lines:] if len(all_lines) > lines else all_lines
|
||||
|
||||
return FileLogResponse(
|
||||
filename=log_file.name,
|
||||
size_bytes=stat.st_size,
|
||||
last_modified=datetime.fromtimestamp(stat.st_mtime, tz=UTC),
|
||||
lines=[line.rstrip("\n") for line in log_lines],
|
||||
total_lines=len(all_lines),
|
||||
)
|
||||
|
||||
except ResourceNotFoundException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to read log file: {e}")
|
||||
raise AdminOperationException(
|
||||
operation="get_file_logs", reason=f"File read failed: {str(e)}"
|
||||
)
|
||||
|
||||
def list_log_files(self) -> list[dict]:
|
||||
"""
|
||||
List all available log files.
|
||||
|
||||
Returns:
|
||||
List of log file info (name, size, modified date)
|
||||
"""
|
||||
try:
|
||||
# Determine log directory
|
||||
log_file_path = settings.log_file
|
||||
if log_file_path:
|
||||
log_dir = Path(log_file_path).parent
|
||||
else:
|
||||
log_dir = Path("logs")
|
||||
|
||||
if not log_dir.exists():
|
||||
return []
|
||||
|
||||
files = []
|
||||
for log_file in log_dir.glob("*.log*"):
|
||||
if log_file.is_file():
|
||||
stat = log_file.stat()
|
||||
files.append(
|
||||
{
|
||||
"filename": log_file.name,
|
||||
"size_bytes": stat.st_size,
|
||||
"size_mb": round(stat.st_size / (1024 * 1024), 2),
|
||||
"last_modified": datetime.fromtimestamp(
|
||||
stat.st_mtime, tz=UTC
|
||||
).isoformat(),
|
||||
}
|
||||
)
|
||||
|
||||
# Sort by modified date (newest first)
|
||||
files.sort(key=lambda x: x["last_modified"], reverse=True)
|
||||
|
||||
return files
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to list log files: {e}")
|
||||
raise AdminOperationException(
|
||||
operation="list_log_files", reason=f"Directory read failed: {str(e)}"
|
||||
)
|
||||
|
||||
def cleanup_old_logs(self, db: Session, retention_days: int) -> int:
    """
    Purge database log entries older than the retention window.

    Args:
        db: Database session
        retention_days: Days to retain logs

    Returns:
        Number of logs deleted

    Raises:
        AdminOperationException: If the bulk delete fails (the session
        is rolled back first).
    """
    try:
        # Everything before this instant minus the retention window goes.
        cutoff = datetime.now(UTC) - timedelta(days=retention_days)

        stale = db.query(ApplicationLog).filter(ApplicationLog.timestamp < cutoff)
        deleted_count = stale.delete()
        db.commit()

        logger.info(
            f"Cleaned up {deleted_count} logs older than {retention_days} days"
        )
        return deleted_count

    except Exception as e:
        db.rollback()
        logger.error(f"Failed to cleanup old logs: {e}")
        raise AdminOperationException(
            operation="cleanup_old_logs",
            reason=f"Delete operation failed: {str(e)}",
        )
|
||||
|
||||
def delete_log(self, db: Session, log_id: int) -> str:
    """Delete a specific log entry.

    Raises:
        ResourceNotFoundException: If no log row matches ``log_id``.
        AdminOperationException: If the delete itself fails.
    """
    try:
        entry = db.query(ApplicationLog).filter(ApplicationLog.id == log_id).first()

        # Missing row is a caller error, not an operational failure.
        if not entry:
            raise ResourceNotFoundException(
                resource_type="log", identifier=str(log_id)
            )

        db.delete(entry)
        db.commit()
        return f"Log entry {log_id} deleted successfully"

    except ResourceNotFoundException:
        # Re-raise untouched so it is not wrapped as an operation error.
        raise
    except Exception as e:
        db.rollback()
        logger.error(f"Failed to delete log {log_id}: {e}")
        raise AdminOperationException(
            operation="delete_log", reason=f"Delete operation failed: {str(e)}"
        )
|
||||
|
||||
|
||||
# Create service instance — module-level singleton used by the monitoring routes.
log_service = LogService()
|
||||
525
app/modules/monitoring/services/platform_health_service.py
Normal file
525
app/modules/monitoring/services/platform_health_service.py
Normal file
@@ -0,0 +1,525 @@
|
||||
# app/modules/monitoring/services/platform_health_service.py
|
||||
"""
|
||||
Platform health and capacity monitoring service.
|
||||
|
||||
Provides:
|
||||
- System resource metrics (CPU, memory, disk)
|
||||
- Database metrics and statistics
|
||||
- Capacity threshold calculations
|
||||
- Scaling recommendations
|
||||
"""
|
||||
|
||||
import logging
|
||||
from datetime import datetime
|
||||
|
||||
import psutil
|
||||
from sqlalchemy import func, text
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
from app.modules.core.services.image_service import image_service
|
||||
from app.modules.inventory.models import Inventory
|
||||
from app.modules.orders.models import Order
|
||||
from app.modules.catalog.models import Product
|
||||
from models.database.vendor import Vendor
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# ============================================================================
# Thresholds Configuration
# ============================================================================

# Per-metric capacity thresholds. "warning" and "critical" are the levels at
# which recommendations are generated; "limit" is the assumed hard ceiling
# used to compute the percent-used figure (see _create_threshold).
CAPACITY_THRESHOLDS = {
    # Total product rows across all vendors.
    "products_total": {
        "warning": 400_000,
        "critical": 475_000,
        "limit": 500_000,
    },
    # Image storage on disk, in gigabytes.
    "storage_gb": {
        "warning": 800,
        "critical": 950,
        "limit": 1000,
    },
    # Database size, in megabytes.
    "db_size_mb": {
        "warning": 20_000,
        "critical": 24_000,
        "limit": 25_000,
    },
    # Host disk usage, percent.
    "disk_percent": {
        "warning": 70,
        "critical": 85,
        "limit": 100,
    },
    # Host memory usage, percent.
    "memory_percent": {
        "warning": 75,
        "critical": 90,
        "limit": 100,
    },
    # Host CPU usage, percent (short sample — see get_system_metrics).
    "cpu_percent": {
        "warning": 70,
        "critical": 85,
        "limit": 100,
    },
}

# Infrastructure sizing tiers, ordered smallest to largest. None limits mark
# the unbounded (Enterprise) tier. _determine_tier picks the first tier whose
# limits accommodate the current vendor/product counts.
INFRASTRUCTURE_TIERS = [
    {"name": "Starter", "max_clients": 50, "max_products": 10_000},
    {"name": "Small", "max_clients": 100, "max_products": 30_000},
    {"name": "Medium", "max_clients": 300, "max_products": 100_000},
    {"name": "Large", "max_clients": 500, "max_products": 250_000},
    {"name": "Scale", "max_clients": 1000, "max_products": 500_000},
    {"name": "Enterprise", "max_clients": None, "max_products": None},
]
|
||||
|
||||
|
||||
class PlatformHealthService:
    """Service for platform health and capacity monitoring.

    Stateless facade that aggregates host metrics (psutil), database
    statistics (SQLAlchemy), and image-storage stats, evaluates them
    against CAPACITY_THRESHOLDS, and derives scaling recommendations
    and an infrastructure tier from INFRASTRUCTURE_TIERS.
    """

    def get_system_metrics(self) -> dict:
        """Return current host CPU, memory, and disk usage.

        Returns:
            Dict with cpu/memory/disk percentages and GB figures
            (rounded to 2 decimals).
        """
        # 0.1 s sampling window keeps the call quick enough for a request
        # handler at the cost of a noisier CPU reading.
        cpu_percent = psutil.cpu_percent(interval=0.1)
        memory = psutil.virtual_memory()
        disk = psutil.disk_usage("/")

        return {
            "cpu_percent": cpu_percent,
            "memory_percent": memory.percent,
            "memory_used_gb": round(memory.used / (1024**3), 2),
            "memory_total_gb": round(memory.total / (1024**3), 2),
            "disk_percent": disk.percent,
            "disk_used_gb": round(disk.used / (1024**3), 2),
            "disk_total_gb": round(disk.total / (1024**3), 2),
        }

    def get_database_metrics(self, db: Session) -> dict:
        """Return row counts for core tables plus the database size in MB."""
        # `or 0` guards against scalar() returning None on an empty result.
        products_count = db.query(func.count(Product.id)).scalar() or 0
        orders_count = db.query(func.count(Order.id)).scalar() or 0
        vendors_count = db.query(func.count(Vendor.id)).scalar() or 0
        inventory_count = db.query(func.count(Inventory.id)).scalar() or 0

        db_size = self._get_database_size(db)

        return {
            "size_mb": db_size,
            "products_count": products_count,
            "orders_count": orders_count,
            "vendors_count": vendors_count,
            "inventory_count": inventory_count,
        }

    def get_image_storage_metrics(self) -> dict:
        """Return image storage statistics from the core image service."""
        stats = image_service.get_storage_stats()
        return {
            "total_files": stats["total_files"],
            "total_size_mb": stats["total_size_mb"],
            "total_size_gb": stats["total_size_gb"],
            "max_files_per_dir": stats["max_files_per_dir"],
            "products_estimated": stats["products_estimated"],
        }

    def get_capacity_metrics(self, db: Session) -> dict:
        """Get capacity-focused metrics for planning."""
        # Products total
        products_total = db.query(func.count(Product.id)).scalar() or 0

        # Products by vendor (inner join drops vendors with no products)
        vendor_counts = (
            db.query(Vendor.name, func.count(Product.id))
            .join(Product, Vendor.id == Product.vendor_id)
            .group_by(Vendor.name)
            .all()
        )
        products_by_vendor = {name or "Unknown": count for name, count in vendor_counts}

        # Image storage
        image_stats = image_service.get_storage_stats()

        # Database size
        db_size = self._get_database_size(db)

        # Orders this month
        # NOTE(review): microsecond is NOT reset here, and utcnow() is naive
        # while the log service in this module uses datetime.now(UTC) —
        # confirm Order.created_at is stored as naive UTC.
        start_of_month = datetime.utcnow().replace(day=1, hour=0, minute=0, second=0)
        orders_this_month = (
            db.query(func.count(Order.id))
            .filter(Order.created_at >= start_of_month)
            .scalar()
            or 0
        )

        # Active vendors
        active_vendors = (
            db.query(func.count(Vendor.id))
            .filter(Vendor.is_active == True)  # noqa: E712
            .scalar()
            or 0
        )

        return {
            "products_total": products_total,
            "products_by_vendor": products_by_vendor,
            "images_total": image_stats["total_files"],
            "storage_used_gb": image_stats["total_size_gb"],
            "database_size_mb": db_size,
            "orders_this_month": orders_this_month,
            "active_vendors": active_vendors,
        }

    def get_subscription_capacity(self, db: Session) -> dict:
        """
        Calculate theoretical capacity based on all vendor subscriptions.

        Returns aggregated limits and current usage for capacity planning.
        """
        # Local imports keep billing/vendor models out of module load —
        # NOTE(review): presumably avoids an import cycle; confirm.
        from app.modules.billing.models import VendorSubscription
        from models.database.vendor import VendorUser

        # Get all active subscriptions with their limits
        subscriptions = (
            db.query(VendorSubscription)
            .filter(VendorSubscription.status.in_(["active", "trial"]))
            .all()
        )

        # Aggregate theoretical limits. A None limit means "unlimited" and is
        # counted separately so utilization % is never reported misleadingly.
        total_products_limit = 0
        total_orders_limit = 0
        total_team_limit = 0
        unlimited_products = 0
        unlimited_orders = 0
        unlimited_team = 0

        tier_distribution = {}

        for sub in subscriptions:
            # Track tier distribution
            tier = sub.tier or "unknown"
            tier_distribution[tier] = tier_distribution.get(tier, 0) + 1

            # Aggregate limits
            if sub.products_limit is None:
                unlimited_products += 1
            else:
                total_products_limit += sub.products_limit

            if sub.orders_limit is None:
                unlimited_orders += 1
            else:
                total_orders_limit += sub.orders_limit

            if sub.team_members_limit is None:
                unlimited_team += 1
            else:
                total_team_limit += sub.team_members_limit

        # Get actual usage
        actual_products = db.query(func.count(Product.id)).scalar() or 0
        actual_team = (
            db.query(func.count(VendorUser.id))
            .filter(VendorUser.is_active == True)  # noqa: E712
            .scalar()
            or 0
        )

        # Orders this period (aggregate across all subscriptions)
        total_orders_used = sum(s.orders_this_period for s in subscriptions)

        def calc_utilization(actual: int, limit: int, unlimited: int) -> dict:
            # Fold actual usage vs the aggregated limit into one result dict.
            if unlimited > 0:
                # Some subscriptions have unlimited - can't calculate true %
                return {
                    "actual": actual,
                    "theoretical_limit": limit,
                    "unlimited_count": unlimited,
                    "utilization_percent": None,
                    "has_unlimited": True,
                }
            elif limit > 0:
                return {
                    "actual": actual,
                    "theoretical_limit": limit,
                    "unlimited_count": 0,
                    "utilization_percent": round((actual / limit) * 100, 1),
                    "headroom": limit - actual,
                    "has_unlimited": False,
                }
            else:
                return {
                    "actual": actual,
                    "theoretical_limit": 0,
                    "unlimited_count": 0,
                    "utilization_percent": 0,
                    "has_unlimited": False,
                }

        return {
            "total_subscriptions": len(subscriptions),
            "tier_distribution": tier_distribution,
            "products": calc_utilization(actual_products, total_products_limit, unlimited_products),
            "orders_monthly": calc_utilization(total_orders_used, total_orders_limit, unlimited_orders),
            "team_members": calc_utilization(actual_team, total_team_limit, unlimited_team),
        }

    def get_full_health_report(self, db: Session) -> dict:
        """Get comprehensive platform health report.

        Combines all metric sources, threshold evaluation, tier detection
        and recommendations into one dict for the admin dashboard.
        """
        # System metrics
        system = self.get_system_metrics()

        # Database metrics
        database = self.get_database_metrics(db)

        # Image storage metrics
        image_storage = self.get_image_storage_metrics()

        # Subscription capacity
        subscription_capacity = self.get_subscription_capacity(db)

        # Calculate thresholds
        thresholds = self._calculate_thresholds(system, database, image_storage)

        # Generate recommendations
        recommendations = self._generate_recommendations(thresholds, database)

        # Determine infrastructure tier
        tier, next_trigger = self._determine_tier(
            database["vendors_count"], database["products_count"]
        )

        # Overall status
        overall_status = self._determine_overall_status(thresholds)

        return {
            # NOTE(review): naive UTC timestamp (utcnow() is deprecated in
            # Python 3.12) — confirm consumers expect no tz offset.
            "timestamp": datetime.utcnow().isoformat(),
            "overall_status": overall_status,
            "system": system,
            "database": database,
            "image_storage": image_storage,
            "subscription_capacity": subscription_capacity,
            "thresholds": thresholds,
            "recommendations": recommendations,
            "infrastructure_tier": tier,
            "next_tier_trigger": next_trigger,
        }

    def _get_database_size(self, db: Session) -> float:
        """Get database size in MB.

        PostgreSQL-specific (pg_database_size); returns 0.0 on any error
        or when the query yields no row.
        """
        try:
            result = db.execute(text("SELECT pg_database_size(current_database())"))
            row = result.fetchone()
            if row:
                return round(row[0] / (1024 * 1024), 2)
        except Exception:
            # Best-effort metric: log and degrade to 0 rather than fail
            # the whole health report.
            logger.warning("Failed to get database size")
            return 0.0

        return 0.0

    def _calculate_thresholds(
        self, system: dict, database: dict, image_storage: dict
    ) -> list[dict]:
        """Calculate threshold status for each metric in CAPACITY_THRESHOLDS."""
        thresholds = []

        # Products threshold
        products_config = CAPACITY_THRESHOLDS["products_total"]
        thresholds.append(
            self._create_threshold(
                "Products",
                database["products_count"],
                products_config["warning"],
                products_config["critical"],
                products_config["limit"],
            )
        )

        # Storage threshold
        storage_config = CAPACITY_THRESHOLDS["storage_gb"]
        thresholds.append(
            self._create_threshold(
                "Image Storage (GB)",
                image_storage["total_size_gb"],
                storage_config["warning"],
                storage_config["critical"],
                storage_config["limit"],
            )
        )

        # Database size threshold
        db_config = CAPACITY_THRESHOLDS["db_size_mb"]
        thresholds.append(
            self._create_threshold(
                "Database (MB)",
                database["size_mb"],
                db_config["warning"],
                db_config["critical"],
                db_config["limit"],
            )
        )

        # Disk threshold
        disk_config = CAPACITY_THRESHOLDS["disk_percent"]
        thresholds.append(
            self._create_threshold(
                "Disk Usage (%)",
                system["disk_percent"],
                disk_config["warning"],
                disk_config["critical"],
                disk_config["limit"],
            )
        )

        # Memory threshold
        memory_config = CAPACITY_THRESHOLDS["memory_percent"]
        thresholds.append(
            self._create_threshold(
                "Memory Usage (%)",
                system["memory_percent"],
                memory_config["warning"],
                memory_config["critical"],
                memory_config["limit"],
            )
        )

        # CPU threshold
        cpu_config = CAPACITY_THRESHOLDS["cpu_percent"]
        thresholds.append(
            self._create_threshold(
                "CPU Usage (%)",
                system["cpu_percent"],
                cpu_config["warning"],
                cpu_config["critical"],
                cpu_config["limit"],
            )
        )

        return thresholds

    def _create_threshold(
        self, name: str, current: float, warning: float, critical: float, limit: float
    ) -> dict:
        """Create a threshold status object.

        status is "critical" at or above the critical level, "warning" at or
        above the warning level, otherwise "ok"; percent_used is relative to
        the hard limit (0 when the limit is 0).
        """
        percent_used = (current / limit) * 100 if limit > 0 else 0

        if current >= critical:
            status = "critical"
        elif current >= warning:
            status = "warning"
        else:
            status = "ok"

        return {
            "name": name,
            "current": current,
            "warning": warning,
            "critical": critical,
            "limit": limit,
            "status": status,
            "percent_used": round(percent_used, 1),
        }

    def _generate_recommendations(
        self, thresholds: list[dict], database: dict
    ) -> list[dict]:
        """Generate scaling recommendations based on thresholds."""
        recommendations = []

        for threshold in thresholds:
            if threshold["status"] == "critical":
                recommendations.append(
                    {
                        "priority": "critical",
                        "title": f"{threshold['name']} at critical level",
                        "description": (
                            f"Currently at {threshold['percent_used']:.0f}% of capacity "
                            f"({threshold['current']:.0f} of {threshold['limit']:.0f})"
                        ),
                        "action": "Immediate scaling or cleanup required",
                    }
                )
            elif threshold["status"] == "warning":
                recommendations.append(
                    {
                        "priority": "warning",
                        "title": f"{threshold['name']} approaching limit",
                        "description": (
                            f"Currently at {threshold['percent_used']:.0f}% of capacity "
                            f"({threshold['current']:.0f} of {threshold['limit']:.0f})"
                        ),
                        "action": "Plan scaling in the next 2-4 weeks",
                    }
                )

        # Add tier-based recommendations (only meaningful once vendors exist)
        if database["vendors_count"] > 0:
            tier, next_trigger = self._determine_tier(
                database["vendors_count"], database["products_count"]
            )
            if next_trigger:
                recommendations.append(
                    {
                        "priority": "info",
                        "title": f"Current tier: {tier}",
                        "description": next_trigger,
                        "action": "Review capacity planning documentation",
                    }
                )

        # If no issues, add positive status
        if not recommendations:
            recommendations.append(
                {
                    "priority": "info",
                    "title": "All systems healthy",
                    "description": "No capacity concerns at this time",
                    "action": None,
                }
            )

        return recommendations

    def _determine_tier(self, vendors: int, products: int) -> tuple[str, str | None]:
        """Determine current infrastructure tier and next trigger.

        Picks the first (smallest) tier that fits both counts; emits a
        next-tier trigger message once either count passes 70% of the
        current tier's limits.
        """
        current_tier = "Starter"
        next_trigger = None

        for i, tier in enumerate(INFRASTRUCTURE_TIERS):
            max_clients = tier["max_clients"]
            max_products = tier["max_products"]

            # None limits mark the unbounded tier: always fits.
            if max_clients is None:
                current_tier = tier["name"]
                break

            if vendors <= max_clients and products <= max_products:
                current_tier = tier["name"]

                # Check proximity to next tier
                if i < len(INFRASTRUCTURE_TIERS) - 1:
                    next_tier = INFRASTRUCTURE_TIERS[i + 1]
                    vendor_percent = (vendors / max_clients) * 100
                    product_percent = (products / max_products) * 100

                    if vendor_percent > 70 or product_percent > 70:
                        next_trigger = (
                            f"Approaching {next_tier['name']} tier "
                            f"(vendors: {vendor_percent:.0f}%, products: {product_percent:.0f}%)"
                        )
                break

        return current_tier, next_trigger

    def _determine_overall_status(self, thresholds: list[dict]) -> str:
        """Determine overall platform status.

        Worst threshold wins: any "critical" → "critical", else any
        "warning" → "degraded", else "healthy".
        """
        statuses = [t["status"] for t in thresholds]

        if "critical" in statuses:
            return "critical"
        elif "warning" in statuses:
            return "degraded"
        else:
            return "healthy"
|
||||
|
||||
|
||||
# Create service instance — module-level singleton imported by routes/tasks.
platform_health_service = PlatformHealthService()
|
||||
129
app/modules/monitoring/static/admin/js/platform-health.js
Normal file
129
app/modules/monitoring/static/admin/js/platform-health.js
Normal file
@@ -0,0 +1,129 @@
|
||||
// noqa: js-006 - async init pattern is safe, loadData has try/catch
// static/admin/js/platform-health.js
/**
 * Admin platform health monitoring page logic
 * Displays system metrics, capacity thresholds, and scaling recommendations
 *
 * Alpine.js component factory. Relies on page globals defined elsewhere:
 * window.LogConfig, data() (base layout state) and apiClient —
 * NOTE(review): presumably loaded before this script; verify load order.
 */

// Reuse an existing logger if LogConfig already registered one for this page.
const adminPlatformHealthLog = window.LogConfig.loggers.adminPlatformHealth ||
    window.LogConfig.createLogger('adminPlatformHealth', false);

adminPlatformHealthLog.info('Loading...');

/**
 * Build the Alpine component state object for the platform health page.
 * @returns {object} Component state with init/destroy lifecycle handlers.
 */
function adminPlatformHealth() {
    adminPlatformHealthLog.info('adminPlatformHealth() called');

    return {
        // Inherit base layout state
        ...data(),

        // Set page identifier
        currentPage: 'platform-health',

        // Loading states
        loading: true,
        error: '',

        // Health data (null until the first successful load)
        health: null,

        // Auto-refresh interval (30 seconds); handle kept for cleanup
        refreshInterval: null,

        async init() {
            adminPlatformHealthLog.info('Platform Health init() called');

            // Guard against multiple initialization (window-level flag
            // survives re-creation of the Alpine component on this page)
            if (window._adminPlatformHealthInitialized) {
                adminPlatformHealthLog.warn('Already initialized, skipping');
                return;
            }
            window._adminPlatformHealthInitialized = true;

            // Load initial data
            await this.loadHealth();

            // Set up auto-refresh every 30 seconds
            this.refreshInterval = setInterval(() => {
                this.loadHealth();
            }, 30000);

            adminPlatformHealthLog.info('Platform Health initialization complete');
        },

        /**
         * Clean up on component destroy (stops the auto-refresh timer)
         */
        destroy() {
            if (this.refreshInterval) {
                clearInterval(this.refreshInterval);
                this.refreshInterval = null;
            }
        },

        /**
         * Load platform health data from the admin API.
         * Errors are surfaced via this.error; loading flag always reset.
         */
        async loadHealth() {
            this.loading = true;
            this.error = '';

            try {
                const response = await apiClient.get('/admin/platform/health');
                this.health = response;

                adminPlatformHealthLog.info('Loaded health data:', {
                    status: response.overall_status,
                    tier: response.infrastructure_tier
                });
            } catch (error) {
                adminPlatformHealthLog.error('Failed to load health:', error);
                this.error = error.message || 'Failed to load platform health';
            } finally {
                this.loading = false;
            }
        },

        /**
         * Manual refresh (bound to the page's refresh button)
         */
        async refresh() {
            await this.loadHealth();
        },

        /**
         * Format number with locale; non-integers shown with 2 decimals
         */
        formatNumber(num) {
            if (num === null || num === undefined) return '0';
            if (typeof num === 'number' && num % 1 !== 0) {
                return num.toFixed(2);
            }
            return new Intl.NumberFormat('en-US').format(num);
        },

        /**
         * Format storage size; values under 1 GB rendered in MB
         */
        formatStorage(gb) {
            if (gb === null || gb === undefined) return '0 GB';
            if (gb < 1) {
                return (gb * 1024).toFixed(0) + ' MB';
            }
            return gb.toFixed(2) + ' GB';
        },

        /**
         * Format timestamp as a local time string; 'Unknown' on bad input
         */
        formatTime(timestamp) {
            if (!timestamp) return 'Unknown';
            try {
                const date = new Date(timestamp);
                return date.toLocaleTimeString();
            } catch (e) {
                return 'Unknown';
            }
        }
    };
}
|
||||
11
app/modules/monitoring/tasks/__init__.py
Normal file
11
app/modules/monitoring/tasks/__init__.py
Normal file
@@ -0,0 +1,11 @@
|
||||
# app/modules/monitoring/tasks/__init__.py
|
||||
"""
|
||||
Monitoring module Celery tasks.
|
||||
|
||||
Tasks for:
|
||||
- Capacity snapshot capture for forecasting
|
||||
"""
|
||||
|
||||
from app.modules.monitoring.tasks.capacity import capture_capacity_snapshot
|
||||
|
||||
__all__ = ["capture_capacity_snapshot"]
|
||||
45
app/modules/monitoring/tasks/capacity.py
Normal file
45
app/modules/monitoring/tasks/capacity.py
Normal file
@@ -0,0 +1,45 @@
|
||||
# app/modules/monitoring/tasks/capacity.py
|
||||
"""
|
||||
Celery tasks for capacity monitoring and forecasting.
|
||||
|
||||
Captures daily snapshots of platform capacity metrics for trend analysis.
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
from app.core.celery_config import celery_app
|
||||
from app.modules.task_base import ModuleTask
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@celery_app.task(
    bind=True,
    base=ModuleTask,
    name="app.modules.monitoring.tasks.capacity.capture_capacity_snapshot",
)
def capture_capacity_snapshot(self):
    """
    Capture a daily snapshot of platform capacity metrics.

    Runs daily at midnight via Celery beat.

    Returns:
        dict: Snapshot summary with vendor and product counts
        (snapshot_id, snapshot_date ISO string, total_vendors,
        total_products).
    """
    # Imported inside the task body — NOTE(review): presumably to avoid a
    # module-level import cycle with the billing module; confirm.
    from app.modules.billing.services.capacity_forecast_service import capacity_forecast_service

    # ModuleTask.get_db() yields a session scoped to this task run.
    with self.get_db() as db:
        snapshot = capacity_forecast_service.capture_daily_snapshot(db)

        logger.info(
            f"Captured capacity snapshot: {snapshot.total_vendors} vendors, "
            f"{snapshot.total_products} products"
        )

        return {
            "snapshot_id": snapshot.id,
            "snapshot_date": snapshot.snapshot_date.isoformat(),
            "total_vendors": snapshot.total_vendors,
            "total_products": snapshot.total_products,
        }
|
||||
411
app/modules/monitoring/templates/monitoring/admin/logs.html
Normal file
411
app/modules/monitoring/templates/monitoring/admin/logs.html
Normal file
@@ -0,0 +1,411 @@
|
||||
{# app/templates/admin/logs.html #}
|
||||
{% extends "admin/base.html" %}
|
||||
{% from 'shared/macros/pagination.html' import pagination %}
|
||||
{% from 'shared/macros/alerts.html' import alert_dynamic, error_state %}
|
||||
{% from 'shared/macros/headers.html' import page_header_refresh %}
|
||||
{% from 'shared/macros/tabs.html' import tabs_nav, tab_button %}
|
||||
|
||||
{% block title %}Application Logs{% endblock %}
|
||||
|
||||
{% block alpine_data %}adminLogs(){% endblock %}
|
||||
|
||||
{% block content %}
|
||||
{{ page_header_refresh('Application Logs') }}
|
||||
|
||||
{{ alert_dynamic(type='success', title='Success', message_var='successMessage', show_condition='successMessage') }}
|
||||
|
||||
{{ error_state('Error', show_condition='error') }}
|
||||
|
||||
<!-- Statistics Cards -->
|
||||
<div x-show="stats" class="grid gap-6 mb-8 md:grid-cols-2 xl:grid-cols-4">
|
||||
<!-- Total Logs -->
|
||||
<div class="flex items-center p-4 bg-white rounded-lg shadow-xs dark:bg-gray-800">
|
||||
<div class="p-3 mr-4 text-blue-500 bg-blue-100 rounded-full dark:text-blue-100 dark:bg-blue-500">
|
||||
<span x-html="$icon('document-text', 'w-5 h-5')"></span>
|
||||
</div>
|
||||
<div>
|
||||
<p class="mb-2 text-sm font-medium text-gray-600 dark:text-gray-400">Total Logs (7d)</p>
|
||||
<p class="text-lg font-semibold text-gray-700 dark:text-gray-200" x-text="stats.total_count">0</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Warnings -->
|
||||
<div class="flex items-center p-4 bg-white rounded-lg shadow-xs dark:bg-gray-800">
|
||||
<div class="p-3 mr-4 text-yellow-500 bg-yellow-100 rounded-full dark:text-yellow-100 dark:bg-yellow-500">
|
||||
<span x-html="$icon('exclamation', 'w-5 h-5')"></span>
|
||||
</div>
|
||||
<div>
|
||||
<p class="mb-2 text-sm font-medium text-gray-600 dark:text-gray-400">Warnings</p>
|
||||
<p class="text-lg font-semibold text-gray-700 dark:text-gray-200" x-text="stats.warning_count">0</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Errors -->
|
||||
<div class="flex items-center p-4 bg-white rounded-lg shadow-xs dark:bg-gray-800">
|
||||
<div class="p-3 mr-4 text-red-500 bg-red-100 rounded-full dark:text-white dark:bg-red-600">
|
||||
<span x-html="$icon('x-circle', 'w-5 h-5')"></span>
|
||||
</div>
|
||||
<div>
|
||||
<p class="mb-2 text-sm font-medium text-gray-600 dark:text-gray-400">Errors</p>
|
||||
<p class="text-lg font-semibold text-gray-700 dark:text-gray-200" x-text="stats.error_count">0</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Critical -->
|
||||
<div class="flex items-center p-4 bg-white rounded-lg shadow-xs dark:bg-gray-800">
|
||||
<div class="p-3 mr-4 text-purple-500 bg-purple-100 rounded-full dark:text-purple-100 dark:bg-purple-500">
|
||||
<span x-html="$icon('lightning-bolt', 'w-5 h-5')"></span>
|
||||
</div>
|
||||
<div>
|
||||
<p class="mb-2 text-sm font-medium text-gray-600 dark:text-gray-400">Critical</p>
|
||||
<p class="text-lg font-semibold text-gray-700 dark:text-gray-200" x-text="stats.critical_count">0</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Log Source Tabs -->
|
||||
{% call tabs_nav() %}
|
||||
{{ tab_button('database', 'Database Logs', tab_var='logSource', icon='database', onclick="logSource = 'database'; loadLogs()") }}
|
||||
{{ tab_button('file', 'File Logs', tab_var='logSource', icon='document', onclick="logSource = 'file'; loadFileLogs()") }}
|
||||
{% endcall %}
|
||||
|
||||
<!-- Database Logs Section -->
|
||||
<div x-show="logSource === 'database'" x-transition>
|
||||
<!-- Filters -->
|
||||
<div class="bg-white dark:bg-gray-800 rounded-lg shadow-md p-6 mb-6">
|
||||
<div class="flex items-center justify-between mb-4">
|
||||
<h3 class="text-lg font-semibold text-gray-700 dark:text-gray-200">Filters</h3>
|
||||
<button
|
||||
@click="resetFilters()"
|
||||
class="text-sm text-purple-600 hover:text-purple-700 dark:text-purple-400"
|
||||
>
|
||||
Reset Filters
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div class="grid grid-cols-1 md:grid-cols-3 gap-4">
|
||||
<!-- Log Level Filter -->
|
||||
<div>
|
||||
<label class="block text-sm font-medium text-gray-700 dark:text-gray-300 mb-2">Log Level</label>
|
||||
<select
|
||||
x-model="filters.level"
|
||||
@change="loadLogs()"
|
||||
class="block w-full px-3 py-2 text-gray-700 dark:text-gray-300 bg-white dark:bg-gray-700 border border-gray-300 dark:border-gray-600 rounded-lg focus:outline-none focus:ring-2 focus:ring-purple-600"
|
||||
>
|
||||
<option value="">All Levels</option>
|
||||
<option value="WARNING">WARNING</option>
|
||||
<option value="ERROR">ERROR</option>
|
||||
<option value="CRITICAL">CRITICAL</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<!-- Module Filter -->
|
||||
<div>
|
||||
<label class="block text-sm font-medium text-gray-700 dark:text-gray-300 mb-2">Module</label>
|
||||
<input
|
||||
type="text"
|
||||
x-model="filters.module"
|
||||
@keyup.enter="loadLogs()"
|
||||
placeholder="Filter by module..."
|
||||
class="block w-full px-3 py-2 text-gray-700 dark:text-gray-300 bg-white dark:bg-gray-700 border border-gray-300 dark:border-gray-600 rounded-lg focus:outline-none focus:ring-2 focus:ring-purple-600"
|
||||
/>
|
||||
</div>
|
||||
|
||||
<!-- Search -->
|
||||
<div>
|
||||
<label class="block text-sm font-medium text-gray-700 dark:text-gray-300 mb-2">Search</label>
|
||||
<input
|
||||
type="text"
|
||||
x-model="filters.search"
|
||||
@keyup.enter="loadLogs()"
|
||||
placeholder="Search in messages..."
|
||||
class="block w-full px-3 py-2 text-gray-700 dark:text-gray-300 bg-white dark:bg-gray-700 border border-gray-300 dark:border-gray-600 rounded-lg focus:outline-none focus:ring-2 focus:ring-purple-600"
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Database Logs Table -->
|
||||
<div class="bg-white dark:bg-gray-800 rounded-lg shadow-md overflow-hidden">
|
||||
<div class="overflow-x-auto">
|
||||
<table class="w-full whitespace-no-wrap">
|
||||
<thead>
|
||||
<tr class="text-xs font-semibold tracking-wide text-left text-gray-500 uppercase border-b dark:border-gray-700 bg-gray-50 dark:text-gray-400 dark:bg-gray-800">
|
||||
<th class="px-4 py-3">Timestamp</th>
|
||||
<th class="px-4 py-3">Level</th>
|
||||
<th class="px-4 py-3">Module</th>
|
||||
<th class="px-4 py-3">Message</th>
|
||||
<th class="px-4 py-3">Actions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody class="bg-white divide-y dark:divide-gray-700 dark:bg-gray-800">
|
||||
<template x-if="loading">
|
||||
<tr>
|
||||
<td colspan="5" class="px-4 py-8 text-center">
|
||||
<span x-html="$icon('spinner', 'inline w-6 h-6 text-purple-600')"></span>
|
||||
<p class="mt-2 text-sm text-gray-600 dark:text-gray-400">Loading logs...</p>
|
||||
</td>
|
||||
</tr>
|
||||
</template>
|
||||
|
||||
<template x-if="!loading && logs.length === 0">
|
||||
<tr>
|
||||
<td colspan="5" class="px-4 py-8 text-center text-gray-500 dark:text-gray-400">
|
||||
No logs found
|
||||
</td>
|
||||
</tr>
|
||||
</template>
|
||||
|
||||
<template x-for="log in logs" :key="log.id">
|
||||
<tr class="text-gray-700 dark:text-gray-400 hover:bg-gray-50 dark:hover:bg-gray-700">
|
||||
<td class="px-4 py-3 text-sm">
|
||||
<span x-text="formatTimestamp(log.timestamp)"></span>
|
||||
</td>
|
||||
<td class="px-4 py-3 text-xs">
|
||||
<span
|
||||
:class="{
|
||||
'bg-yellow-100 text-yellow-800 dark:bg-yellow-800 dark:text-yellow-100': log.level === 'WARNING',
|
||||
'bg-red-100 text-red-800 dark:bg-red-800 dark:text-red-100': log.level === 'ERROR',
|
||||
'bg-purple-100 text-purple-800 dark:bg-purple-800 dark:text-purple-100': log.level === 'CRITICAL'
|
||||
}"
|
||||
class="px-2 py-1 font-semibold leading-tight rounded-full"
|
||||
x-text="log.level"
|
||||
></span>
|
||||
</td>
|
||||
<td class="px-4 py-3 text-sm">
|
||||
<span x-text="log.module || '-'"></span>
|
||||
</td>
|
||||
<td class="px-4 py-3 text-sm">
|
||||
<div class="max-w-2xl truncate" x-text="log.message"></div>
|
||||
</td>
|
||||
<td class="px-4 py-3 text-sm">
|
||||
<button
|
||||
@click="showLogDetail(log)"
|
||||
class="text-purple-600 hover:text-purple-700 dark:text-purple-400"
|
||||
>
|
||||
<span x-html="$icon('eye', 'w-5 h-5')"></span>
|
||||
</button>
|
||||
</td>
|
||||
</tr>
|
||||
</template>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
{{ pagination(show_condition="!loading && logs.length > 0") }}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- File Logs Section -->
|
||||
<div x-show="logSource === 'file'" x-transition>
|
||||
<!-- File Selection -->
|
||||
<div class="bg-white dark:bg-gray-800 rounded-lg shadow-md p-6 mb-6">
|
||||
<div class="flex items-center justify-between mb-4">
|
||||
<h3 class="text-lg font-semibold text-gray-700 dark:text-gray-200">Log Files</h3>
|
||||
<button
|
||||
@click="loadFileLogs()"
|
||||
class="text-sm text-purple-600 hover:text-purple-700 dark:text-purple-400"
|
||||
>
|
||||
Refresh List
|
||||
</button>
|
||||
</div>
|
||||
|
||||
<div class="grid grid-cols-1 md:grid-cols-2 gap-4">
|
||||
<div>
|
||||
<label class="block text-sm font-medium text-gray-700 dark:text-gray-300 mb-2">Select Log File</label>
|
||||
<select
|
||||
x-model="selectedFile"
|
||||
@change="loadFileContent()"
|
||||
class="block w-full px-3 py-2 text-gray-700 dark:text-gray-300 bg-white dark:bg-gray-700 border border-gray-300 dark:border-gray-600 rounded-lg focus:outline-none focus:ring-2 focus:ring-purple-600"
|
||||
>
|
||||
<option value="">Select a file...</option>
|
||||
<template x-for="file in logFiles" :key="file.filename">
|
||||
<option :value="file.filename" x-text="`${file.filename} (${file.size_mb} MB)`"></option>
|
||||
</template>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<div class="flex items-end">
|
||||
<button
|
||||
@click="downloadLogFile()"
|
||||
:disabled="!selectedFile"
|
||||
class="px-4 py-2 text-sm font-medium leading-5 text-white transition-colors duration-150 bg-green-600 border border-transparent rounded-lg hover:bg-green-700 focus:outline-none focus:shadow-outline-green disabled:opacity-50 disabled:cursor-not-allowed"
|
||||
>
|
||||
<span x-html="$icon('download', 'inline w-4 h-4 mr-2')"></span>
|
||||
Download File
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- File Content -->
|
||||
<div x-show="fileContent" class="bg-white dark:bg-gray-800 rounded-lg shadow-md overflow-hidden">
|
||||
<div class="p-4 border-b dark:border-gray-700 flex items-center justify-between">
|
||||
<div>
|
||||
<h3 class="text-lg font-semibold text-gray-700 dark:text-gray-200" x-text="selectedFile"></h3>
|
||||
<p class="text-sm text-gray-600 dark:text-gray-400">
|
||||
Showing last <span x-text="fileContent?.lines?.length || 0"></span> lines of <span x-text="fileContent?.total_lines || 0"></span> total
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="p-4 bg-gray-900 overflow-x-auto">
|
||||
<pre class="text-xs text-green-400 font-mono"><code x-text="fileContent?.lines?.join('\n')"></code></pre>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{# noqa: FE-004 - Log detail modal with dynamic show variable and custom content layout #}
|
||||
<!-- Log Detail Modal -->
|
||||
<div x-show="selectedLog" x-transition class="fixed inset-0 z-50 flex items-center justify-center bg-black bg-opacity-50" @click.self="selectedLog = null">
|
||||
<div class="bg-white dark:bg-gray-800 rounded-lg shadow-xl max-w-4xl w-full mx-4 max-h-[90vh] overflow-hidden">
|
||||
{# Modal Header with Level Badge #}
|
||||
<div class="px-6 py-4 border-b border-gray-200 dark:border-gray-700">
|
||||
<div class="flex items-center justify-between">
|
||||
<div class="flex items-center gap-3">
|
||||
<div :class="{
|
||||
'bg-yellow-100 dark:bg-yellow-900/30': selectedLog?.level === 'WARNING',
|
||||
'bg-red-100 dark:bg-red-900/30': selectedLog?.level === 'ERROR',
|
||||
'bg-purple-100 dark:bg-purple-900/30': selectedLog?.level === 'CRITICAL',
|
||||
'bg-blue-100 dark:bg-blue-900/30': selectedLog?.level === 'INFO' || selectedLog?.level === 'DEBUG'
|
||||
}" class="p-2 rounded-lg">
|
||||
<span :class="{
|
||||
'text-yellow-600 dark:text-yellow-400': selectedLog?.level === 'WARNING',
|
||||
'text-red-600 dark:text-red-400': selectedLog?.level === 'ERROR',
|
||||
'text-purple-600 dark:text-purple-400': selectedLog?.level === 'CRITICAL',
|
||||
'text-blue-600 dark:text-blue-400': selectedLog?.level === 'INFO' || selectedLog?.level === 'DEBUG'
|
||||
}" x-html="$icon(selectedLog?.level === 'WARNING' ? 'exclamation' : selectedLog?.level === 'CRITICAL' ? 'lightning-bolt' : selectedLog?.level === 'ERROR' ? 'x-circle' : 'information-circle', 'w-6 h-6')"></span>
|
||||
</div>
|
||||
<div>
|
||||
<h3 class="text-lg font-semibold text-gray-900 dark:text-gray-100">Log Entry Details</h3>
|
||||
<p class="text-sm text-gray-500 dark:text-gray-400">ID: <span x-text="selectedLog?.id"></span></p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex items-center gap-3">
|
||||
<span :class="{
|
||||
'bg-yellow-100 text-yellow-800 dark:bg-yellow-800 dark:text-yellow-100': selectedLog?.level === 'WARNING',
|
||||
'bg-red-100 text-red-800 dark:bg-red-800 dark:text-red-100': selectedLog?.level === 'ERROR',
|
||||
'bg-purple-100 text-purple-800 dark:bg-purple-800 dark:text-purple-100': selectedLog?.level === 'CRITICAL',
|
||||
'bg-blue-100 text-blue-800 dark:bg-blue-800 dark:text-blue-100': selectedLog?.level === 'INFO' || selectedLog?.level === 'DEBUG'
|
||||
}" class="px-3 py-1 text-sm font-semibold rounded-full" x-text="selectedLog?.level"></span>
|
||||
<button @click="selectedLog = null" class="p-1 text-gray-400 hover:text-gray-600 dark:hover:text-gray-300 transition-colors">
|
||||
<span x-html="$icon('close', 'w-6 h-6')"></span>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{# Modal Body #}
|
||||
<div class="p-6 overflow-y-auto max-h-[calc(90vh-140px)]">
|
||||
<template x-if="selectedLog">
|
||||
<div class="space-y-6">
|
||||
{# Details Table #}
|
||||
<div class="overflow-hidden border border-gray-200 dark:border-gray-700 rounded-lg">
|
||||
<table class="min-w-full divide-y divide-gray-200 dark:divide-gray-700">
|
||||
<tbody class="divide-y divide-gray-200 dark:divide-gray-700">
|
||||
<tr>
|
||||
<td class="px-4 py-3 text-sm font-medium text-gray-600 dark:text-gray-400 bg-gray-50 dark:bg-gray-700/50 w-1/4">
|
||||
<div class="flex items-center gap-2">
|
||||
<span x-html="$icon('clock', 'w-4 h-4')"></span>
|
||||
Timestamp
|
||||
</div>
|
||||
</td>
|
||||
<td class="px-4 py-3 text-sm text-gray-900 dark:text-gray-100" x-text="formatTimestamp(selectedLog.timestamp)"></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td class="px-4 py-3 text-sm font-medium text-gray-600 dark:text-gray-400 bg-gray-50 dark:bg-gray-700/50">
|
||||
<div class="flex items-center gap-2">
|
||||
<span x-html="$icon('tag', 'w-4 h-4')"></span>
|
||||
Logger
|
||||
</div>
|
||||
</td>
|
||||
<td class="px-4 py-3 text-sm text-gray-900 dark:text-gray-100">
|
||||
<code class="px-2 py-1 bg-gray-100 dark:bg-gray-700 rounded text-xs" x-text="selectedLog.logger_name || '-'"></code>
|
||||
</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td class="px-4 py-3 text-sm font-medium text-gray-600 dark:text-gray-400 bg-gray-50 dark:bg-gray-700/50">
|
||||
<div class="flex items-center gap-2">
|
||||
<span x-html="$icon('cube', 'w-4 h-4')"></span>
|
||||
Module
|
||||
</div>
|
||||
</td>
|
||||
<td class="px-4 py-3 text-sm text-gray-900 dark:text-gray-100">
|
||||
<code class="px-2 py-1 bg-gray-100 dark:bg-gray-700 rounded text-xs" x-text="selectedLog.module || '-'"></code>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
{# Message Section #}
|
||||
<div>
|
||||
<div class="flex items-center gap-2 mb-2">
|
||||
<span class="text-gray-500 dark:text-gray-400" x-html="$icon('chat-alt', 'w-4 h-4')"></span>
|
||||
<h4 class="text-sm font-semibold text-gray-700 dark:text-gray-300">Message</h4>
|
||||
</div>
|
||||
<div class="bg-gray-50 dark:bg-gray-700/50 border border-gray-200 dark:border-gray-600 rounded-lg p-4">
|
||||
<p class="text-sm text-gray-800 dark:text-gray-200 whitespace-pre-wrap break-words" x-text="selectedLog.message"></p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{# Exception Section (conditional) #}
|
||||
<div x-show="selectedLog.exception_message" x-transition>
|
||||
<div class="flex items-center gap-2 mb-2">
|
||||
<span class="text-red-500 dark:text-red-400" x-html="$icon('exclamation-circle', 'w-4 h-4')"></span>
|
||||
<h4 class="text-sm font-semibold text-red-700 dark:text-red-300">Exception</h4>
|
||||
</div>
|
||||
<div class="bg-red-50 dark:bg-red-900/20 border border-red-200 dark:border-red-800 rounded-lg p-4">
|
||||
<div class="flex items-start gap-3">
|
||||
<div class="flex-shrink-0 p-1.5 bg-red-100 dark:bg-red-900/50 rounded">
|
||||
<span class="text-red-600 dark:text-red-400" x-html="$icon('x-circle', 'w-5 h-5')"></span>
|
||||
</div>
|
||||
<div class="flex-1 min-w-0">
|
||||
<p class="text-sm font-medium text-red-800 dark:text-red-200" x-text="selectedLog.exception_type"></p>
|
||||
<p class="text-sm text-red-600 dark:text-red-300 mt-1 break-words" x-text="selectedLog.exception_message"></p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{# Stack Trace Section (conditional) #}
|
||||
<div x-show="selectedLog.stack_trace" x-transition>
|
||||
<div class="flex items-center justify-between mb-2">
|
||||
<div class="flex items-center gap-2">
|
||||
<span class="text-gray-500 dark:text-gray-400" x-html="$icon('code', 'w-4 h-4')"></span>
|
||||
<h4 class="text-sm font-semibold text-gray-700 dark:text-gray-300">Stack Trace</h4>
|
||||
</div>
|
||||
<button
|
||||
@click="navigator.clipboard.writeText(selectedLog.stack_trace); $dispatch('notify', {message: 'Stack trace copied!', type: 'success'})"
|
||||
class="text-xs text-purple-600 hover:text-purple-700 dark:text-purple-400 dark:hover:text-purple-300 flex items-center gap-1"
|
||||
>
|
||||
<span x-html="$icon('clipboard-copy', 'w-4 h-4')"></span>
|
||||
Copy
|
||||
</button>
|
||||
</div>
|
||||
<div class="bg-gray-900 dark:bg-gray-950 border border-gray-700 rounded-lg overflow-hidden">
|
||||
<pre class="p-4 text-xs text-green-400 font-mono overflow-x-auto max-h-64 overflow-y-auto"><code x-text="selectedLog.stack_trace"></code></pre>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
</div>
|
||||
|
||||
{# Modal Footer #}
|
||||
<div class="px-6 py-4 border-t border-gray-200 dark:border-gray-700 bg-gray-50 dark:bg-gray-700/30">
|
||||
<div class="flex justify-end">
|
||||
<button
|
||||
@click="selectedLog = null"
|
||||
class="px-4 py-2 text-sm font-medium text-gray-700 dark:text-gray-300 bg-white dark:bg-gray-700 border border-gray-300 dark:border-gray-600 rounded-lg hover:bg-gray-50 dark:hover:bg-gray-600 focus:outline-none focus:ring-2 focus:ring-purple-500 transition-colors"
|
||||
>
|
||||
Close
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{% endblock %}
|
||||
|
||||
{% block extra_scripts %}
|
||||
<script src="{{ url_for('monitoring_static', path='admin/js/logs.js') }}"></script>
|
||||
{% endblock %}
|
||||
@@ -0,0 +1,267 @@
|
||||
{# app/templates/admin/platform-health.html #}
|
||||
{% extends "admin/base.html" %}
|
||||
{% from 'shared/macros/alerts.html' import loading_state, error_state %}
|
||||
{% from 'shared/macros/headers.html' import page_header_flex, refresh_button %}
|
||||
|
||||
{% block title %}Platform Health{% endblock %}
|
||||
|
||||
{% block alpine_data %}adminPlatformHealth(){% endblock %}
|
||||
|
||||
{% block content %}
|
||||
{% call page_header_flex(title='Platform Health', subtitle='System metrics, capacity monitoring, and scaling recommendations') %}
|
||||
{{ refresh_button(variant='primary') }}
|
||||
{% endcall %}
|
||||
|
||||
{{ loading_state('Loading platform health...') }}
|
||||
|
||||
{{ error_state('Error loading platform health') }}
|
||||
|
||||
<!-- Main Content -->
|
||||
<div x-show="!loading && !error" x-cloak class="space-y-6">
|
||||
<!-- Overall Status Banner -->
|
||||
<div
|
||||
:class="{
|
||||
'bg-green-50 border-green-200 dark:bg-green-900/20 dark:border-green-800': health?.overall_status === 'healthy',
|
||||
'bg-yellow-50 border-yellow-200 dark:bg-yellow-900/20 dark:border-yellow-800': health?.overall_status === 'degraded',
|
||||
'bg-red-50 border-red-200 dark:bg-red-900/20 dark:border-red-800': health?.overall_status === 'critical'
|
||||
}"
|
||||
class="px-4 py-3 rounded-lg border flex items-center justify-between"
|
||||
>
|
||||
<div class="flex items-center gap-3">
|
||||
<span
|
||||
:class="{
|
||||
'text-green-600 dark:text-green-400': health?.overall_status === 'healthy',
|
||||
'text-yellow-600 dark:text-yellow-400': health?.overall_status === 'degraded',
|
||||
'text-red-600 dark:text-red-400': health?.overall_status === 'critical'
|
||||
}"
|
||||
x-html="health?.overall_status === 'healthy' ? $icon('check-circle', 'w-6 h-6') : (health?.overall_status === 'degraded' ? $icon('exclamation', 'w-6 h-6') : $icon('x-circle', 'w-6 h-6'))"
|
||||
></span>
|
||||
<div>
|
||||
<span class="font-semibold capitalize" x-text="health?.overall_status || 'Unknown'"></span>
|
||||
<span class="text-sm text-gray-600 dark:text-gray-400 ml-2">
|
||||
Infrastructure Tier: <span class="font-medium" x-text="health?.infrastructure_tier"></span>
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
<span class="text-xs text-gray-500 dark:text-gray-400" x-text="'Last updated: ' + formatTime(health?.timestamp)"></span>
|
||||
</div>
|
||||
|
||||
<!-- Stats Cards -->
|
||||
<div class="grid gap-4 md:grid-cols-2 lg:grid-cols-4">
|
||||
<!-- Products -->
|
||||
<div class="px-4 py-5 bg-white rounded-lg shadow-md dark:bg-gray-800">
|
||||
<div class="flex items-center">
|
||||
<div class="p-3 mr-4 text-blue-500 bg-blue-100 rounded-full dark:text-blue-400 dark:bg-blue-900/50">
|
||||
<span x-html="$icon('cube', 'w-5 h-5')"></span>
|
||||
</div>
|
||||
<div>
|
||||
<p class="mb-1 text-sm font-medium text-gray-500 dark:text-gray-400">Products</p>
|
||||
<p class="text-lg font-semibold text-gray-700 dark:text-gray-200" x-text="formatNumber(health?.database?.products_count || 0)"></p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Image Storage -->
|
||||
<div class="px-4 py-5 bg-white rounded-lg shadow-md dark:bg-gray-800">
|
||||
<div class="flex items-center">
|
||||
<div class="p-3 mr-4 text-purple-500 bg-purple-100 rounded-full dark:text-purple-400 dark:bg-purple-900/50">
|
||||
<span x-html="$icon('photograph', 'w-5 h-5')"></span>
|
||||
</div>
|
||||
<div>
|
||||
<p class="mb-1 text-sm font-medium text-gray-500 dark:text-gray-400">Image Storage</p>
|
||||
<p class="text-lg font-semibold text-gray-700 dark:text-gray-200" x-text="formatStorage(health?.image_storage?.total_size_gb || 0)"></p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Database Size -->
|
||||
<div class="px-4 py-5 bg-white rounded-lg shadow-md dark:bg-gray-800">
|
||||
<div class="flex items-center">
|
||||
<div class="p-3 mr-4 text-green-500 bg-green-100 rounded-full dark:text-green-400 dark:bg-green-900/50">
|
||||
<span x-html="$icon('database', 'w-5 h-5')"></span>
|
||||
</div>
|
||||
<div>
|
||||
<p class="mb-1 text-sm font-medium text-gray-500 dark:text-gray-400">Database</p>
|
||||
<p class="text-lg font-semibold text-gray-700 dark:text-gray-200" x-text="formatNumber(health?.database?.size_mb || 0) + ' MB'"></p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Vendors -->
|
||||
<div class="px-4 py-5 bg-white rounded-lg shadow-md dark:bg-gray-800">
|
||||
<div class="flex items-center">
|
||||
<div class="p-3 mr-4 text-orange-500 bg-orange-100 rounded-full dark:text-orange-400 dark:bg-orange-900/50">
|
||||
<span x-html="$icon('office-building', 'w-5 h-5')"></span>
|
||||
</div>
|
||||
<div>
|
||||
<p class="mb-1 text-sm font-medium text-gray-500 dark:text-gray-400">Vendors</p>
|
||||
<p class="text-lg font-semibold text-gray-700 dark:text-gray-200" x-text="formatNumber(health?.database?.vendors_count || 0)"></p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Two Column Layout -->
|
||||
<div class="grid gap-6 lg:grid-cols-2">
|
||||
<!-- System Resources -->
|
||||
<div class="px-4 py-5 bg-white rounded-lg shadow-md dark:bg-gray-800">
|
||||
<h3 class="mb-4 text-lg font-semibold text-gray-700 dark:text-gray-200">System Resources</h3>
|
||||
<div class="space-y-4">
|
||||
<!-- CPU -->
|
||||
<div>
|
||||
<div class="flex justify-between mb-1">
|
||||
<span class="text-sm font-medium text-gray-600 dark:text-gray-400">CPU</span>
|
||||
<span class="text-sm font-semibold" x-text="(health?.system?.cpu_percent || 0).toFixed(1) + '%'"></span>
|
||||
</div>
|
||||
<div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2">
|
||||
<div
|
||||
:class="{
|
||||
'bg-green-500': (health?.system?.cpu_percent || 0) < 70,
|
||||
'bg-yellow-500': (health?.system?.cpu_percent || 0) >= 70 && (health?.system?.cpu_percent || 0) < 85,
|
||||
'bg-red-500': (health?.system?.cpu_percent || 0) >= 85
|
||||
}"
|
||||
class="h-2 rounded-full transition-all"
|
||||
:style="'width: ' + Math.min(health?.system?.cpu_percent || 0, 100) + '%'"
|
||||
></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Memory -->
|
||||
<div>
|
||||
<div class="flex justify-between mb-1">
|
||||
<span class="text-sm font-medium text-gray-600 dark:text-gray-400">Memory</span>
|
||||
<span class="text-sm">
|
||||
<span class="font-semibold" x-text="(health?.system?.memory_percent || 0).toFixed(1) + '%'"></span>
|
||||
<span class="text-gray-500 dark:text-gray-400" x-text="' (' + (health?.system?.memory_used_gb || 0).toFixed(1) + ' / ' + (health?.system?.memory_total_gb || 0).toFixed(1) + ' GB)'"></span>
|
||||
</span>
|
||||
</div>
|
||||
<div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2">
|
||||
<div
|
||||
:class="{
|
||||
'bg-green-500': (health?.system?.memory_percent || 0) < 75,
|
||||
'bg-yellow-500': (health?.system?.memory_percent || 0) >= 75 && (health?.system?.memory_percent || 0) < 90,
|
||||
'bg-red-500': (health?.system?.memory_percent || 0) >= 90
|
||||
}"
|
||||
class="h-2 rounded-full transition-all"
|
||||
:style="'width: ' + Math.min(health?.system?.memory_percent || 0, 100) + '%'"
|
||||
></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Disk -->
|
||||
<div>
|
||||
<div class="flex justify-between mb-1">
|
||||
<span class="text-sm font-medium text-gray-600 dark:text-gray-400">Disk</span>
|
||||
<span class="text-sm">
|
||||
<span class="font-semibold" x-text="(health?.system?.disk_percent || 0).toFixed(1) + '%'"></span>
|
||||
<span class="text-gray-500 dark:text-gray-400" x-text="' (' + (health?.system?.disk_used_gb || 0).toFixed(1) + ' / ' + (health?.system?.disk_total_gb || 0).toFixed(1) + ' GB)'"></span>
|
||||
</span>
|
||||
</div>
|
||||
<div class="w-full bg-gray-200 dark:bg-gray-700 rounded-full h-2">
|
||||
<div
|
||||
:class="{
|
||||
'bg-green-500': (health?.system?.disk_percent || 0) < 70,
|
||||
'bg-yellow-500': (health?.system?.disk_percent || 0) >= 70 && (health?.system?.disk_percent || 0) < 85,
|
||||
'bg-red-500': (health?.system?.disk_percent || 0) >= 85
|
||||
}"
|
||||
class="h-2 rounded-full transition-all"
|
||||
:style="'width: ' + Math.min(health?.system?.disk_percent || 0, 100) + '%'"
|
||||
></div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Capacity Thresholds -->
|
||||
<div class="px-4 py-5 bg-white rounded-lg shadow-md dark:bg-gray-800">
|
||||
<h3 class="mb-4 text-lg font-semibold text-gray-700 dark:text-gray-200">Capacity Thresholds</h3>
|
||||
<div class="space-y-3">
|
||||
<template x-for="threshold in health?.thresholds || []" :key="threshold.name">
|
||||
<div class="flex items-center justify-between py-2 border-b border-gray-100 dark:border-gray-700 last:border-0">
|
||||
<div class="flex items-center gap-2">
|
||||
<span
|
||||
:class="{
|
||||
'bg-green-100 text-green-600 dark:bg-green-900/50 dark:text-green-400': threshold.status === 'ok',
|
||||
'bg-yellow-100 text-yellow-600 dark:bg-yellow-900/50 dark:text-yellow-400': threshold.status === 'warning',
|
||||
'bg-red-100 text-red-600 dark:bg-red-900/50 dark:text-red-400': threshold.status === 'critical'
|
||||
}"
|
||||
class="w-2 h-2 rounded-full"
|
||||
></span>
|
||||
<span class="text-sm text-gray-700 dark:text-gray-300" x-text="threshold.name"></span>
|
||||
</div>
|
||||
<div class="text-right">
|
||||
<span class="text-sm font-medium" x-text="formatNumber(threshold.current)"></span>
|
||||
<span class="text-xs text-gray-500 dark:text-gray-400" x-text="' / ' + formatNumber(threshold.limit)"></span>
|
||||
<span
|
||||
:class="{
|
||||
'text-green-600 dark:text-green-400': threshold.status === 'ok',
|
||||
'text-yellow-600 dark:text-yellow-400': threshold.status === 'warning',
|
||||
'text-red-600 dark:text-red-400': threshold.status === 'critical'
|
||||
}"
|
||||
class="text-xs ml-1"
|
||||
x-text="'(' + threshold.percent_used.toFixed(0) + '%)'"
|
||||
></span>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Recommendations -->
|
||||
<div class="px-4 py-5 bg-white rounded-lg shadow-md dark:bg-gray-800">
|
||||
<h3 class="mb-4 text-lg font-semibold text-gray-700 dark:text-gray-200">Scaling Recommendations</h3>
|
||||
<div class="space-y-3">
|
||||
<template x-for="rec in health?.recommendations || []" :key="rec.title">
|
||||
<div
|
||||
:class="{
|
||||
'border-blue-200 bg-blue-50 dark:border-blue-800 dark:bg-blue-900/20': rec.priority === 'info',
|
||||
'border-yellow-200 bg-yellow-50 dark:border-yellow-800 dark:bg-yellow-900/20': rec.priority === 'warning',
|
||||
'border-red-200 bg-red-50 dark:border-red-800 dark:bg-red-900/20': rec.priority === 'critical'
|
||||
}"
|
||||
class="p-4 rounded-lg border"
|
||||
>
|
||||
<div class="flex items-start gap-3">
|
||||
<span
|
||||
:class="{
|
||||
'text-blue-600 dark:text-blue-400': rec.priority === 'info',
|
||||
'text-yellow-600 dark:text-yellow-400': rec.priority === 'warning',
|
||||
'text-red-600 dark:text-red-400': rec.priority === 'critical'
|
||||
}"
|
||||
x-html="rec.priority === 'info' ? $icon('information-circle', 'w-5 h-5') : (rec.priority === 'warning' ? $icon('exclamation', 'w-5 h-5') : $icon('x-circle', 'w-5 h-5'))"
|
||||
></span>
|
||||
<div class="flex-1">
|
||||
<p class="font-medium text-gray-800 dark:text-gray-200" x-text="rec.title"></p>
|
||||
<p class="text-sm text-gray-600 dark:text-gray-400 mt-1" x-text="rec.description"></p>
|
||||
<p x-show="rec.action" class="text-sm font-medium mt-2" x-text="rec.action"></p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Quick Links -->
|
||||
<div class="px-4 py-5 bg-white rounded-lg shadow-md dark:bg-gray-800">
|
||||
<h3 class="mb-4 text-lg font-semibold text-gray-700 dark:text-gray-200">Related Resources</h3>
|
||||
<div class="grid gap-4 md:grid-cols-3">
|
||||
<a href="/admin/code-quality" class="flex items-center gap-3 p-3 rounded-lg border border-gray-200 dark:border-gray-700 hover:bg-gray-50 dark:hover:bg-gray-700 transition-colors">
|
||||
<span class="text-purple-600 dark:text-purple-400" x-html="$icon('code', 'w-5 h-5')"></span>
|
||||
<span class="text-sm font-medium text-gray-700 dark:text-gray-300">Code Quality Dashboard</span>
|
||||
</a>
|
||||
<a href="/admin/settings" class="flex items-center gap-3 p-3 rounded-lg border border-gray-200 dark:border-gray-700 hover:bg-gray-50 dark:hover:bg-gray-700 transition-colors">
|
||||
<span class="text-gray-600 dark:text-gray-400" x-html="$icon('cog', 'w-5 h-5')"></span>
|
||||
<span class="text-sm font-medium text-gray-700 dark:text-gray-300">Platform Settings</span>
|
||||
</a>
|
||||
<a href="https://docs.wizamart.com/architecture/capacity-planning/" target="_blank" class="flex items-center gap-3 p-3 rounded-lg border border-gray-200 dark:border-gray-700 hover:bg-gray-50 dark:hover:bg-gray-700 transition-colors">
|
||||
<span class="text-blue-600 dark:text-blue-400" x-html="$icon('book-open', 'w-5 h-5')"></span>
|
||||
<span class="text-sm font-medium text-gray-700 dark:text-gray-300">Capacity Planning Docs</span>
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endblock %}
|
||||
|
||||
{% block extra_scripts %}
|
||||
<script src="{{ url_for('monitoring_static', path='admin/js/platform-health.js') }}"></script>
|
||||
{% endblock %}
|
||||
Reference in New Issue
Block a user