fix: improve SQLite concurrency and database logging reliability

Database improvements:
- Enable WAL mode for better concurrent read/write access
- Add busy_timeout (30s) to wait for locked database
- Add synchronous=NORMAL for balanced safety/performance
- Configure check_same_thread=False for thread safety

Logging improvements:
- Add retry logic (3 attempts) for database locked errors
- Silently skip logging on persistent failures to avoid spam
- Properly rollback failed transactions

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2025-12-12 22:36:27 +01:00
parent da9e0b7c64
commit 942f3722f5
2 changed files with 95 additions and 52 deletions

View File

@@ -10,12 +10,39 @@ This module provides classes and functions for:
import logging import logging
from sqlalchemy import create_engine from sqlalchemy import create_engine, event
from sqlalchemy.orm import declarative_base, sessionmaker from sqlalchemy.orm import declarative_base, sessionmaker
from .config import settings from .config import settings
engine = create_engine(settings.database_url)
def _configure_sqlite_connection(dbapi_connection, connection_record):
"""Configure SQLite connection for better concurrency.
- WAL mode: Allows concurrent reads during writes
- busy_timeout: Wait up to 30 seconds if database is locked
- synchronous=NORMAL: Balance between safety and performance
"""
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA journal_mode=WAL")
cursor.execute("PRAGMA busy_timeout=30000")
cursor.execute("PRAGMA synchronous=NORMAL")
cursor.close()
# Build the engine, adding SQLite-specific options when applicable.
engine_kwargs = {}

# The app shares SQLite connections across threads; SQLAlchemy's pooling
# makes that safe, so relax sqlite3's same-thread check.
if settings.database_url.startswith("sqlite"):
    engine_kwargs["connect_args"] = {"check_same_thread": False}

engine = create_engine(settings.database_url, **engine_kwargs)

# Apply the concurrency PRAGMAs (WAL, busy_timeout, synchronous) on every
# newly established SQLite connection.
if settings.database_url.startswith("sqlite"):
    event.listen(engine, "connect", _configure_sqlite_connection)

SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()

View File

@@ -23,71 +23,87 @@ class DatabaseLogHandler(logging.Handler):
Custom logging handler that stores WARNING, ERROR, and CRITICAL logs in database. Custom logging handler that stores WARNING, ERROR, and CRITICAL logs in database.
Runs asynchronously to avoid blocking application performance. Runs asynchronously to avoid blocking application performance.
Uses retry logic for SQLite database locking issues.
""" """
MAX_RETRIES = 3
RETRY_DELAY = 0.1 # 100ms delay between retries
def __init__(self): def __init__(self):
super().__init__() super().__init__()
self.setLevel(logging.WARNING) # Only log WARNING and above to database self.setLevel(logging.WARNING) # Only log WARNING and above to database
def emit(self, record): def emit(self, record):
"""Emit a log record to the database.""" """Emit a log record to the database with retry logic for SQLite locking."""
try: import time
from app.core.database import SessionLocal
from models.database.admin import ApplicationLog
# Skip if no database session available
db = SessionLocal()
if not db:
return
for attempt in range(self.MAX_RETRIES):
try: try:
# Extract exception information if present from app.core.database import SessionLocal
exception_type = None from models.database.admin import ApplicationLog
exception_message = None
stack_trace = None
if record.exc_info: # Skip if no database session available
exception_type = record.exc_info[0].__name__ if record.exc_info[0] else None db = SessionLocal()
exception_message = str(record.exc_info[1]) if record.exc_info[1] else None if not db:
stack_trace = "".join(traceback.format_exception(*record.exc_info)) return
# Extract context from record (if middleware added it) try:
user_id = getattr(record, "user_id", None) # Extract exception information if present
vendor_id = getattr(record, "vendor_id", None) exception_type = None
request_id = getattr(record, "request_id", None) exception_message = None
context = getattr(record, "context", None) stack_trace = None
# Create log entry if record.exc_info:
log_entry = ApplicationLog( exception_type = record.exc_info[0].__name__ if record.exc_info[0] else None
timestamp=datetime.fromtimestamp(record.created, tz=UTC), exception_message = str(record.exc_info[1]) if record.exc_info[1] else None
level=record.levelname, stack_trace = "".join(traceback.format_exception(*record.exc_info))
logger_name=record.name,
module=record.module,
function_name=record.funcName,
line_number=record.lineno,
message=record.getMessage(),
exception_type=exception_type,
exception_message=exception_message,
stack_trace=stack_trace,
request_id=request_id,
user_id=user_id,
vendor_id=vendor_id,
context=context,
)
db.add(log_entry) # Extract context from record (if middleware added it)
db.commit() user_id = getattr(record, "user_id", None)
vendor_id = getattr(record, "vendor_id", None)
request_id = getattr(record, "request_id", None)
context = getattr(record, "context", None)
except Exception as e: # Create log entry
# If database logging fails, don't crash the app log_entry = ApplicationLog(
# Just print to stderr timestamp=datetime.fromtimestamp(record.created, tz=UTC),
print(f"Failed to write log to database: {e}", file=sys.stderr) level=record.levelname,
finally: logger_name=record.name,
db.close() module=record.module,
function_name=record.funcName,
line_number=record.lineno,
message=record.getMessage(),
exception_type=exception_type,
exception_message=exception_message,
stack_trace=stack_trace,
request_id=request_id,
user_id=user_id,
vendor_id=vendor_id,
context=context,
)
except Exception: db.add(log_entry)
# Silently fail - logging should never crash the app db.commit()
pass return # Success, exit retry loop
except Exception as e:
db.rollback()
# Check if it's a database locked error
if "database is locked" in str(e).lower():
if attempt < self.MAX_RETRIES - 1:
time.sleep(self.RETRY_DELAY * (attempt + 1))
continue
# For other errors or final attempt, silently skip
# Don't print to stderr to avoid log spam during imports
pass
finally:
db.close()
except Exception:
# Silently fail - logging should never crash the app
pass
break # Exit retry loop on non-recoverable errors
def get_log_level_from_db(): def get_log_level_from_db():