feat: add import error tracking and translation tabs

Import Error Tracking:
- Add MarketplaceImportError model to store detailed error information
- Store row number, identifier, error type, message, and row data for each error
- Add API endpoint GET /admin/marketplace-import-jobs/{job_id}/errors
- Add UI to view and browse import errors in job details modal
- Support pagination and error type filtering

Translation Tabs:
- Replace flat translation list with tabbed interface on product detail page
- Add language tabs with full language names
- Add copy-to-clipboard functionality for translation content
- Improve UX with better visual separation of translations

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2025-12-13 13:33:03 +01:00
parent 3316894c27
commit c2f42c2913
14 changed files with 542 additions and 44 deletions

View File

@@ -18,6 +18,7 @@ import requests
from sqlalchemy import literal
from sqlalchemy.orm import Session
from models.database.marketplace_import_job import MarketplaceImportError
from models.database.marketplace_product import MarketplaceProduct
from models.database.marketplace_product_translation import MarketplaceProductTranslation
@@ -280,6 +281,7 @@ class CSVProcessor:
batch_size: int,
db: Session,
language: str = "en",
import_job_id: int | None = None,
) -> dict[str, Any]:
"""
Process CSV from URL with marketplace and vendor information.
@@ -291,6 +293,7 @@ class CSVProcessor:
batch_size: Number of rows to process in each batch
db: Database session
language: Language code for translations (default: 'en')
import_job_id: ID of the import job for error tracking (optional)
Returns:
Dictionary with processing results
@@ -315,6 +318,8 @@ class CSVProcessor:
# Process in batches
for i in range(0, len(df), batch_size):
batch_df = df.iloc[i : i + batch_size]
# Calculate base row number for this batch (1-indexed for user-friendly display)
base_row_num = i + 2 # +2 for header row and 1-indexing
batch_result = await self._process_marketplace_batch(
batch_df,
marketplace,
@@ -323,6 +328,8 @@ class CSVProcessor:
i // batch_size + 1,
language=language,
source_file=source_file,
import_job_id=import_job_id,
base_row_num=base_row_num,
)
imported += batch_result["imported"]
@@ -341,6 +348,39 @@ class CSVProcessor:
"language": language,
}
def _create_import_error(
    self,
    db: Session,
    import_job_id: int,
    row_number: int,
    error_type: str,
    error_message: str,
    identifier: str | None = None,
    row_data: dict | None = None,
) -> None:
    """Persist a single import-error record for later review.

    Args:
        db: Active database session; the record is added but not committed here.
        import_job_id: The import job this error belongs to.
        row_number: 1-indexed CSV row number (user-facing, includes header offset).
        error_type: Short machine-readable category (e.g. "missing_id").
        error_message: Human-readable description of the failure.
        identifier: Best-available product identifier for the row, if any.
        row_data: Raw CSV row; only a whitelisted subset is stored to keep
            the JSON payload small.
    """
    # Fields worth keeping when a human reviews the failed row; everything
    # else is dropped so we never store huge JSON blobs per error.
    kept_fields = frozenset([
        "marketplace_product_id", "title", "gtin", "mpn", "sku",
        "brand", "price", "availability", "link",
    ])
    trimmed: dict | None = None
    if row_data:
        candidate: dict = {}
        # Preserve the row's own key order while filtering out empty values.
        for key, value in row_data.items():
            if key in kept_fields and value is not None and str(value).strip():
                candidate[key] = value
        trimmed = candidate or None
    db.add(
        MarketplaceImportError(
            import_job_id=import_job_id,
            row_number=row_number,
            identifier=identifier,
            error_type=error_type,
            error_message=error_message,
            row_data=trimmed,
        )
    )
async def _process_marketplace_batch(
self,
batch_df: pd.DataFrame,
@@ -350,6 +390,8 @@ class CSVProcessor:
batch_num: int,
language: str = "en",
source_file: str | None = None,
import_job_id: int | None = None,
base_row_num: int = 2,
) -> dict[str, int]:
"""Process a batch of CSV rows with marketplace information."""
imported = 0
@@ -361,10 +403,13 @@ class CSVProcessor:
f"{marketplace} -> {vendor_name}"
)
for index, row in batch_df.iterrows():
for batch_idx, (index, row) in enumerate(batch_df.iterrows()):
row_number = base_row_num + batch_idx
row_dict = row.to_dict()
try:
# Convert row to dictionary and clean up
product_data = self._clean_row_data(row.to_dict())
product_data = self._clean_row_data(row_dict)
# Extract translation fields BEFORE processing product
translation_data = self._extract_translation_data(product_data)
@@ -373,17 +418,40 @@ class CSVProcessor:
product_data["marketplace"] = marketplace
product_data["vendor_name"] = vendor_name
# Get identifier for error tracking
identifier = product_data.get("marketplace_product_id") or product_data.get("gtin") or product_data.get("mpn")
# Validate required fields
if not product_data.get("marketplace_product_id"):
logger.warning(
f"Row {index}: Missing marketplace_product_id, skipping"
f"Row {row_number}: Missing marketplace_product_id, skipping"
)
if import_job_id:
self._create_import_error(
db=db,
import_job_id=import_job_id,
row_number=row_number,
error_type="missing_id",
error_message="Missing marketplace_product_id - product cannot be identified",
identifier=identifier,
row_data=row_dict,
)
errors += 1
continue
# Title is now required in translation_data
if not translation_data.get("title"):
logger.warning(f"Row {index}: Missing title, skipping")
logger.warning(f"Row {row_number}: Missing title, skipping")
if import_job_id:
self._create_import_error(
db=db,
import_job_id=import_job_id,
row_number=row_number,
error_type="missing_title",
error_message="Missing title - product title is required",
identifier=product_data.get("marketplace_product_id"),
row_data=row_dict,
)
errors += 1
continue
@@ -448,7 +516,17 @@ class CSVProcessor:
)
except Exception as e:
logger.error(f"Error processing row: {e}")
logger.error(f"Error processing row {row_number}: {e}")
if import_job_id:
self._create_import_error(
db=db,
import_job_id=import_job_id,
row_number=row_number,
error_type="processing_error",
error_message=str(e),
identifier=row_dict.get("marketplace_product_id") or row_dict.get("id"),
row_data=row_dict,
)
errors += 1
continue