"""
|
|
ChunkHandler — job handler that wraps the chunker Pipeline.
|
|
|
|
Downloads source from S3/MinIO, runs FFmpeg chunking pipeline,
|
|
uploads mp4 segments + manifest back to S3/MinIO.
|
|
"""

import logging
import os
import shutil
import tempfile
from typing import Any, Callable, Dict, Optional

from core.chunker import Pipeline
from core.storage import BUCKET_IN, BUCKET_OUT, download_to_temp, upload_file

from .base import Handler

logger = logging.getLogger(__name__)


class ChunkHandler(Handler):
    """
    Handles chunk processing jobs by delegating to the chunker Pipeline.

    Expected payload keys:
        source_key: str — S3 key of the source file in BUCKET_IN
        chunk_duration: float — seconds per chunk (default: 10.0)
        num_workers: int — concurrent workers (default: 4)
        max_retries: int — retries per chunk (default: 3)
        processor_type: str — one of "ffmpeg", "checksum",
            "simulated_decode", "composite" (default: "ffmpeg")
        queue_size: int — max queue depth (default: 10)
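
    Example payload (illustrative values):
        {
            "source_key": "uploads/demo.mp4",
            "chunk_duration": 10.0,
            "processor_type": "ffmpeg",
        }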
    """

    def process(
        self,
        job_id: str,
        payload: Dict[str, Any],
        progress_callback: Optional[Callable[[int, Dict[str, Any]], None]] = None,
    ) -> Dict[str, Any]:
        """Run the chunking pipeline for the payload's source and upload the results."""
        source_key = payload["source_key"]
        processor_type = payload.get("processor_type", "ffmpeg")

        logger.info(f"ChunkHandler starting job {job_id}: {source_key}")

        # Download source from S3/MinIO
        tmp_source = download_to_temp(BUCKET_IN, source_key)

        # Create temp output directory for chunks
        tmp_output_dir = tempfile.mkdtemp(prefix=f"chunks-{job_id}-")

        try:
            def event_bridge(event_type: str, data: Dict[str, Any]) -> None:
                """Bridge pipeline events to the job progress callback."""
                if progress_callback and event_type == "pipeline_complete":
                    progress_callback(100, data)
                elif progress_callback and event_type == "chunk_done":
                    total = data.get("total_chunks", 1)
                    if total > 0:
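                        # Map completed-chunk count onto 0-99%; 100% is
                        # reserved for the pipeline_complete event above.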
                        pct = min(int((data.get("sequence", 0) + 1) / total * 100), 99)
                        progress_callback(pct, data)

            pipeline = Pipeline(
                source=tmp_source,
                chunk_duration=payload.get("chunk_duration", 10.0),
                num_workers=payload.get("num_workers", 4),
                max_retries=payload.get("max_retries", 3),
                processor_type=processor_type,
                queue_size=payload.get("queue_size", 10),
                event_callback=event_bridge,
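                # Only the ffmpeg processor is given the on-disk output directory.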
                output_dir=tmp_output_dir if processor_type == "ffmpeg" else None,
            )

            result = pipeline.run()

            # Upload chunks + manifest to S3/MinIO
            output_prefix = f"chunks/{job_id}"
            uploaded_files = []

            for chunk_file in result.chunk_files:
                filename = os.path.basename(chunk_file)
                output_key = f"{output_prefix}/{filename}"
                upload_file(chunk_file, BUCKET_OUT, output_key)
                uploaded_files.append(output_key)
                logger.info(f"Uploaded {output_key}")

            # Upload manifest
            manifest_path = os.path.join(tmp_output_dir, "manifest.json")
            if os.path.exists(manifest_path):
                manifest_key = f"{output_prefix}/manifest.json"
                upload_file(manifest_path, BUCKET_OUT, manifest_key)
                uploaded_files.append(manifest_key)
                logger.info(f"Uploaded {manifest_key}")

            return {
                "status": "completed" if result.failed == 0 else "completed_with_errors",
                "total_chunks": result.total_chunks,
                "processed": result.processed,
                "failed": result.failed,
                "retries": result.retries,
                "elapsed_time": result.elapsed_time,
                "throughput_mbps": result.throughput_mbps,
                "worker_stats": result.worker_stats,
                "errors": result.errors,
                "chunks_in_order": result.chunks_in_order,
                "output_prefix": output_prefix,
                "uploaded_files": uploaded_files,
            }

        finally:
            # Clean up the downloaded source and the temp chunk directory.
            try:
                os.unlink(tmp_source)
            except OSError:
                pass
            # ignore_errors=True already suppresses failures here.
            shutil.rmtree(tmp_output_dir, ignore_errors=True)
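
# Illustrative usage (a sketch: assumes the Handler base class takes no
# constructor arguments, and "uploads/demo.mp4" is a hypothetical key in
# BUCKET_IN):
#
#     handler = ChunkHandler()
#     summary = handler.process(
#         job_id="job-123",
#         payload={"source_key": "uploads/demo.mp4", "chunk_duration": 5.0},
#         progress_callback=lambda pct, data: print(f"{pct}% done"),
#     )
#     print(summary["status"], summary["uploaded_files"])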