django and fastapi apps
This commit is contained in:
23
.env.template
Normal file
23
.env.template
Normal file
@@ -0,0 +1,23 @@
|
||||
# MPR Environment Configuration
|
||||
# Copy to .env and adjust values as needed
|
||||
|
||||
# Database
|
||||
POSTGRES_DB=mpr
|
||||
POSTGRES_USER=mpr_user
|
||||
POSTGRES_PASSWORD=mpr_pass
|
||||
POSTGRES_HOST=postgres
|
||||
POSTGRES_PORT=5432
|
||||
DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
|
||||
|
||||
# Redis
|
||||
REDIS_HOST=redis
|
||||
REDIS_PORT=6379
|
||||
REDIS_URL=redis://${REDIS_HOST}:${REDIS_PORT}/0
|
||||
|
||||
# Django
|
||||
DEBUG=1
|
||||
DJANGO_SETTINGS_MODULE=mpr.settings
|
||||
SECRET_KEY=change-this-in-production
|
||||
|
||||
# Worker
|
||||
MPR_EXECUTOR=local
|
||||
38
.gitignore
vendored
Normal file
38
.gitignore
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
# Environment
|
||||
.env
|
||||
|
||||
# Python
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
*.so
|
||||
.Python
|
||||
.venv/
|
||||
venv/
|
||||
ENV/
|
||||
env/
|
||||
|
||||
# Django
|
||||
*.log
|
||||
*.pot
|
||||
*.pyc
|
||||
db.sqlite3
|
||||
media/
|
||||
|
||||
# Node
|
||||
node_modules/
|
||||
dist/
|
||||
.npm
|
||||
|
||||
# IDE
|
||||
.idea/
|
||||
.vscode/
|
||||
*.swp
|
||||
*.swo
|
||||
|
||||
# OS
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# Project specific
|
||||
def/
|
||||
54
api/deps.py
Normal file
54
api/deps.py
Normal file
@@ -0,0 +1,54 @@
|
||||
"""
|
||||
FastAPI dependencies.
|
||||
|
||||
Provides database sessions, settings, and common dependencies.
|
||||
"""
|
||||
|
||||
import os
|
||||
from functools import lru_cache
|
||||
from typing import Generator
|
||||
|
||||
import django
|
||||
from django.conf import settings as django_settings
|
||||
|
||||
# Initialize Django
|
||||
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mpr.settings")
|
||||
django.setup()
|
||||
|
||||
from mpr.media_assets.models import MediaAsset, TranscodeJob, TranscodePreset
|
||||
|
||||
|
||||
@lru_cache
def get_settings():
    """Return the (cached) Django settings object for dependency injection."""
    return django_settings
|
||||
|
||||
|
||||
def get_asset(asset_id: str) -> MediaAsset:
    """Resolve a MediaAsset by primary key; translate a miss into HTTP 404."""
    from fastapi import HTTPException

    try:
        asset = MediaAsset.objects.get(id=asset_id)
    except MediaAsset.DoesNotExist:
        raise HTTPException(status_code=404, detail="Asset not found")
    return asset
|
||||
|
||||
|
||||
def get_preset(preset_id: str) -> TranscodePreset:
    """Resolve a TranscodePreset by primary key; translate a miss into HTTP 404."""
    from fastapi import HTTPException

    try:
        preset = TranscodePreset.objects.get(id=preset_id)
    except TranscodePreset.DoesNotExist:
        raise HTTPException(status_code=404, detail="Preset not found")
    return preset
|
||||
|
||||
|
||||
def get_job(job_id: str) -> TranscodeJob:
    """Resolve a TranscodeJob by primary key; translate a miss into HTTP 404."""
    from fastapi import HTTPException

    try:
        job = TranscodeJob.objects.get(id=job_id)
    except TranscodeJob.DoesNotExist:
        raise HTTPException(status_code=404, detail="Job not found")
    return job
|
||||
56
api/main.py
Normal file
56
api/main.py
Normal file
@@ -0,0 +1,56 @@
|
||||
"""
|
||||
MPR FastAPI Application
|
||||
|
||||
Main entry point for the REST API.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
# Add project root to path
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
# Initialize Django before importing models
|
||||
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mpr.settings")
|
||||
|
||||
import django
|
||||
|
||||
django.setup()
|
||||
|
||||
from fastapi import FastAPI
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
|
||||
from api.routes import assets_router, jobs_router, presets_router, system_router
|
||||
|
||||
app = FastAPI(
|
||||
title="MPR API",
|
||||
description="Media Processor REST API",
|
||||
version="0.1.0",
|
||||
docs_url="/docs",
|
||||
redoc_url="/redoc",
|
||||
)
|
||||
|
||||
# CORS
|
||||
app.add_middleware(
|
||||
CORSMiddleware,
|
||||
allow_origins=["http://mpr.local.ar", "http://localhost:5173"],
|
||||
allow_credentials=True,
|
||||
allow_methods=["*"],
|
||||
allow_headers=["*"],
|
||||
)
|
||||
|
||||
# Routes
|
||||
app.include_router(system_router)
|
||||
app.include_router(assets_router)
|
||||
app.include_router(presets_router)
|
||||
app.include_router(jobs_router)
|
||||
|
||||
|
||||
@app.get("/")
|
||||
def root():
|
||||
"""API root."""
|
||||
return {
|
||||
"name": "MPR API",
|
||||
"version": "0.1.0",
|
||||
"docs": "/docs",
|
||||
}
|
||||
8
api/routes/__init__.py
Normal file
8
api/routes/__init__.py
Normal file
@@ -0,0 +1,8 @@
|
||||
"""API Routes."""
|
||||
|
||||
from .assets import router as assets_router
|
||||
from .jobs import router as jobs_router
|
||||
from .presets import router as presets_router
|
||||
from .system import router as system_router
|
||||
|
||||
__all__ = ["assets_router", "jobs_router", "presets_router", "system_router"]
|
||||
90
api/routes/assets.py
Normal file
90
api/routes/assets.py
Normal file
@@ -0,0 +1,90 @@
|
||||
"""
|
||||
Asset endpoints - media file registration and metadata.
|
||||
"""
|
||||
|
||||
from typing import Optional
|
||||
from uuid import UUID
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Query
|
||||
|
||||
from api.deps import get_asset
|
||||
from api.schemas import AssetCreate, AssetResponse, AssetUpdate
|
||||
|
||||
router = APIRouter(prefix="/assets", tags=["assets"])
|
||||
|
||||
|
||||
@router.post("/", response_model=AssetResponse, status_code=201)
def create_asset(data: AssetCreate):
    """
    Register a media file as an asset.

    The file must exist on disk. A probe task will be queued
    to extract metadata asynchronously.
    """
    from pathlib import Path

    from mpr.media_assets.models import MediaAsset

    # Reject registration of paths that do not exist on disk.
    source = Path(data.file_path)
    if not source.exists():
        raise HTTPException(status_code=400, detail="File not found")

    record = MediaAsset.objects.create(
        filename=data.filename or source.name,
        file_path=str(source.absolute()),
        file_size=source.stat().st_size,
    )

    # TODO: Queue probe task via gRPC/Celery

    return record
|
||||
|
||||
|
||||
@router.get("/", response_model=list[AssetResponse])
def list_assets(
    status: Optional[str] = Query(None, description="Filter by status"),
    limit: int = Query(50, ge=1, le=100),
    offset: int = Query(0, ge=0),
):
    """List assets with optional filtering."""
    from mpr.media_assets.models import MediaAsset

    queryset = MediaAsset.objects.all()
    if status:
        queryset = queryset.filter(status=status)

    # Window the queryset for pagination before materializing it.
    window = queryset[offset : offset + limit]
    return list(window)
|
||||
|
||||
|
||||
@router.get("/{asset_id}", response_model=AssetResponse)
def get_asset_detail(asset_id: UUID, asset=Depends(get_asset)):
    """Return one asset; lookup (and 404) happens in the get_asset dependency."""
    return asset
|
||||
|
||||
|
||||
@router.patch("/{asset_id}", response_model=AssetResponse)
def update_asset(asset_id: UUID, data: AssetUpdate, asset=Depends(get_asset)):
    """Update asset metadata (comments, tags); only provided fields change."""
    changed = []
    for field in ("comments", "tags"):
        value = getattr(data, field)
        if value is not None:
            setattr(asset, field, value)
            changed.append(field)

    # Skip the write entirely when the patch carried no editable fields.
    if changed:
        asset.save(update_fields=changed)

    return asset
|
||||
|
||||
|
||||
@router.delete("/{asset_id}", status_code=204)
def delete_asset(asset_id: UUID, asset=Depends(get_asset)):
    """Delete the asset record (the lookup/404 lives in the dependency)."""
    asset.delete()
|
||||
160
api/routes/jobs.py
Normal file
160
api/routes/jobs.py
Normal file
@@ -0,0 +1,160 @@
|
||||
"""
|
||||
Job endpoints - transcode/trim job management.
|
||||
"""
|
||||
|
||||
import json
|
||||
from typing import Optional
|
||||
from uuid import UUID
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Query
|
||||
|
||||
from api.deps import get_asset, get_job, get_preset
|
||||
from api.schemas import JobCreate, JobResponse
|
||||
|
||||
router = APIRouter(prefix="/jobs", tags=["jobs"])
|
||||
|
||||
|
||||
@router.post("/", response_model=JobResponse, status_code=201)
def create_job(data: JobCreate):
    """
    Create a transcode or trim job.

    - With preset_id: Full transcode using preset settings
    - Without preset_id but with trim_start/end: Trim only (stream copy)

    Raises:
        HTTPException 404: source asset or preset does not exist.
        HTTPException 400: source is not ready, or neither a preset nor a
            trim range was supplied.
    """
    from mpr.media_assets.models import MediaAsset, TranscodeJob, TranscodePreset

    # Get source asset
    try:
        source = MediaAsset.objects.get(id=data.source_asset_id)
    except MediaAsset.DoesNotExist:
        raise HTTPException(status_code=404, detail="Source asset not found")

    if source.status != "ready":
        raise HTTPException(status_code=400, detail="Source asset is not ready")

    # Get preset if specified; snapshot its settings at creation time so that
    # later edits to the preset do not affect already-queued jobs.
    preset = None
    preset_snapshot = {}
    if data.preset_id:
        try:
            preset = TranscodePreset.objects.get(id=data.preset_id)
            preset_snapshot = {
                "name": preset.name,
                "container": preset.container,
                "video_codec": preset.video_codec,
                "video_bitrate": preset.video_bitrate,
                "video_crf": preset.video_crf,
                "video_preset": preset.video_preset,
                "resolution": preset.resolution,
                "framerate": preset.framerate,
                "audio_codec": preset.audio_codec,
                "audio_bitrate": preset.audio_bitrate,
                "audio_channels": preset.audio_channels,
                "audio_samplerate": preset.audio_samplerate,
                "extra_args": preset.extra_args,
            }
        except TranscodePreset.DoesNotExist:
            raise HTTPException(status_code=404, detail="Preset not found")

    # Validate trim-only job. Compare against None explicitly: a trim_start
    # of 0.0 is falsy but is a perfectly valid trim point, so a truthiness
    # test would wrongly reject "trim from the very beginning" requests.
    if preset is None and data.trim_start is None and data.trim_end is None:
        raise HTTPException(
            status_code=400, detail="Must specify preset_id or trim_start/trim_end"
        )

    # Generate output filename when the caller did not provide one.
    output_filename = data.output_filename
    if not output_filename:
        from pathlib import Path

        stem = Path(source.filename).stem
        ext = preset_snapshot.get("container", "mp4") if preset else "mp4"
        output_filename = f"{stem}_output.{ext}"

    # Create job
    job = TranscodeJob.objects.create(
        source_asset=source,
        preset=preset,
        preset_snapshot=preset_snapshot,
        trim_start=data.trim_start,
        trim_end=data.trim_end,
        output_filename=output_filename,
        priority=data.priority or 0,
    )

    # TODO: Submit job via gRPC

    return job
|
||||
|
||||
|
||||
@router.get("/", response_model=list[JobResponse])
def list_jobs(
    status: Optional[str] = Query(None, description="Filter by status"),
    source_asset_id: Optional[UUID] = Query(None),
    limit: int = Query(50, ge=1, le=100),
    offset: int = Query(0, ge=0),
):
    """List jobs with optional filtering."""
    from mpr.media_assets.models import TranscodeJob

    queryset = TranscodeJob.objects.all()
    if status:
        queryset = queryset.filter(status=status)
    if source_asset_id:
        queryset = queryset.filter(source_asset_id=source_asset_id)

    # Paginate before materializing the queryset.
    return list(queryset[offset : offset + limit])
|
||||
|
||||
|
||||
@router.get("/{job_id}", response_model=JobResponse)
def get_job_detail(job_id: UUID, job=Depends(get_job)):
    """Return one job (progress included); lookup/404 lives in get_job."""
    return job
|
||||
|
||||
|
||||
@router.get("/{job_id}/progress")
def get_job_progress(job_id: UUID, job=Depends(get_job)):
    """Get real-time job progress."""
    # Copy the transient progress counters straight off the model instance.
    payload = {
        "job_id": str(job.id),
        "status": job.status,
    }
    for field in ("progress", "current_frame", "current_time", "speed"):
        payload[field] = getattr(job, field)
    return payload
|
||||
|
||||
|
||||
@router.post("/{job_id}/cancel", response_model=JobResponse)
def cancel_job(job_id: UUID, job=Depends(get_job)):
    """Cancel a pending or processing job."""
    cancellable = ("pending", "processing")
    if job.status not in cancellable:
        raise HTTPException(
            status_code=400, detail=f"Cannot cancel job with status: {job.status}"
        )

    # TODO: Cancel via gRPC

    job.status = "cancelled"
    job.save(update_fields=["status"])

    return job
|
||||
|
||||
|
||||
@router.post("/{job_id}/retry", response_model=JobResponse)
def retry_job(job_id: UUID, job=Depends(get_job)):
    """Retry a failed job by resetting it to the pending state."""
    if job.status != "failed":
        raise HTTPException(status_code=400, detail="Only failed jobs can be retried")

    # Clear all failure state so the worker picks the job up fresh.
    job.status = "pending"
    job.progress = 0
    job.error_message = None
    job.save(update_fields=["status", "progress", "error_message"])

    # TODO: Resubmit via gRPC

    return job
|
||||
100
api/routes/presets.py
Normal file
100
api/routes/presets.py
Normal file
@@ -0,0 +1,100 @@
|
||||
"""
|
||||
Preset endpoints - transcode configuration templates.
|
||||
"""
|
||||
|
||||
from uuid import UUID
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
|
||||
from api.deps import get_preset
|
||||
from api.schemas import PresetCreate, PresetResponse, PresetUpdate
|
||||
|
||||
router = APIRouter(prefix="/presets", tags=["presets"])
|
||||
|
||||
|
||||
@router.post("/", response_model=PresetResponse, status_code=201)
def create_preset(data: PresetCreate):
    """Create a custom (non-builtin) preset from the request payload."""
    from mpr.media_assets.models import TranscodePreset

    # Fall back to the standard defaults wherever the payload left a field
    # empty; user-created presets are never marked builtin.
    fields = dict(
        name=data.name,
        description=data.description or "",
        container=data.container or "mp4",
        video_codec=data.video_codec or "libx264",
        video_bitrate=data.video_bitrate,
        video_crf=data.video_crf,
        video_preset=data.video_preset,
        resolution=data.resolution,
        framerate=data.framerate,
        audio_codec=data.audio_codec or "aac",
        audio_bitrate=data.audio_bitrate,
        audio_channels=data.audio_channels,
        audio_samplerate=data.audio_samplerate,
        extra_args=data.extra_args or [],
        is_builtin=False,
    )
    return TranscodePreset.objects.create(**fields)
|
||||
|
||||
|
||||
@router.get("/", response_model=list[PresetResponse])
def list_presets(include_builtin: bool = True):
    """List all presets, optionally excluding the builtin ones."""
    from mpr.media_assets.models import TranscodePreset

    if include_builtin:
        queryset = TranscodePreset.objects.all()
    else:
        queryset = TranscodePreset.objects.filter(is_builtin=False)

    return list(queryset)
|
||||
|
||||
|
||||
@router.get("/{preset_id}", response_model=PresetResponse)
def get_preset_detail(preset_id: UUID, preset=Depends(get_preset)):
    """Return one preset; lookup/404 lives in the get_preset dependency."""
    return preset
|
||||
|
||||
|
||||
@router.patch("/{preset_id}", response_model=PresetResponse)
def update_preset(preset_id: UUID, data: PresetUpdate, preset=Depends(get_preset)):
    """Update a custom preset. Builtin presets cannot be modified."""
    if preset.is_builtin:
        raise HTTPException(status_code=403, detail="Cannot modify builtin preset")

    # Only fields present (non-None) in the patch payload are written.
    editable = (
        "name",
        "description",
        "container",
        "video_codec",
        "video_bitrate",
        "video_crf",
        "video_preset",
        "resolution",
        "framerate",
        "audio_codec",
        "audio_bitrate",
        "audio_channels",
        "audio_samplerate",
        "extra_args",
    )
    changed = [f for f in editable if getattr(data, f, None) is not None]
    for f in changed:
        setattr(preset, f, getattr(data, f))

    if changed:
        preset.save(update_fields=changed)

    return preset
|
||||
|
||||
|
||||
@router.delete("/{preset_id}", status_code=204)
def delete_preset(preset_id: UUID, preset=Depends(get_preset)):
    """Delete a custom preset. Builtin presets cannot be deleted."""
    if preset.is_builtin:
        raise HTTPException(status_code=403, detail="Cannot delete builtin preset")
    preset.delete()
|
||||
30
api/routes/system.py
Normal file
30
api/routes/system.py
Normal file
@@ -0,0 +1,30 @@
|
||||
"""
|
||||
System endpoints - health checks and FFmpeg capabilities.
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter
|
||||
|
||||
from core.ffmpeg import get_decoders, get_encoders, get_formats
|
||||
|
||||
router = APIRouter(prefix="/system", tags=["system"])
|
||||
|
||||
|
||||
@router.get("/health")
def health_check():
    """Liveness probe: always reports healthy when the app is serving."""
    return dict(status="healthy")
|
||||
|
||||
|
||||
@router.get("/ffmpeg/codecs")
def ffmpeg_codecs():
    """Get available FFmpeg encoders and decoders."""
    encoders = get_encoders()
    decoders = get_decoders()
    return {"encoders": encoders, "decoders": decoders}
|
||||
|
||||
|
||||
@router.get("/ffmpeg/formats")
def ffmpeg_formats():
    """Get available FFmpeg muxers and demuxers."""
    formats = get_formats()
    return formats
|
||||
10
api/schemas/__init__.py
Normal file
10
api/schemas/__init__.py
Normal file
@@ -0,0 +1,10 @@
|
||||
"""API Schemas - GENERATED FILE"""
|
||||
|
||||
from .base import BaseSchema
|
||||
from .asset import AssetCreate, AssetUpdate, AssetResponse
|
||||
from .asset import AssetStatus
|
||||
from .preset import PresetCreate, PresetUpdate, PresetResponse
|
||||
from .job import JobCreate, JobUpdate, JobResponse
|
||||
from .job import JobStatus
|
||||
|
||||
__all__ = ["BaseSchema", "AssetCreate", "AssetUpdate", "AssetResponse", "AssetStatus", "PresetCreate", "PresetUpdate", "PresetResponse", "JobCreate", "JobUpdate", "JobResponse", "JobStatus"]
|
||||
70
api/schemas/asset.py
Normal file
70
api/schemas/asset.py
Normal file
@@ -0,0 +1,70 @@
|
||||
"""MediaAsset Schemas - GENERATED FILE"""
|
||||
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
from typing import Any, Dict, List, Optional
|
||||
from uuid import UUID
|
||||
|
||||
from .base import BaseSchema
|
||||
|
||||
|
||||
class AssetStatus(str, Enum):
    """Asset lifecycle states; values match the Django model's status strings."""

    PENDING = "pending"
    READY = "ready"
    ERROR = "error"
|
||||
|
||||
|
||||
class AssetCreate(BaseSchema):
    """AssetCreate schema.

    Request body for registering an existing on-disk file as an asset.
    """

    filename: str
    file_path: str
    # Media metadata is optional at creation; the probe task fills it in.
    file_size: Optional[int] = None
    duration: Optional[float] = None
    video_codec: Optional[str] = None
    audio_codec: Optional[str] = None
    width: Optional[int] = None
    height: Optional[int] = None
    framerate: Optional[float] = None
    bitrate: Optional[int] = None
    # NOTE(review): properties and tags have no default, so they are required
    # fields — the create_asset route does not send them; confirm intended.
    properties: Dict[str, Any]
    comments: str = ""
    tags: List[str]
|
||||
|
||||
class AssetUpdate(BaseSchema):
    """AssetUpdate schema.

    Partial-update payload: every field is optional and None means
    "leave unchanged".
    """

    filename: Optional[str] = None
    file_path: Optional[str] = None
    status: Optional[AssetStatus] = None
    error_message: Optional[str] = None
    file_size: Optional[int] = None
    duration: Optional[float] = None
    video_codec: Optional[str] = None
    audio_codec: Optional[str] = None
    width: Optional[int] = None
    height: Optional[int] = None
    framerate: Optional[float] = None
    bitrate: Optional[int] = None
    properties: Optional[Dict[str, Any]] = None
    comments: Optional[str] = None
    tags: Optional[List[str]] = None
|
||||
|
||||
class AssetResponse(BaseSchema):
    """AssetResponse schema.

    Serialized MediaAsset as returned by the API (built from ORM instances
    via from_attributes on BaseSchema).
    """

    id: UUID
    filename: str
    file_path: str
    # Fix: default must be the enum member, not the string
    # "AssetStatus.PENDING" — that string is not a valid AssetStatus value
    # and would leak an invalid status into responses.
    status: AssetStatus = AssetStatus.PENDING
    error_message: Optional[str] = None
    file_size: Optional[int] = None
    duration: Optional[float] = None
    video_codec: Optional[str] = None
    audio_codec: Optional[str] = None
    width: Optional[int] = None
    height: Optional[int] = None
    framerate: Optional[float] = None
    bitrate: Optional[int] = None
    properties: Dict[str, Any]
    comments: str = ""
    tags: List[str]
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None
|
||||
8
api/schemas/base.py
Normal file
8
api/schemas/base.py
Normal file
@@ -0,0 +1,8 @@
|
||||
"""Pydantic Base Schema - GENERATED FILE"""
|
||||
|
||||
from pydantic import BaseModel, ConfigDict
|
||||
|
||||
|
||||
class BaseSchema(BaseModel):
|
||||
"""Base schema with ORM mode."""
|
||||
model_config = ConfigDict(from_attributes=True)
|
||||
80
api/schemas/job.py
Normal file
80
api/schemas/job.py
Normal file
@@ -0,0 +1,80 @@
|
||||
"""TranscodeJob Schemas - GENERATED FILE"""
|
||||
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
from typing import Any, Dict, List, Optional
|
||||
from uuid import UUID
|
||||
|
||||
from .base import BaseSchema
|
||||
|
||||
|
||||
class JobStatus(str, Enum):
    """Job lifecycle states; values match the Django model's status strings."""

    PENDING = "pending"
    PROCESSING = "processing"
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELLED = "cancelled"
|
||||
|
||||
|
||||
class JobCreate(BaseSchema):
    """JobCreate schema.

    Request body for creating a transcode/trim job. Only source_asset_id is
    required; preset_snapshot also has no default and is therefore required —
    NOTE(review): the create_job route only reads preset_id/trim/priority;
    confirm the remaining fields are intentionally accepted here.
    """

    source_asset_id: UUID
    preset_id: Optional[UUID] = None
    preset_snapshot: Dict[str, Any]
    trim_start: Optional[float] = None
    trim_end: Optional[float] = None
    output_filename: str = ""
    output_path: Optional[str] = None
    output_asset_id: Optional[UUID] = None
    progress: float = 0.0
    current_frame: Optional[int] = None
    current_time: Optional[float] = None
    speed: Optional[str] = None
    celery_task_id: Optional[str] = None
    priority: int = 0
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
|
||||
|
||||
class JobUpdate(BaseSchema):
    """JobUpdate schema.

    Partial-update payload: every field is optional and None means
    "leave unchanged".
    """

    source_asset_id: Optional[UUID] = None
    preset_id: Optional[UUID] = None
    preset_snapshot: Optional[Dict[str, Any]] = None
    trim_start: Optional[float] = None
    trim_end: Optional[float] = None
    output_filename: Optional[str] = None
    output_path: Optional[str] = None
    output_asset_id: Optional[UUID] = None
    status: Optional[JobStatus] = None
    progress: Optional[float] = None
    current_frame: Optional[int] = None
    current_time: Optional[float] = None
    speed: Optional[str] = None
    error_message: Optional[str] = None
    celery_task_id: Optional[str] = None
    priority: Optional[int] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
|
||||
|
||||
class JobResponse(BaseSchema):
    """JobResponse schema.

    Serialized TranscodeJob as returned by the API, including live progress
    counters.
    """

    id: UUID
    source_asset_id: UUID
    preset_id: Optional[UUID] = None
    preset_snapshot: Dict[str, Any]
    trim_start: Optional[float] = None
    trim_end: Optional[float] = None
    output_filename: str = ""
    output_path: Optional[str] = None
    output_asset_id: Optional[UUID] = None
    # Fix: default must be the enum member, not the string
    # "JobStatus.PENDING" — that string is not a valid JobStatus value
    # and would leak an invalid status into responses.
    status: JobStatus = JobStatus.PENDING
    progress: float = 0.0
    current_frame: Optional[int] = None
    current_time: Optional[float] = None
    speed: Optional[str] = None
    error_message: Optional[str] = None
    celery_task_id: Optional[str] = None
    priority: int = 0
    created_at: Optional[datetime] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
|
||||
66
api/schemas/preset.py
Normal file
66
api/schemas/preset.py
Normal file
@@ -0,0 +1,66 @@
|
||||
"""TranscodePreset Schemas - GENERATED FILE"""
|
||||
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
from typing import Any, Dict, List, Optional
|
||||
from uuid import UUID
|
||||
|
||||
from .base import BaseSchema
|
||||
|
||||
|
||||
class PresetCreate(BaseSchema):
    """PresetCreate schema.

    Request body for creating a custom transcode preset; codec/container
    fields default to the standard MP4/H.264/AAC combination.
    """

    name: str
    description: str = ""
    is_builtin: bool = False
    container: str = "mp4"
    video_codec: str = "libx264"
    video_bitrate: Optional[str] = None
    video_crf: Optional[int] = None
    video_preset: Optional[str] = None
    resolution: Optional[str] = None
    framerate: Optional[float] = None
    audio_codec: str = "aac"
    audio_bitrate: Optional[str] = None
    audio_channels: Optional[int] = None
    audio_samplerate: Optional[int] = None
    # NOTE(review): no default, so extra_args is required in the payload.
    extra_args: List[str]
|
||||
|
||||
class PresetUpdate(BaseSchema):
    """PresetUpdate schema.

    Partial-update payload: every field is optional and None means
    "leave unchanged".
    """

    name: Optional[str] = None
    description: Optional[str] = None
    is_builtin: Optional[bool] = None
    container: Optional[str] = None
    video_codec: Optional[str] = None
    video_bitrate: Optional[str] = None
    video_crf: Optional[int] = None
    video_preset: Optional[str] = None
    resolution: Optional[str] = None
    framerate: Optional[float] = None
    audio_codec: Optional[str] = None
    audio_bitrate: Optional[str] = None
    audio_channels: Optional[int] = None
    audio_samplerate: Optional[int] = None
    extra_args: Optional[List[str]] = None
|
||||
|
||||
class PresetResponse(BaseSchema):
    """PresetResponse schema.

    Serialized TranscodePreset as returned by the API.
    """

    id: UUID
    name: str
    description: str = ""
    is_builtin: bool = False
    container: str = "mp4"
    video_codec: str = "libx264"
    video_bitrate: Optional[str] = None
    video_crf: Optional[int] = None
    video_preset: Optional[str] = None
    resolution: Optional[str] = None
    framerate: Optional[float] = None
    audio_codec: str = "aac"
    audio_bitrate: Optional[str] = None
    audio_channels: Optional[int] = None
    audio_samplerate: Optional[int] = None
    extra_args: List[str]
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None
|
||||
0
core/__init__.py
Normal file
0
core/__init__.py
Normal file
13
core/ffmpeg/__init__.py
Normal file
13
core/ffmpeg/__init__.py
Normal file
@@ -0,0 +1,13 @@
|
||||
# Public surface of the core.ffmpeg package.
from .capabilities import get_decoders, get_encoders, get_formats
from .probe import ProbeResult, probe_file
from .transcode import TranscodeConfig, transcode

__all__ = [
    "probe_file",
    "ProbeResult",
    "transcode",
    "TranscodeConfig",
    "get_encoders",
    "get_decoders",
    "get_formats",
]
|
||||
145
core/ffmpeg/capabilities.py
Normal file
145
core/ffmpeg/capabilities.py
Normal file
@@ -0,0 +1,145 @@
|
||||
"""
|
||||
FFmpeg capabilities - Discover available codecs and formats using ffmpeg-python.
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass
|
||||
from functools import lru_cache
|
||||
from typing import Any, Dict, List
|
||||
|
||||
import ffmpeg
|
||||
|
||||
|
||||
@dataclass
class Codec:
    """An FFmpeg encoder or decoder."""

    # FFmpeg codec identifier, e.g. "libx264".
    name: str
    # Human-readable label for UI display.
    description: str
    type: str  # 'video' or 'audio'
|
||||
|
||||
|
||||
@dataclass
class Format:
    """An FFmpeg format (muxer/demuxer)."""

    # FFmpeg format identifier, e.g. "mp4".
    name: str
    description: str
    # Whether the format can be read (demuxed) / written (muxed).
    can_demux: bool
    can_mux: bool
|
||||
|
||||
|
||||
@lru_cache(maxsize=1)
def _get_ffmpeg_info() -> Dict[str, Any]:
    """Get FFmpeg capabilities info.

    Returns a static table with keys "video_encoders", "audio_encoders"
    and "formats". NOTE(review): this is a hard-coded list of commonly
    available codecs, not a live query of the installed FFmpeg build —
    entries here may not actually be available at runtime.
    """
    # ffmpeg-python doesn't have a direct way to get codecs/formats
    # but we can use probe on a dummy or parse -codecs output
    # For now, return common codecs that are typically available
    return {
        "video_encoders": [
            {"name": "libx264", "description": "H.264 / AVC"},
            {"name": "libx265", "description": "H.265 / HEVC"},
            {"name": "mpeg4", "description": "MPEG-4 Part 2"},
            {"name": "libvpx", "description": "VP8"},
            {"name": "libvpx-vp9", "description": "VP9"},
            {"name": "h264_nvenc", "description": "NVIDIA NVENC H.264"},
            {"name": "hevc_nvenc", "description": "NVIDIA NVENC H.265"},
            {"name": "h264_vaapi", "description": "VAAPI H.264"},
            {"name": "prores_ks", "description": "Apple ProRes"},
            {"name": "dnxhd", "description": "Avid DNxHD/DNxHR"},
            {"name": "copy", "description": "Stream copy (no encoding)"},
        ],
        "audio_encoders": [
            {"name": "aac", "description": "AAC"},
            {"name": "libmp3lame", "description": "MP3"},
            {"name": "libopus", "description": "Opus"},
            {"name": "libvorbis", "description": "Vorbis"},
            {"name": "pcm_s16le", "description": "PCM signed 16-bit little-endian"},
            {"name": "flac", "description": "FLAC"},
            {"name": "copy", "description": "Stream copy (no encoding)"},
        ],
        "formats": [
            {"name": "mp4", "description": "MP4", "can_demux": True, "can_mux": True},
            {
                "name": "mov",
                "description": "QuickTime / MOV",
                "can_demux": True,
                "can_mux": True,
            },
            {
                "name": "mkv",
                "description": "Matroska",
                "can_demux": True,
                "can_mux": True,
            },
            {"name": "webm", "description": "WebM", "can_demux": True, "can_mux": True},
            {"name": "avi", "description": "AVI", "can_demux": True, "can_mux": True},
            {"name": "flv", "description": "FLV", "can_demux": True, "can_mux": True},
            {
                "name": "ts",
                "description": "MPEG-TS",
                "can_demux": True,
                "can_mux": True,
            },
            # NOTE(review): "ts" and "mpegts" are both listed as MPEG-TS;
            # confirm the duplicate alias is intentional.
            {
                "name": "mpegts",
                "description": "MPEG-TS",
                "can_demux": True,
                "can_mux": True,
            },
            {"name": "hls", "description": "HLS", "can_demux": True, "can_mux": True},
        ],
    }
|
||||
|
||||
|
||||
def get_encoders() -> List[Codec]:
    """Get available encoders (video + audio, video listed first)."""
    info = _get_ffmpeg_info()
    return [
        Codec(name=entry["name"], description=entry["description"], type=kind)
        for kind, key in (("video", "video_encoders"), ("audio", "audio_encoders"))
        for entry in info[key]
    ]
|
||||
|
||||
|
||||
def get_decoders() -> List[Codec]:
    """Get available decoders.

    The static capability table treats every listed encoder as decodable
    too, so this simply reuses the encoder list.
    """
    return get_encoders()
|
||||
|
||||
|
||||
def get_formats() -> List[Format]:
    """Get available formats (muxers/demuxers)."""
    formats = []
    for entry in _get_ffmpeg_info()["formats"]:
        formats.append(
            Format(
                name=entry["name"],
                description=entry["description"],
                can_demux=entry["can_demux"],
                can_mux=entry["can_mux"],
            )
        )
    return formats
|
||||
|
||||
|
||||
def get_video_encoders() -> List[Codec]:
    """Get available video encoders."""
    return [codec for codec in get_encoders() if codec.type == "video"]
|
||||
|
||||
|
||||
def get_audio_encoders() -> List[Codec]:
    """Get available audio encoders."""
    return [codec for codec in get_encoders() if codec.type == "audio"]
|
||||
|
||||
|
||||
def get_muxers() -> List[Format]:
    """Get available output formats (muxers)."""
    return [fmt for fmt in get_formats() if fmt.can_mux]
|
||||
|
||||
|
||||
def get_demuxers() -> List[Format]:
    """Get available input formats (demuxers)."""
    return [fmt for fmt in get_formats() if fmt.can_demux]
|
||||
92
core/ffmpeg/probe.py
Normal file
92
core/ffmpeg/probe.py
Normal file
@@ -0,0 +1,92 @@
|
||||
"""
|
||||
FFmpeg probe module - Extract metadata from media files using ffprobe.
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
import ffmpeg
|
||||
|
||||
|
||||
@dataclass
class ProbeResult:
    """Structured ffprobe result.

    Stream-level fields are Optional because the probed file may have no
    video or no audio stream, or ffprobe may omit a field.
    """

    # Container-level info (duration is None when ffprobe omits or mangles it)
    duration: Optional[float]
    file_size: int

    # Video (taken from the first video stream found, if any)
    video_codec: Optional[str]
    width: Optional[int]
    height: Optional[int]
    framerate: Optional[float]
    video_bitrate: Optional[int]

    # Audio (taken from the first audio stream found, if any)
    audio_codec: Optional[str]
    audio_channels: Optional[int]
    audio_samplerate: Optional[int]
    audio_bitrate: Optional[int]

    # Raw data: the full JSON dict returned by ffmpeg.probe()
    raw: Dict[str, Any]
|
||||
|
||||
|
||||
def probe_file(file_path: str) -> ProbeResult:
    """
    Run ffprobe and return structured result.

    Args:
        file_path: Path to the media file

    Returns:
        ProbeResult with extracted metadata

    Raises:
        ffmpeg.Error: If ffprobe fails
    """
    data = ffmpeg.probe(file_path)

    # First video stream, or an empty dict when the file has no video.
    video_stream = next(
        (s for s in data.get("streams", []) if s.get("codec_type") == "video"), {}
    )

    # First audio stream, or an empty dict when the file has no audio.
    audio_stream = next(
        (s for s in data.get("streams", []) if s.get("codec_type") == "audio"), {}
    )

    format_info = data.get("format", {})

    # Parse framerate (e.g., "30000/1001" -> 29.97); stays None when the
    # field is missing, malformed, or has a zero denominator.
    framerate = None
    if "r_frame_rate" in video_stream:
        try:
            num, den = video_stream["r_frame_rate"].split("/")
            framerate = float(num) / float(den)
        except (ValueError, ZeroDivisionError):
            pass

    # Parse duration (ffprobe reports it as a decimal string of seconds).
    duration = None
    if "duration" in format_info:
        try:
            duration = float(format_info["duration"])
        except ValueError:
            pass

    # The `int(...) or None` pattern below collapses both "missing" and "0"
    # into None, so a zero bitrate is reported as unknown.
    return ProbeResult(
        duration=duration,
        file_size=int(format_info.get("size", 0)),
        video_codec=video_stream.get("codec_name"),
        width=video_stream.get("width"),
        height=video_stream.get("height"),
        framerate=framerate,
        video_bitrate=int(video_stream.get("bit_rate", 0)) or None,
        audio_codec=audio_stream.get("codec_name"),
        audio_channels=audio_stream.get("channels"),
        audio_samplerate=int(audio_stream.get("sample_rate", 0)) or None,
        audio_bitrate=int(audio_stream.get("bit_rate", 0)) or None,
        raw=data,
    )
|
||||
225
core/ffmpeg/transcode.py
Normal file
225
core/ffmpeg/transcode.py
Normal file
@@ -0,0 +1,225 @@
|
||||
"""
|
||||
FFmpeg transcode module - Transcode media files using ffmpeg-python.
|
||||
"""
|
||||
|
||||
import sys
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Dict, List, Optional
|
||||
|
||||
import ffmpeg
|
||||
|
||||
|
||||
@dataclass
class TranscodeConfig:
    """Configuration for a transcode operation.

    Optional fields left as None are simply omitted from the ffmpeg command.
    Setting a codec to "copy" stream-copies and skips that stream's options.
    """

    input_path: str
    output_path: str

    # Video
    video_codec: str = "libx264"
    video_bitrate: Optional[str] = None  # e.g. "2M"; ignored when video_crf is set
    video_crf: Optional[int] = None  # constant rate factor; takes priority over bitrate
    video_preset: Optional[str] = None  # encoder speed preset, e.g. "fast"
    resolution: Optional[str] = None  # "WxH", passed to ffmpeg as -s
    framerate: Optional[float] = None  # passed to ffmpeg as -r

    # Audio
    audio_codec: str = "aac"
    audio_bitrate: Optional[str] = None
    audio_channels: Optional[int] = None  # passed as -ac
    audio_samplerate: Optional[int] = None  # passed as -ar

    # Trimming (seconds relative to the start of the input)
    trim_start: Optional[float] = None
    trim_end: Optional[float] = None

    # Container
    container: str = "mp4"

    # Extra args (key-value pairs), e.g. ["-pix_fmt", "yuv420p"];
    # parsed by parse_extra_args and may override the options above
    extra_args: List[str] = field(default_factory=list)

    @property
    def is_copy(self) -> bool:
        """Check if this is a stream copy (no transcoding)."""
        return self.video_codec == "copy" and self.audio_codec == "copy"
|
||||
|
||||
|
||||
def build_stream(config: TranscodeConfig):
    """
    Build an ffmpeg-python stream from config.

    Returns the stream object ready to run.

    trim_start is applied as input-side seeking (-ss); trim_end becomes an
    output duration (-t) relative to the already-seeked input.
    """
    # Input options
    input_kwargs = {}
    if config.trim_start is not None:
        input_kwargs["ss"] = config.trim_start

    stream = ffmpeg.input(config.input_path, **input_kwargs)

    # Output options
    output_kwargs = {
        "vcodec": config.video_codec,
        "acodec": config.audio_codec,
    }

    # Trimming duration: since -ss already shifted the input's origin, -t is
    # the span between the trim points, not the absolute end timestamp.
    if config.trim_end is not None:
        if config.trim_start is not None:
            output_kwargs["t"] = config.trim_end - config.trim_start
        else:
            output_kwargs["t"] = config.trim_end

    # Video options (skipped entirely when stream-copying video)
    if config.video_codec != "copy":
        # CRF wins over an explicit bitrate when both are set.
        if config.video_crf is not None:
            output_kwargs["crf"] = config.video_crf
        elif config.video_bitrate:
            output_kwargs["video_bitrate"] = config.video_bitrate

        if config.video_preset:
            output_kwargs["preset"] = config.video_preset

        if config.resolution:
            output_kwargs["s"] = config.resolution

        if config.framerate:
            output_kwargs["r"] = config.framerate

    # Audio options (skipped entirely when stream-copying audio)
    if config.audio_codec != "copy":
        if config.audio_bitrate:
            output_kwargs["audio_bitrate"] = config.audio_bitrate
        if config.audio_channels:
            output_kwargs["ac"] = config.audio_channels
        if config.audio_samplerate:
            output_kwargs["ar"] = config.audio_samplerate

    # Parse extra args into kwargs; these can override anything set above.
    extra_kwargs = parse_extra_args(config.extra_args)
    output_kwargs.update(extra_kwargs)

    stream = ffmpeg.output(stream, config.output_path, **output_kwargs)
    # Always overwrite an existing output file (-y).
    stream = ffmpeg.overwrite_output(stream)

    return stream
|
||||
|
||||
|
||||
def parse_extra_args(extra_args: List[str]) -> Dict[str, Any]:
    """
    Parse extra args list into kwargs dict.

    ["-vtag", "xvid", "-pix_fmt", "yuv420p"] -> {"vtag": "xvid", "pix_fmt": "yuv420p"}

    An option not followed by a value (end of list, or the next token starts
    with "-") is treated as a bare flag and mapped to None.
    """
    parsed: Dict[str, Any] = {}
    idx = 0
    total = len(extra_args)
    while idx < total:
        option = extra_args[idx].lstrip("-")
        has_value = idx + 1 < total and not extra_args[idx + 1].startswith("-")
        if has_value:
            parsed[option] = extra_args[idx + 1]
            idx += 2
        else:
            # Flag without value
            parsed[option] = None
            idx += 1
    return parsed
|
||||
|
||||
|
||||
def transcode(
    config: TranscodeConfig,
    duration: Optional[float] = None,
    progress_callback: Optional[Callable[[float, Dict[str, Any]], None]] = None,
) -> bool:
    """
    Transcode a media file.

    Args:
        config: Transcode configuration
        duration: Total duration in seconds (for progress calculation, optional)
        progress_callback: Called with (percent, details_dict) - requires duration

    Returns:
        True if successful

    Raises:
        ffmpeg.Error: If transcoding fails
    """
    # Ensure output directory exists
    Path(config.output_path).parent.mkdir(parents=True, exist_ok=True)

    stream = build_stream(config)

    # Progress tracking needs both a callback and a known non-zero duration;
    # without either, fall back to a plain blocking run.
    if progress_callback and duration:
        # Run with progress tracking using run_async
        return _run_with_progress(stream, config, duration, progress_callback)
    else:
        # Run synchronously; captured output is attached to ffmpeg.Error on failure
        ffmpeg.run(stream, capture_stdout=True, capture_stderr=True)
        return True
|
||||
|
||||
|
||||
def _run_with_progress(
    stream,
    config: TranscodeConfig,
    duration: float,
    progress_callback: Callable[[float, Dict[str, Any]], None],
) -> bool:
    """Run FFmpeg with progress tracking using run_async and stderr parsing.

    Args:
        stream: Prepared ffmpeg-python stream (see build_stream).
        config: Transcode configuration; only the trim window is read here.
        duration: Full source duration in seconds.
        progress_callback: Called with (percent, details_dict) for every
            parsed stats line, plus a final 100% call with "done": True.

    Returns:
        True on success.

    Raises:
        ffmpeg.Error: If the ffmpeg process exits non-zero.
    """
    import re

    # Duration of the portion actually being encoded, so percentages are
    # relative to the trimmed window rather than the whole source file.
    # NOTE(review): an effective duration of 0 would raise ZeroDivisionError
    # below - callers gate on a truthy duration (see transcode()).
    effective_duration = duration
    if config.trim_start and config.trim_end:
        effective_duration = config.trim_end - config.trim_start
    elif config.trim_end:
        effective_duration = config.trim_end
    elif config.trim_start:
        effective_duration = duration - config.trim_start

    # Run async to get a process handle.
    # NOTE(review): stdout is piped but never drained below; if ffmpeg wrote
    # enough to stdout the pipe could fill and stall the process - confirm.
    process = ffmpeg.run_async(stream, pipe_stdout=True, pipe_stderr=True)

    # Parse stderr for progress (time=HH:MM:SS.ms pattern)
    time_pattern = re.compile(r"time=(\d+):(\d+):(\d+)\.(\d+)")

    while True:
        line = process.stderr.readline()
        if not line:
            # EOF: ffmpeg closed stderr, i.e. it is finishing
            break

        line = line.decode("utf-8", errors="ignore")
        match = time_pattern.search(line)
        if match:
            hours = int(match.group(1))
            minutes = int(match.group(2))
            seconds = int(match.group(3))
            ms = int(match.group(4))

            # assumes the fractional field is centiseconds (two digits), as
            # in ffmpeg's default stats line - hence the division by 100
            current_time = hours * 3600 + minutes * 60 + seconds + ms / 100
            percent = min(100.0, (current_time / effective_duration) * 100)

            progress_callback(
                percent,
                {
                    "time": current_time,
                    "percent": percent,
                },
            )

    # Wait for completion
    process.wait()

    if process.returncode != 0:
        raise ffmpeg.Error(
            "ffmpeg", stdout=process.stdout.read(), stderr=process.stderr.read()
        )

    # Final callback so consumers always observe a terminal 100% update.
    progress_callback(
        100.0, {"time": effective_duration, "percent": 100.0, "done": True}
    )

    return True
|
||||
21
ctrl/.env.template
Normal file
21
ctrl/.env.template
Normal file
@@ -0,0 +1,21 @@
|
||||
# MPR Control Environment
|
||||
# Copy to .env and adjust values
|
||||
|
||||
# Database
|
||||
POSTGRES_DB=mpr
|
||||
POSTGRES_USER=mpr_user
|
||||
POSTGRES_PASSWORD=mpr_pass
|
||||
|
||||
# Ports (non-default values chosen to avoid conflicts with local services)
|
||||
POSTGRES_PORT=5433
|
||||
REDIS_PORT=6380
|
||||
DJANGO_PORT=8701
|
||||
FASTAPI_PORT=8702
|
||||
TIMELINE_PORT=5173
|
||||
|
||||
# Worker
|
||||
MPR_EXECUTOR=local
|
||||
|
||||
# Remote deployment (optional)
|
||||
# SERVER=user@host
|
||||
# REMOTE_PATH=~/mpr
|
||||
14
ctrl/Dockerfile
Normal file
14
ctrl/Dockerfile
Normal file
@@ -0,0 +1,14 @@
|
||||
FROM python:3.11-slim

# ffmpeg binary is required by the transcoding/probing modules (core/ffmpeg).
RUN apt-get update && apt-get install -y \
    ffmpeg \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Install Python dependencies first so the layer is cached until
# requirements.txt changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

# Default command; overridden per-service in ctrl/docker-compose.yml.
CMD ["python", "manage.py", "runserver", "0.0.0.0:8000"]
|
||||
61
ctrl/README.md
Normal file
61
ctrl/README.md
Normal file
@@ -0,0 +1,61 @@
|
||||
# MPR Control
|
||||
|
||||
Scripts for running and deploying MPR.
|
||||
|
||||
## Setup
|
||||
|
||||
1. Add to `/etc/hosts`:
|
||||
```
|
||||
127.0.0.1 mpr.local.ar
|
||||
```
|
||||
|
||||
2. Copy environment template:
|
||||
```bash
|
||||
cp ctrl/.env.template ctrl/.env
|
||||
```
|
||||
|
||||
3. Start the stack:
|
||||
```bash
|
||||
./ctrl/run.sh
|
||||
```
|
||||
|
||||
## URLs
|
||||
|
||||
- http://mpr.local.ar/admin - Django Admin
|
||||
- http://mpr.local.ar/api/docs - FastAPI Swagger
|
||||
- http://mpr.local.ar/ui - Timeline UI
|
||||
|
||||
## Commands
|
||||
|
||||
```bash
|
||||
# Start all services
|
||||
./ctrl/run.sh
|
||||
|
||||
# Start in detached mode
|
||||
./ctrl/run.sh -d
|
||||
|
||||
# Rebuild and start
|
||||
./ctrl/run.sh --build
|
||||
|
||||
# Stop all
|
||||
./ctrl/run.sh down
|
||||
|
||||
# View logs
|
||||
./ctrl/run.sh logs -f
|
||||
|
||||
# Deploy to remote (configure SERVER/REMOTE_PATH in .env)
|
||||
./ctrl/deploy.sh
|
||||
./ctrl/deploy.sh --restart
|
||||
./ctrl/deploy.sh --dry-run
|
||||
```
|
||||
|
||||
## Ports
|
||||
|
||||
| Service | Internal | External |
|
||||
|------------|----------|----------|
|
||||
| nginx | 80 | 80 |
|
||||
| Django | 8701 | 8701 |
|
||||
| FastAPI | 8702 | 8702 |
|
||||
| Timeline | 5173 | 5173 |
|
||||
| PostgreSQL | 5432 | 5433 |
|
||||
| Redis | 6379 | 6380 |
|
||||
76
ctrl/deploy.sh
Executable file
76
ctrl/deploy.sh
Executable file
@@ -0,0 +1,76 @@
|
||||
#!/bin/bash
# Deploy MPR to remote server via rsync
# Uses project .gitignore for excludes
#
# Usage: ./ctrl/deploy.sh [--restart] [--dry-run]
#
# Examples:
#   ./ctrl/deploy.sh             # Sync files only
#   ./ctrl/deploy.sh --restart   # Sync and restart services
#   ./ctrl/deploy.sh --dry-run   # Preview sync

set -e

# Resolve paths relative to this script so it works from any CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# Load SERVER/REMOTE_PATH; a missing .env is tolerated here and reported below.
source "$SCRIPT_DIR/.env" 2>/dev/null || true

# ANSI colors for status output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

if [ -z "$SERVER" ] || [ -z "$REMOTE_PATH" ]; then
    echo -e "${RED}Error: SERVER and REMOTE_PATH must be set in ctrl/.env${NC}"
    echo "Example:"
    echo "  SERVER=user@host"
    echo "  REMOTE_PATH=~/mpr"
    exit 1
fi

RESTART=false
DRY_RUN=""

# Parse command-line flags.
while [ $# -gt 0 ]; do
    case "$1" in
        --restart)
            RESTART=true
            shift
            ;;
        --dry-run)
            DRY_RUN="--dry-run"
            shift
            ;;
        *)
            echo "Unknown option: $1"
            exit 1
            ;;
    esac
done

echo -e "${GREEN}=== Deploying MPR to $SERVER:$REMOTE_PATH ===${NC}"

# Sync files using .gitignore for excludes; secrets (ctrl/.env) and media
# payloads never leave the local machine.
echo -e "${YELLOW}Syncing files...${NC}"
rsync -avz --delete $DRY_RUN \
    --filter=':- .gitignore' \
    --exclude='.git' \
    --exclude='media/*' \
    --exclude='ctrl/.env' \
    "$PROJECT_ROOT/" "$SERVER:$REMOTE_PATH/"

if [ -n "$DRY_RUN" ]; then
    echo -e "${YELLOW}Dry run - no changes made${NC}"
    exit 0
fi

# Copy env template if .env doesn't exist on remote
ssh "$SERVER" "[ -f $REMOTE_PATH/ctrl/.env ] || cp $REMOTE_PATH/ctrl/.env.template $REMOTE_PATH/ctrl/.env"

if [ "$RESTART" = true ]; then
    echo -e "${YELLOW}Restarting services...${NC}"
    ssh "$SERVER" "cd $REMOTE_PATH/ctrl && docker compose down && docker compose up -d --build"
fi

echo -e "${GREEN}Done!${NC}"
|
||||
128
ctrl/docker-compose.yml
Normal file
128
ctrl/docker-compose.yml
Normal file
@@ -0,0 +1,128 @@
|
||||
x-common-env: &common-env
|
||||
DATABASE_URL: postgresql://mpr_user:mpr_pass@postgres:5432/mpr
|
||||
REDIS_URL: redis://redis:6379/0
|
||||
DJANGO_SETTINGS_MODULE: mpr.settings
|
||||
DEBUG: 1
|
||||
|
||||
x-healthcheck-defaults: &healthcheck-defaults
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
services:
|
||||
# =============================================================================
|
||||
# Infrastructure
|
||||
# =============================================================================
|
||||
|
||||
postgres:
|
||||
image: postgres:15-alpine
|
||||
environment:
|
||||
POSTGRES_DB: mpr
|
||||
POSTGRES_USER: mpr_user
|
||||
POSTGRES_PASSWORD: mpr_pass
|
||||
ports:
|
||||
- "5433:5432"
|
||||
volumes:
|
||||
- postgres-data:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
<<: *healthcheck-defaults
|
||||
test: ["CMD-SHELL", "pg_isready -U mpr_user -d mpr"]
|
||||
|
||||
redis:
|
||||
image: redis:7-alpine
|
||||
ports:
|
||||
- "6380:6379"
|
||||
volumes:
|
||||
- redis-data:/data
|
||||
healthcheck:
|
||||
<<: *healthcheck-defaults
|
||||
test: ["CMD", "redis-cli", "ping"]
|
||||
|
||||
nginx:
|
||||
image: nginx:alpine
|
||||
ports:
|
||||
- "80:80"
|
||||
volumes:
|
||||
- ./nginx.conf:/etc/nginx/nginx.conf:ro
|
||||
- ../media:/app/media:ro
|
||||
depends_on:
|
||||
- django
|
||||
- fastapi
|
||||
- timeline
|
||||
|
||||
# =============================================================================
|
||||
# Application Services
|
||||
# =============================================================================
|
||||
|
||||
django:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: ctrl/Dockerfile
|
||||
command: >
|
||||
bash -c "python manage.py migrate &&
|
||||
python manage.py loadbuiltins || true &&
|
||||
python manage.py runserver 0.0.0.0:8701"
|
||||
ports:
|
||||
- "8701:8701"
|
||||
environment:
|
||||
<<: *common-env
|
||||
volumes:
|
||||
- ..:/app
|
||||
- ../media:/app/media
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_healthy
|
||||
|
||||
fastapi:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: ctrl/Dockerfile
|
||||
command: uvicorn api.main:app --host 0.0.0.0 --port 8702 --reload
|
||||
ports:
|
||||
- "8702:8702"
|
||||
environment:
|
||||
<<: *common-env
|
||||
volumes:
|
||||
- ..:/app
|
||||
- ../media:/app/media
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_healthy
|
||||
|
||||
celery:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: ctrl/Dockerfile
|
||||
command: celery -A mpr worker -l info -Q default -c 2
|
||||
environment:
|
||||
<<: *common-env
|
||||
MPR_EXECUTOR: local
|
||||
volumes:
|
||||
- ..:/app
|
||||
- ../media:/app/media
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
redis:
|
||||
condition: service_healthy
|
||||
|
||||
timeline:
|
||||
build:
|
||||
context: ../ui/timeline
|
||||
dockerfile: Dockerfile
|
||||
ports:
|
||||
- "5173:5173"
|
||||
volumes:
|
||||
- ../ui/timeline/src:/app/src
|
||||
|
||||
volumes:
|
||||
postgres-data:
|
||||
redis-data:
|
||||
|
||||
networks:
|
||||
default:
|
||||
name: mpr
|
||||
84
ctrl/nginx.conf
Normal file
84
ctrl/nginx.conf
Normal file
@@ -0,0 +1,84 @@
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
access_log /var/log/nginx/access.log;
|
||||
error_log /var/log/nginx/error.log;
|
||||
|
||||
upstream django {
|
||||
server django:8701;
|
||||
}
|
||||
|
||||
upstream fastapi {
|
||||
server fastapi:8702;
|
||||
}
|
||||
|
||||
upstream timeline {
|
||||
server timeline:5173;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 80;
|
||||
server_name mpr.local.ar;
|
||||
|
||||
# Django Admin
|
||||
location /admin {
|
||||
proxy_pass http://django;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
|
||||
# Django static files
|
||||
location /static {
|
||||
proxy_pass http://django;
|
||||
proxy_set_header Host $host;
|
||||
}
|
||||
|
||||
# FastAPI
|
||||
location /api {
|
||||
proxy_pass http://fastapi;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
|
||||
# Timeline UI
|
||||
location /ui {
|
||||
proxy_pass http://timeline;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
}
|
||||
|
||||
# Vite HMR websocket
|
||||
location /@vite {
|
||||
proxy_pass http://timeline;
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
proxy_set_header Host $host;
|
||||
}
|
||||
|
||||
# Media files
|
||||
location /media {
|
||||
alias /app/media;
|
||||
autoindex on;
|
||||
}
|
||||
|
||||
# Default to Timeline UI
|
||||
location / {
|
||||
proxy_pass http://timeline;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "upgrade";
|
||||
}
|
||||
}
|
||||
}
|
||||
33
ctrl/run.sh
Executable file
33
ctrl/run.sh
Executable file
@@ -0,0 +1,33 @@
|
||||
#!/bin/bash
# Run MPR stack locally
# Usage: ./ctrl/run.sh [docker-compose args]
#
# Examples:
#   ./ctrl/run.sh            # Start all services
#   ./ctrl/run.sh --build    # Rebuild and start
#   ./ctrl/run.sh -d         # Detached mode
#   ./ctrl/run.sh down       # Stop all

set -e

# Run docker compose from this directory so it picks up ./docker-compose.yml.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

# Load env; `set -a` exports every sourced variable so docker compose can
# interpolate them.
if [ -f .env ]; then
    set -a
    source .env
    set +a
else
    echo "Warning: .env not found, using defaults"
    echo "Copy .env.template to .env to customize"
fi

# Check /etc/hosts for the hostname nginx serves (see ctrl/nginx.conf).
if ! grep -q "mpr.local.ar" /etc/hosts 2>/dev/null; then
    echo "Note: Add to /etc/hosts:"
    echo "  127.0.0.1 mpr.local.ar"
    echo ""
fi

# Forward all arguments straight to docker compose.
docker compose "$@"
|
||||
65
grpc/protos/worker.proto
Normal file
65
grpc/protos/worker.proto
Normal file
@@ -0,0 +1,65 @@
|
||||
// MPR Worker Service - GENERATED FILE
|
||||
//
|
||||
// Do not edit directly. Modify schema/models/grpc.py and run:
|
||||
// python schema/generate.py --proto
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package mpr.worker;
|
||||
|
||||
service WorkerService {
|
||||
rpc SubmitJob(JobRequest) returns (JobResponse);
|
||||
rpc StreamProgress(ProgressRequest) returns (stream ProgressUpdate);
|
||||
rpc CancelJob(CancelRequest) returns (CancelResponse);
|
||||
rpc GetWorkerStatus(Empty) returns (WorkerStatus);
|
||||
}
|
||||
|
||||
message JobRequest {
|
||||
string job_id = 1;
|
||||
string source_path = 2;
|
||||
string output_path = 3;
|
||||
string preset_json = 4;
|
||||
optional float trim_start = 5;
|
||||
optional float trim_end = 6;
|
||||
}
|
||||
|
||||
message JobResponse {
|
||||
string job_id = 1;
|
||||
bool accepted = 2;
|
||||
string message = 3;
|
||||
}
|
||||
|
||||
message ProgressRequest {
|
||||
string job_id = 1;
|
||||
}
|
||||
|
||||
message ProgressUpdate {
|
||||
string job_id = 1;
|
||||
int32 progress = 2;
|
||||
int32 current_frame = 3;
|
||||
float current_time = 4;
|
||||
float speed = 5;
|
||||
string status = 6;
|
||||
optional string error = 7;
|
||||
}
|
||||
|
||||
message CancelRequest {
|
||||
string job_id = 1;
|
||||
}
|
||||
|
||||
message CancelResponse {
|
||||
string job_id = 1;
|
||||
bool cancelled = 2;
|
||||
string message = 3;
|
||||
}
|
||||
|
||||
message WorkerStatus {
|
||||
bool available = 1;
|
||||
int32 active_jobs = 2;
|
||||
repeated string supported_codecs = 3;
|
||||
bool gpu_available = 4;
|
||||
}
|
||||
|
||||
message Empty {
|
||||
// Empty
|
||||
}
|
||||
22
manage.py
Executable file
22
manage.py
Executable file
@@ -0,0 +1,22 @@
|
||||
#!/usr/bin/env python
|
||||
"""Django's command-line utility for administrative tasks."""
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mpr.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
3
mpr/__init__.py
Normal file
3
mpr/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from .celery import app as celery_app
|
||||
|
||||
__all__ = ("celery_app",)
|
||||
16
mpr/asgi.py
Normal file
16
mpr/asgi.py
Normal file
@@ -0,0 +1,16 @@
|
||||
"""
|
||||
ASGI config for mpr project.
|
||||
|
||||
It exposes the ASGI callable as a module-level variable named ``application``.
|
||||
|
||||
For more information on this file, see
|
||||
https://docs.djangoproject.com/en/6.0/howto/deployment/asgi/
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
from django.core.asgi import get_asgi_application
|
||||
|
||||
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mpr.settings')
|
||||
|
||||
application = get_asgi_application()
|
||||
9
mpr/celery.py
Normal file
9
mpr/celery.py
Normal file
@@ -0,0 +1,9 @@
|
||||
"""Celery application for the mpr project."""
import os

from celery import Celery

# Make Django settings importable before Celery configures itself.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mpr.settings")

app = Celery("mpr")
# Read all CELERY_*-prefixed settings from Django's settings module.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Discover tasks modules in installed Django apps.
app.autodiscover_tasks()
|
||||
0
mpr/media_assets/__init__.py
Normal file
0
mpr/media_assets/__init__.py
Normal file
174
mpr/media_assets/admin.py
Normal file
174
mpr/media_assets/admin.py
Normal file
@@ -0,0 +1,174 @@
|
||||
from django.contrib import admin
|
||||
|
||||
from .models import MediaAsset, TranscodeJob, TranscodePreset
|
||||
|
||||
|
||||
@admin.register(MediaAsset)
class MediaAssetAdmin(admin.ModelAdmin):
    """Admin for MediaAsset with human-readable duration/resolution columns."""

    list_display = [
        "filename",
        "status",
        "duration_display",
        "resolution",
        "created_at",
    ]
    list_filter = ["status", "video_codec", "audio_codec"]
    search_fields = ["filename", "file_path", "comments"]
    # Machine-written fields stay read-only in the admin form.
    readonly_fields = ["id", "created_at", "updated_at", "properties"]

    fieldsets = [
        (None, {"fields": ["id", "filename", "file_path", "status", "error_message"]}),
        (
            "Media Info",
            {
                "fields": [
                    "file_size",
                    "duration",
                    "video_codec",
                    "audio_codec",
                    "width",
                    "height",
                    "framerate",
                    "bitrate",
                ]
            },
        ),
        ("Annotations", {"fields": ["comments", "tags"]}),
        (
            "Metadata",
            {
                "classes": ["collapse"],
                "fields": ["properties", "created_at", "updated_at"],
            },
        ),
    ]

    def duration_display(self, obj):
        """Format duration (seconds) as H:MM:SS or M:SS; '-' when unset."""
        if obj.duration:
            mins, secs = divmod(int(obj.duration), 60)
            hours, mins = divmod(mins, 60)
            if hours:
                return f"{hours}:{mins:02d}:{secs:02d}"
            return f"{mins}:{secs:02d}"
        return "-"

    duration_display.short_description = "Duration"

    def resolution(self, obj):
        """Return 'WIDTHxHEIGHT', or '-' when dimensions are unknown."""
        if obj.width and obj.height:
            return f"{obj.width}x{obj.height}"
        return "-"
|
||||
|
||||
|
||||
@admin.register(TranscodePreset)
class TranscodePresetAdmin(admin.ModelAdmin):
    """Admin for TranscodePreset, grouping video/audio settings into fieldsets."""

    list_display = ["name", "container", "video_codec", "audio_codec", "is_builtin"]
    list_filter = ["is_builtin", "container", "video_codec"]
    search_fields = ["name", "description"]
    readonly_fields = ["id", "created_at", "updated_at"]

    fieldsets = [
        (None, {"fields": ["id", "name", "description", "is_builtin"]}),
        ("Output", {"fields": ["container"]}),
        (
            "Video",
            {
                "fields": [
                    "video_codec",
                    "video_bitrate",
                    "video_crf",
                    "video_preset",
                    "resolution",
                    "framerate",
                ]
            },
        ),
        (
            "Audio",
            {
                "fields": [
                    "audio_codec",
                    "audio_bitrate",
                    "audio_channels",
                    "audio_samplerate",
                ]
            },
        ),
        (
            "Advanced",
            {
                "classes": ["collapse"],
                "fields": ["extra_args", "created_at", "updated_at"],
            },
        ),
    ]
|
||||
|
||||
|
||||
@admin.register(TranscodeJob)
class TranscodeJobAdmin(admin.ModelAdmin):
    """Admin for TranscodeJob; worker-managed progress fields are read-only."""

    list_display = [
        "id_short",
        "source_asset",
        "preset",
        "status",
        "progress_display",
        "created_at",
    ]
    list_filter = ["status", "preset"]
    search_fields = ["source_asset__filename", "output_filename"]
    # Progress and worker bookkeeping are written by the worker, not by admins.
    readonly_fields = [
        "id",
        "created_at",
        "started_at",
        "completed_at",
        "progress",
        "current_frame",
        "current_time",
        "speed",
        "celery_task_id",
        "preset_snapshot",
    ]
    # Raw-ID widgets avoid loading every asset/preset into a dropdown.
    raw_id_fields = ["source_asset", "preset", "output_asset"]

    fieldsets = [
        (None, {"fields": ["id", "source_asset", "status", "error_message"]}),
        (
            "Configuration",
            {
                "fields": [
                    "preset",
                    "preset_snapshot",
                    "trim_start",
                    "trim_end",
                    "priority",
                ]
            },
        ),
        ("Output", {"fields": ["output_filename", "output_path", "output_asset"]}),
        (
            "Progress",
            {"fields": ["progress", "current_frame", "current_time", "speed"]},
        ),
        (
            "Worker",
            {
                "classes": ["collapse"],
                "fields": [
                    "celery_task_id",
                    "created_at",
                    "started_at",
                    "completed_at",
                ],
            },
        ),
    ]

    def id_short(self, obj):
        """First 8 characters of the job UUID, for a compact list column."""
        return str(obj.id)[:8]

    id_short.short_description = "ID"

    def progress_display(self, obj):
        """Progress as a percentage with one decimal place."""
        return f"{obj.progress:.1f}%"

    progress_display.short_description = "Progress"
|
||||
7
mpr/media_assets/apps.py
Normal file
7
mpr/media_assets/apps.py
Normal file
@@ -0,0 +1,7 @@
|
||||
from django.apps import AppConfig
|
||||
|
||||
|
||||
class MediaAssetsConfig(AppConfig):
    """Django app configuration for the media_assets app."""

    default_auto_field = "django.db.models.BigAutoField"
    name = "mpr.media_assets"
    verbose_name = "Media Assets"
||||
0
mpr/media_assets/management/__init__.py
Normal file
0
mpr/media_assets/management/__init__.py
Normal file
0
mpr/media_assets/management/commands/__init__.py
Normal file
0
mpr/media_assets/management/commands/__init__.py
Normal file
54
mpr/media_assets/management/commands/loadbuiltins.py
Normal file
54
mpr/media_assets/management/commands/loadbuiltins.py
Normal file
@@ -0,0 +1,54 @@
|
||||
# Import builtin presets from schema
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from django.core.management.base import BaseCommand
|
||||
|
||||
from mpr.media_assets.models import TranscodePreset
|
||||
|
||||
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent.parent.parent))
|
||||
from schema.models import BUILTIN_PRESETS
|
||||
|
||||
|
||||
class Command(BaseCommand):
    """Upsert the builtin transcode presets defined in schema.models."""

    help = "Load builtin transcode presets"

    def handle(self, *args, **options):
        """Create or update one TranscodePreset per BUILTIN_PRESETS entry."""
        created_count = 0
        updated_count = 0

        for preset_data in BUILTIN_PRESETS:
            name = preset_data["name"]
            # Everything except the unique name; the .get() fallbacks mirror
            # the TranscodePreset field defaults.
            defaults = {
                "description": preset_data.get("description", ""),
                "is_builtin": True,
                "container": preset_data.get("container", "mp4"),
                "video_codec": preset_data.get("video_codec", "libx264"),
                "video_bitrate": preset_data.get("video_bitrate"),
                "video_crf": preset_data.get("video_crf"),
                "video_preset": preset_data.get("video_preset"),
                "resolution": preset_data.get("resolution"),
                "framerate": preset_data.get("framerate"),
                "audio_codec": preset_data.get("audio_codec", "aac"),
                "audio_bitrate": preset_data.get("audio_bitrate"),
                "audio_channels": preset_data.get("audio_channels"),
                "audio_samplerate": preset_data.get("audio_samplerate"),
                "extra_args": preset_data.get("extra_args", []),
            }

            # update_or_create keyed on the unique name makes re-runs idempotent.
            preset, created = TranscodePreset.objects.update_or_create(
                name=name, defaults=defaults
            )

            if created:
                created_count += 1
                self.stdout.write(self.style.SUCCESS(f"Created: {name}"))
            else:
                updated_count += 1
                self.stdout.write(f"Updated: {name}")

        self.stdout.write(
            self.style.SUCCESS(
                f"Done: {created_count} created, {updated_count} updated"
            )
        )
|
||||
98
mpr/media_assets/migrations/0001_initial.py
Normal file
98
mpr/media_assets/migrations/0001_initial.py
Normal file
@@ -0,0 +1,98 @@
|
||||
# Generated by Django 6.0.1 on 2026-02-01 15:13
|
||||
|
||||
import django.db.models.deletion
|
||||
import uuid
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
    """Initial schema for the media_assets app.

    Creates TranscodePreset, MediaAsset and TranscodeJob.

    NOTE(review): this migration declares real ForeignKeys on TranscodeJob
    (source_asset, preset, output_asset), while the generated
    mpr/media_assets/models.py declares plain ``*_id`` UUIDFields, and several
    field options (max_length, defaults, uniqueness of preset name) also
    differ — confirm which representation is authoritative before the next
    makemigrations run.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='TranscodePreset',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=100, unique=True)),
                ('description', models.TextField(blank=True, default='')),
                ('is_builtin', models.BooleanField(default=False)),
                ('container', models.CharField(default='mp4', max_length=20)),
                ('video_codec', models.CharField(default='libx264', max_length=50)),
                ('video_bitrate', models.CharField(blank=True, max_length=20, null=True)),
                ('video_crf', models.IntegerField(blank=True, null=True)),
                ('video_preset', models.CharField(blank=True, max_length=20, null=True)),
                ('resolution', models.CharField(blank=True, max_length=20, null=True)),
                ('framerate', models.FloatField(blank=True, null=True)),
                ('audio_codec', models.CharField(default='aac', max_length=50)),
                ('audio_bitrate', models.CharField(blank=True, max_length=20, null=True)),
                ('audio_channels', models.IntegerField(blank=True, null=True)),
                ('audio_samplerate', models.IntegerField(blank=True, null=True)),
                ('extra_args', models.JSONField(blank=True, default=list)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='MediaAsset',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('filename', models.CharField(max_length=500)),
                ('file_path', models.CharField(max_length=1000)),
                ('status', models.CharField(choices=[('pending', 'Pending Probe'), ('ready', 'Ready'), ('error', 'Error')], default='pending', max_length=20)),
                ('error_message', models.TextField(blank=True, null=True)),
                ('file_size', models.BigIntegerField(blank=True, null=True)),
                ('duration', models.FloatField(blank=True, null=True)),
                ('video_codec', models.CharField(blank=True, max_length=50, null=True)),
                ('audio_codec', models.CharField(blank=True, max_length=50, null=True)),
                ('width', models.IntegerField(blank=True, null=True)),
                ('height', models.IntegerField(blank=True, null=True)),
                ('framerate', models.FloatField(blank=True, null=True)),
                ('bitrate', models.BigIntegerField(blank=True, null=True)),
                ('properties', models.JSONField(blank=True, default=dict)),
                ('comments', models.TextField(blank=True, default='')),
                ('tags', models.JSONField(blank=True, default=list)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'ordering': ['-created_at'],
                'indexes': [models.Index(fields=['status'], name='media_asset_status_9ea2f2_idx'), models.Index(fields=['created_at'], name='media_asset_created_368039_idx')],
            },
        ),
        migrations.CreateModel(
            name='TranscodeJob',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('preset_snapshot', models.JSONField(blank=True, default=dict)),
                ('trim_start', models.FloatField(blank=True, null=True)),
                ('trim_end', models.FloatField(blank=True, null=True)),
                ('output_filename', models.CharField(max_length=500)),
                ('output_path', models.CharField(blank=True, max_length=1000, null=True)),
                ('status', models.CharField(choices=[('pending', 'Pending'), ('processing', 'Processing'), ('completed', 'Completed'), ('failed', 'Failed'), ('cancelled', 'Cancelled')], default='pending', max_length=20)),
                ('progress', models.FloatField(default=0.0)),
                ('current_frame', models.IntegerField(blank=True, null=True)),
                ('current_time', models.FloatField(blank=True, null=True)),
                ('speed', models.CharField(blank=True, max_length=20, null=True)),
                ('error_message', models.TextField(blank=True, null=True)),
                ('celery_task_id', models.CharField(blank=True, max_length=100, null=True)),
                ('priority', models.IntegerField(default=0)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('started_at', models.DateTimeField(blank=True, null=True)),
                ('completed_at', models.DateTimeField(blank=True, null=True)),
                ('output_asset', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='source_jobs', to='media_assets.mediaasset')),
                ('source_asset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='transcode_jobs', to='media_assets.mediaasset')),
                ('preset', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='jobs', to='media_assets.transcodepreset')),
            ],
            options={
                'ordering': ['priority', 'created_at'],
                'indexes': [models.Index(fields=['status', 'priority'], name='media_asset_status_e6ac18_idx'), models.Index(fields=['created_at'], name='media_asset_created_ba3a46_idx'), models.Index(fields=['celery_task_id'], name='media_asset_celery__81a88e_idx')],
            },
        ),
    ]
|
||||
0
mpr/media_assets/migrations/__init__.py
Normal file
0
mpr/media_assets/migrations/__init__.py
Normal file
110
mpr/media_assets/models.py
Normal file
110
mpr/media_assets/models.py
Normal file
@@ -0,0 +1,110 @@
|
||||
"""
|
||||
Django ORM Models - GENERATED FILE
|
||||
|
||||
Do not edit directly. Modify schema/models/*.py and run:
|
||||
python schema/generate.py --django
|
||||
"""
|
||||
|
||||
import uuid
|
||||
from django.db import models
|
||||
|
||||
class MediaAsset(models.Model):
    """A video/audio file registered in the system."""

    class Status(models.TextChoices):
        # Asset lifecycle; migration 0001 labels PENDING "Pending Probe",
        # so READY/ERROR presumably reflect the outcome of a media probe.
        PENDING = "pending", "Pending"
        READY = "ready", "Ready"
        ERROR = "error", "Error"

    # Identity and on-disk location.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    filename = models.CharField(max_length=500)
    file_path = models.CharField(max_length=1000)

    # Processing state plus the error text when status == ERROR.
    status = models.CharField(max_length=20, choices=Status.choices, default=Status.PENDING)
    error_message = models.TextField(blank=True, default='')

    # Technical metadata; all nullable, presumably filled in after probing —
    # TODO confirm against the worker that populates these.
    file_size = models.BigIntegerField(null=True, blank=True)
    duration = models.FloatField(null=True, blank=True, default=None)
    video_codec = models.CharField(max_length=255, null=True, blank=True)
    audio_codec = models.CharField(max_length=255, null=True, blank=True)
    width = models.IntegerField(null=True, blank=True, default=None)
    height = models.IntegerField(null=True, blank=True, default=None)
    framerate = models.FloatField(null=True, blank=True, default=None)
    bitrate = models.BigIntegerField(null=True, blank=True)
    properties = models.JSONField(default=dict, blank=True)

    # Free-form user metadata.
    comments = models.TextField(blank=True, default='')
    tags = models.JSONField(default=list, blank=True)

    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    # NOTE(review): migration 0001 differs from this generated model
    # (codec max_length 50 vs 255; error_message null=True vs default='') —
    # the generator output and the migration should be reconciled.
    class Meta:
        ordering = ["-created_at"]

    def __str__(self):
        return self.filename
|
||||
|
||||
|
||||
class TranscodePreset(models.Model):
    """A reusable transcoding configuration (like Handbrake presets)."""

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # NOTE(review): migration 0001 declares name with unique=True and orders
    # presets by 'name'; this generated model has neither — reconcile.
    name = models.CharField(max_length=255)
    description = models.TextField(blank=True, default='')
    # True for presets shipped with the schema (loaded by the management
    # command), False for user-created ones.
    is_builtin = models.BooleanField(default=False)

    # Output container / video settings.  Values look like ffmpeg-style
    # identifiers (the builtin loader falls back to 'mp4' / 'libx264') —
    # TODO confirm with the worker.
    container = models.CharField(max_length=255)
    video_codec = models.CharField(max_length=255)
    video_bitrate = models.CharField(max_length=255, null=True, blank=True)
    video_crf = models.IntegerField(null=True, blank=True, default=None)
    video_preset = models.CharField(max_length=255, null=True, blank=True)
    resolution = models.CharField(max_length=255, null=True, blank=True)
    framerate = models.FloatField(null=True, blank=True, default=None)

    # Audio settings.
    audio_codec = models.CharField(max_length=255)
    audio_bitrate = models.CharField(max_length=255, null=True, blank=True)
    audio_channels = models.IntegerField(null=True, blank=True, default=None)
    audio_samplerate = models.IntegerField(null=True, blank=True, default=None)

    # Extra arguments stored as a JSON list of strings.
    extra_args = models.JSONField(default=list, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ["-created_at"]

    def __str__(self):
        return self.name
|
||||
|
||||
|
||||
class TranscodeJob(models.Model):
    """A transcoding or trimming job in the queue."""

    class Status(models.TextChoices):
        PENDING = "pending", "Pending"
        PROCESSING = "processing", "Processing"
        COMPLETED = "completed", "Completed"
        FAILED = "failed", "Failed"
        CANCELLED = "cancelled", "Cancelled"

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)

    # Related objects held as raw UUIDs.
    # NOTE(review): migration 0001 models these as real ForeignKeys
    # (source_asset CASCADE, preset/output_asset SET_NULL) — this generated
    # model and the migration disagree; confirm which is intended.
    source_asset_id = models.UUIDField()
    preset_id = models.UUIDField(null=True, blank=True)
    # Snapshot of the preset settings — presumably captured at job creation
    # so later preset edits don't affect queued jobs; verify with the caller.
    preset_snapshot = models.JSONField(default=dict, blank=True)

    # Optional trim window; presumably seconds — TODO confirm units.
    trim_start = models.FloatField(null=True, blank=True, default=None)
    trim_end = models.FloatField(null=True, blank=True, default=None)

    # Output target and the asset created from it once the job completes.
    output_filename = models.CharField(max_length=500)
    output_path = models.CharField(max_length=1000, null=True, blank=True)
    output_asset_id = models.UUIDField(null=True, blank=True)

    # Progress reporting fields, updated while the job runs.
    status = models.CharField(max_length=20, choices=Status.choices, default=Status.PENDING)
    progress = models.FloatField(default=0.0)
    current_frame = models.IntegerField(null=True, blank=True, default=None)
    current_time = models.FloatField(null=True, blank=True, default=None)
    speed = models.CharField(max_length=255, null=True, blank=True)
    error_message = models.TextField(blank=True, default='')

    # Scheduling metadata.
    celery_task_id = models.CharField(max_length=255, null=True, blank=True)
    priority = models.IntegerField(default=0)

    created_at = models.DateTimeField(auto_now_add=True)
    started_at = models.DateTimeField(null=True, blank=True)
    completed_at = models.DateTimeField(null=True, blank=True)

    # NOTE(review): migration 0001 orders jobs by ['priority', 'created_at'];
    # this generated model orders by '-created_at' — reconcile.
    class Meta:
        ordering = ["-created_at"]

    def __str__(self):
        return str(self.id)
|
||||
|
||||
3
mpr/media_assets/tests.py
Normal file
3
mpr/media_assets/tests.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from django.test import TestCase
|
||||
|
||||
# Create your tests here.
|
||||
3
mpr/media_assets/views.py
Normal file
3
mpr/media_assets/views.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from django.shortcuts import render
|
||||
|
||||
# Create your views here.
|
||||
103
mpr/settings.py
Normal file
103
mpr/settings.py
Normal file
@@ -0,0 +1,103 @@
|
||||
"""
|
||||
Django settings for mpr project.
|
||||
"""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
import environ
|
||||
|
||||
BASE_DIR = Path(__file__).resolve().parent.parent

# Typed environment access with development fallbacks (django-environ).
env = environ.Env(
    DEBUG=(bool, False),
    SECRET_KEY=(str, "dev-secret-key-change-in-production"),
)

# Load variables from the project-root .env file.
environ.Env.read_env(BASE_DIR / ".env")

# NOTE(review): the SECRET_KEY fallback and ALLOWED_HOSTS=["*"] are
# development-only values; both must be locked down before deployment.
SECRET_KEY = env("SECRET_KEY")
DEBUG = env("DEBUG")
ALLOWED_HOSTS = ["*"]

INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "mpr.media_assets",
]

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]

ROOT_URLCONF = "mpr.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]

WSGI_APPLICATION = "mpr.wsgi.application"

# Database: Postgres when DATABASE_URL points at one (the docker/.env
# default), otherwise a local SQLite file for development.
DATABASE_URL = env("DATABASE_URL", default="sqlite:///db.sqlite3")

if DATABASE_URL.startswith("postgresql"):
    DATABASES = {"default": env.db("DATABASE_URL")}
else:
    DATABASES = {
        "default": {
            "ENGINE": "django.db.backends.sqlite3",
            "NAME": BASE_DIR / "db.sqlite3",
        }
    }

AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]

# Internationalization.
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True

# Static and user-uploaded files.
STATIC_URL = "static/"
STATIC_ROOT = BASE_DIR / "staticfiles"

MEDIA_URL = "media/"
MEDIA_ROOT = BASE_DIR / "media"

DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"

# Celery: Redis serves as both broker and result backend; JSON-only payloads.
REDIS_URL = env("REDIS_URL", default="redis://localhost:6379/0")
CELERY_BROKER_URL = REDIS_URL
CELERY_RESULT_BACKEND = REDIS_URL
CELERY_ACCEPT_CONTENT = ["json"]
CELERY_TASK_SERIALIZER = "json"
CELERY_RESULT_SERIALIZER = "json"
|
||||
22
mpr/urls.py
Normal file
22
mpr/urls.py
Normal file
@@ -0,0 +1,22 @@
|
||||
"""
|
||||
URL configuration for mpr project.
|
||||
|
||||
The `urlpatterns` list routes URLs to views. For more information please see:
|
||||
https://docs.djangoproject.com/en/6.0/topics/http/urls/
|
||||
Examples:
|
||||
Function views
|
||||
1. Add an import: from my_app import views
|
||||
2. Add a URL to urlpatterns: path('', views.home, name='home')
|
||||
Class-based views
|
||||
1. Add an import: from other_app.views import Home
|
||||
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
|
||||
Including another URLconf
|
||||
1. Import the include() function: from django.urls import include, path
|
||||
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
|
||||
"""
|
||||
from django.contrib import admin
|
||||
from django.urls import path
|
||||
|
||||
# Project-level routes; only the Django admin is wired up so far.
urlpatterns = [
    path('admin/', admin.site.urls),
]
|
||||
16
mpr/wsgi.py
Normal file
16
mpr/wsgi.py
Normal file
@@ -0,0 +1,16 @@
|
||||
"""
|
||||
WSGI config for mpr project.
|
||||
|
||||
It exposes the WSGI callable as a module-level variable named ``application``.
|
||||
|
||||
For more information on this file, see
|
||||
https://docs.djangoproject.com/en/6.0/howto/deployment/wsgi/
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
from django.core.wsgi import get_wsgi_application
|
||||
|
||||
# Point Django at the project settings before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mpr.settings')

# Module-level WSGI callable picked up by application servers.
application = get_wsgi_application()
|
||||
22
requirements.txt
Normal file
22
requirements.txt
Normal file
@@ -0,0 +1,22 @@
|
||||
# Django
|
||||
Django>=4.2,<7.0
|
||||
django-environ>=0.11.2
|
||||
psycopg2-binary>=2.9.9
|
||||
|
||||
# FastAPI
|
||||
fastapi>=0.109.0
|
||||
uvicorn[standard]>=0.27.0
|
||||
pydantic>=2.5.0
|
||||
|
||||
# Celery
|
||||
celery[redis]>=5.3.0
|
||||
redis>=5.0.0
|
||||
|
||||
# FFmpeg
|
||||
ffmpeg-python>=0.2.0
|
||||
|
||||
# Testing
|
||||
pytest>=7.4.0
|
||||
pytest-django>=4.7.0
|
||||
pytest-asyncio>=0.23.0
|
||||
httpx>=0.26.0
|
||||
@@ -11,32 +11,38 @@ These definitions are used to generate:
|
||||
Run `python schema/generate.py` to regenerate all targets.
|
||||
"""
|
||||
|
||||
from .grpc import (
|
||||
from .models import (
|
||||
BUILTIN_PRESETS,
|
||||
# For generator
|
||||
DATACLASSES,
|
||||
ENUMS,
|
||||
GRPC_MESSAGES,
|
||||
# gRPC
|
||||
GRPC_SERVICE,
|
||||
# Enums
|
||||
AssetStatus,
|
||||
CancelRequest,
|
||||
CancelResponse,
|
||||
Empty,
|
||||
JobRequest,
|
||||
JobResponse,
|
||||
JobStatus,
|
||||
# Models
|
||||
MediaAsset,
|
||||
ProgressRequest,
|
||||
ProgressUpdate,
|
||||
TranscodeJob,
|
||||
TranscodePreset,
|
||||
WorkerStatus,
|
||||
)
|
||||
from .jobs import JobStatus, TranscodeJob
|
||||
from .media import AssetStatus, MediaAsset
|
||||
from .presets import BUILTIN_PRESETS, TranscodePreset
|
||||
|
||||
__all__ = [
|
||||
# Media
|
||||
"MediaAsset",
|
||||
"AssetStatus",
|
||||
# Presets
|
||||
"TranscodePreset",
|
||||
"BUILTIN_PRESETS",
|
||||
# Jobs
|
||||
"TranscodeJob",
|
||||
"AssetStatus",
|
||||
"JobStatus",
|
||||
# gRPC
|
||||
"GRPC_SERVICE",
|
||||
"JobRequest",
|
||||
"JobResponse",
|
||||
"ProgressRequest",
|
||||
@@ -45,5 +51,8 @@ __all__ = [
|
||||
"CancelResponse",
|
||||
"WorkerStatus",
|
||||
"Empty",
|
||||
"GRPC_SERVICE",
|
||||
"DATACLASSES",
|
||||
"ENUMS",
|
||||
"GRPC_MESSAGES",
|
||||
"BUILTIN_PRESETS",
|
||||
]
|
||||
|
||||
702
schema/generate.py
Executable file
702
schema/generate.py
Executable file
@@ -0,0 +1,702 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
MPR Model Generator
|
||||
|
||||
Generates framework-specific models from schema/models/:
|
||||
- Django ORM models -> mpr/media_assets/models.py
|
||||
- Pydantic schemas -> api/schemas/*.py
|
||||
- TypeScript types -> ui/timeline/src/types.ts
|
||||
- Protobuf -> grpc/protos/worker.proto
|
||||
|
||||
Usage:
|
||||
python schema/generate.py [--django] [--pydantic] [--typescript] [--proto] [--all]
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import dataclasses as dc
|
||||
import subprocess
|
||||
import sys
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Union, get_args, get_origin, get_type_hints
|
||||
|
||||
PROJECT_ROOT = Path(__file__).parent.parent
|
||||
sys.path.insert(0, str(PROJECT_ROOT))
|
||||
|
||||
from schema.models import DATACLASSES, ENUMS, GRPC_MESSAGES, GRPC_SERVICE
|
||||
|
||||
# =============================================================================
|
||||
# Type Dispatch Tables
|
||||
# =============================================================================
|
||||
|
||||
# Python type -> Django field template.  String keys ("UUID", "text", ...)
# are sentinel names selected by resolve_django_type(); placeholders
# ({opts}, {max_length}, {default}) are filled there.
DJANGO_TYPES: dict[Any, str] = {
    str: "models.CharField(max_length={max_length}{opts})",
    int: "models.IntegerField({opts})",
    float: "models.FloatField({opts})",
    bool: "models.BooleanField(default={default})",
    "UUID": "models.UUIDField({opts})",
    "datetime": "models.DateTimeField({opts})",
    "dict": "models.JSONField(default=dict, blank=True)",
    "list": "models.JSONField(default=list, blank=True)",
    "text": "models.TextField(blank=True, default='')",
    "bigint": "models.BigIntegerField({opts})",
    "enum": "models.CharField(max_length=20, choices=Status.choices{opts})",
}

# Field names with a fixed, conventional Django representation that
# overrides normal type resolution.
DJANGO_SPECIAL: dict[str, str] = {
    "id": "models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)",
    "created_at": "models.DateTimeField(auto_now_add=True)",
    "updated_at": "models.DateTimeField(auto_now=True)",
}

# Python type -> Pydantic annotation string.  Each resolver receives the
# already-Optional-unwrapped base type hint.
PYDANTIC_RESOLVERS: dict[Any, Callable[[Any], str]] = {
    str: lambda _: "str",
    int: lambda _: "int",
    float: lambda _: "float",
    bool: lambda _: "bool",
    "UUID": lambda _: "UUID",
    "datetime": lambda _: "datetime",
    "dict": lambda _: "Dict[str, Any]",
    "list": lambda base: f"List[{get_list_inner(base)}]",
    "enum": lambda base: base.__name__,
}

# Python type -> TypeScript type string.
TS_RESOLVERS: dict[Any, Callable[[Any], str]] = {
    str: lambda _: "string",
    int: lambda _: "number",
    float: lambda _: "number",
    bool: lambda _: "boolean",
    "UUID": lambda _: "string",
    "datetime": lambda _: "string",
    "dict": lambda _: "Record<string, unknown>",
    # Lists: resolve the element type; unparameterized lists become string[].
    "list": lambda base: f"{TS_RESOLVERS.get(get_args(base)[0], lambda _: 'string')(None)}[]"
    if get_args(base)
    else "string[]",
    "enum": lambda base: base.__name__,
}

# Python type -> protobuf scalar.  Types absent here (UUID, datetime, dict,
# enum) are handled elsewhere or unsupported for proto generation.
PROTO_RESOLVERS: dict[Any, Callable[[Any], str]] = {
    str: lambda _: "string",
    int: lambda _: "int32",
    float: lambda _: "float",
    bool: lambda _: "bool",
    "list": lambda base: f"repeated {PROTO_RESOLVERS.get(get_args(base)[0], lambda _: 'string')(None)}"
    if get_args(base)
    else "repeated string",
}
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Type Helpers
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def unwrap_optional(type_hint: Any) -> tuple[Any, bool]:
|
||||
"""Unwrap Optional[T] -> (T, True) or (T, False) if not optional."""
|
||||
origin = get_origin(type_hint)
|
||||
if origin is Union:
|
||||
args = [a for a in get_args(type_hint) if a is not type(None)]
|
||||
return (args[0] if args else str, True)
|
||||
return (type_hint, False)
|
||||
|
||||
|
||||
def get_origin_name(type_hint: Any) -> str | None:
|
||||
"""Get origin type name: 'dict', 'list', or None."""
|
||||
origin = get_origin(type_hint)
|
||||
if origin is dict:
|
||||
return "dict"
|
||||
if origin is list:
|
||||
return "list"
|
||||
return None
|
||||
|
||||
|
||||
def get_type_name(type_hint: Any) -> str | None:
|
||||
"""Get type name for special types like UUID, datetime."""
|
||||
if hasattr(type_hint, "__name__"):
|
||||
return type_hint.__name__
|
||||
return None
|
||||
|
||||
|
||||
def get_list_inner(type_hint: Any) -> str:
    """Spell the element type of List[T] for scalar T; anything else -> 'str'."""
    scalar_names = {str: "str", int: "int", float: "float", bool: "bool"}
    args = get_args(type_hint)
    if not args:
        return "str"
    return scalar_names.get(args[0], "str")
|
||||
|
||||
|
||||
def get_field_default(field: dc.Field) -> Any:
    """Explicit default of a dataclass field, or ``dc.MISSING`` when absent.

    ``default_factory`` values are deliberately not resolved here.
    """
    return dc.MISSING if field.default is dc.MISSING else field.default
|
||||
|
||||
|
||||
def format_opts(optional: bool, extra: list[str] | None = None) -> str:
|
||||
"""Format field options string."""
|
||||
parts = []
|
||||
if optional:
|
||||
parts.append("null=True, blank=True")
|
||||
if extra:
|
||||
parts.extend(extra)
|
||||
return ", ".join(parts)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Django Generator
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def resolve_django_type(name: str, type_hint: Any, default: Any) -> str:
    """Resolve a dataclass field to its Django field declaration string.

    Args:
        name: field name; drives special-casing (id/created_at/updated_at,
            path/filename max_length, text/bigint heuristics).
        type_hint: annotation as returned by get_type_hints().
        default: the dataclass default value, or ``dc.MISSING``.

    Returns:
        The right-hand side of the generated model field assignment.
    """
    # Fixed-form fields (pk, timestamps) override everything else.
    if name in DJANGO_SPECIAL:
        return DJANGO_SPECIAL[name]

    base, optional = unwrap_optional(type_hint)
    origin = get_origin_name(base)
    type_name = get_type_name(base)
    opts = format_opts(optional)

    # Container types map straight to JSONField.
    if origin == "dict":
        return DJANGO_TYPES["dict"]
    if origin == "list":
        return DJANGO_TYPES["list"]

    # UUID / datetime by type name.
    if type_name == "UUID":
        return DJANGO_TYPES["UUID"].format(opts=opts)
    if type_name == "datetime":
        return DJANGO_TYPES["datetime"].format(opts=opts)

    # Enum -> CharField with a generated TextChoices class named Status.
    if isinstance(base, type) and issubclass(base, Enum):
        extra = []
        if optional:
            extra.append("null=True, blank=True")
        if default is not dc.MISSING and isinstance(default, Enum):
            extra.append(f"default=Status.{default.name}")
        return DJANGO_TYPES["enum"].format(
            opts=", " + ", ".join(extra) if extra else ""
        )

    # Long-form text fields, selected by naming convention.
    if base is str and any(x in name for x in ("message", "comments", "description")):
        return DJANGO_TYPES["text"]

    # Columns known to exceed 32-bit integer range.
    if base is int and name in ("file_size", "bitrate"):
        return DJANGO_TYPES["bigint"].format(opts=opts)

    # Basic types.
    if base is str:
        max_length = 1000 if "path" in name else 500 if "filename" in name else 255
        extra = [opts] if opts else []
        # Fix: carry string defaults into the generated model so it agrees
        # with the existing migration (e.g. container default 'mp4',
        # video_codec 'libx264'); previously str defaults were dropped.
        if default is not dc.MISSING and isinstance(default, str):
            extra.append(f'default="{default}"')
        return DJANGO_TYPES[str].format(
            max_length=max_length, opts=", " + ", ".join(extra) if extra else ""
        )

    if base is int:
        extra = [opts] if opts else []
        if default is not dc.MISSING and not callable(default):
            extra.append(f"default={default}")
        return DJANGO_TYPES[int].format(opts=", ".join(extra))

    if base is float:
        extra = [opts] if opts else []
        if default is not dc.MISSING and not callable(default):
            extra.append(f"default={default}")
        return DJANGO_TYPES[float].format(opts=", ".join(extra))

    if base is bool:
        default_val = default if default is not dc.MISSING else False
        return DJANGO_TYPES[bool].format(default=default_val)

    # Fallback: treat unknown types as short strings.
    return DJANGO_TYPES[str].format(max_length=255, opts=", " + opts if opts else "")
|
||||
|
||||
|
||||
def generate_django_model(cls: type) -> list[str]:
    """Generate Django model lines from dataclass.

    Emits: class header with the first line of the dataclass docstring, an
    inner ``Status`` TextChoices class when an enum-typed field exists, one
    generated field per annotation, then Meta ordering and ``__str__``.
    """
    lines = [
        f"class {cls.__name__}(models.Model):",
        f'    """{(cls.__doc__ or cls.__name__).strip().split(chr(10))[0]}"""',
        "",
    ]

    hints = get_type_hints(cls)
    fields = {f.name: f for f in dc.fields(cls)}

    # Add Status inner class for enum fields.
    # NOTE(review): only the FIRST enum-typed field is handled (break below),
    # and the inner class is always named Status regardless of the enum's
    # real name — a dataclass with two enum fields would generate wrong code.
    for type_hint in hints.values():
        base, _ = unwrap_optional(type_hint)
        if isinstance(base, type) and issubclass(base, Enum):
            lines.append("    class Status(models.TextChoices):")
            for member in base:
                # Human label derived from the member name, e.g. FOO_BAR -> "Foo Bar".
                label = member.name.replace("_", " ").title()
                lines.append(f'        {member.name} = "{member.value}", "{label}"')
            lines.append("")
            break

    # Fields (private attributes are skipped).
    for name, type_hint in hints.items():
        if name.startswith("_"):
            continue
        field = fields.get(name)
        default = get_field_default(field) if field else dc.MISSING
        django_field = resolve_django_type(name, type_hint, default)
        lines.append(f"    {name} = {django_field}")

    # Meta and __str__
    lines.extend(
        [
            "",
            "    class Meta:",
            '        ordering = ["-created_at"]',
            "",
            "    def __str__(self):",
        ]
    )

    # __str__ prefers the most human-readable identifier available.
    if "filename" in hints:
        lines.append("        return self.filename")
    elif "name" in hints:
        lines.append("        return self.name")
    else:
        lines.append("        return str(self.id)")

    return lines
|
||||
|
||||
|
||||
def generate_django() -> str:
    """Render the complete generated Django models module as one string."""
    out: list[str] = [
        '"""',
        "Django ORM Models - GENERATED FILE",
        "",
        "Do not edit directly. Modify schema/models/*.py and run:",
        "    python schema/generate.py --django",
        '"""',
        "",
        "import uuid",
        "from django.db import models",
        "",
    ]

    # One model per schema dataclass, separated by two blank lines.
    for cls in DATACLASSES:
        out += generate_django_model(cls)
        out += ["", ""]

    return "\n".join(out)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Pydantic Generator
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def resolve_pydantic_type(type_hint: Any) -> str:
    """Resolve a Python annotation to its Pydantic field type string."""
    base, optional = unwrap_optional(type_hint)

    # Try resolvers in priority order: container origin, special type name,
    # plain base type, then the enum fallback.
    resolver = PYDANTIC_RESOLVERS.get(get_origin_name(base))
    if resolver is None:
        resolver = PYDANTIC_RESOLVERS.get(get_type_name(base))
    if resolver is None:
        resolver = PYDANTIC_RESOLVERS.get(base)
    if resolver is None and isinstance(base, type) and issubclass(base, Enum):
        resolver = PYDANTIC_RESOLVERS["enum"]

    inner = resolver(base) if resolver else "str"
    return f"Optional[{inner}]" if optional else inner
|
||||
|
||||
|
||||
def generate_pydantic_schema(cls: type, suffix: str) -> list[str]:
    """Generate the lines of one Pydantic schema class for *cls*.

    *suffix* is one of "Create", "Update" or "Response" and controls which
    fields are skipped and whether every field becomes Optional.
    """
    short_name = cls.__name__.replace("Transcode", "").replace("Media", "")
    class_name = f"{short_name}{suffix}"

    # Server-managed fields excluded per schema flavor.
    skip_fields = {
        "Create": {"id", "created_at", "updated_at", "status", "error_message"},
        "Update": {"id", "created_at", "updated_at"},
        "Response": set(),
    }
    skipped = skip_fields.get(suffix, set())

    lines = [
        f"class {class_name}(BaseSchema):",
        f'    """{class_name} schema."""',
    ]

    dc_fields = {f.name: f for f in dc.fields(cls)}

    for field_name, type_hint in get_type_hints(cls).items():
        if field_name.startswith("_") or field_name in skipped:
            continue

        py_type = resolve_pydantic_type(type_hint)
        # Update schemas accept partial payloads: every field is optional.
        if suffix == "Update" and "Optional" not in py_type:
            py_type = f"Optional[{py_type}]"

        field = dc_fields.get(field_name)
        default = get_field_default(field) if field else dc.MISSING

        # Build the "= default" right-hand side, if any.
        if "Optional" in py_type:
            rhs = " = None"
        elif default is dc.MISSING or callable(default):
            rhs = ""
        elif isinstance(default, str):
            rhs = f' = "{default}"'
        elif isinstance(default, Enum):
            rhs = f" = {default.__class__.__name__}.{default.name}"
        else:
            rhs = f" = {default!r}"

        lines.append(f"    {field_name}: {py_type}{rhs}")

    return lines
|
||||
|
||||
|
||||
def generate_pydantic() -> dict[str, str]:
    """Generate all Pydantic schema files.

    Returns a mapping of filename -> file contents for api/schemas/:
    ``base.py``, one module per model (Create/Update/Response triplet plus
    any enum), and ``__init__.py`` re-exporting everything.
    """
    files = {}

    # base.py: shared BaseModel configured for ORM attribute access.
    files["base.py"] = "\n".join(
        [
            '"""Pydantic Base Schema - GENERATED FILE"""',
            "",
            "from pydantic import BaseModel, ConfigDict",
            "",
            "",
            "class BaseSchema(BaseModel):",
            '    """Base schema with ORM mode."""',
            "    model_config = ConfigDict(from_attributes=True)",
            "",
        ]
    )

    # Schema files per model
    for cls in DATACLASSES:
        module_name = cls.__name__.replace("Transcode", "").replace("Media", "").lower()

        lines = [
            f'"""{cls.__name__} Schemas - GENERATED FILE"""',
            "",
            "from datetime import datetime",
            "from enum import Enum",
            "from typing import Any, Dict, List, Optional",
            "from uuid import UUID",
            "",
            "from .base import BaseSchema",
            "",
        ]

        # Add enum if present.
        # NOTE(review): like generate_django_model, only the first enum-typed
        # field is emitted (break below).
        hints = get_type_hints(cls)
        for type_hint in hints.values():
            base, _ = unwrap_optional(type_hint)
            if isinstance(base, type) and issubclass(base, Enum):
                lines.extend(
                    [
                        "",
                        f"class {base.__name__}(str, Enum):",
                    ]
                )
                for m in base:
                    lines.append(f'    {m.name} = "{m.value}"')
                lines.append("")
                break

        # Create/Update/Response schema triplet.
        for suffix in ["Create", "Update", "Response"]:
            lines.append("")
            lines.extend(generate_pydantic_schema(cls, suffix))

        lines.append("")
        files[f"{module_name}.py"] = "\n".join(lines)

    # __init__.py: re-export BaseSchema, all schema classes and enums.
    imports = ["from .base import BaseSchema"]
    all_exports = ['"BaseSchema"']

    for cls in DATACLASSES:
        name = cls.__name__.replace("Transcode", "").replace("Media", "")
        module = name.lower()
        imports.append(
            f"from .{module} import {name}Create, {name}Update, {name}Response"
        )
        all_exports.extend([f'"{name}Create"', f'"{name}Update"', f'"{name}Response"'])

        # Add enum export
        hints = get_type_hints(cls)
        for type_hint in hints.values():
            base, _ = unwrap_optional(type_hint)
            if isinstance(base, type) and issubclass(base, Enum):
                imports.append(f"from .{module} import {base.__name__}")
                all_exports.append(f'"{base.__name__}"')
                break

    files["__init__.py"] = "\n".join(
        [
            '"""API Schemas - GENERATED FILE"""',
            "",
            *imports,
            "",
            f"__all__ = [{', '.join(all_exports)}]",
            "",
        ]
    )

    return files
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# TypeScript Generator
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def resolve_ts_type(type_hint: Any) -> str:
    """Map a Python type annotation onto its TypeScript spelling.

    Optional annotations get a ``| null`` suffix appended; any type with
    no registered resolver falls back to ``string``.
    """
    inner, is_nullable = unwrap_optional(type_hint)

    # Try each lookup key in priority order: generic origin, type name,
    # the type object itself, and finally the shared enum resolver.
    resolver = TS_RESOLVERS.get(get_origin_name(inner))
    if resolver is None:
        resolver = TS_RESOLVERS.get(get_type_name(inner))
    if resolver is None:
        resolver = TS_RESOLVERS.get(inner)
    if resolver is None and isinstance(inner, type) and issubclass(inner, Enum):
        resolver = TS_RESOLVERS["enum"]

    ts_type = "string" if resolver is None else resolver(inner)
    if is_nullable:
        return f"{ts_type} | null"
    return ts_type
|
||||
|
||||
|
||||
def generate_ts_interface(cls: type) -> list[str]:
    """Render one dataclass as a TypeScript ``export interface`` block.

    Fields whose names begin with an underscore are treated as private
    and omitted from the interface.
    """
    body = [
        f"  {field}: {resolve_ts_type(hint)};"
        for field, hint in get_type_hints(cls).items()
        if not field.startswith("_")
    ]
    return [f"export interface {cls.__name__} {{", *body, "}"]
|
||||
|
||||
|
||||
def generate_typescript() -> str:
    """Render the complete generated TypeScript module as a single string."""
    out = [
        "/**",
        " * MPR TypeScript Types - GENERATED FILE",
        " *",
        " * Do not edit directly. Modify schema/models/*.py and run:",
        " * python schema/generate.py --typescript",
        " */",
        "",
    ]

    # Each enum becomes a string-literal union type.
    for enum_cls in ENUMS:
        members = " | ".join(f'"{member.value}"' for member in enum_cls)
        out.append(f"export type {enum_cls.__name__} = {members};")
        out.append("")

    # One interface per exported dataclass, blank-line separated.
    for model in DATACLASSES:
        out.extend(generate_ts_interface(model))
        out.append("")

    return "\n".join(out)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Proto Generator
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def resolve_proto_type(type_hint: Any) -> tuple[str, bool]:
    """Translate a Python annotation into a proto3 field type.

    Returns ``(proto_type, is_optional)``.  Repeated fields are never
    flagged optional (proto3 forbids ``optional repeated``), and any
    type without a registered resolver defaults to ``string``.
    """
    inner, is_optional = unwrap_optional(type_hint)

    # Look up resolver by generic origin first, then by the type itself.
    resolver = PROTO_RESOLVERS.get(get_origin_name(inner)) or PROTO_RESOLVERS.get(inner)
    if resolver is None:
        return "string", is_optional

    proto_type = resolver(inner)
    if proto_type.startswith("repeated"):
        return proto_type, False
    return proto_type, is_optional
|
||||
|
||||
|
||||
def generate_proto_message(cls: type) -> list[str]:
    """Render a dataclass as a proto3 ``message`` block.

    Field numbers are assigned sequentially from 1 in declaration order.
    """
    out = [f"message {cls.__name__} {{"]
    fields = get_type_hints(cls)

    if not fields:
        out.append("  // Empty")
    else:
        field_number = 0
        for field_name, hint in fields.items():
            field_number += 1
            proto_type, is_optional = resolve_proto_type(hint)
            # The "optional" keyword only applies to non-repeated fields.
            if is_optional and not proto_type.startswith("repeated"):
                out.append(f"  optional {proto_type} {field_name} = {field_number};")
            else:
                out.append(f"  {proto_type} {field_name} = {field_number};")

    out.append("}")
    return out
|
||||
|
||||
|
||||
def generate_proto() -> str:
    """Render the complete proto3 definition for the worker service."""
    out = [
        "// MPR Worker Service - GENERATED FILE",
        "//",
        "// Do not edit directly. Modify schema/models/grpc.py and run:",
        "// python schema/generate.py --proto",
        "",
        'syntax = "proto3";',
        "",
        f"package {GRPC_SERVICE['package']};",
        "",
        f"service {GRPC_SERVICE['name']} {{",
    ]

    # One rpc line per declared service method; server-streaming methods
    # get a "stream" prefix on the response type.
    for method in GRPC_SERVICE["methods"]:
        request = method["request"].__name__
        response = method["response"].__name__
        if method["stream_response"]:
            response = f"stream {response}"
        out.append(f"  rpc {method['name']}({request}) returns ({response});")

    out.append("}")
    out.append("")

    # Message definitions follow the service block, blank-line separated.
    for message_cls in GRPC_MESSAGES:
        out.extend(generate_proto_message(message_cls))
        out.append("")

    return "\n".join(out)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Writers
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def write_file(path: Path, content: str) -> None:
    """Write *content* to *path*, creating parent directories as needed.

    Writes UTF-8 explicitly: the previous bare ``write_text(content)``
    used the locale's preferred encoding, which varies across platforms
    and could corrupt non-ASCII characters in generated files.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(content, encoding="utf-8")
    # Echo the emitted path so the CLI shows what was generated.
    print(f" {path}")
|
||||
|
||||
|
||||
def write_django(output_dir: Path) -> None:
    """Emit the generated Django models module under *output_dir*."""
    target = output_dir / "mpr" / "media_assets" / "models.py"
    write_file(target, generate_django())
|
||||
|
||||
|
||||
def write_pydantic(output_dir: Path) -> None:
    """Emit every generated Pydantic schema module under *output_dir*."""
    target_dir = output_dir / "api" / "schemas"
    for name, source in generate_pydantic().items():
        write_file(target_dir / name, source)
|
||||
|
||||
|
||||
def write_typescript(output_dir: Path) -> None:
    """Emit the generated TypeScript type definitions under *output_dir*."""
    target = output_dir / "ui" / "timeline" / "src" / "types.ts"
    write_file(target, generate_typescript())
|
||||
|
||||
|
||||
def write_proto(output_dir: Path) -> None:
    """Write the proto definition and compile Python gRPC stubs.

    Stub generation is best-effort: if ``grpc_tools.protoc`` is missing
    or the compile fails, a warning is printed instead of raising so the
    other generators still run.  Fix: the previous version discarded the
    compiler's stderr, so a genuine .proto syntax error looked identical
    to a missing grpcio-tools install — the stderr is now surfaced.
    """
    proto_dir = output_dir / "grpc" / "protos"
    proto_path = proto_dir / "worker.proto"
    write_file(proto_path, generate_proto())

    # Generate Python stubs via grpcio-tools' bundled protoc.
    grpc_dir = output_dir / "grpc"
    result = subprocess.run(
        [
            sys.executable,
            "-m",
            "grpc_tools.protoc",
            f"-I{proto_dir}",
            f"--python_out={grpc_dir}",
            f"--grpc_python_out={grpc_dir}",
            str(proto_path),
        ],
        capture_output=True,
        text=True,
    )

    if result.returncode == 0:
        print(f" {grpc_dir}/worker_pb2.py")
        print(f" {grpc_dir}/worker_pb2_grpc.py")
    else:
        print(" Warning: grpc_tools failed - pip install grpcio-tools")
        # Show the actual compiler diagnostics so failures are debuggable.
        if result.stderr:
            print(result.stderr.strip())
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Main
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def main() -> None:
    """CLI entry point: parse generator flags and run the selected writers."""
    parser = argparse.ArgumentParser(description="Generate from schema")
    parser.add_argument("--django", action="store_true")
    parser.add_argument("--pydantic", action="store_true")
    parser.add_argument("--typescript", action="store_true")
    parser.add_argument("--proto", action="store_true")
    parser.add_argument("--all", action="store_true")
    parser.add_argument("--output", type=Path, default=PROJECT_ROOT)
    args = parser.parse_args()

    # No explicit target flag means "generate everything".
    selected = args.django or args.pydantic or args.typescript or args.proto
    if not (selected or args.all):
        args.all = True

    print(f"Generating to {args.output}\n")

    targets: list[tuple[bool, str, Callable]] = [
        (args.django or args.all, "Django", write_django),
        (args.pydantic or args.all, "Pydantic", write_pydantic),
        (args.typescript or args.all, "TypeScript", write_typescript),
        (args.proto or args.all, "Proto", write_proto),
    ]

    for enabled, label, writer in targets:
        if not enabled:
            continue
        print(f"{label}:")
        writer(args.output)
        print()

    print("Done!")
|
||||
|
||||
|
||||
# Allow running as a script: python schema/generate.py [--django|--pydantic|...]
if __name__ == "__main__":
    main()
|
||||
64
schema/models/__init__.py
Normal file
64
schema/models/__init__.py
Normal file
@@ -0,0 +1,64 @@
|
||||
"""
|
||||
MPR Schema Models
|
||||
|
||||
This module exports all dataclasses, enums, and constants that the generator
|
||||
should process. Add new models here to have them included in generation.
|
||||
"""
|
||||
|
||||
from .grpc import (
|
||||
GRPC_SERVICE,
|
||||
CancelRequest,
|
||||
CancelResponse,
|
||||
Empty,
|
||||
JobRequest,
|
||||
JobResponse,
|
||||
ProgressRequest,
|
||||
ProgressUpdate,
|
||||
WorkerStatus,
|
||||
)
|
||||
from .jobs import JobStatus, TranscodeJob
|
||||
from .media import AssetStatus, MediaAsset
|
||||
from .presets import BUILTIN_PRESETS, TranscodePreset
|
||||
|
||||
# Core domain models - generates Django, Pydantic, TypeScript
|
||||
DATACLASSES = [MediaAsset, TranscodePreset, TranscodeJob]
|
||||
|
||||
# Status enums - included in generated code
|
||||
ENUMS = [AssetStatus, JobStatus]
|
||||
|
||||
# gRPC messages - generates Proto
|
||||
GRPC_MESSAGES = [
|
||||
JobRequest,
|
||||
JobResponse,
|
||||
ProgressRequest,
|
||||
ProgressUpdate,
|
||||
CancelRequest,
|
||||
CancelResponse,
|
||||
WorkerStatus,
|
||||
Empty,
|
||||
]
|
||||
|
||||
__all__ = [
|
||||
# Models
|
||||
"MediaAsset",
|
||||
"TranscodePreset",
|
||||
"TranscodeJob",
|
||||
# Enums
|
||||
"AssetStatus",
|
||||
"JobStatus",
|
||||
# gRPC
|
||||
"GRPC_SERVICE",
|
||||
"JobRequest",
|
||||
"JobResponse",
|
||||
"ProgressRequest",
|
||||
"ProgressUpdate",
|
||||
"CancelRequest",
|
||||
"CancelResponse",
|
||||
"WorkerStatus",
|
||||
"Empty",
|
||||
# For generator
|
||||
"DATACLASSES",
|
||||
"ENUMS",
|
||||
"GRPC_MESSAGES",
|
||||
"BUILTIN_PRESETS",
|
||||
]
|
||||
74
ui/timeline/src/types.ts
Normal file
74
ui/timeline/src/types.ts
Normal file
@@ -0,0 +1,74 @@
|
||||
/**
 * MPR TypeScript Types - GENERATED FILE
 *
 * Do not edit directly. Modify schema/models/*.py and run:
 * python schema/generate.py --typescript
 */

// Status enums rendered as string-literal union types.
export type AssetStatus = "pending" | "ready" | "error";
export type JobStatus = "pending" | "processing" | "completed" | "failed" | "cancelled";

// An ingested media file plus its probed technical metadata.
// Fields typed `... | null` mirror Optional[...] on the Python model.
export interface MediaAsset {
  id: string;
  filename: string;
  file_path: string;
  status: AssetStatus;
  error_message: string | null;
  file_size: number | null;
  duration: number | null;
  video_codec: string | null;
  audio_codec: string | null;
  width: number | null;
  height: number | null;
  framerate: number | null;
  bitrate: number | null;
  properties: Record<string, unknown>;
  comments: string;
  tags: string[];
  created_at: string | null;
  updated_at: string | null;
}

// A reusable transcode configuration (container + codec settings).
export interface TranscodePreset {
  id: string;
  name: string;
  description: string;
  is_builtin: boolean;
  container: string;
  video_codec: string;
  video_bitrate: string | null;
  video_crf: number | null;
  video_preset: string | null;
  resolution: string | null;
  framerate: number | null;
  audio_codec: string;
  audio_bitrate: string | null;
  audio_channels: number | null;
  audio_samplerate: number | null;
  extra_args: string[];
  created_at: string | null;
  updated_at: string | null;
}

// One transcode run: source asset + preset snapshot + progress/result state.
export interface TranscodeJob {
  id: string;
  source_asset_id: string;
  preset_id: string | null;
  preset_snapshot: Record<string, unknown>;
  trim_start: number | null;
  trim_end: number | null;
  output_filename: string;
  output_path: string | null;
  output_asset_id: string | null;
  status: JobStatus;
  progress: number;
  current_frame: number | null;
  current_time: number | null;
  speed: string | null;
  error_message: string | null;
  celery_task_id: string | null;
  priority: number;
  created_at: string | null;
  started_at: string | null;
  completed_at: string | null;
}
|
||||
Reference in New Issue
Block a user