Compare commits

...

11 Commits

Author SHA1 Message Date
5ceb8172ea docker build fix 2026-03-13 14:31:26 -03:00
3eeedebb15 major refactor 2026-03-13 01:07:02 -03:00
eaaf2ad60c executor abstraction, graphene to strawberry 2026-03-12 23:27:34 -03:00
4e9d731cff Remove REST API, keep GraphQL as sole API
- Add missing GraphQL mutations: retryJob, updateAsset, deleteAsset
- Add UpdateAssetRequest and DeleteResult to schema source of truth
- Move Lambda callback endpoint to main.py (only REST endpoint)
- Remove REST routes, pydantic schemas, and deps
- Remove pydantic target from modelgen.json
- Update architecture diagrams and documentation
2026-02-12 20:07:51 -03:00
dbbaad5b94 Display architecture diagrams side-by-side for easier comparison 2026-02-12 19:56:21 -03:00
2ac31083e5 Update root docs index.html to reference new separate architecture diagrams 2026-02-12 19:55:00 -03:00
f481fa6cbe Remove old combined architecture diagram 2026-02-12 19:53:23 -03:00
cc1a1b9953 Split architecture diagram into separate local and AWS diagrams 2026-02-12 19:49:47 -03:00
da1ff62877 Merge aws-int: Add AWS integration with GraphQL, Step Functions, and Lambda
# Conflicts:
#	docs/architecture/index.html
2026-02-12 19:47:15 -03:00
9cead74fb3 updated docs 2026-02-12 19:46:12 -03:00
72e4113529 updated modelgen tool 2026-02-06 20:18:45 -03:00
101 changed files with 2955 additions and 2810 deletions

30
.dockerignore Normal file
View File

@@ -0,0 +1,30 @@
# Python
.venv/
__pycache__/
*.pyc
*.egg-info/
.pytest_cache/
# Node
node_modules/
ui/*/node_modules/
ui/*/dist/
# Media (9.8GB — mounted via volume, never needed in image)
media/
# Git
.git/
# IDE / OS
.idea/
.vscode/
*.swp
.DS_Store
# Docker
ctrl/docker-compose.yml
# Docs
docs/
*.md

View File

@@ -71,12 +71,12 @@ docker compose logs -f
docker compose logs -f celery docker compose logs -f celery
# Create admin user # Create admin user
docker compose exec django python manage.py createsuperuser docker compose exec django python admin/manage.py createsuperuser
``` ```
## Code Generation ## Code Generation
Models are defined as dataclasses in `schema/models/` and generated via `modelgen`: Models are defined as dataclasses in `core/schema/models/` and generated via `modelgen`:
- **Django ORM** models (`--include dataclasses,enums`) - **Django ORM** models (`--include dataclasses,enums`)
- **Pydantic** schemas (`--include dataclasses,enums`) - **Pydantic** schemas (`--include dataclasses,enums`)
- **TypeScript** types (`--include dataclasses,enums,api`) - **TypeScript** types (`--include dataclasses,enums,api`)
@@ -113,26 +113,29 @@ See [docs/media-storage.md](docs/media-storage.md) for full details.
``` ```
mpr/ mpr/
├── api/ # FastAPI application ├── admin/ # Django project
│ ├── routes/ # API endpoints │ ├── manage.py # Django management script
│ └── schemas/ # Pydantic models (generated) │ └── mpr/ # Django settings & app
├── core/ # Core utilities │ └── media_assets/# Django app
│ └── ffmpeg/ # FFmpeg wrappers ├── core/ # Core application logic
│ ├── api/ # FastAPI + GraphQL API
│ │ └── schema/ # GraphQL types (generated)
│ ├── ffmpeg/ # FFmpeg wrappers
│ ├── rpc/ # gRPC server & client
│ │ └── protos/ # Protobuf definitions (generated)
│ ├── schema/ # Source of truth
│ │ └── models/ # Dataclass definitions
│ ├── storage/ # S3/GCP/local storage backends
│ └── task/ # Celery job execution
│ ├── executor.py # Executor abstraction
│ └── tasks.py # Celery tasks
├── ctrl/ # Docker & deployment ├── ctrl/ # Docker & deployment
│ ├── docker-compose.yml │ ├── docker-compose.yml
│ └── nginx.conf │ └── nginx.conf
├── media/ ├── media/
│ ├── in/ # Source media files │ ├── in/ # Source media files
│ └── out/ # Transcoded output │ └── out/ # Transcoded output
├── rpc/ # gRPC server & client ├── modelgen/ # Code generation tool
│ └── protos/ # Protobuf definitions (generated)
├── mpr/ # Django project
│ └── media_assets/ # Django app
├── schema/ # Source of truth
│ └── models/ # Dataclass definitions
├── task/ # Celery job execution
│ ├── executor.py # Executor abstraction
│ └── tasks.py # Celery tasks
└── ui/ # Frontend └── ui/ # Frontend
└── timeline/ # React app └── timeline/ # React app
``` ```

View File

@@ -6,7 +6,9 @@ import sys
def main(): def main():
"""Run administrative tasks.""" """Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mpr.settings') # Ensure project root is on sys.path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'admin.mpr.settings')
try: try:
from django.core.management import execute_from_command_line from django.core.management import execute_from_command_line
except ImportError as exc: except ImportError as exc:

View File

@@ -11,6 +11,6 @@ import os
from django.core.asgi import get_asgi_application from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mpr.settings') os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'admin.mpr.settings')
application = get_asgi_application() application = get_asgi_application()

View File

@@ -2,9 +2,9 @@ import os
from celery import Celery from celery import Celery
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mpr.settings") os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin.mpr.settings")
app = Celery("mpr") app = Celery("mpr")
app.config_from_object("django.conf:settings", namespace="CELERY") app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks() app.autodiscover_tasks()
app.autodiscover_tasks(["task"]) app.autodiscover_tasks(["core.task"])

View File

@@ -3,5 +3,6 @@ from django.apps import AppConfig
class MediaAssetsConfig(AppConfig): class MediaAssetsConfig(AppConfig):
default_auto_field = "django.db.models.BigAutoField" default_auto_field = "django.db.models.BigAutoField"
name = "mpr.media_assets" name = "admin.mpr.media_assets"
label = "media_assets"
verbose_name = "Media Assets" verbose_name = "Media Assets"

View File

@@ -4,10 +4,10 @@ from pathlib import Path
from django.core.management.base import BaseCommand from django.core.management.base import BaseCommand
from mpr.media_assets.models import TranscodePreset from admin.mpr.media_assets.models import TranscodePreset
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent.parent.parent)) sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent.parent.parent.parent))
from schema.models import BUILTIN_PRESETS from core.schema.models import BUILTIN_PRESETS
class Command(BaseCommand): class Command(BaseCommand):

View File

@@ -1,8 +1,7 @@
# Generated by Django 6.0.1 on 2026-02-01 15:13 # Generated by Django 4.2.29 on 2026-03-13 04:04
import django.db.models.deletion
import uuid
from django.db import migrations, models from django.db import migrations, models
import uuid
class Migration(migrations.Migration): class Migration(migrations.Migration):
@@ -13,47 +12,21 @@ class Migration(migrations.Migration):
] ]
operations = [ operations = [
migrations.CreateModel(
name='TranscodePreset',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=100, unique=True)),
('description', models.TextField(blank=True, default='')),
('is_builtin', models.BooleanField(default=False)),
('container', models.CharField(default='mp4', max_length=20)),
('video_codec', models.CharField(default='libx264', max_length=50)),
('video_bitrate', models.CharField(blank=True, max_length=20, null=True)),
('video_crf', models.IntegerField(blank=True, null=True)),
('video_preset', models.CharField(blank=True, max_length=20, null=True)),
('resolution', models.CharField(blank=True, max_length=20, null=True)),
('framerate', models.FloatField(blank=True, null=True)),
('audio_codec', models.CharField(default='aac', max_length=50)),
('audio_bitrate', models.CharField(blank=True, max_length=20, null=True)),
('audio_channels', models.IntegerField(blank=True, null=True)),
('audio_samplerate', models.IntegerField(blank=True, null=True)),
('extra_args', models.JSONField(blank=True, default=list)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel( migrations.CreateModel(
name='MediaAsset', name='MediaAsset',
fields=[ fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('filename', models.CharField(max_length=500)), ('filename', models.CharField(max_length=500)),
('file_path', models.CharField(max_length=1000)), ('file_path', models.CharField(max_length=1000)),
('status', models.CharField(choices=[('pending', 'Pending Probe'), ('ready', 'Ready'), ('error', 'Error')], default='pending', max_length=20)), ('status', models.CharField(choices=[('pending', 'Pending'), ('ready', 'Ready'), ('error', 'Error')], default='pending', max_length=20)),
('error_message', models.TextField(blank=True, null=True)), ('error_message', models.TextField(blank=True, default='')),
('file_size', models.BigIntegerField(blank=True, null=True)), ('file_size', models.BigIntegerField(blank=True, null=True)),
('duration', models.FloatField(blank=True, null=True)), ('duration', models.FloatField(blank=True, default=None, null=True)),
('video_codec', models.CharField(blank=True, max_length=50, null=True)), ('video_codec', models.CharField(blank=True, max_length=255, null=True)),
('audio_codec', models.CharField(blank=True, max_length=50, null=True)), ('audio_codec', models.CharField(blank=True, max_length=255, null=True)),
('width', models.IntegerField(blank=True, null=True)), ('width', models.IntegerField(blank=True, default=None, null=True)),
('height', models.IntegerField(blank=True, null=True)), ('height', models.IntegerField(blank=True, default=None, null=True)),
('framerate', models.FloatField(blank=True, null=True)), ('framerate', models.FloatField(blank=True, default=None, null=True)),
('bitrate', models.BigIntegerField(blank=True, null=True)), ('bitrate', models.BigIntegerField(blank=True, null=True)),
('properties', models.JSONField(blank=True, default=dict)), ('properties', models.JSONField(blank=True, default=dict)),
('comments', models.TextField(blank=True, default='')), ('comments', models.TextField(blank=True, default='')),
@@ -63,36 +36,61 @@ class Migration(migrations.Migration):
], ],
options={ options={
'ordering': ['-created_at'], 'ordering': ['-created_at'],
'indexes': [models.Index(fields=['status'], name='media_asset_status_9ea2f2_idx'), models.Index(fields=['created_at'], name='media_asset_created_368039_idx')],
}, },
), ),
migrations.CreateModel( migrations.CreateModel(
name='TranscodeJob', name='TranscodeJob',
fields=[ fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('source_asset_id', models.UUIDField()),
('preset_id', models.UUIDField(blank=True, null=True)),
('preset_snapshot', models.JSONField(blank=True, default=dict)), ('preset_snapshot', models.JSONField(blank=True, default=dict)),
('trim_start', models.FloatField(blank=True, null=True)), ('trim_start', models.FloatField(blank=True, default=None, null=True)),
('trim_end', models.FloatField(blank=True, null=True)), ('trim_end', models.FloatField(blank=True, default=None, null=True)),
('output_filename', models.CharField(max_length=500)), ('output_filename', models.CharField(max_length=500)),
('output_path', models.CharField(blank=True, max_length=1000, null=True)), ('output_path', models.CharField(blank=True, max_length=1000, null=True)),
('output_asset_id', models.UUIDField(blank=True, null=True)),
('status', models.CharField(choices=[('pending', 'Pending'), ('processing', 'Processing'), ('completed', 'Completed'), ('failed', 'Failed'), ('cancelled', 'Cancelled')], default='pending', max_length=20)), ('status', models.CharField(choices=[('pending', 'Pending'), ('processing', 'Processing'), ('completed', 'Completed'), ('failed', 'Failed'), ('cancelled', 'Cancelled')], default='pending', max_length=20)),
('progress', models.FloatField(default=0.0)), ('progress', models.FloatField(default=0.0)),
('current_frame', models.IntegerField(blank=True, null=True)), ('current_frame', models.IntegerField(blank=True, default=None, null=True)),
('current_time', models.FloatField(blank=True, null=True)), ('current_time', models.FloatField(blank=True, default=None, null=True)),
('speed', models.CharField(blank=True, max_length=20, null=True)), ('speed', models.CharField(blank=True, max_length=255, null=True)),
('error_message', models.TextField(blank=True, null=True)), ('error_message', models.TextField(blank=True, default='')),
('celery_task_id', models.CharField(blank=True, max_length=100, null=True)), ('celery_task_id', models.CharField(blank=True, max_length=255, null=True)),
('execution_arn', models.CharField(blank=True, max_length=255, null=True)),
('priority', models.IntegerField(default=0)), ('priority', models.IntegerField(default=0)),
('created_at', models.DateTimeField(auto_now_add=True)), ('created_at', models.DateTimeField(auto_now_add=True)),
('started_at', models.DateTimeField(blank=True, null=True)), ('started_at', models.DateTimeField(blank=True, null=True)),
('completed_at', models.DateTimeField(blank=True, null=True)), ('completed_at', models.DateTimeField(blank=True, null=True)),
('output_asset', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='source_jobs', to='media_assets.mediaasset')),
('source_asset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='transcode_jobs', to='media_assets.mediaasset')),
('preset', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='jobs', to='media_assets.transcodepreset')),
], ],
options={ options={
'ordering': ['priority', 'created_at'], 'ordering': ['-created_at'],
'indexes': [models.Index(fields=['status', 'priority'], name='media_asset_status_e6ac18_idx'), models.Index(fields=['created_at'], name='media_asset_created_ba3a46_idx'), models.Index(fields=['celery_task_id'], name='media_asset_celery__81a88e_idx')], },
),
migrations.CreateModel(
name='TranscodePreset',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True, default='')),
('is_builtin', models.BooleanField(default=False)),
('container', models.CharField(max_length=255)),
('video_codec', models.CharField(max_length=255)),
('video_bitrate', models.CharField(blank=True, max_length=255, null=True)),
('video_crf', models.IntegerField(blank=True, default=None, null=True)),
('video_preset', models.CharField(blank=True, max_length=255, null=True)),
('resolution', models.CharField(blank=True, max_length=255, null=True)),
('framerate', models.FloatField(blank=True, default=None, null=True)),
('audio_codec', models.CharField(max_length=255)),
('audio_bitrate', models.CharField(blank=True, max_length=255, null=True)),
('audio_channels', models.IntegerField(blank=True, default=None, null=True)),
('audio_samplerate', models.IntegerField(blank=True, default=None, null=True)),
('extra_args', models.JSONField(blank=True, default=list)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['-created_at'],
}, },
), ),
] ]

View File

@@ -7,7 +7,7 @@ from pathlib import Path
import environ import environ
BASE_DIR = Path(__file__).resolve().parent.parent BASE_DIR = Path(__file__).resolve().parent.parent.parent
env = environ.Env( env = environ.Env(
DEBUG=(bool, False), DEBUG=(bool, False),
@@ -27,7 +27,7 @@ INSTALLED_APPS = [
"django.contrib.sessions", "django.contrib.sessions",
"django.contrib.messages", "django.contrib.messages",
"django.contrib.staticfiles", "django.contrib.staticfiles",
"mpr.media_assets", "admin.mpr.media_assets",
] ]
MIDDLEWARE = [ MIDDLEWARE = [
@@ -40,7 +40,7 @@ MIDDLEWARE = [
"django.middleware.clickjacking.XFrameOptionsMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware",
] ]
ROOT_URLCONF = "mpr.urls" ROOT_URLCONF = "admin.mpr.urls"
TEMPLATES = [ TEMPLATES = [
{ {
@@ -57,7 +57,7 @@ TEMPLATES = [
}, },
] ]
WSGI_APPLICATION = "mpr.wsgi.application" WSGI_APPLICATION = "admin.mpr.wsgi.application"
# Database # Database
DATABASE_URL = env("DATABASE_URL", default="sqlite:///db.sqlite3") DATABASE_URL = env("DATABASE_URL", default="sqlite:///db.sqlite3")

View File

@@ -11,6 +11,6 @@ import os
from django.core.wsgi import get_wsgi_application from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mpr.settings') os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'admin.mpr.settings')
application = get_wsgi_application() application = get_wsgi_application()

View File

@@ -1,54 +0,0 @@
"""
FastAPI dependencies.
Provides database sessions, settings, and common dependencies.
"""
import os
from functools import lru_cache
from typing import Generator
import django
from django.conf import settings as django_settings
# Initialize Django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mpr.settings")
django.setup()
from mpr.media_assets.models import MediaAsset, TranscodeJob, TranscodePreset
@lru_cache
def get_settings():
"""Get Django settings."""
return django_settings
def get_asset(asset_id: str) -> MediaAsset:
"""Get asset by ID or raise 404."""
from fastapi import HTTPException
try:
return MediaAsset.objects.get(id=asset_id)
except MediaAsset.DoesNotExist:
raise HTTPException(status_code=404, detail="Asset not found")
def get_preset(preset_id: str) -> TranscodePreset:
"""Get preset by ID or raise 404."""
from fastapi import HTTPException
try:
return TranscodePreset.objects.get(id=preset_id)
except TranscodePreset.DoesNotExist:
raise HTTPException(status_code=404, detail="Preset not found")
def get_job(job_id: str) -> TranscodeJob:
"""Get job by ID or raise 404."""
from fastapi import HTTPException
try:
return TranscodeJob.objects.get(id=job_id)
except TranscodeJob.DoesNotExist:
raise HTTPException(status_code=404, detail="Job not found")

View File

@@ -1,251 +0,0 @@
"""
GraphQL API using graphene, mounted on FastAPI/Starlette.
Provides the same data as the REST API but via GraphQL queries and mutations.
Uses Django ORM directly for data access.
Types are generated from schema/ via modelgen — see api/schemas/graphql_types.py.
"""
import os
import graphene
from api.schemas.graphql import (
CreateJobInput,
MediaAssetType,
ScanResultType,
SystemStatusType,
TranscodeJobType,
TranscodePresetType,
)
from core.storage import BUCKET_IN, list_objects
# Media extensions (same as assets route)
VIDEO_EXTS = {".mp4", ".mkv", ".avi", ".mov", ".webm", ".flv", ".wmv", ".m4v"}
AUDIO_EXTS = {".mp3", ".wav", ".flac", ".aac", ".ogg", ".m4a"}
MEDIA_EXTS = VIDEO_EXTS | AUDIO_EXTS
# ---------------------------------------------------------------------------
# Queries
# ---------------------------------------------------------------------------
class Query(graphene.ObjectType):
assets = graphene.List(
MediaAssetType,
status=graphene.String(),
search=graphene.String(),
)
asset = graphene.Field(MediaAssetType, id=graphene.UUID(required=True))
jobs = graphene.List(
TranscodeJobType,
status=graphene.String(),
source_asset_id=graphene.UUID(),
)
job = graphene.Field(TranscodeJobType, id=graphene.UUID(required=True))
presets = graphene.List(TranscodePresetType)
system_status = graphene.Field(SystemStatusType)
def resolve_assets(self, info, status=None, search=None):
from mpr.media_assets.models import MediaAsset
qs = MediaAsset.objects.all()
if status:
qs = qs.filter(status=status)
if search:
qs = qs.filter(filename__icontains=search)
return qs
def resolve_asset(self, info, id):
from mpr.media_assets.models import MediaAsset
try:
return MediaAsset.objects.get(id=id)
except MediaAsset.DoesNotExist:
return None
def resolve_jobs(self, info, status=None, source_asset_id=None):
from mpr.media_assets.models import TranscodeJob
qs = TranscodeJob.objects.all()
if status:
qs = qs.filter(status=status)
if source_asset_id:
qs = qs.filter(source_asset_id=source_asset_id)
return qs
def resolve_job(self, info, id):
from mpr.media_assets.models import TranscodeJob
try:
return TranscodeJob.objects.get(id=id)
except TranscodeJob.DoesNotExist:
return None
def resolve_presets(self, info):
from mpr.media_assets.models import TranscodePreset
return TranscodePreset.objects.all()
def resolve_system_status(self, info):
return {"status": "ok", "version": "0.1.0"}
# ---------------------------------------------------------------------------
# Mutations
# ---------------------------------------------------------------------------
class ScanMediaFolder(graphene.Mutation):
class Arguments:
pass
Output = ScanResultType
def mutate(self, info):
from mpr.media_assets.models import MediaAsset
objects = list_objects(BUCKET_IN, extensions=MEDIA_EXTS)
existing = set(MediaAsset.objects.values_list("filename", flat=True))
registered = []
skipped = []
for obj in objects:
if obj["filename"] in existing:
skipped.append(obj["filename"])
continue
try:
MediaAsset.objects.create(
filename=obj["filename"],
file_path=obj["key"],
file_size=obj["size"],
)
registered.append(obj["filename"])
except Exception:
pass
return ScanResultType(
found=len(objects),
registered=len(registered),
skipped=len(skipped),
files=registered,
)
class CreateJob(graphene.Mutation):
class Arguments:
input = CreateJobInput(required=True)
Output = TranscodeJobType
def mutate(self, info, input):
from pathlib import Path
from mpr.media_assets.models import MediaAsset, TranscodeJob, TranscodePreset
try:
source = MediaAsset.objects.get(id=input.source_asset_id)
except MediaAsset.DoesNotExist:
raise Exception("Source asset not found")
preset = None
preset_snapshot = {}
if input.preset_id:
try:
preset = TranscodePreset.objects.get(id=input.preset_id)
preset_snapshot = {
"name": preset.name,
"container": preset.container,
"video_codec": preset.video_codec,
"audio_codec": preset.audio_codec,
}
except TranscodePreset.DoesNotExist:
raise Exception("Preset not found")
if not preset and not input.trim_start and not input.trim_end:
raise Exception("Must specify preset_id or trim_start/trim_end")
output_filename = input.output_filename
if not output_filename:
stem = Path(source.filename).stem
ext = preset_snapshot.get("container", "mp4") if preset else "mp4"
output_filename = f"{stem}_output.{ext}"
job = TranscodeJob.objects.create(
source_asset_id=source.id,
preset_id=preset.id if preset else None,
preset_snapshot=preset_snapshot,
trim_start=input.trim_start,
trim_end=input.trim_end,
output_filename=output_filename,
output_path=output_filename,
priority=input.priority or 0,
)
# Dispatch
executor_mode = os.environ.get("MPR_EXECUTOR", "local")
if executor_mode == "lambda":
from task.executor import get_executor
get_executor().run(
job_id=str(job.id),
source_path=source.file_path,
output_path=output_filename,
preset=preset_snapshot or None,
trim_start=input.trim_start,
trim_end=input.trim_end,
duration=source.duration,
)
else:
from task.tasks import run_transcode_job
result = run_transcode_job.delay(
job_id=str(job.id),
source_key=source.file_path,
output_key=output_filename,
preset=preset_snapshot or None,
trim_start=input.trim_start,
trim_end=input.trim_end,
duration=source.duration,
)
job.celery_task_id = result.id
job.save(update_fields=["celery_task_id"])
return job
class CancelJob(graphene.Mutation):
class Arguments:
id = graphene.UUID(required=True)
Output = TranscodeJobType
def mutate(self, info, id):
from mpr.media_assets.models import TranscodeJob
try:
job = TranscodeJob.objects.get(id=id)
except TranscodeJob.DoesNotExist:
raise Exception("Job not found")
if job.status not in ("pending", "processing"):
raise Exception(f"Cannot cancel job with status: {job.status}")
job.status = "cancelled"
job.save(update_fields=["status"])
return job
class Mutation(graphene.ObjectType):
scan_media_folder = ScanMediaFolder.Field()
create_job = CreateJob.Field()
cancel_job = CancelJob.Field()
# ---------------------------------------------------------------------------
# Schema
# ---------------------------------------------------------------------------
schema = graphene.Schema(query=Query, mutation=Mutation)

View File

@@ -1,61 +0,0 @@
"""
MPR FastAPI Application
Main entry point for the REST API.
"""
import os
import sys
# Add project root to path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Initialize Django before importing models
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mpr.settings")
import django
django.setup()
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from api.graphql import schema as graphql_schema
from api.routes import assets_router, jobs_router, presets_router, system_router
from starlette_graphene3 import GraphQLApp, make_graphiql_handler
app = FastAPI(
title="MPR API",
description="Media Processor REST API",
version="0.1.0",
docs_url="/docs",
redoc_url="/redoc",
)
# CORS
app.add_middleware(
CORSMiddleware,
allow_origins=["http://mpr.local.ar", "http://localhost:5173"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Routes - all under /api prefix
app.include_router(system_router, prefix="/api")
app.include_router(assets_router, prefix="/api")
app.include_router(presets_router, prefix="/api")
app.include_router(jobs_router, prefix="/api")
# GraphQL
app.mount("/graphql", GraphQLApp(schema=graphql_schema, on_get=make_graphiql_handler()))
@app.get("/")
def root():
"""API root."""
return {
"name": "MPR API",
"version": "0.1.0",
"docs": "/docs",
}

View File

@@ -1,8 +0,0 @@
"""API Routes."""
from .assets import router as assets_router
from .jobs import router as jobs_router
from .presets import router as presets_router
from .system import router as system_router
__all__ = ["assets_router", "jobs_router", "presets_router", "system_router"]

View File

@@ -1,117 +0,0 @@
"""
Asset endpoints - media file registration and metadata.
"""
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException, Query
from api.deps import get_asset
from api.schemas import AssetCreate, AssetResponse, AssetUpdate
from core.storage import BUCKET_IN, list_objects
router = APIRouter(prefix="/assets", tags=["assets"])
# Supported media extensions
VIDEO_EXTS = {".mp4", ".mkv", ".avi", ".mov", ".webm", ".flv", ".wmv", ".m4v"}
AUDIO_EXTS = {".mp3", ".wav", ".flac", ".aac", ".ogg", ".m4a"}
MEDIA_EXTS = VIDEO_EXTS | AUDIO_EXTS
@router.post("/", response_model=AssetResponse, status_code=201)
def create_asset(data: AssetCreate):
"""Register a media file as an asset."""
from mpr.media_assets.models import MediaAsset
asset = MediaAsset.objects.create(
filename=data.filename or data.file_path.split("/")[-1],
file_path=data.file_path,
file_size=data.file_size,
)
return asset
@router.get("/", response_model=list[AssetResponse])
def list_assets(
status: Optional[str] = Query(None, description="Filter by status"),
limit: int = Query(50, ge=1, le=100),
offset: int = Query(0, ge=0),
):
"""List assets with optional filtering."""
from mpr.media_assets.models import MediaAsset
qs = MediaAsset.objects.all()
if status:
qs = qs.filter(status=status)
return list(qs[offset : offset + limit])
@router.get("/{asset_id}", response_model=AssetResponse)
def get_asset_detail(asset_id: UUID, asset=Depends(get_asset)):
"""Get asset details."""
return asset
@router.patch("/{asset_id}", response_model=AssetResponse)
def update_asset(asset_id: UUID, data: AssetUpdate, asset=Depends(get_asset)):
"""Update asset metadata (comments, tags)."""
update_fields = []
if data.comments is not None:
asset.comments = data.comments
update_fields.append("comments")
if data.tags is not None:
asset.tags = data.tags
update_fields.append("tags")
if update_fields:
asset.save(update_fields=update_fields)
return asset
@router.delete("/{asset_id}", status_code=204)
def delete_asset(asset_id: UUID, asset=Depends(get_asset)):
"""Delete an asset."""
asset.delete()
@router.post("/scan", response_model=dict)
def scan_media_folder():
"""
Scan the S3 media-in bucket for new video/audio files and register them as assets.
"""
from mpr.media_assets.models import MediaAsset
# List objects from S3 bucket
objects = list_objects(BUCKET_IN, extensions=MEDIA_EXTS)
# Get existing filenames to avoid duplicates
existing_filenames = set(MediaAsset.objects.values_list("filename", flat=True))
registered_files = []
skipped_files = []
for obj in objects:
if obj["filename"] in existing_filenames:
skipped_files.append(obj["filename"])
continue
try:
MediaAsset.objects.create(
filename=obj["filename"],
file_path=obj["key"],
file_size=obj["size"],
)
registered_files.append(obj["filename"])
except Exception as e:
print(f"Error registering {obj['filename']}: {e}")
return {
"found": len(objects),
"registered": len(registered_files),
"skipped": len(skipped_files),
"files": registered_files,
}

View File

@@ -1,233 +0,0 @@
"""
Job endpoints - transcode/trim job management.
"""
import os
from pathlib import Path
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, Header, HTTPException, Query
from api.deps import get_asset, get_job, get_preset
from api.schemas import JobCreate, JobResponse
router = APIRouter(prefix="/jobs", tags=["jobs"])
CALLBACK_API_KEY = os.environ.get("CALLBACK_API_KEY", "")
@router.post("/", response_model=JobResponse, status_code=201)
def create_job(data: JobCreate):
"""
Create a transcode or trim job.
- With preset_id: Full transcode using preset settings
- Without preset_id but with trim_start/end: Trim only (stream copy)
"""
from mpr.media_assets.models import MediaAsset, TranscodeJob, TranscodePreset
# Get source asset
try:
source = MediaAsset.objects.get(id=data.source_asset_id)
except MediaAsset.DoesNotExist:
raise HTTPException(status_code=404, detail="Source asset not found")
# Get preset if specified
preset = None
preset_snapshot = {}
if data.preset_id:
try:
preset = TranscodePreset.objects.get(id=data.preset_id)
preset_snapshot = {
"name": preset.name,
"container": preset.container,
"video_codec": preset.video_codec,
"video_bitrate": preset.video_bitrate,
"video_crf": preset.video_crf,
"video_preset": preset.video_preset,
"resolution": preset.resolution,
"framerate": preset.framerate,
"audio_codec": preset.audio_codec,
"audio_bitrate": preset.audio_bitrate,
"audio_channels": preset.audio_channels,
"audio_samplerate": preset.audio_samplerate,
"extra_args": preset.extra_args,
}
except TranscodePreset.DoesNotExist:
raise HTTPException(status_code=404, detail="Preset not found")
# Validate trim-only job
if not preset and not data.trim_start and not data.trim_end:
raise HTTPException(
status_code=400, detail="Must specify preset_id or trim_start/trim_end"
)
# Generate output filename - stored as S3 key in output bucket
output_filename = data.output_filename
if not output_filename:
stem = Path(source.filename).stem
ext = preset_snapshot.get("container", "mp4") if preset else "mp4"
output_filename = f"{stem}_output.{ext}"
# Create job
job = TranscodeJob.objects.create(
source_asset_id=source.id,
preset_id=preset.id if preset else None,
preset_snapshot=preset_snapshot,
trim_start=data.trim_start,
trim_end=data.trim_end,
output_filename=output_filename,
output_path=output_filename, # S3 key in output bucket
priority=data.priority or 0,
)
# Dispatch based on executor mode
executor_mode = os.environ.get("MPR_EXECUTOR", "local")
if executor_mode == "lambda":
_dispatch_lambda(job, source, preset_snapshot)
else:
_dispatch_celery(job, source, preset_snapshot)
return job
def _dispatch_celery(job, source, preset_snapshot):
    """Queue the job on a Celery worker and record the task id on the job row."""
    from task.tasks import run_transcode_job

    task_kwargs = {
        "job_id": str(job.id),
        "source_key": source.file_path,
        "output_key": job.output_filename,
        "preset": preset_snapshot or None,
        "trim_start": job.trim_start,
        "trim_end": job.trim_end,
        "duration": source.duration,
    }
    async_result = run_transcode_job.delay(**task_kwargs)
    # Persist the broker-side id so the job can later be correlated/inspected.
    job.celery_task_id = async_result.id
    job.save(update_fields=["celery_task_id"])
def _dispatch_lambda(job, source, preset_snapshot):
    """Hand the job off to AWS Step Functions via the configured executor."""
    from task.executor import get_executor

    run_kwargs = {
        "job_id": str(job.id),
        "source_path": source.file_path,
        "output_path": job.output_filename,
        "preset": preset_snapshot or None,
        "trim_start": job.trim_start,
        "trim_end": job.trim_end,
        "duration": source.duration,
    }
    get_executor().run(**run_kwargs)
@router.post("/{job_id}/callback")
def job_callback(
    job_id: UUID,
    payload: dict,
    x_api_key: Optional[str] = Header(None),
):
    """
    Callback endpoint for Lambda to report job completion.
    Protected by API key (``X-Api-Key`` header).

    The payload comes from an external executor and is untrusted, so the
    reported status is validated against the known job lifecycle states
    before being persisted.

    Raises:
        HTTPException 403: missing/incorrect API key.
        HTTPException 404: no job with the given id.
        HTTPException 400: unrecognized status value in the payload.
    """
    if CALLBACK_API_KEY and x_api_key != CALLBACK_API_KEY:
        raise HTTPException(status_code=403, detail="Invalid API key")
    from django.utils import timezone
    from mpr.media_assets.models import TranscodeJob
    try:
        job = TranscodeJob.objects.get(id=job_id)
    except TranscodeJob.DoesNotExist:
        raise HTTPException(status_code=404, detail="Job not found")
    status = payload.get("status", "failed")
    # Reject statuses outside the job lifecycle instead of writing garbage
    # into the status column.
    if status not in ("pending", "processing", "completed", "failed", "cancelled"):
        raise HTTPException(status_code=400, detail=f"Invalid status: {status}")
    job.status = status
    job.progress = 100.0 if status == "completed" else job.progress
    update_fields = ["status", "progress"]
    if payload.get("error"):
        job.error_message = payload["error"]
        update_fields.append("error_message")
    # Both terminal states stamp the completion time.
    if status in ("completed", "failed"):
        job.completed_at = timezone.now()
        update_fields.append("completed_at")
    job.save(update_fields=update_fields)
    return {"ok": True}
@router.get("/", response_model=list[JobResponse])
def list_jobs(
    status: Optional[str] = Query(None, description="Filter by status"),
    source_asset_id: Optional[UUID] = Query(None),
    limit: int = Query(50, ge=1, le=100),
    offset: int = Query(0, ge=0),
):
    """List jobs, optionally filtered by status and/or source asset, paginated."""
    from mpr.media_assets.models import TranscodeJob

    filters = {}
    if status:
        filters["status"] = status
    if source_asset_id:
        filters["source_asset_id"] = source_asset_id
    queryset = TranscodeJob.objects.filter(**filters)
    return list(queryset[offset : offset + limit])
@router.get("/{job_id}", response_model=JobResponse)
def get_job_detail(job_id: UUID, job=Depends(get_job)):
    """Get job details including progress. 404 handling lives in the get_job dependency."""
    return job
@router.get("/{job_id}/progress")
def get_job_progress(job_id: UUID, job=Depends(get_job)):
    """Return a lightweight real-time progress snapshot for one job."""
    snapshot = {"job_id": str(job.id)}
    for field in ("status", "progress", "current_frame", "current_time", "speed"):
        snapshot[field] = getattr(job, field)
    return snapshot
@router.post("/{job_id}/cancel", response_model=JobResponse)
def cancel_job(job_id: UUID, job=Depends(get_job)):
    """Cancel a pending or processing job.

    Only flips the DB status. NOTE(review): an already-dispatched Celery task
    or Step Functions execution is not revoked here — confirm the worker
    checks job status before/while processing.
    """
    if job.status not in ("pending", "processing"):
        raise HTTPException(
            status_code=400, detail=f"Cannot cancel job with status: {job.status}"
        )
    job.status = "cancelled"
    job.save(update_fields=["status"])
    return job
@router.post("/{job_id}/retry", response_model=JobResponse)
def retry_job(job_id: UUID, job=Depends(get_job)):
    """Retry a failed job.

    Resets the row to pending and clears progress/error. NOTE(review): the job
    is NOT re-dispatched here (compare create flow, which calls
    _dispatch_celery/_dispatch_lambda) — confirm something picks up pending jobs.
    """
    if job.status != "failed":
        raise HTTPException(status_code=400, detail="Only failed jobs can be retried")
    job.status = "pending"
    job.progress = 0
    job.error_message = None
    job.save(update_fields=["status", "progress", "error_message"])
    return job

View File

@@ -1,100 +0,0 @@
"""
Preset endpoints - transcode configuration templates.
"""
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException
from api.deps import get_preset
from api.schemas import PresetCreate, PresetResponse, PresetUpdate
router = APIRouter(prefix="/presets", tags=["presets"])
@router.post("/", response_model=PresetResponse, status_code=201)
def create_preset(data: PresetCreate):
    """Create a custom (never builtin) preset from the request body.

    Falsy string fields fall back to the same defaults the model declares.
    """
    from mpr.media_assets.models import TranscodePreset

    values = {
        "name": data.name,
        "description": data.description or "",
        "container": data.container or "mp4",
        "video_codec": data.video_codec or "libx264",
        "video_bitrate": data.video_bitrate,
        "video_crf": data.video_crf,
        "video_preset": data.video_preset,
        "resolution": data.resolution,
        "framerate": data.framerate,
        "audio_codec": data.audio_codec or "aac",
        "audio_bitrate": data.audio_bitrate,
        "audio_channels": data.audio_channels,
        "audio_samplerate": data.audio_samplerate,
        "extra_args": data.extra_args or [],
        "is_builtin": False,
    }
    return TranscodePreset.objects.create(**values)
@router.get("/", response_model=list[PresetResponse])
def list_presets(include_builtin: bool = True):
    """List presets; pass include_builtin=false to get only custom ones."""
    from mpr.media_assets.models import TranscodePreset

    queryset = (
        TranscodePreset.objects.all()
        if include_builtin
        else TranscodePreset.objects.filter(is_builtin=False)
    )
    return list(queryset)
@router.get("/{preset_id}", response_model=PresetResponse)
def get_preset_detail(preset_id: UUID, preset=Depends(get_preset)):
    """Get preset details. 404 handling lives in the get_preset dependency."""
    return preset
@router.patch("/{preset_id}", response_model=PresetResponse)
def update_preset(preset_id: UUID, data: PresetUpdate, preset=Depends(get_preset)):
    """Update a custom preset. Builtin presets cannot be modified.

    Uses ``model_dump(exclude_unset=True)`` so only fields the client actually
    sent are applied. The previous ``value is not None`` check silently ignored
    explicit nulls, making it impossible to clear an optional field (e.g.
    video_crf). ``is_builtin`` stays server-controlled and is never applied.
    """
    if preset.is_builtin:
        raise HTTPException(status_code=403, detail="Cannot modify builtin preset")
    editable = {
        "name",
        "description",
        "container",
        "video_codec",
        "video_bitrate",
        "video_crf",
        "video_preset",
        "resolution",
        "framerate",
        "audio_codec",
        "audio_bitrate",
        "audio_channels",
        "audio_samplerate",
        "extra_args",
    }
    changes = {
        field: value
        for field, value in data.model_dump(exclude_unset=True).items()
        if field in editable
    }
    for field, value in changes.items():
        setattr(preset, field, value)
    if changes:
        preset.save(update_fields=list(changes))
    return preset
@router.delete("/{preset_id}", status_code=204)
def delete_preset(preset_id: UUID, preset=Depends(get_preset)):
    """Delete a custom preset; builtin presets are protected (403)."""
    if not preset.is_builtin:
        preset.delete()
        return
    raise HTTPException(status_code=403, detail="Cannot delete builtin preset")

View File

@@ -1,51 +0,0 @@
"""
System endpoints - health checks and FFmpeg capabilities.
"""
from fastapi import APIRouter
from core.ffmpeg import get_decoders, get_encoders, get_formats
router = APIRouter(prefix="/system", tags=["system"])
@router.get("/health")
def health_check():
    """Liveness probe: reports healthy whenever the process is serving."""
    return dict(status="healthy")
@router.get("/status")
def system_status():
    """System status for UI dashboards (static ok + version string)."""
    payload = dict(status="ok", version="0.1.0")
    return payload
@router.get("/worker")
def worker_status():
    """Worker status from gRPC.

    Best-effort: any failure (import error, RPC error, empty response) is
    reported as ``available: False`` with the error text instead of being
    raised, so this endpoint never 500s.
    """
    try:
        from rpc.client import get_client
        client = get_client()
        status = client.get_worker_status()
        if status:
            return status
        return {"available": False, "error": "No response from worker"}
    except Exception as e:
        return {"available": False, "error": str(e)}
@router.get("/ffmpeg/codecs")
def ffmpeg_codecs():
    """Report the encoders and decoders the local FFmpeg build supports."""
    encoders = get_encoders()
    decoders = get_decoders()
    return {"encoders": encoders, "decoders": decoders}
@router.get("/ffmpeg/formats")
def ffmpeg_formats():
    """Get available FFmpeg muxers and demuxers (shape defined by core.ffmpeg.get_formats)."""
    return get_formats()

View File

@@ -1,10 +0,0 @@
"""API Schemas - GENERATED FILE"""
from .base import BaseSchema
from .asset import AssetCreate, AssetUpdate, AssetResponse
from .asset import AssetStatus
from .preset import PresetCreate, PresetUpdate, PresetResponse
from .job import JobCreate, JobUpdate, JobResponse
from .job import JobStatus
__all__ = ["BaseSchema", "AssetCreate", "AssetUpdate", "AssetResponse", "AssetStatus", "PresetCreate", "PresetUpdate", "PresetResponse", "JobCreate", "JobUpdate", "JobResponse", "JobStatus"]

View File

@@ -1,69 +0,0 @@
"""MediaAsset Schemas - GENERATED FILE"""
from datetime import datetime
from typing import Any, Dict, List, Optional
from uuid import UUID
from .base import BaseSchema
from .models import AssetStatus
class AssetCreate(BaseSchema):
    """AssetCreate schema.

    Payload for registering a media asset. Note: ``properties`` and ``tags``
    have no defaults here, so callers must supply them explicitly.
    """
    filename: str
    file_path: str
    file_size: Optional[int] = None
    duration: Optional[float] = None
    video_codec: Optional[str] = None
    audio_codec: Optional[str] = None
    width: Optional[int] = None
    height: Optional[int] = None
    framerate: Optional[float] = None
    bitrate: Optional[int] = None
    properties: Dict[str, Any]
    comments: str = ""
    tags: List[str]
class AssetUpdate(BaseSchema):
    """AssetUpdate schema.

    All-optional PATCH model; None means "field not provided".
    """
    filename: Optional[str] = None
    file_path: Optional[str] = None
    status: Optional[AssetStatus] = None
    error_message: Optional[str] = None
    file_size: Optional[int] = None
    duration: Optional[float] = None
    video_codec: Optional[str] = None
    audio_codec: Optional[str] = None
    width: Optional[int] = None
    height: Optional[int] = None
    framerate: Optional[float] = None
    bitrate: Optional[int] = None
    properties: Optional[Dict[str, Any]] = None
    comments: Optional[str] = None
    tags: Optional[List[str]] = None
class AssetResponse(BaseSchema):
    """AssetResponse schema.

    Read model returned by the API for a MediaAsset row (populated from the
    ORM instance via from_attributes).
    """
    id: UUID
    filename: str
    file_path: str
    # BUG FIX: the generated default was the *string* "AssetStatus.PENDING",
    # which is not a valid enum value; use the enum member itself.
    status: AssetStatus = AssetStatus.PENDING
    error_message: Optional[str] = None
    file_size: Optional[int] = None
    duration: Optional[float] = None
    video_codec: Optional[str] = None
    audio_codec: Optional[str] = None
    width: Optional[int] = None
    height: Optional[int] = None
    framerate: Optional[float] = None
    bitrate: Optional[int] = None
    properties: Dict[str, Any]
    comments: str = ""
    tags: List[str]
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None

View File

@@ -1,8 +0,0 @@
"""Pydantic Base Schema - GENERATED FILE"""
from pydantic import BaseModel, ConfigDict
class BaseSchema(BaseModel):
    """Base schema with ORM mode.

    from_attributes=True (pydantic v2 "ORM mode") lets these models be
    populated directly from Django ORM instances.
    """
    model_config = ConfigDict(from_attributes=True)

View File

@@ -1,129 +0,0 @@
"""
Graphene Types - GENERATED FILE
Do not edit directly. Regenerate using modelgen.
"""
import graphene
class AssetStatus(graphene.Enum):
    # MediaAsset lifecycle states; values match the pydantic AssetStatus enum.
    PENDING = "pending"
    READY = "ready"
    ERROR = "error"
class JobStatus(graphene.Enum):
    # TranscodeJob lifecycle states; values match the pydantic JobStatus enum.
    PENDING = "pending"
    PROCESSING = "processing"
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELLED = "cancelled"
class MediaAssetType(graphene.ObjectType):
    """A video/audio file registered in the system."""
    id = graphene.UUID()
    filename = graphene.String()
    file_path = graphene.String()
    # NOTE(review): exposed as String even though AssetStatus enum exists
    # above — confirm clients expect lowercase strings here.
    status = graphene.String()
    error_message = graphene.String()
    file_size = graphene.Int()
    duration = graphene.Float()
    video_codec = graphene.String()
    audio_codec = graphene.String()
    width = graphene.Int()
    height = graphene.Int()
    framerate = graphene.Float()
    bitrate = graphene.Int()
    properties = graphene.JSONString()
    comments = graphene.String()
    tags = graphene.List(graphene.String)
    created_at = graphene.DateTime()
    updated_at = graphene.DateTime()
class TranscodePresetType(graphene.ObjectType):
    """A reusable transcoding configuration (like Handbrake presets)."""
    id = graphene.UUID()
    name = graphene.String()
    description = graphene.String()
    is_builtin = graphene.Boolean()
    container = graphene.String()
    video_codec = graphene.String()
    # Bitrates are strings (e.g. FFmpeg-style values), not integers.
    video_bitrate = graphene.String()
    video_crf = graphene.Int()
    video_preset = graphene.String()
    resolution = graphene.String()
    framerate = graphene.Float()
    audio_codec = graphene.String()
    audio_bitrate = graphene.String()
    audio_channels = graphene.Int()
    audio_samplerate = graphene.Int()
    extra_args = graphene.List(graphene.String)
    created_at = graphene.DateTime()
    updated_at = graphene.DateTime()
class TranscodeJobType(graphene.ObjectType):
    """A transcoding or trimming job in the queue."""
    id = graphene.UUID()
    source_asset_id = graphene.UUID()
    preset_id = graphene.UUID()
    # Frozen copy of the preset at job-creation time.
    preset_snapshot = graphene.JSONString()
    trim_start = graphene.Float()
    trim_end = graphene.Float()
    output_filename = graphene.String()
    output_path = graphene.String()
    output_asset_id = graphene.UUID()
    # NOTE(review): String rather than the JobStatus enum above — confirm.
    status = graphene.String()
    progress = graphene.Float()
    current_frame = graphene.Int()
    current_time = graphene.Float()
    speed = graphene.String()
    error_message = graphene.String()
    celery_task_id = graphene.String()
    execution_arn = graphene.String()
    priority = graphene.Int()
    created_at = graphene.DateTime()
    started_at = graphene.DateTime()
    completed_at = graphene.DateTime()
class CreateJobInput(graphene.InputObjectType):
    """Request body for creating a transcode/trim job."""
    source_asset_id = graphene.UUID(required=True)
    # Either a preset or at least one trim bound must be supplied (enforced
    # by the mutation resolver, not the schema).
    preset_id = graphene.UUID()
    trim_start = graphene.Float()
    trim_end = graphene.Float()
    output_filename = graphene.String()
    priority = graphene.Int(default_value=0)
class SystemStatusType(graphene.ObjectType):
    """System status response."""
    status = graphene.String()
    version = graphene.String()
class ScanResultType(graphene.ObjectType):
    """Result of scanning the media input bucket."""
    found = graphene.Int()
    registered = graphene.Int()
    skipped = graphene.Int()
    # Filenames that were newly registered by this scan.
    files = graphene.List(graphene.String)
class WorkerStatusType(graphene.ObjectType):
    """Worker health and capabilities."""
    available = graphene.Boolean()
    active_jobs = graphene.Int()
    supported_codecs = graphene.List(graphene.String)
    gpu_available = graphene.Boolean()

View File

@@ -1,67 +0,0 @@
"""TranscodeJob Schemas - GENERATED FILE"""
from datetime import datetime
from typing import Any, Dict, List, Optional
from uuid import UUID
from .base import BaseSchema
from .models import JobStatus
class JobCreate(BaseSchema):
    """Client-facing job creation request.

    preset_id and/or trim bounds are validated by the endpoint, not here.
    """
    source_asset_id: UUID
    preset_id: Optional[UUID] = None
    trim_start: Optional[float] = None
    trim_end: Optional[float] = None
    output_filename: Optional[str] = None
    priority: int = 0
class JobUpdate(BaseSchema):
    """JobUpdate schema.

    All-optional PATCH model; None means "field not provided".
    """
    source_asset_id: Optional[UUID] = None
    preset_id: Optional[UUID] = None
    preset_snapshot: Optional[Dict[str, Any]] = None
    trim_start: Optional[float] = None
    trim_end: Optional[float] = None
    output_filename: Optional[str] = None
    output_path: Optional[str] = None
    output_asset_id: Optional[UUID] = None
    status: Optional[JobStatus] = None
    progress: Optional[float] = None
    current_frame: Optional[int] = None
    current_time: Optional[float] = None
    speed: Optional[str] = None
    error_message: Optional[str] = None
    celery_task_id: Optional[str] = None
    priority: Optional[int] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
class JobResponse(BaseSchema):
    """JobResponse schema.

    Read model returned by the API for a TranscodeJob row.
    """
    id: UUID
    source_asset_id: UUID
    preset_id: Optional[UUID] = None
    preset_snapshot: Dict[str, Any]
    trim_start: Optional[float] = None
    trim_end: Optional[float] = None
    output_filename: str = ""
    output_path: Optional[str] = None
    output_asset_id: Optional[UUID] = None
    # BUG FIX: the generated default was the *string* "JobStatus.PENDING",
    # which is not a valid enum value; use the enum member itself.
    status: JobStatus = JobStatus.PENDING
    progress: float = 0.0
    current_frame: Optional[int] = None
    current_time: Optional[float] = None
    speed: Optional[str] = None
    error_message: Optional[str] = None
    celery_task_id: Optional[str] = None
    priority: int = 0
    created_at: Optional[datetime] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None

View File

@@ -1,90 +0,0 @@
"""
Pydantic Models - GENERATED FILE
Do not edit directly. Regenerate using modelgen.
"""
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional
from uuid import UUID
from pydantic import BaseModel, Field
class AssetStatus(str, Enum):
    # str-valued so members compare and serialize as their plain string values.
    PENDING = "pending"
    READY = "ready"
    ERROR = "error"
class JobStatus(str, Enum):
    # str-valued so members compare and serialize as their plain string values.
    PENDING = "pending"
    PROCESSING = "processing"
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELLED = "cancelled"
class MediaAsset(BaseModel):
    """A video/audio file registered in the system."""
    id: UUID
    filename: str
    file_path: str
    # BUG FIX: the generated default was the *string* "AssetStatus.PENDING",
    # which is not a valid enum value; use the enum member itself.
    status: AssetStatus = AssetStatus.PENDING
    error_message: Optional[str] = None
    file_size: Optional[int] = None
    duration: Optional[float] = None
    video_codec: Optional[str] = None
    audio_codec: Optional[str] = None
    width: Optional[int] = None
    height: Optional[int] = None
    framerate: Optional[float] = None
    bitrate: Optional[int] = None
    properties: Dict[str, Any]
    comments: str = ""
    tags: List[str] = Field(default_factory=list)
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None
class TranscodePreset(BaseModel):
    """A reusable transcoding configuration (like Handbrake presets)."""
    id: UUID
    name: str
    description: str = ""
    is_builtin: bool = False
    container: str = "mp4"
    video_codec: str = "libx264"
    # Bitrates are strings (FFmpeg-style values), not integers.
    video_bitrate: Optional[str] = None
    video_crf: Optional[int] = None
    video_preset: Optional[str] = None
    resolution: Optional[str] = None
    framerate: Optional[float] = None
    audio_codec: str = "aac"
    audio_bitrate: Optional[str] = None
    audio_channels: Optional[int] = None
    audio_samplerate: Optional[int] = None
    extra_args: List[str] = Field(default_factory=list)
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None
class TranscodeJob(BaseModel):
    """A transcoding or trimming job in the queue."""
    id: UUID
    source_asset_id: UUID
    preset_id: Optional[UUID] = None
    # Frozen copy of the preset at job-creation time.
    preset_snapshot: Dict[str, Any]
    trim_start: Optional[float] = None
    trim_end: Optional[float] = None
    output_filename: str = ""
    output_path: Optional[str] = None
    output_asset_id: Optional[UUID] = None
    # BUG FIX: the generated default was the *string* "JobStatus.PENDING",
    # which is not a valid enum value; use the enum member itself.
    status: JobStatus = JobStatus.PENDING
    progress: float = 0.0
    current_frame: Optional[int] = None
    current_time: Optional[float] = None
    speed: Optional[str] = None
    error_message: Optional[str] = None
    celery_task_id: Optional[str] = None
    execution_arn: Optional[str] = None
    priority: int = 0
    created_at: Optional[datetime] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None

View File

@@ -1,66 +0,0 @@
"""TranscodePreset Schemas - GENERATED FILE"""
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional
from uuid import UUID
from .base import BaseSchema
class PresetCreate(BaseSchema):
    """PresetCreate schema.

    Note: ``extra_args`` has no default here, so callers must pass it; the
    endpoint forces is_builtin=False regardless of this field.
    """
    name: str
    description: str = ""
    is_builtin: bool = False
    container: str = "mp4"
    video_codec: str = "libx264"
    video_bitrate: Optional[str] = None
    video_crf: Optional[int] = None
    video_preset: Optional[str] = None
    resolution: Optional[str] = None
    framerate: Optional[float] = None
    audio_codec: str = "aac"
    audio_bitrate: Optional[str] = None
    audio_channels: Optional[int] = None
    audio_samplerate: Optional[int] = None
    extra_args: List[str]
class PresetUpdate(BaseSchema):
    """PresetUpdate schema.

    All-optional PATCH model; the endpoint never applies is_builtin.
    """
    name: Optional[str] = None
    description: Optional[str] = None
    is_builtin: Optional[bool] = None
    container: Optional[str] = None
    video_codec: Optional[str] = None
    video_bitrate: Optional[str] = None
    video_crf: Optional[int] = None
    video_preset: Optional[str] = None
    resolution: Optional[str] = None
    framerate: Optional[float] = None
    audio_codec: Optional[str] = None
    audio_bitrate: Optional[str] = None
    audio_channels: Optional[int] = None
    audio_samplerate: Optional[int] = None
    extra_args: Optional[List[str]] = None
class PresetResponse(BaseSchema):
    """PresetResponse schema.

    Read model returned by the API for a TranscodePreset row.
    """
    id: UUID
    name: str
    description: str = ""
    is_builtin: bool = False
    container: str = "mp4"
    video_codec: str = "libx264"
    video_bitrate: Optional[str] = None
    video_crf: Optional[int] = None
    video_preset: Optional[str] = None
    resolution: Optional[str] = None
    framerate: Optional[float] = None
    audio_codec: str = "aac"
    audio_bitrate: Optional[str] = None
    audio_channels: Optional[int] = None
    audio_samplerate: Optional[int] = None
    extra_args: List[str]
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None

273
core/api/graphql.py Normal file
View File

@@ -0,0 +1,273 @@
"""
GraphQL API using strawberry, served via FastAPI.
Primary API for MPR — all client interactions go through GraphQL.
Uses core.db for data access.
Types are generated from schema/ via modelgen — see api/schema/graphql.py.
"""
import os
from typing import List, Optional
from uuid import UUID
import strawberry
from strawberry.schema.config import StrawberryConfig
from strawberry.types import Info
from core.api.schema.graphql import (
CreateJobInput,
DeleteResultType,
MediaAssetType,
ScanResultType,
SystemStatusType,
TranscodeJobType,
TranscodePresetType,
UpdateAssetInput,
)
from core.storage import BUCKET_IN, list_objects
VIDEO_EXTS = {".mp4", ".mkv", ".avi", ".mov", ".webm", ".flv", ".wmv", ".m4v"}
AUDIO_EXTS = {".mp3", ".wav", ".flac", ".aac", ".ogg", ".m4a"}
MEDIA_EXTS = VIDEO_EXTS | AUDIO_EXTS
# ---------------------------------------------------------------------------
# Queries
# ---------------------------------------------------------------------------
@strawberry.type
class Query:
    """Root GraphQL queries. All reads go through the core.db helpers."""
    @strawberry.field
    def assets(
        self,
        info: Info,
        status: Optional[str] = None,
        search: Optional[str] = None,
    ) -> List[MediaAssetType]:
        """List assets, optionally filtered by status and/or filename substring."""
        from core.db import list_assets
        return list_assets(status=status, search=search)
    @strawberry.field
    def asset(self, info: Info, id: UUID) -> Optional[MediaAssetType]:
        """Fetch one asset by id; returns None when not found (any lookup error)."""
        from core.db import get_asset
        try:
            return get_asset(id)
        except Exception:
            return None
    @strawberry.field
    def jobs(
        self,
        info: Info,
        status: Optional[str] = None,
        source_asset_id: Optional[UUID] = None,
    ) -> List[TranscodeJobType]:
        """List jobs, optionally filtered by status and/or source asset."""
        from core.db import list_jobs
        return list_jobs(status=status, source_asset_id=source_asset_id)
    @strawberry.field
    def job(self, info: Info, id: UUID) -> Optional[TranscodeJobType]:
        """Fetch one job by id; returns None when not found (any lookup error)."""
        from core.db import get_job
        try:
            return get_job(id)
        except Exception:
            return None
    @strawberry.field
    def presets(self, info: Info) -> List[TranscodePresetType]:
        """List all transcode presets (builtin and custom)."""
        from core.db import list_presets
        return list_presets()
    @strawberry.field
    def system_status(self, info: Info) -> SystemStatusType:
        """Static ok/version payload for UI health display."""
        return SystemStatusType(status="ok", version="0.1.0")
# ---------------------------------------------------------------------------
# Mutations
# ---------------------------------------------------------------------------
@strawberry.type
class Mutation:
    """Root GraphQL mutations. All writes go through the core.db helpers."""

    @strawberry.mutation
    def scan_media_folder(self, info: Info) -> ScanResultType:
        """Scan the input bucket and register media files not yet known.

        Files already registered (by filename) are skipped; files whose
        registration fails are also counted as skipped instead of silently
        disappearing from the totals (the old code swallowed them).
        """
        from core.db import create_asset, get_asset_filenames
        objects = list_objects(BUCKET_IN, extensions=MEDIA_EXTS)
        existing = get_asset_filenames()
        registered = []
        skipped = []
        for obj in objects:
            if obj["filename"] in existing:
                skipped.append(obj["filename"])
                continue
            try:
                create_asset(
                    filename=obj["filename"],
                    file_path=obj["key"],
                    file_size=obj["size"],
                )
                registered.append(obj["filename"])
            except Exception:
                # Failed registrations are reported as skipped so
                # found == registered + skipped always holds.
                skipped.append(obj["filename"])
        return ScanResultType(
            found=len(objects),
            registered=len(registered),
            skipped=len(skipped),
            files=registered,
        )

    @strawberry.mutation
    def create_job(self, info: Info, input: CreateJobInput) -> TranscodeJobType:
        """Create a transcode (preset) and/or trim job and dispatch it.

        Dispatch target is chosen by MPR_EXECUTOR: "lambda"/"gcp" use the
        cloud executor, anything else queues a Celery task.

        NOTE(review): the snapshot captures only a subset of preset fields
        (name/container/codecs) — confirm the worker needs nothing more.
        """
        from pathlib import Path
        from core.db import create_job, get_asset, get_preset
        try:
            source = get_asset(input.source_asset_id)
        except Exception:
            raise Exception("Source asset not found")
        preset = None
        preset_snapshot = {}
        if input.preset_id:
            try:
                preset = get_preset(input.preset_id)
                preset_snapshot = {
                    "name": preset.name,
                    "container": preset.container,
                    "video_codec": preset.video_codec,
                    "audio_codec": preset.audio_codec,
                }
            except Exception:
                raise Exception("Preset not found")
        # BUG FIX: compare against None so an explicit trim bound of 0.0
        # is not treated as "missing" (0.0 is falsy).
        if not preset and input.trim_start is None and input.trim_end is None:
            raise Exception("Must specify preset_id or trim_start/trim_end")
        output_filename = input.output_filename
        if not output_filename:
            stem = Path(source.filename).stem
            ext = preset_snapshot.get("container", "mp4") if preset else "mp4"
            output_filename = f"{stem}_output.{ext}"
        job = create_job(
            source_asset_id=source.id,
            preset_id=preset.id if preset else None,
            preset_snapshot=preset_snapshot,
            trim_start=input.trim_start,
            trim_end=input.trim_end,
            output_filename=output_filename,
            output_path=output_filename,
            priority=input.priority or 0,
        )
        executor_mode = os.environ.get("MPR_EXECUTOR", "local")
        if executor_mode in ("lambda", "gcp"):
            from core.task.executor import get_executor
            get_executor().run(
                job_id=str(job.id),
                source_path=source.file_path,
                output_path=output_filename,
                preset=preset_snapshot or None,
                trim_start=input.trim_start,
                trim_end=input.trim_end,
                duration=source.duration,
            )
        else:
            from core.task.tasks import run_transcode_job
            result = run_transcode_job.delay(
                job_id=str(job.id),
                source_key=source.file_path,
                output_key=output_filename,
                preset=preset_snapshot or None,
                trim_start=input.trim_start,
                trim_end=input.trim_end,
                duration=source.duration,
            )
            # Persist the broker-side id for later correlation.
            job.celery_task_id = result.id
            job.save(update_fields=["celery_task_id"])
        return job

    @strawberry.mutation
    def cancel_job(self, info: Info, id: UUID) -> TranscodeJobType:
        """Cancel a pending or processing job (DB status flip only)."""
        from core.db import get_job, update_job
        try:
            job = get_job(id)
        except Exception:
            raise Exception("Job not found")
        if job.status not in ("pending", "processing"):
            raise Exception(f"Cannot cancel job with status: {job.status}")
        return update_job(job, status="cancelled")

    @strawberry.mutation
    def retry_job(self, info: Info, id: UUID) -> TranscodeJobType:
        """Reset a failed job to pending.

        NOTE(review): does not re-dispatch to Celery/the executor — confirm
        something picks up pending jobs, or retried jobs will sit idle.
        """
        from core.db import get_job, update_job
        try:
            job = get_job(id)
        except Exception:
            raise Exception("Job not found")
        if job.status != "failed":
            raise Exception("Only failed jobs can be retried")
        return update_job(job, status="pending", progress=0, error_message=None)

    @strawberry.mutation
    def update_asset(self, info: Info, id: UUID, input: UpdateAssetInput) -> MediaAssetType:
        """Update user-editable asset metadata (comments, tags)."""
        from core.db import get_asset, update_asset
        try:
            asset = get_asset(id)
        except Exception:
            raise Exception("Asset not found")
        fields = {}
        if input.comments is not None:
            fields["comments"] = input.comments
        if input.tags is not None:
            fields["tags"] = input.tags
        if fields:
            asset = update_asset(asset, **fields)
        return asset

    @strawberry.mutation
    def delete_asset(self, info: Info, id: UUID) -> DeleteResultType:
        """Delete an asset by id."""
        from core.db import delete_asset, get_asset
        # BUG FIX: only the lookup is guarded. Previously the try wrapped the
        # delete too, so any failure *during* deletion was misreported as
        # "Asset not found".
        try:
            asset = get_asset(id)
        except Exception:
            raise Exception("Asset not found")
        delete_asset(asset)
        return DeleteResultType(ok=True)
# ---------------------------------------------------------------------------
# Schema
# ---------------------------------------------------------------------------
# GraphQL schema entry point. auto_camel_case=False keeps field names
# snake_case, matching the Python models and existing client queries.
schema = strawberry.Schema(
    query=Query,
    mutation=Mutation,
    config=StrawberryConfig(auto_camel_case=False),
)

98
core/api/main.py Normal file
View File

@@ -0,0 +1,98 @@
"""
MPR FastAPI Application
Serves GraphQL API and Lambda callback endpoint.
"""
import os
import sys
from typing import Optional
from uuid import UUID
# Add project root to path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
# Initialize Django before importing models
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "admin.mpr.settings")
import django
django.setup()
from fastapi import FastAPI, Header, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from strawberry.fastapi import GraphQLRouter
from core.api.graphql import schema as graphql_schema
CALLBACK_API_KEY = os.environ.get("CALLBACK_API_KEY", "")
app = FastAPI(
title="MPR API",
description="Media Processor — GraphQL API",
version="0.1.0",
docs_url="/docs",
redoc_url="/redoc",
)
# CORS
app.add_middleware(
CORSMiddleware,
allow_origins=["http://mpr.local.ar", "http://localhost:5173"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# GraphQL
graphql_router = GraphQLRouter(schema=graphql_schema, graphql_ide="graphiql")
app.include_router(graphql_router, prefix="/graphql")
@app.get("/")
def root():
    """API root: advertises the service name, version, and GraphQL path."""
    info = {"name": "MPR API", "version": "0.1.0", "graphql": "/graphql"}
    return info
@app.post("/api/jobs/{job_id}/callback")
def job_callback(
    job_id: UUID,
    payload: dict,
    x_api_key: Optional[str] = Header(None),
):
    """
    Callback endpoint for Lambda to report job completion.
    Protected by API key (``X-Api-Key`` header).

    The payload comes from an external executor and is untrusted, so the
    reported status is validated against the known job lifecycle states
    before being written to the job row.

    Raises:
        HTTPException 403: missing/incorrect API key.
        HTTPException 404: no job with the given id.
        HTTPException 400: unrecognized status value in the payload.
    """
    if CALLBACK_API_KEY and x_api_key != CALLBACK_API_KEY:
        raise HTTPException(status_code=403, detail="Invalid API key")
    from django.utils import timezone
    from core.db import get_job, update_job
    try:
        job = get_job(job_id)
    except Exception:
        raise HTTPException(status_code=404, detail="Job not found")
    status = payload.get("status", "failed")
    # Reject statuses outside the job lifecycle instead of writing garbage.
    if status not in ("pending", "processing", "completed", "failed", "cancelled"):
        raise HTTPException(status_code=400, detail=f"Invalid status: {status}")
    fields = {
        "status": status,
        "progress": 100.0 if status == "completed" else job.progress,
    }
    if payload.get("error"):
        fields["error_message"] = payload["error"]
    # Both terminal states stamp the completion time.
    if status in ("completed", "failed"):
        fields["completed_at"] = timezone.now()
    update_job(job, **fields)
    return {"ok": True}

158
core/api/schema/graphql.py Normal file
View File

@@ -0,0 +1,158 @@
"""
Strawberry Types - GENERATED FILE
Do not edit directly. Regenerate using modelgen.
"""
import strawberry
from enum import Enum
from typing import List, Optional
from uuid import UUID
from datetime import datetime
from strawberry.scalars import JSON
@strawberry.enum
class AssetStatus(Enum):
    # MediaAsset lifecycle states (same values as the model/DB choices).
    PENDING = "pending"
    READY = "ready"
    ERROR = "error"
@strawberry.enum
class JobStatus(Enum):
    # TranscodeJob lifecycle states (same values as the model/DB choices).
    PENDING = "pending"
    PROCESSING = "processing"
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELLED = "cancelled"
@strawberry.type
class MediaAssetType:
    """A video/audio file registered in the system."""
    # All fields Optional/None so every GraphQL field is nullable; instances
    # are typically populated from ORM rows by the resolvers.
    id: Optional[UUID] = None
    filename: Optional[str] = None
    file_path: Optional[str] = None
    # NOTE(review): plain str even though the AssetStatus enum exists above —
    # confirm clients expect lowercase strings here.
    status: Optional[str] = None
    error_message: Optional[str] = None
    file_size: Optional[int] = None
    duration: Optional[float] = None
    video_codec: Optional[str] = None
    audio_codec: Optional[str] = None
    width: Optional[int] = None
    height: Optional[int] = None
    framerate: Optional[float] = None
    bitrate: Optional[int] = None
    properties: Optional[JSON] = None
    comments: Optional[str] = None
    tags: Optional[List[str]] = None
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None
@strawberry.type
class TranscodePresetType:
    """A reusable transcoding configuration (like Handbrake presets)."""
    id: Optional[UUID] = None
    name: Optional[str] = None
    description: Optional[str] = None
    is_builtin: Optional[bool] = None
    container: Optional[str] = None
    video_codec: Optional[str] = None
    # Bitrates are strings (FFmpeg-style values), not integers.
    video_bitrate: Optional[str] = None
    video_crf: Optional[int] = None
    video_preset: Optional[str] = None
    resolution: Optional[str] = None
    framerate: Optional[float] = None
    audio_codec: Optional[str] = None
    audio_bitrate: Optional[str] = None
    audio_channels: Optional[int] = None
    audio_samplerate: Optional[int] = None
    extra_args: Optional[List[str]] = None
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None
@strawberry.type
class TranscodeJobType:
    """A transcoding or trimming job in the queue."""
    id: Optional[UUID] = None
    source_asset_id: Optional[UUID] = None
    preset_id: Optional[UUID] = None
    # Frozen copy of the preset at job-creation time.
    preset_snapshot: Optional[JSON] = None
    trim_start: Optional[float] = None
    trim_end: Optional[float] = None
    output_filename: Optional[str] = None
    output_path: Optional[str] = None
    output_asset_id: Optional[UUID] = None
    # NOTE(review): plain str rather than the JobStatus enum above — confirm.
    status: Optional[str] = None
    progress: Optional[float] = None
    current_frame: Optional[int] = None
    current_time: Optional[float] = None
    speed: Optional[str] = None
    error_message: Optional[str] = None
    celery_task_id: Optional[str] = None
    execution_arn: Optional[str] = None
    priority: Optional[int] = None
    created_at: Optional[datetime] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
@strawberry.input
class CreateJobInput:
    """Request body for creating a transcode/trim job."""
    source_asset_id: UUID
    # Either a preset or at least one trim bound must be supplied (enforced
    # by the create_job mutation, not the schema).
    preset_id: Optional[UUID] = None
    trim_start: Optional[float] = None
    trim_end: Optional[float] = None
    output_filename: Optional[str] = None
    priority: int = 0
@strawberry.input
class UpdateAssetInput:
    """Request body for updating asset metadata."""
    # Only user-editable metadata is exposed; None means "leave unchanged".
    comments: Optional[str] = None
    tags: Optional[List[str]] = None
@strawberry.type
class SystemStatusType:
    """System status response."""
    status: Optional[str] = None
    version: Optional[str] = None
@strawberry.type
class ScanResultType:
    """Result of scanning the media input bucket."""
    found: Optional[int] = None
    registered: Optional[int] = None
    skipped: Optional[int] = None
    # Filenames that were newly registered by this scan.
    files: Optional[List[str]] = None
@strawberry.type
class DeleteResultType:
    """Result of a delete operation."""
    ok: Optional[bool] = None
@strawberry.type
class WorkerStatusType:
    """Worker health and capabilities."""
    available: Optional[bool] = None
    active_jobs: Optional[int] = None
    supported_codecs: Optional[List[str]] = None
    gpu_available: Optional[bool] = None

19
core/db/__init__.py Normal file
View File

@@ -0,0 +1,19 @@
from .assets import (
create_asset,
delete_asset,
get_asset,
get_asset_filenames,
list_assets,
update_asset,
)
from .jobs import (
create_job,
get_job,
list_jobs,
update_job,
update_job_fields,
)
from .presets import (
get_preset,
list_presets,
)

48
core/db/assets.py Normal file
View File

@@ -0,0 +1,48 @@
"""Database operations for MediaAsset."""
from typing import Optional
from uuid import UUID
def list_assets(status: Optional[str] = None, search: Optional[str] = None):
    """Return all MediaAsset rows as a list.

    Optionally narrows by exact ``status`` and/or a case-insensitive
    substring match on ``filename``.
    """
    from admin.mpr.media_assets.models import MediaAsset

    results = MediaAsset.objects.all()
    criteria = {}
    if status:
        criteria["status"] = status
    if search:
        criteria["filename__icontains"] = search
    if criteria:
        results = results.filter(**criteria)
    return list(results)
def get_asset(id: UUID):
    """Fetch a single MediaAsset by ``id``.

    Raises ``MediaAsset.DoesNotExist`` when no row matches.
    """
    from admin.mpr.media_assets.models import MediaAsset

    asset = MediaAsset.objects.get(id=id)
    return asset
def get_asset_filenames() -> set[str]:
    """Return the filename of every registered MediaAsset as a set."""
    from admin.mpr.media_assets.models import MediaAsset

    names = MediaAsset.objects.values_list("filename", flat=True)
    return {name for name in names}
def create_asset(*, filename: str, file_path: str, file_size: int):
    """Insert and return a new MediaAsset row (keyword-only arguments)."""
    from admin.mpr.media_assets.models import MediaAsset

    attrs = {
        "filename": filename,
        "file_path": file_path,
        "file_size": file_size,
    }
    return MediaAsset.objects.create(**attrs)
def update_asset(asset, **fields):
    """Set the given attribute values on *asset* and save only those columns.

    Returns the same asset instance for convenient chaining.
    """
    touched = []
    for name, value in fields.items():
        setattr(asset, name, value)
        touched.append(name)
    asset.save(update_fields=touched)
    return asset
def delete_asset(asset):
    """Delete *asset* from the database.

    Returns the value of ``Model.delete()`` — in Django, a tuple of
    (total objects deleted, per-model deletion counts) — so callers can
    inspect cascade results. Previously the return value was discarded.
    """
    return asset.delete()

40
core/db/jobs.py Normal file
View File

@@ -0,0 +1,40 @@
"""Database operations for TranscodeJob."""
from typing import Optional
from uuid import UUID
def list_jobs(status: Optional[str] = None, source_asset_id: Optional[UUID] = None):
    """Return all TranscodeJob rows as a list.

    Optionally narrows by exact ``status`` and/or the originating asset id.
    """
    from admin.mpr.media_assets.models import TranscodeJob

    jobs = TranscodeJob.objects.all()
    criteria = {}
    if status:
        criteria["status"] = status
    if source_asset_id:
        criteria["source_asset_id"] = source_asset_id
    if criteria:
        jobs = jobs.filter(**criteria)
    return list(jobs)
def get_job(id: UUID):
    """Fetch a single TranscodeJob by ``id``.

    Raises ``TranscodeJob.DoesNotExist`` when no row matches.
    """
    from admin.mpr.media_assets.models import TranscodeJob

    job = TranscodeJob.objects.get(id=id)
    return job
def create_job(**fields):
    """Insert and return a new TranscodeJob row built from ``fields``."""
    from admin.mpr.media_assets.models import TranscodeJob

    job = TranscodeJob.objects.create(**fields)
    return job
def update_job(job, **fields):
    """Assign ``fields`` onto *job* and persist exactly those columns.

    Returns the same job instance.
    """
    for key in fields:
        setattr(job, key, fields[key])
    job.save(update_fields=[*fields])
    return job
def update_job_fields(job_id, **fields):
    """Bulk-update columns on a TranscodeJob by id without loading the row.

    Uses ``QuerySet.update()``, so ``save()`` hooks and signals are bypassed.
    """
    from admin.mpr.media_assets.models import TranscodeJob

    matching = TranscodeJob.objects.filter(id=job_id)
    matching.update(**fields)

15
core/db/presets.py Normal file
View File

@@ -0,0 +1,15 @@
"""Database operations for TranscodePreset."""
from uuid import UUID
def list_presets():
    """Return every TranscodePreset row as a plain list."""
    from admin.mpr.media_assets.models import TranscodePreset

    return [preset for preset in TranscodePreset.objects.all()]
def get_preset(id: UUID):
    """Fetch a single TranscodePreset by ``id``.

    Raises ``TranscodePreset.DoesNotExist`` when no row matches.
    """
    from admin.mpr.media_assets.models import TranscodePreset

    preset = TranscodePreset.objects.get(id=id)
    return preset

View File

@@ -59,7 +59,7 @@ class WorkerServicer(worker_pb2_grpc.WorkerServiceServicer):
# Dispatch to Celery if available # Dispatch to Celery if available
if self.celery_app: if self.celery_app:
from task.tasks import run_transcode_job from core.task.tasks import run_transcode_job
task = run_transcode_job.delay( task = run_transcode_job.delay(
job_id=job_id, job_id=job_id,
@@ -219,9 +219,8 @@ def update_job_progress(
try: try:
from django.utils import timezone from django.utils import timezone
from mpr.media_assets.models import TranscodeJob from core.db import update_job_fields
update_fields = ["progress", "current_frame", "current_time", "speed", "status"]
updates = { updates = {
"progress": progress, "progress": progress,
"current_frame": current_frame, "current_frame": current_frame,
@@ -232,16 +231,13 @@ def update_job_progress(
if error: if error:
updates["error_message"] = error updates["error_message"] = error
update_fields.append("error_message")
if status == "processing": if status == "processing":
updates["started_at"] = timezone.now() updates["started_at"] = timezone.now()
update_fields.append("started_at")
elif status in ("completed", "failed"): elif status in ("completed", "failed"):
updates["completed_at"] = timezone.now() updates["completed_at"] = timezone.now()
update_fields.append("completed_at")
TranscodeJob.objects.filter(id=job_id).update(**updates) update_job_fields(job_id, **updates)
except Exception as e: except Exception as e:
logger.warning(f"Failed to update job {job_id} in DB: {e}") logger.warning(f"Failed to update job {job_id} in DB: {e}")

View File

@@ -4,7 +4,7 @@ MPR Schema Definitions - Source of Truth
This package defines the core data models as Python dataclasses. This package defines the core data models as Python dataclasses.
These definitions are used to generate: These definitions are used to generate:
- Django ORM models (mpr/media_assets/models.py) - Django ORM models (mpr/media_assets/models.py)
- Pydantic schemas (api/schemas/*.py) - Pydantic schemas (api/schema/*.py)
- TypeScript types (ui/timeline/src/types.ts) - TypeScript types (ui/timeline/src/types.ts)
- Protobuf definitions (grpc/protos/worker.proto) - Protobuf definitions (grpc/protos/worker.proto)

25
core/schema/modelgen.json Normal file
View File

@@ -0,0 +1,25 @@
{
"schema": "core/schema/models",
"targets": [
{
"target": "django",
"output": "admin/mpr/media_assets/models.py",
"include": ["dataclasses", "enums"]
},
{
"target": "graphene",
"output": "core/api/schema/graphql.py",
"include": ["dataclasses", "enums", "api"]
},
{
"target": "typescript",
"output": "ui/timeline/src/types.ts",
"include": ["dataclasses", "enums", "api"]
},
{
"target": "protobuf",
"output": "core/rpc/protos/worker.proto",
"include": ["grpc"]
}
]
}

View File

@@ -5,7 +5,13 @@ This module exports all dataclasses, enums, and constants that the generator
should process. Add new models here to have them included in generation. should process. Add new models here to have them included in generation.
""" """
from .api import CreateJobRequest, ScanResult, SystemStatus from .api import (
CreateJobRequest,
DeleteResult,
ScanResult,
SystemStatus,
UpdateAssetRequest,
)
from .grpc import ( from .grpc import (
GRPC_SERVICE, GRPC_SERVICE,
CancelRequest, CancelRequest,
@@ -26,7 +32,14 @@ DATACLASSES = [MediaAsset, TranscodePreset, TranscodeJob]
# API request/response models - generates TypeScript only (no Django) # API request/response models - generates TypeScript only (no Django)
# WorkerStatus from grpc.py is reused here # WorkerStatus from grpc.py is reused here
API_MODELS = [CreateJobRequest, SystemStatus, ScanResult, WorkerStatus] API_MODELS = [
CreateJobRequest,
UpdateAssetRequest,
SystemStatus,
ScanResult,
DeleteResult,
WorkerStatus,
]
# Status enums - included in generated code # Status enums - included in generated code
ENUMS = [AssetStatus, JobStatus] ENUMS = [AssetStatus, JobStatus]
@@ -50,6 +63,8 @@ __all__ = [
"TranscodeJob", "TranscodeJob",
# API Models # API Models
"CreateJobRequest", "CreateJobRequest",
"UpdateAssetRequest",
"DeleteResult",
"ScanResult", "ScanResult",
"SystemStatus", "SystemStatus",
# Enums # Enums

View File

@@ -40,4 +40,19 @@ class ScanResult:
files: List[str] = field(default_factory=list) files: List[str] = field(default_factory=list)
@dataclass
class UpdateAssetRequest:
    """Request body for updating asset metadata.

    Schema source-of-truth model; the GraphQL and TypeScript targets listed
    in core/schema/modelgen.json are generated from this definition.
    """
    comments: Optional[str] = None  # free-form notes on the asset
    tags: Optional[List[str]] = None  # tag list to store
@dataclass
class DeleteResult:
    """Result of a delete operation."""
    ok: bool = False  # True when the delete succeeded
# Note: WorkerStatus is defined in grpc.py and reused here # Note: WorkerStatus is defined in grpc.py and reused here

10
core/storage/__init__.py Normal file
View File

@@ -0,0 +1,10 @@
from .s3 import (
BUCKET_IN,
BUCKET_OUT,
download_file,
download_to_temp,
get_presigned_url,
get_s3_client,
list_objects,
upload_file,
)

1
core/storage/gcp.py Normal file
View File

@@ -0,0 +1 @@
"""GCP Cloud Storage backend (placeholder)."""

1
core/storage/local.py Normal file
View File

@@ -0,0 +1 @@
"""Local filesystem storage backend (placeholder)."""

View File

@@ -156,8 +156,81 @@ class LambdaExecutor(Executor):
# Store execution ARN on the job # Store execution ARN on the job
execution_arn = response["executionArn"] execution_arn = response["executionArn"]
try: try:
from mpr.media_assets.models import TranscodeJob from core.db import update_job_fields
TranscodeJob.objects.filter(id=job_id).update(execution_arn=execution_arn) update_job_fields(job_id, execution_arn=execution_arn)
except Exception:
pass
return True
class GCPExecutor(Executor):
"""Execute jobs via Google Cloud Run Jobs."""
def __init__(self):
    # Deferred import: the GCP SDK is only required when this executor is selected.
    from google.cloud import run_v2
    self.client = run_v2.JobsClient()
    # Required configuration — fail fast with KeyError when unset.
    self.project_id = os.environ["GCP_PROJECT_ID"]
    self.region = os.environ.get("GCP_REGION", "us-central1")
    self.job_name = os.environ["CLOUD_RUN_JOB"]
    # Optional: forwarded in the job payload so the container can report back.
    self.callback_url = os.environ.get("CALLBACK_URL", "")
    self.callback_api_key = os.environ.get("CALLBACK_API_KEY", "")
def run(
self,
job_id: str,
source_path: str,
output_path: str,
preset: Optional[Dict[str, Any]] = None,
trim_start: Optional[float] = None,
trim_end: Optional[float] = None,
duration: Optional[float] = None,
progress_callback: Optional[Callable[[int, Dict[str, Any]], None]] = None,
) -> bool:
"""Trigger a Cloud Run Job execution for this job."""
import json
from google.cloud import run_v2
payload = {
"job_id": job_id,
"source_key": source_path,
"output_key": output_path,
"preset": preset,
"trim_start": trim_start,
"trim_end": trim_end,
"duration": duration,
"callback_url": self.callback_url,
"api_key": self.callback_api_key,
}
job_path = (
f"projects/{self.project_id}/locations/{self.region}/jobs/{self.job_name}"
)
request = run_v2.RunJobRequest(
name=job_path,
overrides=run_v2.RunJobRequest.Overrides(
container_overrides=[
run_v2.RunJobRequest.Overrides.ContainerOverride(
env=[
run_v2.EnvVar(
name="MPR_JOB_PAYLOAD", value=json.dumps(payload)
)
]
)
]
),
)
operation = self.client.run_job(request=request)
execution_name = operation.metadata.name
try:
from core.db import update_job_fields
update_job_fields(job_id, execution_arn=execution_name)
except Exception: except Exception:
pass pass
@@ -168,6 +241,7 @@ class LambdaExecutor(Executor):
_executors: Dict[str, type] = { _executors: Dict[str, type] = {
"local": LocalExecutor, "local": LocalExecutor,
"lambda": LambdaExecutor, "lambda": LambdaExecutor,
"gcp": GCPExecutor,
} }
_executor_instance: Optional[Executor] = None _executor_instance: Optional[Executor] = None

121
core/task/gcp_handler.py Normal file
View File

@@ -0,0 +1,121 @@
"""
Google Cloud Run Job handler for media transcoding.
Reads job payload from the MPR_JOB_PAYLOAD env var (injected by GCPExecutor),
downloads source from S3-compatible storage (GCS via HMAC + S3 API),
runs FFmpeg, uploads result, and calls back to the API.
Uses core/storage and core/ffmpeg — same modules as the Celery worker.
No cloud-provider SDK required here; storage goes through core.storage (boto3 + S3 compat).
Entry point: python -m task.gcp_handler (set as Cloud Run Job command)
"""
import json
import logging
import os
import sys
import tempfile
from pathlib import Path
import requests
from core.ffmpeg.transcode import TranscodeConfig, transcode
from core.storage import BUCKET_IN, BUCKET_OUT, download_to_temp, upload_file
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main() -> None:
    """Entry point for the Cloud Run Job: transcode one media file end to end.

    Reads the job description from the MPR_JOB_PAYLOAD env var (injected by
    GCPExecutor), downloads the source, runs FFmpeg, uploads the result, and
    reports success/failure via HTTP callback. Exits 0 on success, 1 on any
    failure.
    """
    raw = os.environ.get("MPR_JOB_PAYLOAD")
    if not raw:
        logger.error("MPR_JOB_PAYLOAD not set")
        sys.exit(1)
    event = json.loads(raw)
    # Required keys — a malformed payload raises KeyError here, before any
    # temp files are created or callbacks sent.
    job_id = event["job_id"]
    source_key = event["source_key"]
    output_key = event["output_key"]
    preset = event.get("preset")
    trim_start = event.get("trim_start")
    trim_end = event.get("trim_end")
    duration = event.get("duration")
    callback_url = event.get("callback_url", "")
    api_key = event.get("api_key", "")
    logger.info(f"Starting job {job_id}: {source_key} -> {output_key}")
    tmp_source = download_to_temp(BUCKET_IN, source_key)
    # Preserve the output extension so FFmpeg selects the matching container muxer.
    ext_out = Path(output_key).suffix or ".mp4"
    fd, tmp_output = tempfile.mkstemp(suffix=ext_out)
    os.close(fd)
    try:
        if preset:
            config = TranscodeConfig(
                input_path=tmp_source,
                output_path=tmp_output,
                video_codec=preset.get("video_codec", "libx264"),
                video_bitrate=preset.get("video_bitrate"),
                video_crf=preset.get("video_crf"),
                video_preset=preset.get("video_preset"),
                resolution=preset.get("resolution"),
                framerate=preset.get("framerate"),
                audio_codec=preset.get("audio_codec", "aac"),
                audio_bitrate=preset.get("audio_bitrate"),
                audio_channels=preset.get("audio_channels"),
                audio_samplerate=preset.get("audio_samplerate"),
                container=preset.get("container", "mp4"),
                extra_args=preset.get("extra_args", []),
                trim_start=trim_start,
                trim_end=trim_end,
            )
        else:
            # No preset: stream-copy both tracks (trim without re-encoding).
            config = TranscodeConfig(
                input_path=tmp_source,
                output_path=tmp_output,
                video_codec="copy",
                audio_codec="copy",
                trim_start=trim_start,
                trim_end=trim_end,
            )
        success = transcode(config, duration=duration)
        if not success:
            raise RuntimeError("Transcode returned False")
        logger.info(f"Uploading to {BUCKET_OUT}/{output_key}")
        upload_file(tmp_output, BUCKET_OUT, output_key)
        _callback(callback_url, job_id, api_key, {"status": "completed"})
        logger.info(f"Job {job_id} completed")
        # SystemExit derives from BaseException, so this is NOT caught by the
        # `except Exception` below; the finally block still runs before exit.
        sys.exit(0)
    except Exception as e:
        logger.exception(f"Job {job_id} failed: {e}")
        _callback(callback_url, job_id, api_key, {"status": "failed", "error": str(e)})
        sys.exit(1)
    finally:
        # Best-effort cleanup of both temp files on every path.
        for f in [tmp_source, tmp_output]:
            try:
                os.unlink(f)
            except OSError:
                pass
def _callback(callback_url: str, job_id: str, api_key: str, payload: dict) -> None:
    """POST the job result to the API; a failure here is logged, never raised.

    Silently no-ops when no callback URL was provided in the payload.
    """
    if not callback_url:
        return
    try:
        endpoint = f"{callback_url}/jobs/{job_id}/callback"
        headers = {"X-API-Key": api_key} if api_key else {}
        resp = requests.post(endpoint, json=payload, headers=headers, timeout=10)
        logger.info(f"Callback response: {resp.status_code}")
    except Exception as e:
        logger.warning(f"Callback failed: {e}")
# Executed directly as the Cloud Run Job container command.
if __name__ == "__main__":
    main()

View File

@@ -9,8 +9,8 @@ from typing import Any, Dict, Optional
from celery import shared_task from celery import shared_task
from core.storage import BUCKET_IN, BUCKET_OUT, download_to_temp, upload_file from core.storage import BUCKET_IN, BUCKET_OUT, download_to_temp, upload_file
from rpc.server import update_job_progress from core.rpc.server import update_job_progress
from task.executor import get_executor from core.task.executor import get_executor
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)

View File

@@ -16,7 +16,7 @@ REDIS_URL=redis://redis:6379/0
# Django # Django
DEBUG=1 DEBUG=1
DJANGO_SETTINGS_MODULE=mpr.settings DJANGO_SETTINGS_MODULE=admin.mpr.settings
SECRET_KEY=change-this-in-production SECRET_KEY=change-this-in-production
# Worker # Worker

View File

@@ -1,14 +1,10 @@
FROM python:3.11-slim FROM python:3.11-slim
RUN apt-get update && apt-get install -y \
ffmpeg \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app WORKDIR /app
COPY requirements.txt . COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt RUN pip install --no-cache-dir -r requirements.txt
COPY . . # No COPY . . — code is volume-mounted in dev (..:/app)
CMD ["python", "manage.py", "runserver", "0.0.0.0:8000"] CMD ["python", "admin/manage.py", "runserver", "0.0.0.0:8000"]

14
ctrl/Dockerfile.worker Normal file
View File

@@ -0,0 +1,14 @@
# Celery transcode worker image: FFmpeg + Python deps only; application
# source is volume-mounted (..:/app) in development, not baked in.
FROM python:3.11-slim
# FFmpeg is the only system package the worker needs.
RUN apt-get update && apt-get install -y \
    ffmpeg \
    && rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Both files are copied — presumably requirements-worker.txt references
# requirements.txt via `-r`; confirm in the requirements files.
COPY requirements.txt requirements-worker.txt ./
RUN pip install --no-cache-dir -r requirements-worker.txt
# No COPY . . — code is volume-mounted in dev (..:/app)
CMD ["celery", "-A", "admin.mpr", "worker", "--loglevel=info"]

View File

@@ -1,7 +1,7 @@
x-common-env: &common-env x-common-env: &common-env
DATABASE_URL: postgresql://mpr_user:mpr_pass@postgres:5432/mpr DATABASE_URL: postgresql://mpr_user:mpr_pass@postgres:5432/mpr
REDIS_URL: redis://redis:6379/0 REDIS_URL: redis://redis:6379/0
DJANGO_SETTINGS_MODULE: mpr.settings DJANGO_SETTINGS_MODULE: admin.mpr.settings
DEBUG: 1 DEBUG: 1
GRPC_HOST: grpc GRPC_HOST: grpc
GRPC_PORT: 50051 GRPC_PORT: 50051
@@ -96,9 +96,9 @@ services:
context: .. context: ..
dockerfile: ctrl/Dockerfile dockerfile: ctrl/Dockerfile
command: > command: >
bash -c "python manage.py migrate && bash -c "python admin/manage.py migrate &&
python manage.py loadbuiltins || true && python admin/manage.py loadbuiltins || true &&
python manage.py runserver 0.0.0.0:8701" python admin/manage.py runserver 0.0.0.0:8701"
ports: ports:
- "8701:8701" - "8701:8701"
environment: environment:
@@ -115,11 +115,12 @@ services:
build: build:
context: .. context: ..
dockerfile: ctrl/Dockerfile dockerfile: ctrl/Dockerfile
command: uvicorn api.main:app --host 0.0.0.0 --port 8702 --reload command: uvicorn core.api.main:app --host 0.0.0.0 --port 8702 --reload
ports: ports:
- "8702:8702" - "8702:8702"
environment: environment:
<<: *common-env <<: *common-env
DJANGO_ALLOW_ASYNC_UNSAFE: "true"
volumes: volumes:
- ..:/app - ..:/app
depends_on: depends_on:
@@ -132,7 +133,7 @@ services:
build: build:
context: .. context: ..
dockerfile: ctrl/Dockerfile dockerfile: ctrl/Dockerfile
command: python -m rpc.server command: python -m core.rpc.server
ports: ports:
- "50052:50051" - "50052:50051"
environment: environment:
@@ -150,8 +151,8 @@ services:
celery: celery:
build: build:
context: .. context: ..
dockerfile: ctrl/Dockerfile dockerfile: ctrl/Dockerfile.worker
command: celery -A mpr worker -l info -Q transcode -c 2 command: celery -A admin.mpr worker -l info -Q transcode -c 2
environment: environment:
<<: *common-env <<: *common-env
MPR_EXECUTOR: local MPR_EXECUTOR: local

View File

@@ -1,57 +1,22 @@
#!/bin/bash #!/bin/bash
# Model generation script for MPR # Model generation script for MPR
# Generates Django, Pydantic, TypeScript, and Protobuf from schema/models # Generates all targets from core/schema/modelgen.json config
set -e set -e
cd "$(dirname "$0")/.." cd "$(dirname "$0")/.."
echo "Generating models from schema/models..." echo "Generating models from core/schema/models..."
python -m modelgen generate --config core/schema/modelgen.json
# Django ORM models: domain models + enums
python -m modelgen from-schema \
--schema schema/models \
--output mpr/media_assets/models.py \
--targets django \
--include dataclasses,enums
# Pydantic schemas for FastAPI: domain models + enums
python -m modelgen from-schema \
--schema schema/models \
--output api/schemas/models.py \
--targets pydantic \
--include dataclasses,enums
# TypeScript types for Timeline UI: domain models + enums + API types
python -m modelgen from-schema \
--schema schema/models \
--output ui/timeline/src/types.ts \
--targets typescript \
--include dataclasses,enums,api
# Graphene types for GraphQL: domain models + enums + API types
python -m modelgen from-schema \
--schema schema/models \
--output api/schemas/graphql.py \
--targets graphene \
--include dataclasses,enums,api
# Protobuf for gRPC: gRPC messages + service
python -m modelgen from-schema \
--schema schema/models \
--output rpc/protos/worker.proto \
--targets proto \
--include grpc
# Generate gRPC stubs from proto # Generate gRPC stubs from proto
echo "Generating gRPC stubs..." echo "Generating gRPC stubs..."
python -m grpc_tools.protoc \ python -m grpc_tools.protoc \
-I rpc/protos \ -I core/rpc/protos \
--python_out=rpc \ --python_out=core/rpc \
--grpc_python_out=rpc \ --grpc_python_out=core/rpc \
rpc/protos/worker.proto core/rpc/protos/worker.proto
# Fix relative import in generated grpc stub # Fix relative import in generated grpc stub
sed -i 's/^import worker_pb2/from . import worker_pb2/' rpc/worker_pb2_grpc.py sed -i 's/^import worker_pb2/from . import worker_pb2/' core/rpc/worker_pb2_grpc.py
echo "Done!" echo "Done!"

View File

@@ -14,8 +14,8 @@ COPY ctrl/lambda/requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt RUN pip install --no-cache-dir -r requirements.txt
# Copy application code # Copy application code
COPY task/lambda_handler.py ${LAMBDA_TASK_ROOT}/task/lambda_handler.py COPY core/task/lambda_handler.py ${LAMBDA_TASK_ROOT}/core/task/lambda_handler.py
COPY task/__init__.py ${LAMBDA_TASK_ROOT}/task/__init__.py COPY core/task/__init__.py ${LAMBDA_TASK_ROOT}/core/task/__init__.py
COPY core/ ${LAMBDA_TASK_ROOT}/core/ COPY core/ ${LAMBDA_TASK_ROOT}/core/
CMD ["task.lambda_handler.handler"] CMD ["core.task.lambda_handler.handler"]

View File

@@ -44,9 +44,9 @@ http {
proxy_set_header Host $host; proxy_set_header Host $host;
} }
# FastAPI # FastAPI — trailing slash strips /api prefix before forwarding
location /api/ { location /api/ {
proxy_pass http://fastapi; proxy_pass http://fastapi/;
proxy_set_header Host $host; proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

View File

@@ -1,114 +0,0 @@
digraph system_overview {
rankdir=TB
node [shape=box, style=rounded, fontname="Helvetica"]
edge [fontname="Helvetica", fontsize=10]
labelloc="t"
label="MPR - System Overview"
fontsize=16
fontname="Helvetica-Bold"
graph [splines=ortho, nodesep=0.8, ranksep=0.8]
// External
subgraph cluster_external {
label="External"
style=dashed
color=gray
browser [label="Browser\nmpr.local.ar / mpr.mcrn.ar", shape=ellipse]
}
// Nginx reverse proxy
subgraph cluster_proxy {
label="Reverse Proxy"
style=filled
fillcolor="#e8f4f8"
nginx [label="nginx\nport 80"]
}
// Application layer
subgraph cluster_apps {
label="Application Layer"
style=filled
fillcolor="#f0f8e8"
django [label="Django\n/admin\nport 8701"]
fastapi [label="FastAPI\n/api + /graphql\nport 8702"]
timeline [label="Timeline UI\n/ui\nport 5173"]
}
// Worker layer
subgraph cluster_workers {
label="Worker Layer"
style=filled
fillcolor="#fff8e8"
grpc_server [label="gRPC Server\nport 50051"]
celery [label="Celery Worker\n(local mode)"]
}
// AWS layer
subgraph cluster_aws {
label="AWS (lambda mode)"
style=filled
fillcolor="#fde8d0"
step_functions [label="Step Functions\nstate machine"]
lambda [label="Lambda\nFFmpeg container"]
}
// Data layer
subgraph cluster_data {
label="Data Layer"
style=filled
fillcolor="#f8e8f0"
postgres [label="PostgreSQL\nport 5436", shape=cylinder]
redis [label="Redis\nport 6381", shape=cylinder]
}
// Storage
subgraph cluster_storage {
label="S3 Storage"
style=filled
fillcolor="#f0f0f0"
minio [label="MinIO (local)\nport 9000", shape=folder]
s3 [label="AWS S3 (cloud)", shape=folder, style="dashed,rounded"]
bucket_in [label="mpr-media-in", shape=note]
bucket_out [label="mpr-media-out", shape=note]
}
// Connections
browser -> nginx
nginx -> django [xlabel="/admin"]
nginx -> fastapi [xlabel="/api, /graphql"]
nginx -> timeline [xlabel="/ui"]
nginx -> minio [xlabel="/media/*"]
timeline -> fastapi [xlabel="REST API"]
fastapi -> postgres
fastapi -> grpc_server [xlabel="gRPC\nprogress"]
// Local mode
grpc_server -> celery [xlabel="task dispatch"]
celery -> redis [xlabel="queue"]
celery -> postgres [xlabel="job updates"]
celery -> minio [xlabel="S3 API\ndownload/upload"]
// Lambda mode
fastapi -> step_functions [xlabel="boto3\nstart_execution", style=dashed]
step_functions -> lambda [style=dashed]
lambda -> s3 [xlabel="download/upload", style=dashed]
lambda -> fastapi [xlabel="callback\nPOST /jobs/{id}/callback", style=dashed]
// Storage details
minio -> bucket_in [style=dotted, arrowhead=none]
minio -> bucket_out [style=dotted, arrowhead=none]
s3 -> bucket_in [style=dotted, arrowhead=none]
s3 -> bucket_out [style=dotted, arrowhead=none]
}

View File

@@ -1,293 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Generated by graphviz version 14.1.2 (0)
-->
<!-- Title: system_overview Pages: 1 -->
<svg width="620pt" height="903pt"
viewBox="0.00 0.00 620.00 903.00" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 898.54)">
<title>system_overview</title>
<polygon fill="white" stroke="none" points="-4,4 -4,-898.54 616,-898.54 616,4 -4,4"/>
<text xml:space="preserve" text-anchor="middle" x="306" y="-875.34" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">MPR &#45; System Overview</text>
<g id="clust1" class="cluster">
<title>cluster_external</title>
<polygon fill="none" stroke="gray" stroke-dasharray="5,2" points="246,-755.44 246,-859.04 540,-859.04 540,-755.44 246,-755.44"/>
<text xml:space="preserve" text-anchor="middle" x="393" y="-839.84" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">External</text>
</g>
<g id="clust2" class="cluster">
<title>cluster_proxy</title>
<polygon fill="#e8f4f8" stroke="black" points="320,-654.94 320,-740.94 466,-740.94 466,-654.94 320,-654.94"/>
<text xml:space="preserve" text-anchor="middle" x="393" y="-721.74" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">Reverse Proxy</text>
</g>
<g id="clust3" class="cluster">
<title>cluster_apps</title>
<polygon fill="#f0f8e8" stroke="black" points="278,-419.44 278,-640.44 532,-640.44 532,-419.44 278,-419.44"/>
<text xml:space="preserve" text-anchor="middle" x="405" y="-621.24" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">Application Layer</text>
</g>
<g id="clust4" class="cluster">
<title>cluster_workers</title>
<polygon fill="#fff8e8" stroke="black" points="142,-218.44 142,-404.94 280,-404.94 280,-218.44 142,-218.44"/>
<text xml:space="preserve" text-anchor="middle" x="211" y="-385.74" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">Worker Layer</text>
</g>
<g id="clust5" class="cluster">
<title>cluster_aws</title>
<polygon fill="#fde8d0" stroke="black" points="383,-218.44 383,-404.94 581,-404.94 581,-218.44 383,-218.44"/>
<text xml:space="preserve" text-anchor="middle" x="482" y="-385.74" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">AWS (lambda mode)</text>
</g>
<g id="clust6" class="cluster">
<title>cluster_data</title>
<polygon fill="#f8e8f0" stroke="black" points="8,-102 8,-203.94 263,-203.94 263,-102 8,-102"/>
<text xml:space="preserve" text-anchor="middle" x="135.5" y="-184.74" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">Data Layer</text>
</g>
<g id="clust7" class="cluster">
<title>cluster_storage</title>
<polygon fill="#f0f0f0" stroke="black" points="302,-8 302,-195.97 604,-195.97 604,-8 302,-8"/>
<text xml:space="preserve" text-anchor="middle" x="453" y="-176.77" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">S3 Storage</text>
</g>
<!-- browser -->
<g id="node1" class="node">
<title>browser</title>
<ellipse fill="none" stroke="black" cx="393" cy="-793.49" rx="139.12" ry="30.05"/>
<text xml:space="preserve" text-anchor="middle" x="393" y="-797.44" font-family="Helvetica,sans-Serif" font-size="14.00">Browser</text>
<text xml:space="preserve" text-anchor="middle" x="393" y="-780.19" font-family="Helvetica,sans-Serif" font-size="14.00">mpr.local.ar / mpr.mcrn.ar</text>
</g>
<!-- nginx -->
<g id="node2" class="node">
<title>nginx</title>
<path fill="none" stroke="black" d="M414.5,-705.44C414.5,-705.44 371.5,-705.44 371.5,-705.44 365.5,-705.44 359.5,-699.44 359.5,-693.44 359.5,-693.44 359.5,-674.94 359.5,-674.94 359.5,-668.94 365.5,-662.94 371.5,-662.94 371.5,-662.94 414.5,-662.94 414.5,-662.94 420.5,-662.94 426.5,-668.94 426.5,-674.94 426.5,-674.94 426.5,-693.44 426.5,-693.44 426.5,-699.44 420.5,-705.44 414.5,-705.44"/>
<text xml:space="preserve" text-anchor="middle" x="393" y="-688.14" font-family="Helvetica,sans-Serif" font-size="14.00">nginx</text>
<text xml:space="preserve" text-anchor="middle" x="393" y="-670.89" font-family="Helvetica,sans-Serif" font-size="14.00">port 80</text>
</g>
<!-- browser&#45;&gt;nginx -->
<g id="edge1" class="edge">
<title>browser&#45;&gt;nginx</title>
<path fill="none" stroke="black" d="M393,-763.04C393,-763.04 393,-717.33 393,-717.33"/>
<polygon fill="black" stroke="black" points="396.5,-717.33 393,-707.33 389.5,-717.33 396.5,-717.33"/>
</g>
<!-- django -->
<g id="node3" class="node">
<title>django</title>
<path fill="none" stroke="black" d="M359.5,-604.94C359.5,-604.94 298.5,-604.94 298.5,-604.94 292.5,-604.94 286.5,-598.94 286.5,-592.94 286.5,-592.94 286.5,-557.19 286.5,-557.19 286.5,-551.19 292.5,-545.19 298.5,-545.19 298.5,-545.19 359.5,-545.19 359.5,-545.19 365.5,-545.19 371.5,-551.19 371.5,-557.19 371.5,-557.19 371.5,-592.94 371.5,-592.94 371.5,-598.94 365.5,-604.94 359.5,-604.94"/>
<text xml:space="preserve" text-anchor="middle" x="329" y="-587.64" font-family="Helvetica,sans-Serif" font-size="14.00">Django</text>
<text xml:space="preserve" text-anchor="middle" x="329" y="-570.39" font-family="Helvetica,sans-Serif" font-size="14.00">/admin</text>
<text xml:space="preserve" text-anchor="middle" x="329" y="-553.14" font-family="Helvetica,sans-Serif" font-size="14.00">port 8701</text>
</g>
<!-- nginx&#45;&gt;django -->
<g id="edge2" class="edge">
<title>nginx&#45;&gt;django</title>
<path fill="none" stroke="black" d="M365.5,-662.63C365.5,-662.63 365.5,-616.77 365.5,-616.77"/>
<polygon fill="black" stroke="black" points="369,-616.77 365.5,-606.77 362,-616.77 369,-616.77"/>
<text xml:space="preserve" text-anchor="middle" x="348.62" y="-642.95" font-family="Helvetica,sans-Serif" font-size="10.00">/admin</text>
</g>
<!-- fastapi -->
<g id="node4" class="node">
<title>fastapi</title>
<path fill="none" stroke="black" d="M395.5,-487.19C395.5,-487.19 298.5,-487.19 298.5,-487.19 292.5,-487.19 286.5,-481.19 286.5,-475.19 286.5,-475.19 286.5,-439.44 286.5,-439.44 286.5,-433.44 292.5,-427.44 298.5,-427.44 298.5,-427.44 395.5,-427.44 395.5,-427.44 401.5,-427.44 407.5,-433.44 407.5,-439.44 407.5,-439.44 407.5,-475.19 407.5,-475.19 407.5,-481.19 401.5,-487.19 395.5,-487.19"/>
<text xml:space="preserve" text-anchor="middle" x="347" y="-469.89" font-family="Helvetica,sans-Serif" font-size="14.00">FastAPI</text>
<text xml:space="preserve" text-anchor="middle" x="347" y="-452.64" font-family="Helvetica,sans-Serif" font-size="14.00">/api + /graphql</text>
<text xml:space="preserve" text-anchor="middle" x="347" y="-435.39" font-family="Helvetica,sans-Serif" font-size="14.00">port 8702</text>
</g>
<!-- nginx&#45;&gt;fastapi -->
<g id="edge3" class="edge">
<title>nginx&#45;&gt;fastapi</title>
<path fill="none" stroke="black" d="M383.5,-662.84C383.5,-662.84 383.5,-498.82 383.5,-498.82"/>
<polygon fill="black" stroke="black" points="387,-498.82 383.5,-488.82 380,-498.82 387,-498.82"/>
<text xml:space="preserve" text-anchor="middle" x="399.44" y="-571.33" font-family="Helvetica,sans-Serif" font-size="10.00">/api, /graphql</text>
</g>
<!-- timeline -->
<g id="node5" class="node">
<title>timeline</title>
<path fill="none" stroke="black" d="M512,-604.94C512,-604.94 442,-604.94 442,-604.94 436,-604.94 430,-598.94 430,-592.94 430,-592.94 430,-557.19 430,-557.19 430,-551.19 436,-545.19 442,-545.19 442,-545.19 512,-545.19 512,-545.19 518,-545.19 524,-551.19 524,-557.19 524,-557.19 524,-592.94 524,-592.94 524,-598.94 518,-604.94 512,-604.94"/>
<text xml:space="preserve" text-anchor="middle" x="477" y="-587.64" font-family="Helvetica,sans-Serif" font-size="14.00">Timeline UI</text>
<text xml:space="preserve" text-anchor="middle" x="477" y="-570.39" font-family="Helvetica,sans-Serif" font-size="14.00">/ui</text>
<text xml:space="preserve" text-anchor="middle" x="477" y="-553.14" font-family="Helvetica,sans-Serif" font-size="14.00">port 5173</text>
</g>
<!-- nginx&#45;&gt;timeline -->
<g id="edge4" class="edge">
<title>nginx&#45;&gt;timeline</title>
<path fill="none" stroke="black" d="M422.62,-662.67C422.62,-633.49 422.62,-585 422.62,-585 422.62,-585 423.34,-585 423.34,-585"/>
<polygon fill="black" stroke="black" points="418.22,-588.5 428.22,-585 418.22,-581.5 418.22,-588.5"/>
<text xml:space="preserve" text-anchor="middle" x="416.62" y="-613.98" font-family="Helvetica,sans-Serif" font-size="10.00">/ui</text>
</g>
<!-- minio -->
<g id="node12" class="node">
<title>minio</title>
<polygon fill="none" stroke="black" points="415.5,-160.47 412.5,-164.47 391.5,-164.47 388.5,-160.47 312.5,-160.47 312.5,-117.97 415.5,-117.97 415.5,-160.47"/>
<text xml:space="preserve" text-anchor="middle" x="364" y="-143.17" font-family="Helvetica,sans-Serif" font-size="14.00">MinIO (local)</text>
<text xml:space="preserve" text-anchor="middle" x="364" y="-125.92" font-family="Helvetica,sans-Serif" font-size="14.00">port 9000</text>
</g>
<!-- nginx&#45;&gt;minio -->
<g id="edge5" class="edge">
<title>nginx&#45;&gt;minio</title>
<path fill="none" stroke="black" d="M414.88,-662.68C414.88,-596.12 414.88,-398 414.88,-398 414.88,-398 344.17,-398 344.17,-398 344.17,-398 344.17,-172.35 344.17,-172.35"/>
<polygon fill="black" stroke="black" points="347.67,-172.35 344.17,-162.35 340.67,-172.35 347.67,-172.35"/>
<text xml:space="preserve" text-anchor="middle" x="378.03" y="-401.25" font-family="Helvetica,sans-Serif" font-size="10.00">/media/*</text>
</g>
<!-- grpc_server -->
<g id="node6" class="node">
<title>grpc_server</title>
<path fill="none" stroke="black" d="M246.5,-369.44C246.5,-369.44 167.5,-369.44 167.5,-369.44 161.5,-369.44 155.5,-363.44 155.5,-357.44 155.5,-357.44 155.5,-338.94 155.5,-338.94 155.5,-332.94 161.5,-326.94 167.5,-326.94 167.5,-326.94 246.5,-326.94 246.5,-326.94 252.5,-326.94 258.5,-332.94 258.5,-338.94 258.5,-338.94 258.5,-357.44 258.5,-357.44 258.5,-363.44 252.5,-369.44 246.5,-369.44"/>
<text xml:space="preserve" text-anchor="middle" x="207" y="-352.14" font-family="Helvetica,sans-Serif" font-size="14.00">gRPC Server</text>
<text xml:space="preserve" text-anchor="middle" x="207" y="-334.89" font-family="Helvetica,sans-Serif" font-size="14.00">port 50051</text>
</g>
<!-- fastapi&#45;&gt;grpc_server -->
<g id="edge8" class="edge">
<title>fastapi&#45;&gt;grpc_server</title>
<path fill="none" stroke="black" d="M298.5,-427.06C298.5,-392.59 298.5,-341 298.5,-341 298.5,-341 270.41,-341 270.41,-341"/>
<polygon fill="black" stroke="black" points="270.41,-337.5 260.41,-341 270.41,-344.5 270.41,-337.5"/>
<text xml:space="preserve" text-anchor="middle" x="319.5" y="-385.98" font-family="Helvetica,sans-Serif" font-size="10.00">gRPC</text>
<text xml:space="preserve" text-anchor="middle" x="319.5" y="-373.23" font-family="Helvetica,sans-Serif" font-size="10.00">progress</text>
</g>
<!-- step_functions -->
<g id="node8" class="node">
<title>step_functions</title>
<path fill="none" stroke="black" d="M541.38,-369.44C541.38,-369.44 446.62,-369.44 446.62,-369.44 440.62,-369.44 434.62,-363.44 434.62,-357.44 434.62,-357.44 434.62,-338.94 434.62,-338.94 434.62,-332.94 440.62,-326.94 446.62,-326.94 446.62,-326.94 541.38,-326.94 541.38,-326.94 547.38,-326.94 553.38,-332.94 553.38,-338.94 553.38,-338.94 553.38,-357.44 553.38,-357.44 553.38,-363.44 547.38,-369.44 541.38,-369.44"/>
<text xml:space="preserve" text-anchor="middle" x="494" y="-352.14" font-family="Helvetica,sans-Serif" font-size="14.00">Step Functions</text>
<text xml:space="preserve" text-anchor="middle" x="494" y="-334.89" font-family="Helvetica,sans-Serif" font-size="14.00">state machine</text>
</g>
<!-- fastapi&#45;&gt;step_functions -->
<g id="edge13" class="edge">
<title>fastapi&#45;&gt;step_functions</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M375.83,-427.17C375.83,-396.99 375.83,-355 375.83,-355 375.83,-355 422.71,-355 422.71,-355"/>
<polygon fill="black" stroke="black" points="422.71,-358.5 432.71,-355 422.71,-351.5 422.71,-358.5"/>
<text xml:space="preserve" text-anchor="middle" x="338.33" y="-358.15" font-family="Helvetica,sans-Serif" font-size="10.00">boto3</text>
<text xml:space="preserve" text-anchor="middle" x="338.33" y="-345.4" font-family="Helvetica,sans-Serif" font-size="10.00">start_execution</text>
</g>
<!-- postgres -->
<g id="node10" class="node">
<title>postgres</title>
<path fill="none" stroke="black" d="M111.75,-163.12C111.75,-166.06 90.35,-168.44 64,-168.44 37.65,-168.44 16.25,-166.06 16.25,-163.12 16.25,-163.12 16.25,-115.31 16.25,-115.31 16.25,-112.38 37.65,-110 64,-110 90.35,-110 111.75,-112.38 111.75,-115.31 111.75,-115.31 111.75,-163.12 111.75,-163.12"/>
<path fill="none" stroke="black" d="M111.75,-163.12C111.75,-160.19 90.35,-157.81 64,-157.81 37.65,-157.81 16.25,-160.19 16.25,-163.12"/>
<text xml:space="preserve" text-anchor="middle" x="64" y="-143.17" font-family="Helvetica,sans-Serif" font-size="14.00">PostgreSQL</text>
<text xml:space="preserve" text-anchor="middle" x="64" y="-125.92" font-family="Helvetica,sans-Serif" font-size="14.00">port 5436</text>
</g>
<!-- fastapi&#45;&gt;postgres -->
<g id="edge7" class="edge">
<title>fastapi&#45;&gt;postgres</title>
<path fill="none" stroke="black" d="M286.29,-457C203.13,-457 64,-457 64,-457 64,-457 64,-180.34 64,-180.34"/>
<polygon fill="black" stroke="black" points="67.5,-180.34 64,-170.34 60.5,-180.34 67.5,-180.34"/>
</g>
<!-- timeline&#45;&gt;fastapi -->
<g id="edge6" class="edge">
<title>timeline&#45;&gt;fastapi</title>
<path fill="none" stroke="black" d="M429.59,-565C411.66,-565 395.5,-565 395.5,-565 395.5,-565 395.5,-499.11 395.5,-499.11"/>
<polygon fill="black" stroke="black" points="399,-499.11 395.5,-489.11 392,-499.11 399,-499.11"/>
<text xml:space="preserve" text-anchor="middle" x="406.38" y="-539.6" font-family="Helvetica,sans-Serif" font-size="10.00">REST API</text>
</g>
<!-- celery -->
<g id="node7" class="node">
<title>celery</title>
<path fill="none" stroke="black" d="M255.75,-268.94C255.75,-268.94 166.25,-268.94 166.25,-268.94 160.25,-268.94 154.25,-262.94 154.25,-256.94 154.25,-256.94 154.25,-238.44 154.25,-238.44 154.25,-232.44 160.25,-226.44 166.25,-226.44 166.25,-226.44 255.75,-226.44 255.75,-226.44 261.75,-226.44 267.75,-232.44 267.75,-238.44 267.75,-238.44 267.75,-256.94 267.75,-256.94 267.75,-262.94 261.75,-268.94 255.75,-268.94"/>
<text xml:space="preserve" text-anchor="middle" x="211" y="-251.64" font-family="Helvetica,sans-Serif" font-size="14.00">Celery Worker</text>
<text xml:space="preserve" text-anchor="middle" x="211" y="-234.39" font-family="Helvetica,sans-Serif" font-size="14.00">(local mode)</text>
</g>
<!-- grpc_server&#45;&gt;celery -->
<g id="edge9" class="edge">
<title>grpc_server&#45;&gt;celery</title>
<path fill="none" stroke="black" d="M207,-326.87C207,-326.87 207,-280.83 207,-280.83"/>
<polygon fill="black" stroke="black" points="210.5,-280.83 207,-270.83 203.5,-280.83 210.5,-280.83"/>
<text xml:space="preserve" text-anchor="middle" x="174.38" y="-307.1" font-family="Helvetica,sans-Serif" font-size="10.00">task dispatch</text>
</g>
<!-- celery&#45;&gt;postgres -->
<g id="edge11" class="edge">
<title>celery&#45;&gt;postgres</title>
<path fill="none" stroke="black" d="M161.88,-225.95C161.88,-194.24 161.88,-139 161.88,-139 161.88,-139 123.59,-139 123.59,-139"/>
<polygon fill="black" stroke="black" points="123.59,-135.5 113.59,-139 123.59,-142.5 123.59,-135.5"/>
<text xml:space="preserve" text-anchor="middle" x="133.38" y="-166.59" font-family="Helvetica,sans-Serif" font-size="10.00">job updates</text>
</g>
<!-- redis -->
<g id="node11" class="node">
<title>redis</title>
<path fill="none" stroke="black" d="M254.5,-163.12C254.5,-166.06 235.45,-168.44 212,-168.44 188.55,-168.44 169.5,-166.06 169.5,-163.12 169.5,-163.12 169.5,-115.31 169.5,-115.31 169.5,-112.38 188.55,-110 212,-110 235.45,-110 254.5,-112.38 254.5,-115.31 254.5,-115.31 254.5,-163.12 254.5,-163.12"/>
<path fill="none" stroke="black" d="M254.5,-163.12C254.5,-160.19 235.45,-157.81 212,-157.81 188.55,-157.81 169.5,-160.19 169.5,-163.12"/>
<text xml:space="preserve" text-anchor="middle" x="212" y="-143.17" font-family="Helvetica,sans-Serif" font-size="14.00">Redis</text>
<text xml:space="preserve" text-anchor="middle" x="212" y="-125.92" font-family="Helvetica,sans-Serif" font-size="14.00">port 6381</text>
</g>
<!-- celery&#45;&gt;redis -->
<g id="edge10" class="edge">
<title>celery&#45;&gt;redis</title>
<path fill="none" stroke="black" d="M212,-226C212,-226 212,-180.19 212,-180.19"/>
<polygon fill="black" stroke="black" points="215.5,-180.19 212,-170.19 208.5,-180.19 215.5,-180.19"/>
<text xml:space="preserve" text-anchor="middle" x="197" y="-206.34" font-family="Helvetica,sans-Serif" font-size="10.00">queue</text>
</g>
<!-- celery&#45;&gt;minio -->
<g id="edge12" class="edge">
<title>celery&#45;&gt;minio</title>
<path fill="none" stroke="black" d="M261.12,-225.95C261.12,-194.24 261.12,-139 261.12,-139 261.12,-139 300.75,-139 300.75,-139"/>
<polygon fill="black" stroke="black" points="300.75,-142.5 310.75,-139 300.75,-135.5 300.75,-142.5"/>
<text xml:space="preserve" text-anchor="middle" x="302.75" y="-178.67" font-family="Helvetica,sans-Serif" font-size="10.00">S3 API</text>
<text xml:space="preserve" text-anchor="middle" x="302.75" y="-165.92" font-family="Helvetica,sans-Serif" font-size="10.00">download/upload</text>
</g>
<!-- lambda -->
<g id="node9" class="node">
<title>lambda</title>
<path fill="none" stroke="black" d="M541,-268.94C541,-268.94 423,-268.94 423,-268.94 417,-268.94 411,-262.94 411,-256.94 411,-256.94 411,-238.44 411,-238.44 411,-232.44 417,-226.44 423,-226.44 423,-226.44 541,-226.44 541,-226.44 547,-226.44 553,-232.44 553,-238.44 553,-238.44 553,-256.94 553,-256.94 553,-262.94 547,-268.94 541,-268.94"/>
<text xml:space="preserve" text-anchor="middle" x="482" y="-251.64" font-family="Helvetica,sans-Serif" font-size="14.00">Lambda</text>
<text xml:space="preserve" text-anchor="middle" x="482" y="-234.39" font-family="Helvetica,sans-Serif" font-size="14.00">FFmpeg container</text>
</g>
<!-- step_functions&#45;&gt;lambda -->
<g id="edge14" class="edge">
<title>step_functions&#45;&gt;lambda</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M493.81,-326.87C493.81,-326.87 493.81,-280.83 493.81,-280.83"/>
<polygon fill="black" stroke="black" points="497.31,-280.83 493.81,-270.83 490.31,-280.83 497.31,-280.83"/>
</g>
<!-- lambda&#45;&gt;fastapi -->
<g id="edge16" class="edge">
<title>lambda&#45;&gt;fastapi</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M418.75,-269.3C418.75,-322.78 418.75,-457 418.75,-457 418.75,-457 417.66,-457 417.66,-457"/>
<polygon fill="black" stroke="black" points="419.37,-453.5 409.37,-457 419.37,-460.5 419.37,-453.5"/>
<text xml:space="preserve" text-anchor="middle" x="359.12" y="-379.69" font-family="Helvetica,sans-Serif" font-size="10.00">callback</text>
<text xml:space="preserve" text-anchor="middle" x="359.12" y="-366.94" font-family="Helvetica,sans-Serif" font-size="10.00">POST /jobs/{id}/callback</text>
</g>
<!-- s3 -->
<g id="node13" class="node">
<title>s3</title>
<polygon fill="none" stroke="black" stroke-dasharray="5,2" points="596.25,-157.22 593.25,-161.22 572.25,-161.22 569.25,-157.22 473.75,-157.22 473.75,-121.22 596.25,-121.22 596.25,-157.22"/>
<text xml:space="preserve" text-anchor="middle" x="535" y="-134.54" font-family="Helvetica,sans-Serif" font-size="14.00">AWS S3 (cloud)</text>
</g>
<!-- lambda&#45;&gt;s3 -->
<g id="edge15" class="edge">
<title>lambda&#45;&gt;s3</title>
<path fill="none" stroke="black" stroke-dasharray="5,2" d="M513.38,-226C513.38,-226 513.38,-169.14 513.38,-169.14"/>
<polygon fill="black" stroke="black" points="516.88,-169.14 513.38,-159.14 509.88,-169.14 516.88,-169.14"/>
<text xml:space="preserve" text-anchor="middle" x="471.75" y="-200.82" font-family="Helvetica,sans-Serif" font-size="10.00">download/upload</text>
</g>
<!-- bucket_in -->
<g id="node14" class="node">
<title>bucket_in</title>
<polygon fill="none" stroke="black" points="413.5,-52 310.5,-52 310.5,-16 419.5,-16 419.5,-46 413.5,-52"/>
<polyline fill="none" stroke="black" points="413.5,-52 413.5,-46"/>
<polyline fill="none" stroke="black" points="419.5,-46 413.5,-46"/>
<text xml:space="preserve" text-anchor="middle" x="365" y="-29.32" font-family="Helvetica,sans-Serif" font-size="14.00">mpr&#45;media&#45;in</text>
</g>
<!-- minio&#45;&gt;bucket_in -->
<g id="edge17" class="edge">
<title>minio&#45;&gt;bucket_in</title>
<path fill="none" stroke="black" stroke-dasharray="1,5" d="M364,-117.67C364,-98.43 364,-70.56 364,-52.36"/>
</g>
<!-- bucket_out -->
<g id="node15" class="node">
<title>bucket_out</title>
<polygon fill="none" stroke="black" points="590.38,-52 477.62,-52 477.62,-16 596.38,-16 596.38,-46 590.38,-52"/>
<polyline fill="none" stroke="black" points="590.38,-52 590.38,-46"/>
<polyline fill="none" stroke="black" points="596.38,-46 590.38,-46"/>
<text xml:space="preserve" text-anchor="middle" x="537" y="-29.32" font-family="Helvetica,sans-Serif" font-size="14.00">mpr&#45;media&#45;out</text>
</g>
<!-- minio&#45;&gt;bucket_out -->
<g id="edge18" class="edge">
<title>minio&#45;&gt;bucket_out</title>
<path fill="none" stroke="black" stroke-dasharray="1,5" d="M415.9,-145C428.08,-145 437.58,-145 437.58,-145 437.58,-145 437.58,-40 437.58,-40 437.58,-40 456.11,-40 477.16,-40"/>
</g>
<!-- s3&#45;&gt;bucket_in -->
<g id="edge19" class="edge">
<title>s3&#45;&gt;bucket_in</title>
<path fill="none" stroke="black" stroke-dasharray="1,5" d="M473.27,-133C463.03,-133 455.67,-133 455.67,-133 455.67,-133 455.67,-28 455.67,-28 455.67,-28 438.93,-28 419.83,-28"/>
</g>
<!-- s3&#45;&gt;bucket_out -->
<g id="edge20" class="edge">
<title>s3&#45;&gt;bucket_out</title>
<path fill="none" stroke="black" stroke-dasharray="1,5" d="M536.94,-120.89C536.94,-101.7 536.94,-71.72 536.94,-52.47"/>
</g>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 21 KiB

View File

@@ -0,0 +1,94 @@
// Graphviz DOT source for the local-mode architecture diagram
// (Celery worker + MinIO S3 storage, as opposed to the AWS/lambda-mode diagram).
// Render with graphviz, e.g.: dot -Tsvg <this file> -o local_architecture.svg
digraph local_architecture {
rankdir=TB
node [shape=box, style=rounded, fontname="Helvetica"]
edge [fontname="Helvetica", fontsize=10]
labelloc="t"
label="MPR - Local Architecture (Celery + MinIO)"
fontsize=16
fontname="Helvetica-Bold"
// ortho splines keep edges rectilinear; nodesep/ranksep widen the layout for readability
graph [splines=ortho, nodesep=0.8, ranksep=0.8]
// External
// Dashed gray cluster marks components outside the deployment.
subgraph cluster_external {
label="External"
style=dashed
color=gray
browser [label="Browser\nmpr.local.ar", shape=ellipse]
}
// Nginx reverse proxy
subgraph cluster_proxy {
label="Reverse Proxy"
style=filled
fillcolor="#e8f4f8"
nginx [label="nginx\nport 80"]
}
// Application layer
subgraph cluster_apps {
label="Application Layer"
style=filled
fillcolor="#f0f8e8"
django [label="Django Admin\n/admin\nport 8701"]
fastapi [label="GraphQL API\n/graphql\nport 8702"]
timeline [label="Timeline UI\n/\nport 5173"]
}
// Worker layer
subgraph cluster_workers {
label="Worker Layer"
style=filled
fillcolor="#fff8e8"
grpc_server [label="gRPC Server\nport 50051"]
celery [label="Celery Worker\nFFmpeg transcoding"]
}
// Data layer
// Cylinder shape conventionally denotes a datastore.
subgraph cluster_data {
label="Data Layer"
style=filled
fillcolor="#f8e8f0"
postgres [label="PostgreSQL\nport 5436", shape=cylinder]
redis [label="Redis\nCelery queue\nport 6381", shape=cylinder]
}
// Storage
// Folder = storage service; note = individual bucket inside it.
subgraph cluster_storage {
label="S3 Storage (MinIO)"
style=filled
fillcolor="#f0f0f0"
minio [label="MinIO\nS3-compatible API\nport 9000", shape=folder]
bucket_in [label="mpr-media-in/\ninput videos", shape=note]
bucket_out [label="mpr-media-out/\ntranscoded output", shape=note]
}
// Connections
// xlabel (external label) is used on nginx routes to avoid ortho-spline label overlap.
browser -> nginx [label="HTTP"]
nginx -> django [xlabel="/admin"]
nginx -> fastapi [xlabel="/graphql"]
nginx -> timeline [xlabel="/"]
nginx -> minio [xlabel="/media/*"]
timeline -> fastapi [label="GraphQL"]
django -> postgres
fastapi -> postgres [label="read/write jobs"]
fastapi -> grpc_server [label="gRPC\nprogress updates"]
grpc_server -> celery [label="dispatch tasks"]
celery -> redis [label="task queue"]
celery -> postgres [label="update job status"]
celery -> minio [label="S3 API\ndownload input\nupload output"]
// Dotted, arrowless edges denote bucket containment within MinIO, not data flow.
minio -> bucket_in [style=dotted, arrowhead=none]
minio -> bucket_out [style=dotted, arrowhead=none]
}

View File

@@ -0,0 +1,242 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Generated by graphviz version 14.1.2 (0)
-->
<!-- Title: local_architecture Pages: 1 -->
<svg width="667pt" height="1095pt"
viewBox="0.00 0.00 667.00 1095.00" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 1090.76)">
<title>local_architecture</title>
<polygon fill="white" stroke="none" points="-4,4 -4,-1090.76 663,-1090.76 663,4 -4,4"/>
<text xml:space="preserve" text-anchor="middle" x="329.5" y="-1067.56" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">MPR &#45; Local Architecture (Celery + MinIO)</text>
<g id="clust1" class="cluster">
<title>cluster_external</title>
<polygon fill="none" stroke="gray" stroke-dasharray="5,2" points="270,-947.66 270,-1051.26 424,-1051.26 424,-947.66 270,-947.66"/>
<text xml:space="preserve" text-anchor="middle" x="347" y="-1032.06" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">External</text>
</g>
<g id="clust2" class="cluster">
<title>cluster_proxy</title>
<polygon fill="#e8f4f8" stroke="black" points="274,-819.91 274,-905.91 420,-905.91 420,-819.91 274,-819.91"/>
<text xml:space="preserve" text-anchor="middle" x="347" y="-886.71" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">Reverse Proxy</text>
</g>
<g id="clust3" class="cluster">
<title>cluster_apps</title>
<polygon fill="#f0f8e8" stroke="black" points="19,-556.16 19,-789.91 301,-789.91 301,-556.16 19,-556.16"/>
<text xml:space="preserve" text-anchor="middle" x="160" y="-770.71" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">Application Layer</text>
</g>
<g id="clust4" class="cluster">
<title>cluster_workers</title>
<polygon fill="#fff8e8" stroke="black" points="193,-302.41 193,-501.66 369,-501.66 369,-302.41 193,-302.41"/>
<text xml:space="preserve" text-anchor="middle" x="281" y="-482.46" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">Worker Layer</text>
</g>
<g id="clust5" class="cluster">
<title>cluster_data</title>
<polygon fill="#f8e8f0" stroke="black" points="8,-109.5 8,-235.16 286,-235.16 286,-109.5 8,-109.5"/>
<text xml:space="preserve" text-anchor="middle" x="147" y="-215.96" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">Data Layer</text>
</g>
<g id="clust6" class="cluster">
<title>cluster_storage</title>
<polygon fill="#f0f0f0" stroke="black" points="319,-8 319,-223.95 651,-223.95 651,-8 319,-8"/>
<text xml:space="preserve" text-anchor="middle" x="485" y="-204.75" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">S3 Storage (MinIO)</text>
</g>
<!-- browser -->
<g id="node1" class="node">
<title>browser</title>
<ellipse fill="none" stroke="black" cx="347" cy="-985.71" rx="69.12" ry="30.05"/>
<text xml:space="preserve" text-anchor="middle" x="347" y="-989.66" font-family="Helvetica,sans-Serif" font-size="14.00">Browser</text>
<text xml:space="preserve" text-anchor="middle" x="347" y="-972.41" font-family="Helvetica,sans-Serif" font-size="14.00">mpr.local.ar</text>
</g>
<!-- nginx -->
<g id="node2" class="node">
<title>nginx</title>
<path fill="none" stroke="black" d="M368.5,-870.41C368.5,-870.41 325.5,-870.41 325.5,-870.41 319.5,-870.41 313.5,-864.41 313.5,-858.41 313.5,-858.41 313.5,-839.91 313.5,-839.91 313.5,-833.91 319.5,-827.91 325.5,-827.91 325.5,-827.91 368.5,-827.91 368.5,-827.91 374.5,-827.91 380.5,-833.91 380.5,-839.91 380.5,-839.91 380.5,-858.41 380.5,-858.41 380.5,-864.41 374.5,-870.41 368.5,-870.41"/>
<text xml:space="preserve" text-anchor="middle" x="347" y="-853.11" font-family="Helvetica,sans-Serif" font-size="14.00">nginx</text>
<text xml:space="preserve" text-anchor="middle" x="347" y="-835.86" font-family="Helvetica,sans-Serif" font-size="14.00">port 80</text>
</g>
<!-- browser&#45;&gt;nginx -->
<g id="edge1" class="edge">
<title>browser&#45;&gt;nginx</title>
<path fill="none" stroke="black" d="M347,-955.4C347,-955.4 347,-882.41 347,-882.41"/>
<polygon fill="black" stroke="black" points="350.5,-882.41 347,-872.41 343.5,-882.41 350.5,-882.41"/>
<text xml:space="preserve" text-anchor="middle" x="359.75" y="-917.16" font-family="Helvetica,sans-Serif" font-size="10.00">HTTP</text>
</g>
<!-- django -->
<g id="node3" class="node">
<title>django</title>
<path fill="none" stroke="black" d="M128.75,-754.41C128.75,-754.41 39.25,-754.41 39.25,-754.41 33.25,-754.41 27.25,-748.41 27.25,-742.41 27.25,-742.41 27.25,-706.66 27.25,-706.66 27.25,-700.66 33.25,-694.66 39.25,-694.66 39.25,-694.66 128.75,-694.66 128.75,-694.66 134.75,-694.66 140.75,-700.66 140.75,-706.66 140.75,-706.66 140.75,-742.41 140.75,-742.41 140.75,-748.41 134.75,-754.41 128.75,-754.41"/>
<text xml:space="preserve" text-anchor="middle" x="84" y="-737.11" font-family="Helvetica,sans-Serif" font-size="14.00">Django Admin</text>
<text xml:space="preserve" text-anchor="middle" x="84" y="-719.86" font-family="Helvetica,sans-Serif" font-size="14.00">/admin</text>
<text xml:space="preserve" text-anchor="middle" x="84" y="-702.61" font-family="Helvetica,sans-Serif" font-size="14.00">port 8701</text>
</g>
<!-- nginx&#45;&gt;django -->
<g id="edge2" class="edge">
<title>nginx&#45;&gt;django</title>
<path fill="none" stroke="black" d="M313.16,-856C242.12,-856 84,-856 84,-856 84,-856 84,-766.21 84,-766.21"/>
<polygon fill="black" stroke="black" points="87.5,-766.21 84,-756.21 80.5,-766.21 87.5,-766.21"/>
<text xml:space="preserve" text-anchor="middle" x="136.81" y="-859.25" font-family="Helvetica,sans-Serif" font-size="10.00">/admin</text>
</g>
<!-- fastapi -->
<g id="node4" class="node">
<title>fastapi</title>
<path fill="none" stroke="black" d="M281.25,-623.91C281.25,-623.91 200.75,-623.91 200.75,-623.91 194.75,-623.91 188.75,-617.91 188.75,-611.91 188.75,-611.91 188.75,-576.16 188.75,-576.16 188.75,-570.16 194.75,-564.16 200.75,-564.16 200.75,-564.16 281.25,-564.16 281.25,-564.16 287.25,-564.16 293.25,-570.16 293.25,-576.16 293.25,-576.16 293.25,-611.91 293.25,-611.91 293.25,-617.91 287.25,-623.91 281.25,-623.91"/>
<text xml:space="preserve" text-anchor="middle" x="241" y="-606.61" font-family="Helvetica,sans-Serif" font-size="14.00">GraphQL API</text>
<text xml:space="preserve" text-anchor="middle" x="241" y="-589.36" font-family="Helvetica,sans-Serif" font-size="14.00">/graphql</text>
<text xml:space="preserve" text-anchor="middle" x="241" y="-572.11" font-family="Helvetica,sans-Serif" font-size="14.00">port 8702</text>
</g>
<!-- nginx&#45;&gt;fastapi -->
<g id="edge3" class="edge">
<title>nginx&#45;&gt;fastapi</title>
<path fill="none" stroke="black" d="M337.06,-827.84C337.06,-766.52 337.06,-594 337.06,-594 337.06,-594 305.04,-594 305.04,-594"/>
<polygon fill="black" stroke="black" points="305.04,-590.5 295.04,-594 305.04,-597.5 305.04,-590.5"/>
<text xml:space="preserve" text-anchor="middle" x="317.19" y="-698.16" font-family="Helvetica,sans-Serif" font-size="10.00">/graphql</text>
</g>
<!-- timeline -->
<g id="node5" class="node">
<title>timeline</title>
<path fill="none" stroke="black" d="M281,-754.41C281,-754.41 211,-754.41 211,-754.41 205,-754.41 199,-748.41 199,-742.41 199,-742.41 199,-706.66 199,-706.66 199,-700.66 205,-694.66 211,-694.66 211,-694.66 281,-694.66 281,-694.66 287,-694.66 293,-700.66 293,-706.66 293,-706.66 293,-742.41 293,-742.41 293,-748.41 287,-754.41 281,-754.41"/>
<text xml:space="preserve" text-anchor="middle" x="246" y="-737.11" font-family="Helvetica,sans-Serif" font-size="14.00">Timeline UI</text>
<text xml:space="preserve" text-anchor="middle" x="246" y="-719.86" font-family="Helvetica,sans-Serif" font-size="14.00">/</text>
<text xml:space="preserve" text-anchor="middle" x="246" y="-702.61" font-family="Helvetica,sans-Serif" font-size="14.00">port 5173</text>
</g>
<!-- nginx&#45;&gt;timeline -->
<g id="edge4" class="edge">
<title>nginx&#45;&gt;timeline</title>
<path fill="none" stroke="black" d="M313.34,-842C298.97,-842 285.44,-842 285.44,-842 285.44,-842 285.44,-766.3 285.44,-766.3"/>
<polygon fill="black" stroke="black" points="288.94,-766.3 285.44,-756.3 281.94,-766.3 288.94,-766.3"/>
<text xml:space="preserve" text-anchor="middle" x="283.94" y="-821.35" font-family="Helvetica,sans-Serif" font-size="10.00">/</text>
</g>
<!-- minio -->
<g id="node10" class="node">
<title>minio</title>
<polygon fill="none" stroke="black" points="486.38,-188.45 483.38,-192.45 462.38,-192.45 459.38,-188.45 343.62,-188.45 343.62,-128.7 486.38,-128.7 486.38,-188.45"/>
<text xml:space="preserve" text-anchor="middle" x="415" y="-171.15" font-family="Helvetica,sans-Serif" font-size="14.00">MinIO</text>
<text xml:space="preserve" text-anchor="middle" x="415" y="-153.9" font-family="Helvetica,sans-Serif" font-size="14.00">S3&#45;compatible API</text>
<text xml:space="preserve" text-anchor="middle" x="415" y="-136.65" font-family="Helvetica,sans-Serif" font-size="14.00">port 9000</text>
</g>
<!-- nginx&#45;&gt;minio -->
<g id="edge5" class="edge">
<title>nginx&#45;&gt;minio</title>
<path fill="none" stroke="black" d="M370.56,-827.73C370.56,-827.73 370.56,-200.13 370.56,-200.13"/>
<polygon fill="black" stroke="black" points="374.06,-200.13 370.56,-190.13 367.06,-200.13 374.06,-200.13"/>
<text xml:space="preserve" text-anchor="middle" x="391.56" y="-517.18" font-family="Helvetica,sans-Serif" font-size="10.00">/media/*</text>
</g>
<!-- postgres -->
<g id="node8" class="node">
<title>postgres</title>
<path fill="none" stroke="black" d="M111.75,-182.48C111.75,-185.42 90.35,-187.8 64,-187.8 37.65,-187.8 16.25,-185.42 16.25,-182.48 16.25,-182.48 16.25,-134.67 16.25,-134.67 16.25,-131.74 37.65,-129.36 64,-129.36 90.35,-129.36 111.75,-131.74 111.75,-134.67 111.75,-134.67 111.75,-182.48 111.75,-182.48"/>
<path fill="none" stroke="black" d="M111.75,-182.48C111.75,-179.55 90.35,-177.17 64,-177.17 37.65,-177.17 16.25,-179.55 16.25,-182.48"/>
<text xml:space="preserve" text-anchor="middle" x="64" y="-162.53" font-family="Helvetica,sans-Serif" font-size="14.00">PostgreSQL</text>
<text xml:space="preserve" text-anchor="middle" x="64" y="-145.28" font-family="Helvetica,sans-Serif" font-size="14.00">port 5436</text>
</g>
<!-- django&#45;&gt;postgres -->
<g id="edge7" class="edge">
<title>django&#45;&gt;postgres</title>
<path fill="none" stroke="black" d="M48.38,-694.5C48.38,-694.5 48.38,-199.71 48.38,-199.71"/>
<polygon fill="black" stroke="black" points="51.88,-199.71 48.38,-189.71 44.88,-199.71 51.88,-199.71"/>
</g>
<!-- grpc_server -->
<g id="node6" class="node">
<title>grpc_server</title>
<path fill="none" stroke="black" d="M301.5,-466.16C301.5,-466.16 222.5,-466.16 222.5,-466.16 216.5,-466.16 210.5,-460.16 210.5,-454.16 210.5,-454.16 210.5,-435.66 210.5,-435.66 210.5,-429.66 216.5,-423.66 222.5,-423.66 222.5,-423.66 301.5,-423.66 301.5,-423.66 307.5,-423.66 313.5,-429.66 313.5,-435.66 313.5,-435.66 313.5,-454.16 313.5,-454.16 313.5,-460.16 307.5,-466.16 301.5,-466.16"/>
<text xml:space="preserve" text-anchor="middle" x="262" y="-448.86" font-family="Helvetica,sans-Serif" font-size="14.00">gRPC Server</text>
<text xml:space="preserve" text-anchor="middle" x="262" y="-431.61" font-family="Helvetica,sans-Serif" font-size="14.00">port 50051</text>
</g>
<!-- fastapi&#45;&gt;grpc_server -->
<g id="edge9" class="edge">
<title>fastapi&#45;&gt;grpc_server</title>
<path fill="none" stroke="black" d="M251.88,-563.85C251.88,-563.85 251.88,-477.88 251.88,-477.88"/>
<polygon fill="black" stroke="black" points="255.38,-477.88 251.88,-467.88 248.38,-477.88 255.38,-477.88"/>
<text xml:space="preserve" text-anchor="middle" x="292" y="-525.66" font-family="Helvetica,sans-Serif" font-size="10.00">gRPC</text>
<text xml:space="preserve" text-anchor="middle" x="292" y="-512.91" font-family="Helvetica,sans-Serif" font-size="10.00">progress updates</text>
</g>
<!-- fastapi&#45;&gt;postgres -->
<g id="edge8" class="edge">
<title>fastapi&#45;&gt;postgres</title>
<path fill="none" stroke="black" d="M188.61,-594C138.18,-594 69.5,-594 69.5,-594 69.5,-594 69.5,-199.68 69.5,-199.68"/>
<polygon fill="black" stroke="black" points="73,-199.68 69.5,-189.68 66,-199.68 73,-199.68"/>
<text xml:space="preserve" text-anchor="middle" x="82.38" y="-385.16" font-family="Helvetica,sans-Serif" font-size="10.00">read/write jobs</text>
</g>
<!-- timeline&#45;&gt;fastapi -->
<g id="edge6" class="edge">
<title>timeline&#45;&gt;fastapi</title>
<path fill="none" stroke="black" d="M246,-694.26C246,-694.26 246,-635.65 246,-635.65"/>
<polygon fill="black" stroke="black" points="249.5,-635.65 246,-625.65 242.5,-635.65 249.5,-635.65"/>
<text xml:space="preserve" text-anchor="middle" x="264" y="-656.16" font-family="Helvetica,sans-Serif" font-size="10.00">GraphQL</text>
</g>
<!-- celery -->
<g id="node7" class="node">
<title>celery</title>
<path fill="none" stroke="black" d="M348.62,-352.91C348.62,-352.91 213.38,-352.91 213.38,-352.91 207.38,-352.91 201.38,-346.91 201.38,-340.91 201.38,-340.91 201.38,-322.41 201.38,-322.41 201.38,-316.41 207.38,-310.41 213.38,-310.41 213.38,-310.41 348.62,-310.41 348.62,-310.41 354.62,-310.41 360.62,-316.41 360.62,-322.41 360.62,-322.41 360.62,-340.91 360.62,-340.91 360.62,-346.91 354.62,-352.91 348.62,-352.91"/>
<text xml:space="preserve" text-anchor="middle" x="281" y="-335.61" font-family="Helvetica,sans-Serif" font-size="14.00">Celery Worker</text>
<text xml:space="preserve" text-anchor="middle" x="281" y="-318.36" font-family="Helvetica,sans-Serif" font-size="14.00">FFmpeg transcoding</text>
</g>
<!-- grpc_server&#45;&gt;celery -->
<g id="edge10" class="edge">
<title>grpc_server&#45;&gt;celery</title>
<path fill="none" stroke="black" d="M262,-423.34C262,-423.34 262,-364.66 262,-364.66"/>
<polygon fill="black" stroke="black" points="265.5,-364.66 262,-354.66 258.5,-364.66 265.5,-364.66"/>
<text xml:space="preserve" text-anchor="middle" x="305.25" y="-385.16" font-family="Helvetica,sans-Serif" font-size="10.00">dispatch tasks</text>
</g>
<!-- celery&#45;&gt;postgres -->
<g id="edge12" class="edge">
<title>celery&#45;&gt;postgres</title>
<path fill="none" stroke="black" d="M201.09,-332C148.99,-332 90.62,-332 90.62,-332 90.62,-332 90.62,-199.51 90.62,-199.51"/>
<polygon fill="black" stroke="black" points="94.13,-199.51 90.63,-189.51 87.13,-199.51 94.13,-199.51"/>
<text xml:space="preserve" text-anchor="middle" x="181.38" y="-259.16" font-family="Helvetica,sans-Serif" font-size="10.00">update job status</text>
</g>
<!-- redis -->
<g id="node9" class="node">
<title>redis</title>
<path fill="none" stroke="black" d="M278.12,-192.19C278.12,-196.31 253.87,-199.66 224,-199.66 194.13,-199.66 169.88,-196.31 169.88,-192.19 169.88,-192.19 169.88,-124.97 169.88,-124.97 169.88,-120.85 194.13,-117.5 224,-117.5 253.87,-117.5 278.12,-120.85 278.12,-124.97 278.12,-124.97 278.12,-192.19 278.12,-192.19"/>
<path fill="none" stroke="black" d="M278.12,-192.19C278.12,-188.07 253.87,-184.72 224,-184.72 194.13,-184.72 169.88,-188.07 169.88,-192.19"/>
<text xml:space="preserve" text-anchor="middle" x="224" y="-171.15" font-family="Helvetica,sans-Serif" font-size="14.00">Redis</text>
<text xml:space="preserve" text-anchor="middle" x="224" y="-153.9" font-family="Helvetica,sans-Serif" font-size="14.00">Celery queue</text>
<text xml:space="preserve" text-anchor="middle" x="224" y="-136.65" font-family="Helvetica,sans-Serif" font-size="14.00">port 6381</text>
</g>
<!-- celery&#45;&gt;redis -->
<g id="edge11" class="edge">
<title>celery&#45;&gt;redis</title>
<path fill="none" stroke="black" d="M239.75,-310.09C239.75,-310.09 239.75,-211.49 239.75,-211.49"/>
<polygon fill="black" stroke="black" points="243.25,-211.49 239.75,-201.49 236.25,-211.49 243.25,-211.49"/>
<text xml:space="preserve" text-anchor="middle" x="314" y="-259.16" font-family="Helvetica,sans-Serif" font-size="10.00">task queue</text>
</g>
<!-- celery&#45;&gt;minio -->
<g id="edge13" class="edge">
<title>celery&#45;&gt;minio</title>
<path fill="none" stroke="black" d="M352.12,-310.09C352.12,-310.09 352.12,-200.39 352.12,-200.39"/>
<polygon fill="black" stroke="black" points="355.63,-200.39 352.13,-190.39 348.63,-200.39 355.63,-200.39"/>
<text xml:space="preserve" text-anchor="middle" x="441.5" y="-271.91" font-family="Helvetica,sans-Serif" font-size="10.00">S3 API</text>
<text xml:space="preserve" text-anchor="middle" x="441.5" y="-259.16" font-family="Helvetica,sans-Serif" font-size="10.00">download input</text>
<text xml:space="preserve" text-anchor="middle" x="441.5" y="-246.41" font-family="Helvetica,sans-Serif" font-size="10.00">upload output</text>
</g>
<!-- bucket_in -->
<g id="node11" class="node">
<title>bucket_in</title>
<polygon fill="none" stroke="black" points="434.75,-58.5 327.25,-58.5 327.25,-16 440.75,-16 440.75,-52.5 434.75,-58.5"/>
<polyline fill="none" stroke="black" points="434.75,-58.5 434.75,-52.5"/>
<polyline fill="none" stroke="black" points="440.75,-52.5 434.75,-52.5"/>
<text xml:space="preserve" text-anchor="middle" x="384" y="-41.2" font-family="Helvetica,sans-Serif" font-size="14.00">mpr&#45;media&#45;in/</text>
<text xml:space="preserve" text-anchor="middle" x="384" y="-23.95" font-family="Helvetica,sans-Serif" font-size="14.00">input videos</text>
</g>
<!-- minio&#45;&gt;bucket_in -->
<g id="edge14" class="edge">
<title>minio&#45;&gt;bucket_in</title>
<path fill="none" stroke="black" stroke-dasharray="1,5" d="M392.19,-128.27C392.19,-106.66 392.19,-78.11 392.19,-58.79"/>
</g>
<!-- bucket_out -->
<g id="node12" class="node">
<title>bucket_out</title>
<polygon fill="none" stroke="black" points="637.12,-58.5 498.88,-58.5 498.88,-16 643.12,-16 643.12,-52.5 637.12,-58.5"/>
<polyline fill="none" stroke="black" points="637.12,-58.5 637.12,-52.5"/>
<polyline fill="none" stroke="black" points="643.12,-52.5 637.12,-52.5"/>
<text xml:space="preserve" text-anchor="middle" x="571" y="-41.2" font-family="Helvetica,sans-Serif" font-size="14.00">mpr&#45;media&#45;out/</text>
<text xml:space="preserve" text-anchor="middle" x="571" y="-23.95" font-family="Helvetica,sans-Serif" font-size="14.00">transcoded output</text>
</g>
<!-- minio&#45;&gt;bucket_out -->
<g id="edge15" class="edge">
<title>minio&#45;&gt;bucket_out</title>
<path fill="none" stroke="black" stroke-dasharray="1,5" d="M463.56,-128.21C463.56,-92.2 463.56,-37 463.56,-37 463.56,-37 479.15,-37 498.44,-37"/>
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 18 KiB

View File

@@ -0,0 +1,85 @@
// MPR — AWS deployment diagram. Transcoding is offloaded to an AWS Lambda
// container orchestrated by Step Functions; nginx, the web apps, and
// PostgreSQL remain local (compare with the local Celery/MinIO diagram).
digraph aws_architecture {
// Global defaults: top-to-bottom ranks, rounded boxes, small edge labels.
rankdir=TB
node [shape=box, style=rounded, fontname="Helvetica"]
edge [fontname="Helvetica", fontsize=10]
// Diagram title, rendered at the top of the drawing.
labelloc="t"
label="MPR - AWS Architecture (Lambda + Step Functions)"
fontsize=16
fontname="Helvetica-Bold"
// Orthogonal edge routing with extra node/rank spacing for readability.
graph [splines=ortho, nodesep=0.8, ranksep=0.8]
// External
subgraph cluster_external {
label="External"
style=dashed
color=gray
browser [label="Browser\nmpr.mcrn.ar", shape=ellipse]
}
// Nginx reverse proxy
subgraph cluster_proxy {
label="Reverse Proxy"
style=filled
fillcolor="#e8f4f8"
nginx [label="nginx\nport 80"]
}
// Application layer
subgraph cluster_apps {
label="Application Layer"
style=filled
fillcolor="#f0f8e8"
django [label="Django Admin\n/admin\nport 8701"]
fastapi [label="GraphQL API\n/graphql\nport 8702"]
timeline [label="Timeline UI\n/\nport 5173"]
}
// Data layer (still local)
subgraph cluster_data {
label="Data Layer"
style=filled
fillcolor="#f8e8f0"
postgres [label="PostgreSQL\nport 5436", shape=cylinder]
}
// AWS layer
subgraph cluster_aws {
label="AWS Cloud"
style=filled
fillcolor="#fde8d0"
step_functions [label="Step Functions\nOrchestration\nstate machine"]
lambda [label="Lambda Function\nFFmpeg container\ntranscoding"]
s3 [label="S3 Buckets", shape=folder]
bucket_in [label="mpr-media-in/\ninput videos", shape=note]
bucket_out [label="mpr-media-out/\ntranscoded output", shape=note]
}
// Connections
// NOTE(review): Graphviz layout is sensitive to statement order — keep
// edges in this order when editing, or the rendered SVG will shift.
browser -> nginx [label="HTTP"]
// xlabel = external edge label, placed beside the edge rather than on it.
nginx -> django [xlabel="/admin"]
nginx -> fastapi [xlabel="/graphql"]
nginx -> timeline [xlabel="/"]
timeline -> fastapi [label="GraphQL"]
django -> postgres
fastapi -> postgres [label="read/write jobs"]
// Job submission: the API starts a Step Functions execution via boto3
// and receives an execution_arn back (per the edge label).
fastapi -> step_functions [label="boto3\nstart_execution()\nexecution_arn"]
step_functions -> lambda [label="invoke with\njob parameters"]
lambda -> s3 [label="download input\nupload output"]
// Completion path: Lambda reports status over HTTP; the API then writes
// the update to PostgreSQL (second fastapi -> postgres edge below).
lambda -> fastapi [label="POST /jobs/{id}/callback\nupdate status"]
fastapi -> postgres [label="callback updates\njob status"]
// Dotted, arrowless edges: buckets are contents of S3, not data flows.
s3 -> bucket_in [style=dotted, arrowhead=none]
s3 -> bucket_out [style=dotted, arrowhead=none]
}

View File

@@ -0,0 +1,224 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Generated by graphviz version 14.1.2 (0)
-->
<!-- Title: aws_architecture Pages: 1 -->
<svg width="639pt" height="1081pt"
viewBox="0.00 0.00 639.00 1081.00" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 1077.35)">
<title>aws_architecture</title>
<polygon fill="white" stroke="none" points="-4,4 -4,-1077.35 635.25,-1077.35 635.25,4 -4,4"/>
<text xml:space="preserve" text-anchor="middle" x="315.62" y="-1054.15" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">MPR &#45; AWS Architecture (Lambda + Step Functions)</text>
<g id="clust1" class="cluster">
<title>cluster_external</title>
<polygon fill="none" stroke="gray" stroke-dasharray="5,2" points="155,-934.25 155,-1037.85 315,-1037.85 315,-934.25 155,-934.25"/>
<text xml:space="preserve" text-anchor="middle" x="235" y="-1018.65" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">External</text>
</g>
<g id="clust2" class="cluster">
<title>cluster_proxy</title>
<polygon fill="#e8f4f8" stroke="black" points="162,-806.5 162,-892.5 308,-892.5 308,-806.5 162,-806.5"/>
<text xml:space="preserve" text-anchor="middle" x="235" y="-873.3" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">Reverse Proxy</text>
</g>
<g id="clust3" class="cluster">
<title>cluster_apps</title>
<polygon fill="#f0f8e8" stroke="black" points="8,-542.75 8,-776.5 290,-776.5 290,-542.75 8,-542.75"/>
<text xml:space="preserve" text-anchor="middle" x="149" y="-757.3" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">Application Layer</text>
</g>
<g id="clust4" class="cluster">
<title>cluster_data</title>
<polygon fill="#f8e8f0" stroke="black" points="27,-372.91 27,-474.84 141,-474.84 141,-372.91 27,-372.91"/>
<text xml:space="preserve" text-anchor="middle" x="84" y="-455.64" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">Data Layer</text>
</g>
<g id="clust5" class="cluster">
<title>cluster_aws</title>
<polygon fill="#fde8d0" stroke="black" points="264,-8 264,-475.5 596,-475.5 596,-8 264,-8"/>
<text xml:space="preserve" text-anchor="middle" x="430" y="-456.3" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">AWS Cloud</text>
</g>
<!-- browser -->
<g id="node1" class="node">
<title>browser</title>
<ellipse fill="none" stroke="black" cx="235" cy="-972.3" rx="71.77" ry="30.05"/>
<text xml:space="preserve" text-anchor="middle" x="235" y="-976.25" font-family="Helvetica,sans-Serif" font-size="14.00">Browser</text>
<text xml:space="preserve" text-anchor="middle" x="235" y="-959" font-family="Helvetica,sans-Serif" font-size="14.00">mpr.mcrn.ar</text>
</g>
<!-- nginx -->
<g id="node2" class="node">
<title>nginx</title>
<path fill="none" stroke="black" d="M256.5,-857C256.5,-857 213.5,-857 213.5,-857 207.5,-857 201.5,-851 201.5,-845 201.5,-845 201.5,-826.5 201.5,-826.5 201.5,-820.5 207.5,-814.5 213.5,-814.5 213.5,-814.5 256.5,-814.5 256.5,-814.5 262.5,-814.5 268.5,-820.5 268.5,-826.5 268.5,-826.5 268.5,-845 268.5,-845 268.5,-851 262.5,-857 256.5,-857"/>
<text xml:space="preserve" text-anchor="middle" x="235" y="-839.7" font-family="Helvetica,sans-Serif" font-size="14.00">nginx</text>
<text xml:space="preserve" text-anchor="middle" x="235" y="-822.45" font-family="Helvetica,sans-Serif" font-size="14.00">port 80</text>
</g>
<!-- browser&#45;&gt;nginx -->
<g id="edge1" class="edge">
<title>browser&#45;&gt;nginx</title>
<path fill="none" stroke="black" d="M235,-942C235,-942 235,-869 235,-869"/>
<polygon fill="black" stroke="black" points="238.5,-869 235,-859 231.5,-869 238.5,-869"/>
<text xml:space="preserve" text-anchor="middle" x="247.75" y="-903.75" font-family="Helvetica,sans-Serif" font-size="10.00">HTTP</text>
</g>
<!-- django -->
<g id="node3" class="node">
<title>django</title>
<path fill="none" stroke="black" d="M117.75,-741C117.75,-741 28.25,-741 28.25,-741 22.25,-741 16.25,-735 16.25,-729 16.25,-729 16.25,-693.25 16.25,-693.25 16.25,-687.25 22.25,-681.25 28.25,-681.25 28.25,-681.25 117.75,-681.25 117.75,-681.25 123.75,-681.25 129.75,-687.25 129.75,-693.25 129.75,-693.25 129.75,-729 129.75,-729 129.75,-735 123.75,-741 117.75,-741"/>
<text xml:space="preserve" text-anchor="middle" x="73" y="-723.7" font-family="Helvetica,sans-Serif" font-size="14.00">Django Admin</text>
<text xml:space="preserve" text-anchor="middle" x="73" y="-706.45" font-family="Helvetica,sans-Serif" font-size="14.00">/admin</text>
<text xml:space="preserve" text-anchor="middle" x="73" y="-689.2" font-family="Helvetica,sans-Serif" font-size="14.00">port 8701</text>
</g>
<!-- nginx&#45;&gt;django -->
<g id="edge2" class="edge">
<title>nginx&#45;&gt;django</title>
<path fill="none" stroke="black" d="M201.04,-843C153.54,-843 73,-843 73,-843 73,-843 73,-752.89 73,-752.89"/>
<polygon fill="black" stroke="black" points="76.5,-752.89 73,-742.89 69.5,-752.89 76.5,-752.89"/>
<text xml:space="preserve" text-anchor="middle" x="75.09" y="-846.25" font-family="Helvetica,sans-Serif" font-size="10.00">/admin</text>
</g>
<!-- fastapi -->
<g id="node4" class="node">
<title>fastapi</title>
<path fill="none" stroke="black" d="M270.25,-610.5C270.25,-610.5 189.75,-610.5 189.75,-610.5 183.75,-610.5 177.75,-604.5 177.75,-598.5 177.75,-598.5 177.75,-562.75 177.75,-562.75 177.75,-556.75 183.75,-550.75 189.75,-550.75 189.75,-550.75 270.25,-550.75 270.25,-550.75 276.25,-550.75 282.25,-556.75 282.25,-562.75 282.25,-562.75 282.25,-598.5 282.25,-598.5 282.25,-604.5 276.25,-610.5 270.25,-610.5"/>
<text xml:space="preserve" text-anchor="middle" x="230" y="-593.2" font-family="Helvetica,sans-Serif" font-size="14.00">GraphQL API</text>
<text xml:space="preserve" text-anchor="middle" x="230" y="-575.95" font-family="Helvetica,sans-Serif" font-size="14.00">/graphql</text>
<text xml:space="preserve" text-anchor="middle" x="230" y="-558.7" font-family="Helvetica,sans-Serif" font-size="14.00">port 8702</text>
</g>
<!-- nginx&#45;&gt;fastapi -->
<g id="edge3" class="edge">
<title>nginx&#45;&gt;fastapi</title>
<path fill="none" stroke="black" d="M201.11,-829C191.15,-829 182.88,-829 182.88,-829 182.88,-829 182.88,-622.1 182.88,-622.1"/>
<polygon fill="black" stroke="black" points="186.38,-622.1 182.88,-612.1 179.38,-622.1 186.38,-622.1"/>
<text xml:space="preserve" text-anchor="middle" x="163" y="-737.91" font-family="Helvetica,sans-Serif" font-size="10.00">/graphql</text>
</g>
<!-- timeline -->
<g id="node5" class="node">
<title>timeline</title>
<path fill="none" stroke="black" d="M270,-741C270,-741 200,-741 200,-741 194,-741 188,-735 188,-729 188,-729 188,-693.25 188,-693.25 188,-687.25 194,-681.25 200,-681.25 200,-681.25 270,-681.25 270,-681.25 276,-681.25 282,-687.25 282,-693.25 282,-693.25 282,-729 282,-729 282,-735 276,-741 270,-741"/>
<text xml:space="preserve" text-anchor="middle" x="235" y="-723.7" font-family="Helvetica,sans-Serif" font-size="14.00">Timeline UI</text>
<text xml:space="preserve" text-anchor="middle" x="235" y="-706.45" font-family="Helvetica,sans-Serif" font-size="14.00">/</text>
<text xml:space="preserve" text-anchor="middle" x="235" y="-689.2" font-family="Helvetica,sans-Serif" font-size="14.00">port 5173</text>
</g>
<!-- nginx&#45;&gt;timeline -->
<g id="edge4" class="edge">
<title>nginx&#45;&gt;timeline</title>
<path fill="none" stroke="black" d="M235,-814.04C235,-814.04 235,-752.97 235,-752.97"/>
<polygon fill="black" stroke="black" points="238.5,-752.97 235,-742.97 231.5,-752.97 238.5,-752.97"/>
<text xml:space="preserve" text-anchor="middle" x="233.5" y="-786.75" font-family="Helvetica,sans-Serif" font-size="10.00">/</text>
</g>
<!-- postgres -->
<g id="node6" class="node">
<title>postgres</title>
<path fill="none" stroke="black" d="M131.75,-434.03C131.75,-436.96 110.35,-439.34 84,-439.34 57.65,-439.34 36.25,-436.96 36.25,-434.03 36.25,-434.03 36.25,-386.22 36.25,-386.22 36.25,-383.29 57.65,-380.91 84,-380.91 110.35,-380.91 131.75,-383.29 131.75,-386.22 131.75,-386.22 131.75,-434.03 131.75,-434.03"/>
<path fill="none" stroke="black" d="M131.75,-434.03C131.75,-431.1 110.35,-428.72 84,-428.72 57.65,-428.72 36.25,-431.1 36.25,-434.03"/>
<text xml:space="preserve" text-anchor="middle" x="84" y="-414.07" font-family="Helvetica,sans-Serif" font-size="14.00">PostgreSQL</text>
<text xml:space="preserve" text-anchor="middle" x="84" y="-396.82" font-family="Helvetica,sans-Serif" font-size="14.00">port 5436</text>
</g>
<!-- django&#45;&gt;postgres -->
<g id="edge6" class="edge">
<title>django&#45;&gt;postgres</title>
<path fill="none" stroke="black" d="M83,-680.89C83,-680.89 83,-450.97 83,-450.97"/>
<polygon fill="black" stroke="black" points="86.5,-450.97 83,-440.97 79.5,-450.97 86.5,-450.97"/>
</g>
<!-- fastapi&#45;&gt;postgres -->
<g id="edge7" class="edge">
<title>fastapi&#45;&gt;postgres</title>
<path fill="none" stroke="black" d="M201.38,-550.41C201.38,-503.88 201.38,-420 201.38,-420 201.38,-420 143.59,-420 143.59,-420"/>
<polygon fill="black" stroke="black" points="143.59,-416.5 133.59,-420 143.59,-423.5 143.59,-416.5"/>
<text xml:space="preserve" text-anchor="middle" x="266.38" y="-499.5" font-family="Helvetica,sans-Serif" font-size="10.00">read/write jobs</text>
</g>
<!-- fastapi&#45;&gt;postgres -->
<g id="edge12" class="edge">
<title>fastapi&#45;&gt;postgres</title>
<path fill="none" stroke="black" d="M225,-550.39C225,-498.97 225,-400 225,-400 225,-400 143.64,-400 143.64,-400"/>
<polygon fill="black" stroke="black" points="143.64,-396.5 133.64,-400 143.64,-403.5 143.64,-396.5"/>
<text xml:space="preserve" text-anchor="middle" x="125.25" y="-505.88" font-family="Helvetica,sans-Serif" font-size="10.00">callback updates</text>
<text xml:space="preserve" text-anchor="middle" x="125.25" y="-493.12" font-family="Helvetica,sans-Serif" font-size="10.00">job status</text>
</g>
<!-- step_functions -->
<g id="node7" class="node">
<title>step_functions</title>
<path fill="none" stroke="black" d="M384.38,-440C384.38,-440 289.62,-440 289.62,-440 283.62,-440 277.62,-434 277.62,-428 277.62,-428 277.62,-392.25 277.62,-392.25 277.62,-386.25 283.62,-380.25 289.62,-380.25 289.62,-380.25 384.38,-380.25 384.38,-380.25 390.38,-380.25 396.38,-386.25 396.38,-392.25 396.38,-392.25 396.38,-428 396.38,-428 396.38,-434 390.38,-440 384.38,-440"/>
<text xml:space="preserve" text-anchor="middle" x="337" y="-422.7" font-family="Helvetica,sans-Serif" font-size="14.00">Step Functions</text>
<text xml:space="preserve" text-anchor="middle" x="337" y="-405.45" font-family="Helvetica,sans-Serif" font-size="14.00">Orchestration</text>
<text xml:space="preserve" text-anchor="middle" x="337" y="-388.2" font-family="Helvetica,sans-Serif" font-size="14.00">state machine</text>
</g>
<!-- fastapi&#45;&gt;step_functions -->
<g id="edge8" class="edge">
<title>fastapi&#45;&gt;step_functions</title>
<path fill="none" stroke="black" d="M282.68,-581C289.69,-581 294.51,-581 294.51,-581 294.51,-581 294.51,-451.79 294.51,-451.79"/>
<polygon fill="black" stroke="black" points="298.01,-451.79 294.51,-441.79 291.01,-451.79 298.01,-451.79"/>
<text xml:space="preserve" text-anchor="middle" x="407.25" y="-512.25" font-family="Helvetica,sans-Serif" font-size="10.00">boto3</text>
<text xml:space="preserve" text-anchor="middle" x="407.25" y="-499.5" font-family="Helvetica,sans-Serif" font-size="10.00">start_execution()</text>
<text xml:space="preserve" text-anchor="middle" x="407.25" y="-486.75" font-family="Helvetica,sans-Serif" font-size="10.00">execution_arn</text>
</g>
<!-- timeline&#45;&gt;fastapi -->
<g id="edge5" class="edge">
<title>timeline&#45;&gt;fastapi</title>
<path fill="none" stroke="black" d="M235,-680.86C235,-680.86 235,-622.24 235,-622.24"/>
<polygon fill="black" stroke="black" points="238.5,-622.24 235,-612.24 231.5,-622.24 238.5,-622.24"/>
<text xml:space="preserve" text-anchor="middle" x="253" y="-642.75" font-family="Helvetica,sans-Serif" font-size="10.00">GraphQL</text>
</g>
<!-- lambda -->
<g id="node8" class="node">
<title>lambda</title>
<path fill="none" stroke="black" d="M486,-296.75C486,-296.75 368,-296.75 368,-296.75 362,-296.75 356,-290.75 356,-284.75 356,-284.75 356,-249 356,-249 356,-243 362,-237 368,-237 368,-237 486,-237 486,-237 492,-237 498,-243 498,-249 498,-249 498,-284.75 498,-284.75 498,-290.75 492,-296.75 486,-296.75"/>
<text xml:space="preserve" text-anchor="middle" x="427" y="-279.45" font-family="Helvetica,sans-Serif" font-size="14.00">Lambda Function</text>
<text xml:space="preserve" text-anchor="middle" x="427" y="-262.2" font-family="Helvetica,sans-Serif" font-size="14.00">FFmpeg container</text>
<text xml:space="preserve" text-anchor="middle" x="427" y="-244.95" font-family="Helvetica,sans-Serif" font-size="14.00">transcoding</text>
</g>
<!-- step_functions&#45;&gt;lambda -->
<g id="edge9" class="edge">
<title>step_functions&#45;&gt;lambda</title>
<path fill="none" stroke="black" d="M376.19,-380.1C376.19,-380.1 376.19,-308.38 376.19,-308.38"/>
<polygon fill="black" stroke="black" points="379.69,-308.38 376.19,-298.38 372.69,-308.38 379.69,-308.38"/>
<text xml:space="preserve" text-anchor="middle" x="407.12" y="-341.75" font-family="Helvetica,sans-Serif" font-size="10.00">invoke with</text>
<text xml:space="preserve" text-anchor="middle" x="407.12" y="-329" font-family="Helvetica,sans-Serif" font-size="10.00">job parameters</text>
</g>
<!-- lambda&#45;&gt;fastapi -->
<g id="edge11" class="edge">
<title>lambda&#45;&gt;fastapi</title>
<path fill="none" stroke="black" d="M355.73,-267C306.05,-267 248.62,-267 248.62,-267 248.62,-267 248.62,-538.75 248.62,-538.75"/>
<polygon fill="black" stroke="black" points="245.13,-538.75 248.63,-548.75 252.13,-538.75 245.13,-538.75"/>
<text xml:space="preserve" text-anchor="middle" x="571.62" y="-413.38" font-family="Helvetica,sans-Serif" font-size="10.00">POST /jobs/{id}/callback</text>
<text xml:space="preserve" text-anchor="middle" x="571.62" y="-400.62" font-family="Helvetica,sans-Serif" font-size="10.00">update status</text>
</g>
<!-- s3 -->
<g id="node9" class="node">
<title>s3</title>
<polygon fill="none" stroke="black" points="473.62,-153.5 470.62,-157.5 449.62,-157.5 446.62,-153.5 380.38,-153.5 380.38,-117.5 473.62,-117.5 473.62,-153.5"/>
<text xml:space="preserve" text-anchor="middle" x="427" y="-130.82" font-family="Helvetica,sans-Serif" font-size="14.00">S3 Buckets</text>
</g>
<!-- lambda&#45;&gt;s3 -->
<g id="edge10" class="edge">
<title>lambda&#45;&gt;s3</title>
<path fill="none" stroke="black" d="M427,-236.73C427,-236.73 427,-165.27 427,-165.27"/>
<polygon fill="black" stroke="black" points="430.5,-165.27 427,-155.27 423.5,-165.27 430.5,-165.27"/>
<text xml:space="preserve" text-anchor="middle" x="464.5" y="-198.5" font-family="Helvetica,sans-Serif" font-size="10.00">download input</text>
<text xml:space="preserve" text-anchor="middle" x="464.5" y="-185.75" font-family="Helvetica,sans-Serif" font-size="10.00">upload output</text>
</g>
<!-- bucket_in -->
<g id="node10" class="node">
<title>bucket_in</title>
<polygon fill="none" stroke="black" points="379.75,-58.5 272.25,-58.5 272.25,-16 385.75,-16 385.75,-52.5 379.75,-58.5"/>
<polyline fill="none" stroke="black" points="379.75,-58.5 379.75,-52.5"/>
<polyline fill="none" stroke="black" points="385.75,-52.5 379.75,-52.5"/>
<text xml:space="preserve" text-anchor="middle" x="329" y="-41.2" font-family="Helvetica,sans-Serif" font-size="14.00">mpr&#45;media&#45;in/</text>
<text xml:space="preserve" text-anchor="middle" x="329" y="-23.95" font-family="Helvetica,sans-Serif" font-size="14.00">input videos</text>
</g>
<!-- s3&#45;&gt;bucket_in -->
<g id="edge13" class="edge">
<title>s3&#45;&gt;bucket_in</title>
<path fill="none" stroke="black" stroke-dasharray="1,5" d="M380.09,-136C373.1,-136 368.19,-136 368.19,-136 368.19,-136 368.19,-87.72 368.19,-58.68"/>
</g>
<!-- bucket_out -->
<g id="node11" class="node">
<title>bucket_out</title>
<polygon fill="none" stroke="black" points="582.12,-58.5 443.88,-58.5 443.88,-16 588.12,-16 588.12,-52.5 582.12,-58.5"/>
<polyline fill="none" stroke="black" points="582.12,-58.5 582.12,-52.5"/>
<polyline fill="none" stroke="black" points="588.12,-52.5 582.12,-52.5"/>
<text xml:space="preserve" text-anchor="middle" x="516" y="-41.2" font-family="Helvetica,sans-Serif" font-size="14.00">mpr&#45;media&#45;out/</text>
<text xml:space="preserve" text-anchor="middle" x="516" y="-23.95" font-family="Helvetica,sans-Serif" font-size="14.00">transcoded output</text>
</g>
<!-- s3&#45;&gt;bucket_out -->
<g id="edge14" class="edge">
<title>s3&#45;&gt;bucket_out</title>
<path fill="none" stroke="black" stroke-dasharray="1,5" d="M458.75,-117.02C458.75,-100.45 458.75,-76.15 458.75,-58.73"/>
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 16 KiB

View File

@@ -0,0 +1,83 @@
// docs/architecture/01c-gcp-architecture.dot
// Source for the GCP production architecture diagram (rendered to SVG with
// `dot -Tsvg`). Mirrors the structure of the local/AWS diagrams so the three
// render side-by-side consistently: same app layer, with Cloud Run Jobs + GCS
// substituted for Celery/MinIO (local) and Step Functions/Lambda/S3 (AWS).
digraph gcp_architecture {
// Top-to-bottom flow: browser at the top, cloud storage at the bottom.
rankdir=TB
node [shape=box, style=rounded, fontname="Helvetica"]
edge [fontname="Helvetica", fontsize=10]
labelloc="t"
label="MPR - GCP Architecture (Cloud Run Jobs + GCS)"
fontsize=16
fontname="Helvetica-Bold"
// Orthogonal edges + extra spacing keep the edge labels legible.
graph [splines=ortho, nodesep=0.8, ranksep=0.8]
// External
subgraph cluster_external {
label="External"
style=dashed
color=gray
browser [label="Browser\nmpr.mcrn.ar", shape=ellipse]
}
// Nginx reverse proxy
subgraph cluster_proxy {
label="Reverse Proxy"
style=filled
fillcolor="#e8f4f8"
nginx [label="nginx\nport 80"]
}
// Application layer
subgraph cluster_apps {
label="Application Layer"
style=filled
fillcolor="#f0f8e8"
django [label="Django Admin\n/admin\nport 8701"]
fastapi [label="GraphQL API\n/graphql\nport 8702"]
timeline [label="Timeline UI\n/\nport 5173"]
}
// Data layer (still local)
subgraph cluster_data {
label="Data Layer"
style=filled
fillcolor="#f8e8f0"
postgres [label="PostgreSQL\nport 5436", shape=cylinder]
}
// GCP layer
subgraph cluster_gcp {
label="Google Cloud"
style=filled
fillcolor="#e8f0fd"
cloud_run_job [label="Cloud Run Job\nFFmpeg container\ntranscoding"]
gcs [label="GCS Buckets\n(S3-compat API)", shape=folder]
bucket_in [label="mpr-media-in/\ninput videos", shape=note]
bucket_out [label="mpr-media-out/\ntranscoded output", shape=note]
}
// Connections
browser -> nginx [label="HTTP"]
// xlabel (not label) on the nginx fan-out avoids label overlap with ortho splines.
nginx -> django [xlabel="/admin"]
nginx -> fastapi [xlabel="/graphql"]
nginx -> timeline [xlabel="/"]
timeline -> fastapi [label="GraphQL"]
django -> postgres
// Two distinct fastapi->postgres edges are intentional: one for normal
// read/write traffic, one for status updates driven by the job callback.
fastapi -> postgres [label="read/write jobs"]
fastapi -> cloud_run_job [label="google-cloud-run\nrun_job() + payload\nexecution_name"]
cloud_run_job -> gcs [label="S3 compat (HMAC)\ndownload input\nupload output"]
cloud_run_job -> fastapi [label="POST /jobs/{id}/callback\nupdate status"]
fastapi -> postgres [label="callback updates\njob status"]
// Dotted, arrowless edges mark bucket containment rather than data flow.
gcs -> bucket_in [style=dotted, arrowhead=none]
gcs -> bucket_out [style=dotted, arrowhead=none]
}

View File

@@ -0,0 +1,210 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<!-- Generated by graphviz version 14.1.2 (0)
-->
<!-- Title: gcp_architecture Pages: 1 -->
<svg width="653pt" height="957pt"
viewBox="0.00 0.00 653.00 957.00" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="graph0" class="graph" transform="scale(1 1) rotate(0) translate(4 953.35)">
<title>gcp_architecture</title>
<polygon fill="white" stroke="none" points="-4,4 -4,-953.35 649.25,-953.35 649.25,4 -4,4"/>
<text xml:space="preserve" text-anchor="middle" x="322.62" y="-930.15" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">MPR &#45; GCP Architecture (Cloud Run Jobs + GCS)</text>
<g id="clust1" class="cluster">
<title>cluster_external</title>
<polygon fill="none" stroke="gray" stroke-dasharray="5,2" points="155,-810.25 155,-913.85 315,-913.85 315,-810.25 155,-810.25"/>
<text xml:space="preserve" text-anchor="middle" x="235" y="-894.65" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">External</text>
</g>
<g id="clust2" class="cluster">
<title>cluster_proxy</title>
<polygon fill="#e8f4f8" stroke="black" points="162,-682.5 162,-768.5 308,-768.5 308,-682.5 162,-682.5"/>
<text xml:space="preserve" text-anchor="middle" x="235" y="-749.3" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">Reverse Proxy</text>
</g>
<g id="clust3" class="cluster">
<title>cluster_apps</title>
<polygon fill="#f0f8e8" stroke="black" points="8,-418.75 8,-652.5 290,-652.5 290,-418.75 8,-418.75"/>
<text xml:space="preserve" text-anchor="middle" x="149" y="-633.3" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">Application Layer</text>
</g>
<g id="clust4" class="cluster">
<title>cluster_data</title>
<polygon fill="#f8e8f0" stroke="black" points="27,-248.91 27,-350.84 141,-350.84 141,-248.91 27,-248.91"/>
<text xml:space="preserve" text-anchor="middle" x="84" y="-331.64" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">Data Layer</text>
</g>
<g id="clust5" class="cluster">
<title>cluster_gcp</title>
<polygon fill="#e8f0fd" stroke="black" points="299,-8 299,-351.5 631,-351.5 631,-8 299,-8"/>
<text xml:space="preserve" text-anchor="middle" x="465" y="-332.3" font-family="Helvetica,sans-Serif" font-weight="bold" font-size="16.00">Google Cloud</text>
</g>
<!-- browser -->
<g id="node1" class="node">
<title>browser</title>
<ellipse fill="none" stroke="black" cx="235" cy="-848.3" rx="71.77" ry="30.05"/>
<text xml:space="preserve" text-anchor="middle" x="235" y="-852.25" font-family="Helvetica,sans-Serif" font-size="14.00">Browser</text>
<text xml:space="preserve" text-anchor="middle" x="235" y="-835" font-family="Helvetica,sans-Serif" font-size="14.00">mpr.mcrn.ar</text>
</g>
<!-- nginx -->
<g id="node2" class="node">
<title>nginx</title>
<path fill="none" stroke="black" d="M256.5,-733C256.5,-733 213.5,-733 213.5,-733 207.5,-733 201.5,-727 201.5,-721 201.5,-721 201.5,-702.5 201.5,-702.5 201.5,-696.5 207.5,-690.5 213.5,-690.5 213.5,-690.5 256.5,-690.5 256.5,-690.5 262.5,-690.5 268.5,-696.5 268.5,-702.5 268.5,-702.5 268.5,-721 268.5,-721 268.5,-727 262.5,-733 256.5,-733"/>
<text xml:space="preserve" text-anchor="middle" x="235" y="-715.7" font-family="Helvetica,sans-Serif" font-size="14.00">nginx</text>
<text xml:space="preserve" text-anchor="middle" x="235" y="-698.45" font-family="Helvetica,sans-Serif" font-size="14.00">port 80</text>
</g>
<!-- browser&#45;&gt;nginx -->
<g id="edge1" class="edge">
<title>browser&#45;&gt;nginx</title>
<path fill="none" stroke="black" d="M235,-818C235,-818 235,-745 235,-745"/>
<polygon fill="black" stroke="black" points="238.5,-745 235,-735 231.5,-745 238.5,-745"/>
<text xml:space="preserve" text-anchor="middle" x="247.75" y="-779.75" font-family="Helvetica,sans-Serif" font-size="10.00">HTTP</text>
</g>
<!-- django -->
<g id="node3" class="node">
<title>django</title>
<path fill="none" stroke="black" d="M117.75,-617C117.75,-617 28.25,-617 28.25,-617 22.25,-617 16.25,-611 16.25,-605 16.25,-605 16.25,-569.25 16.25,-569.25 16.25,-563.25 22.25,-557.25 28.25,-557.25 28.25,-557.25 117.75,-557.25 117.75,-557.25 123.75,-557.25 129.75,-563.25 129.75,-569.25 129.75,-569.25 129.75,-605 129.75,-605 129.75,-611 123.75,-617 117.75,-617"/>
<text xml:space="preserve" text-anchor="middle" x="73" y="-599.7" font-family="Helvetica,sans-Serif" font-size="14.00">Django Admin</text>
<text xml:space="preserve" text-anchor="middle" x="73" y="-582.45" font-family="Helvetica,sans-Serif" font-size="14.00">/admin</text>
<text xml:space="preserve" text-anchor="middle" x="73" y="-565.2" font-family="Helvetica,sans-Serif" font-size="14.00">port 8701</text>
</g>
<!-- nginx&#45;&gt;django -->
<g id="edge2" class="edge">
<title>nginx&#45;&gt;django</title>
<path fill="none" stroke="black" d="M201.04,-719C153.54,-719 73,-719 73,-719 73,-719 73,-628.89 73,-628.89"/>
<polygon fill="black" stroke="black" points="76.5,-628.89 73,-618.89 69.5,-628.89 76.5,-628.89"/>
<text xml:space="preserve" text-anchor="middle" x="75.09" y="-722.25" font-family="Helvetica,sans-Serif" font-size="10.00">/admin</text>
</g>
<!-- fastapi -->
<g id="node4" class="node">
<title>fastapi</title>
<path fill="none" stroke="black" d="M270.25,-486.5C270.25,-486.5 189.75,-486.5 189.75,-486.5 183.75,-486.5 177.75,-480.5 177.75,-474.5 177.75,-474.5 177.75,-438.75 177.75,-438.75 177.75,-432.75 183.75,-426.75 189.75,-426.75 189.75,-426.75 270.25,-426.75 270.25,-426.75 276.25,-426.75 282.25,-432.75 282.25,-438.75 282.25,-438.75 282.25,-474.5 282.25,-474.5 282.25,-480.5 276.25,-486.5 270.25,-486.5"/>
<text xml:space="preserve" text-anchor="middle" x="230" y="-469.2" font-family="Helvetica,sans-Serif" font-size="14.00">GraphQL API</text>
<text xml:space="preserve" text-anchor="middle" x="230" y="-451.95" font-family="Helvetica,sans-Serif" font-size="14.00">/graphql</text>
<text xml:space="preserve" text-anchor="middle" x="230" y="-434.7" font-family="Helvetica,sans-Serif" font-size="14.00">port 8702</text>
</g>
<!-- nginx&#45;&gt;fastapi -->
<g id="edge3" class="edge">
<title>nginx&#45;&gt;fastapi</title>
<path fill="none" stroke="black" d="M201.11,-705C191.15,-705 182.88,-705 182.88,-705 182.88,-705 182.88,-498.1 182.88,-498.1"/>
<polygon fill="black" stroke="black" points="186.38,-498.1 182.88,-488.1 179.38,-498.1 186.38,-498.1"/>
<text xml:space="preserve" text-anchor="middle" x="163" y="-613.91" font-family="Helvetica,sans-Serif" font-size="10.00">/graphql</text>
</g>
<!-- timeline -->
<g id="node5" class="node">
<title>timeline</title>
<path fill="none" stroke="black" d="M270,-617C270,-617 200,-617 200,-617 194,-617 188,-611 188,-605 188,-605 188,-569.25 188,-569.25 188,-563.25 194,-557.25 200,-557.25 200,-557.25 270,-557.25 270,-557.25 276,-557.25 282,-563.25 282,-569.25 282,-569.25 282,-605 282,-605 282,-611 276,-617 270,-617"/>
<text xml:space="preserve" text-anchor="middle" x="235" y="-599.7" font-family="Helvetica,sans-Serif" font-size="14.00">Timeline UI</text>
<text xml:space="preserve" text-anchor="middle" x="235" y="-582.45" font-family="Helvetica,sans-Serif" font-size="14.00">/</text>
<text xml:space="preserve" text-anchor="middle" x="235" y="-565.2" font-family="Helvetica,sans-Serif" font-size="14.00">port 5173</text>
</g>
<!-- nginx&#45;&gt;timeline -->
<g id="edge4" class="edge">
<title>nginx&#45;&gt;timeline</title>
<path fill="none" stroke="black" d="M235,-690.04C235,-690.04 235,-628.97 235,-628.97"/>
<polygon fill="black" stroke="black" points="238.5,-628.97 235,-618.97 231.5,-628.97 238.5,-628.97"/>
<text xml:space="preserve" text-anchor="middle" x="233.5" y="-662.75" font-family="Helvetica,sans-Serif" font-size="10.00">/</text>
</g>
<!-- postgres -->
<g id="node6" class="node">
<title>postgres</title>
<path fill="none" stroke="black" d="M131.75,-310.03C131.75,-312.96 110.35,-315.34 84,-315.34 57.65,-315.34 36.25,-312.96 36.25,-310.03 36.25,-310.03 36.25,-262.22 36.25,-262.22 36.25,-259.29 57.65,-256.91 84,-256.91 110.35,-256.91 131.75,-259.29 131.75,-262.22 131.75,-262.22 131.75,-310.03 131.75,-310.03"/>
<path fill="none" stroke="black" d="M131.75,-310.03C131.75,-307.1 110.35,-304.72 84,-304.72 57.65,-304.72 36.25,-307.1 36.25,-310.03"/>
<text xml:space="preserve" text-anchor="middle" x="84" y="-290.07" font-family="Helvetica,sans-Serif" font-size="14.00">PostgreSQL</text>
<text xml:space="preserve" text-anchor="middle" x="84" y="-272.82" font-family="Helvetica,sans-Serif" font-size="14.00">port 5436</text>
</g>
<!-- django&#45;&gt;postgres -->
<g id="edge6" class="edge">
<title>django&#45;&gt;postgres</title>
<path fill="none" stroke="black" d="M59.62,-556.89C59.62,-556.89 59.62,-326.97 59.62,-326.97"/>
<polygon fill="black" stroke="black" points="63.13,-326.97 59.63,-316.97 56.13,-326.97 63.13,-326.97"/>
</g>
<!-- fastapi&#45;&gt;postgres -->
<g id="edge7" class="edge">
<title>fastapi&#45;&gt;postgres</title>
<path fill="none" stroke="black" d="M177.34,-467C135.16,-467 83,-467 83,-467 83,-467 83,-327.1 83,-327.1"/>
<polygon fill="black" stroke="black" points="86.5,-327.1 83,-317.1 79.5,-327.1 86.5,-327.1"/>
<text xml:space="preserve" text-anchor="middle" x="266.38" y="-375.5" font-family="Helvetica,sans-Serif" font-size="10.00">read/write jobs</text>
</g>
<!-- fastapi&#45;&gt;postgres -->
<g id="edge11" class="edge">
<title>fastapi&#45;&gt;postgres</title>
<path fill="none" stroke="black" d="M177.57,-447C143.88,-447 106.38,-447 106.38,-447 106.38,-447 106.38,-327.15 106.38,-327.15"/>
<polygon fill="black" stroke="black" points="109.88,-327.15 106.38,-317.15 102.88,-327.15 109.88,-327.15"/>
<text xml:space="preserve" text-anchor="middle" x="125.25" y="-381.88" font-family="Helvetica,sans-Serif" font-size="10.00">callback updates</text>
<text xml:space="preserve" text-anchor="middle" x="125.25" y="-369.12" font-family="Helvetica,sans-Serif" font-size="10.00">job status</text>
</g>
<!-- cloud_run_job -->
<g id="node7" class="node">
<title>cloud_run_job</title>
<path fill="none" stroke="black" d="M505,-316C505,-316 387,-316 387,-316 381,-316 375,-310 375,-304 375,-304 375,-268.25 375,-268.25 375,-262.25 381,-256.25 387,-256.25 387,-256.25 505,-256.25 505,-256.25 511,-256.25 517,-262.25 517,-268.25 517,-268.25 517,-304 517,-304 517,-310 511,-316 505,-316"/>
<text xml:space="preserve" text-anchor="middle" x="446" y="-298.7" font-family="Helvetica,sans-Serif" font-size="14.00">Cloud Run Job</text>
<text xml:space="preserve" text-anchor="middle" x="446" y="-281.45" font-family="Helvetica,sans-Serif" font-size="14.00">FFmpeg container</text>
<text xml:space="preserve" text-anchor="middle" x="446" y="-264.2" font-family="Helvetica,sans-Serif" font-size="14.00">transcoding</text>
</g>
<!-- fastapi&#45;&gt;cloud_run_job -->
<g id="edge8" class="edge">
<title>fastapi&#45;&gt;cloud_run_job</title>
<path fill="none" stroke="black" d="M247.42,-426.41C247.42,-379.88 247.42,-296 247.42,-296 247.42,-296 363.07,-296 363.07,-296"/>
<polygon fill="black" stroke="black" points="363.07,-299.5 373.07,-296 363.07,-292.5 363.07,-299.5"/>
<text xml:space="preserve" text-anchor="middle" x="414.38" y="-388.25" font-family="Helvetica,sans-Serif" font-size="10.00">google&#45;cloud&#45;run</text>
<text xml:space="preserve" text-anchor="middle" x="414.38" y="-375.5" font-family="Helvetica,sans-Serif" font-size="10.00">run_job() + payload</text>
<text xml:space="preserve" text-anchor="middle" x="414.38" y="-362.75" font-family="Helvetica,sans-Serif" font-size="10.00">execution_name</text>
</g>
<!-- timeline&#45;&gt;fastapi -->
<g id="edge5" class="edge">
<title>timeline&#45;&gt;fastapi</title>
<path fill="none" stroke="black" d="M235,-556.86C235,-556.86 235,-498.24 235,-498.24"/>
<polygon fill="black" stroke="black" points="238.5,-498.24 235,-488.24 231.5,-498.24 238.5,-498.24"/>
<text xml:space="preserve" text-anchor="middle" x="253" y="-518.75" font-family="Helvetica,sans-Serif" font-size="10.00">GraphQL</text>
</g>
<!-- cloud_run_job&#45;&gt;fastapi -->
<g id="edge10" class="edge">
<title>cloud_run_job&#45;&gt;fastapi</title>
<path fill="none" stroke="black" d="M374.7,-276C306.06,-276 212.58,-276 212.58,-276 212.58,-276 212.58,-414.88 212.58,-414.88"/>
<polygon fill="black" stroke="black" points="209.08,-414.88 212.58,-424.88 216.08,-414.88 209.08,-414.88"/>
<text xml:space="preserve" text-anchor="middle" x="585.62" y="-381.88" font-family="Helvetica,sans-Serif" font-size="10.00">POST /jobs/{id}/callback</text>
<text xml:space="preserve" text-anchor="middle" x="585.62" y="-369.12" font-family="Helvetica,sans-Serif" font-size="10.00">update status</text>
</g>
<!-- gcs -->
<g id="node8" class="node">
<title>gcs</title>
<polygon fill="none" stroke="black" points="510.25,-160 507.25,-164 486.25,-164 483.25,-160 381.75,-160 381.75,-117.5 510.25,-117.5 510.25,-160"/>
<text xml:space="preserve" text-anchor="middle" x="446" y="-142.7" font-family="Helvetica,sans-Serif" font-size="14.00">GCS Buckets</text>
<text xml:space="preserve" text-anchor="middle" x="446" y="-125.45" font-family="Helvetica,sans-Serif" font-size="14.00">(S3&#45;compat API)</text>
</g>
<!-- cloud_run_job&#45;&gt;gcs -->
<g id="edge9" class="edge">
<title>cloud_run_job&#45;&gt;gcs</title>
<path fill="none" stroke="black" d="M446,-255.95C446,-255.95 446,-171.81 446,-171.81"/>
<polygon fill="black" stroke="black" points="449.5,-171.81 446,-161.81 442.5,-171.81 449.5,-171.81"/>
<text xml:space="preserve" text-anchor="middle" x="492.12" y="-217.75" font-family="Helvetica,sans-Serif" font-size="10.00">S3 compat (HMAC)</text>
<text xml:space="preserve" text-anchor="middle" x="492.12" y="-205" font-family="Helvetica,sans-Serif" font-size="10.00">download input</text>
<text xml:space="preserve" text-anchor="middle" x="492.12" y="-192.25" font-family="Helvetica,sans-Serif" font-size="10.00">upload output</text>
</g>
<!-- bucket_in -->
<g id="node9" class="node">
<title>bucket_in</title>
<polygon fill="none" stroke="black" points="414.75,-58.5 307.25,-58.5 307.25,-16 420.75,-16 420.75,-52.5 414.75,-58.5"/>
<polyline fill="none" stroke="black" points="414.75,-58.5 414.75,-52.5"/>
<polyline fill="none" stroke="black" points="420.75,-52.5 414.75,-52.5"/>
<text xml:space="preserve" text-anchor="middle" x="364" y="-41.2" font-family="Helvetica,sans-Serif" font-size="14.00">mpr&#45;media&#45;in/</text>
<text xml:space="preserve" text-anchor="middle" x="364" y="-23.95" font-family="Helvetica,sans-Serif" font-size="14.00">input videos</text>
</g>
<!-- gcs&#45;&gt;bucket_in -->
<g id="edge12" class="edge">
<title>gcs&#45;&gt;bucket_in</title>
<path fill="none" stroke="black" stroke-dasharray="1,5" d="M401.25,-117.22C401.25,-100 401.25,-75.96 401.25,-58.74"/>
</g>
<!-- bucket_out -->
<g id="node10" class="node">
<title>bucket_out</title>
<polygon fill="none" stroke="black" points="617.12,-58.5 478.88,-58.5 478.88,-16 623.12,-16 623.12,-52.5 617.12,-58.5"/>
<polyline fill="none" stroke="black" points="617.12,-58.5 617.12,-52.5"/>
<polyline fill="none" stroke="black" points="623.12,-52.5 617.12,-52.5"/>
<text xml:space="preserve" text-anchor="middle" x="551" y="-41.2" font-family="Helvetica,sans-Serif" font-size="14.00">mpr&#45;media&#45;out/</text>
<text xml:space="preserve" text-anchor="middle" x="551" y="-23.95" font-family="Helvetica,sans-Serif" font-size="14.00">transcoded output</text>
</g>
<!-- gcs&#45;&gt;bucket_out -->
<g id="edge13" class="edge">
<title>gcs&#45;&gt;bucket_out</title>
<path fill="none" stroke="black" stroke-dasharray="1,5" d="M494.56,-117.22C494.56,-100 494.56,-75.96 494.56,-58.74"/>
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 15 KiB

View File

@@ -68,9 +68,50 @@ aws s3 cp video.mp4 s3://mpr-media-in/
aws s3 sync /local/media/ s3://mpr-media-in/ aws s3 sync /local/media/ s3://mpr-media-in/
``` ```
## GCP Production (GCS via S3 compatibility)
GCS exposes an S3-compatible API. The same `core/storage/s3.py` boto3 code works
with no changes — only the endpoint and credentials differ.
### GCS HMAC Keys
Generate under **Cloud Storage → Settings → Interoperability** in the GCP console.
These act as `AWS_ACCESS_KEY_ID` / `AWS_SECRET_ACCESS_KEY`.
### Configuration
```bash
S3_ENDPOINT_URL=https://storage.googleapis.com
S3_BUCKET_IN=mpr-media-in
S3_BUCKET_OUT=mpr-media-out
AWS_ACCESS_KEY_ID=<GCS HMAC access key>
AWS_SECRET_ACCESS_KEY=<GCS HMAC secret>
# Executor
MPR_EXECUTOR=gcp
GCP_PROJECT_ID=my-project
GCP_REGION=us-central1
CLOUD_RUN_JOB=mpr-transcode
CALLBACK_URL=https://mpr.mcrn.ar/api
CALLBACK_API_KEY=<secret>
```
### Upload Files to GCS
```bash
gcloud storage cp video.mp4 gs://mpr-media-in/
# Or with the aws CLI via compat endpoint
aws --endpoint-url https://storage.googleapis.com s3 cp video.mp4 s3://mpr-media-in/
```
### Cloud Run Job Handler
`core/task/gcp_handler.py` is the Cloud Run Job entrypoint. It reads the job payload
from `MPR_JOB_PAYLOAD` (injected by `GCPExecutor`), uses `core/storage` for all
GCS access (S3 compat), and POSTs the completion callback to the API.
Set the Cloud Run Job command to: `python -m core.task.gcp_handler`
## Storage Module ## Storage Module
`core/storage.py` provides all S3 operations: `core/storage/` package provides all S3 operations:
```python ```python
from core.storage import ( from core.storage import (
@@ -114,7 +155,14 @@ mutation { scanMediaFolder { found registered skipped files } }
4. Uploads result to `S3_BUCKET_OUT` 4. Uploads result to `S3_BUCKET_OUT`
5. Calls back to API with result 5. Calls back to API with result
Both paths use the same S3 buckets and key structure. ### Cloud Run Job Mode (GCP)
1. `GCPExecutor` triggers Cloud Run Job with payload in `MPR_JOB_PAYLOAD`
2. `core/task/gcp_handler.py` downloads source from `S3_BUCKET_IN` (GCS S3 compat)
3. Runs FFmpeg in container
4. Uploads result to `S3_BUCKET_OUT` (GCS S3 compat)
5. Calls back to API with result
All three paths use the same S3-compatible bucket names and key structure.
## Supported File Types ## Supported File Types

View File

@@ -17,16 +17,32 @@
<a href="#overview">System Overview</a> <a href="#overview">System Overview</a>
<a href="#data-model">Data Model</a> <a href="#data-model">Data Model</a>
<a href="#job-flow">Job Flow</a> <a href="#job-flow">Job Flow</a>
<a href="#media-storage">Media Storage</a>
</nav> </nav>
<h2 id="overview">System Overview</h2> <h2 id="overview">System Overview</h2>
<div class="diagram-container"> <div class="diagram-container">
<div class="diagram"> <div class="diagram">
<h3>Architecture</h3> <h3>Local Architecture (Development)</h3>
<object type="image/svg+xml" data="01-system-overview.svg"> <object type="image/svg+xml" data="01a-local-architecture.svg">
<img src="01-system-overview.svg" alt="System Overview" /> <img
src="01a-local-architecture.svg"
alt="Local Architecture"
/>
</object> </object>
<a href="01-system-overview.svg" target="_blank" <a href="01a-local-architecture.svg" target="_blank"
>Open full size</a
>
</div>
<div class="diagram">
<h3>AWS Architecture (Production)</h3>
<object type="image/svg+xml" data="01b-aws-architecture.svg">
<img
src="01b-aws-architecture.svg"
alt="AWS Architecture"
/>
</object>
<a href="01b-aws-architecture.svg" target="_blank"
>Open full size</a >Open full size</a
> >
</div> </div>
@@ -41,8 +57,7 @@
</li> </li>
<li> <li>
<span class="color-box" style="background: #f0f8e8"></span> <span class="color-box" style="background: #f0f8e8"></span>
Application Layer (Django Admin, FastAPI + GraphQL, Timeline Application Layer (Django Admin, GraphQL API, Timeline UI)
UI)
</li> </li>
<li> <li>
<span class="color-box" style="background: #fff8e8"></span> <span class="color-box" style="background: #fff8e8"></span>
@@ -141,24 +156,44 @@
</ul> </ul>
</div> </div>
<h2>API Interfaces</h2> <h2 id="media-storage">Media Storage</h2>
<pre><code># REST API <div class="diagram-container">
http://mpr.local.ar/api/docs - Swagger UI <p>
POST /api/assets/scan - Scan S3 bucket for media MPR separates media into input and output paths for flexible
POST /api/jobs/ - Create transcode job storage configuration.
POST /api/jobs/{id}/callback - Lambda completion callback </p>
<p>
<a href="04-media-storage.md" target="_blank"
>View Media Storage Documentation →</a
>
</p>
</div>
# GraphQL (GraphiQL) <h2>API (GraphQL)</h2>
http://mpr.local.ar/graphql - GraphiQL IDE <pre><code># GraphiQL IDE
query { assets { id filename } } http://mpr.local.ar/graphql
mutation { createJob(input: {...}) { id status } }
mutation { scanMediaFolder { found registered } }</code></pre> # Queries
query { assets(status: "ready") { id filename duration } }
query { jobs(status: "processing") { id status progress } }
query { presets { id name container videoCodec } }
query { systemStatus { status version } }
# Mutations
mutation { scanMediaFolder { found registered skipped } }
mutation { createJob(input: { sourceAssetId: "...", presetId: "..." }) { id status } }
mutation { cancelJob(id: "...") { id status } }
mutation { retryJob(id: "...") { id status } }
mutation { updateAsset(id: "...", input: { comments: "..." }) { id comments } }
mutation { deleteAsset(id: "...") { ok } }
# Lambda callback (REST)
POST /api/jobs/{id}/callback - Lambda completion webhook</code></pre>
<h2>Access Points</h2> <h2>Access Points</h2>
<pre><code># Local development <pre><code># Local development
127.0.0.1 mpr.local.ar 127.0.0.1 mpr.local.ar
http://mpr.local.ar/admin - Django Admin http://mpr.local.ar/admin - Django Admin
http://mpr.local.ar/api/docs - FastAPI Swagger
http://mpr.local.ar/graphql - GraphiQL http://mpr.local.ar/graphql - GraphiQL
http://mpr.local.ar/ - Timeline UI http://mpr.local.ar/ - Timeline UI
http://localhost:9001 - MinIO Console http://localhost:9001 - MinIO Console

279
docs/index.html Normal file
View File

@@ -0,0 +1,279 @@
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>MPR - Architecture</title>
<link rel="stylesheet" href="architecture/styles.css" />
</head>
<body>
<h1>MPR - Media Processor</h1>
<p>
Media transcoding platform with three execution modes: local (Celery
+ MinIO), AWS (Step Functions + Lambda + S3), and GCP (Cloud Run
Jobs + GCS). Storage is S3-compatible across all environments.
</p>
<nav>
<a href="#overview">System Overview</a>
<a href="#data-model">Data Model</a>
<a href="#job-flow">Job Flow</a>
<a href="#media-storage">Media Storage</a>
</nav>
<h2 id="overview">System Overview</h2>
<div class="diagram-container">
<div class="diagram">
<h3>Local Architecture (Development)</h3>
<object
type="image/svg+xml"
data="architecture/01a-local-architecture.svg"
>
<img
src="architecture/01a-local-architecture.svg"
alt="Local Architecture"
/>
</object>
<a
href="architecture/01a-local-architecture.svg"
target="_blank"
>Open full size</a
>
</div>
<div class="diagram">
<h3>AWS Architecture (Production)</h3>
<object
type="image/svg+xml"
data="architecture/01b-aws-architecture.svg"
>
<img
src="architecture/01b-aws-architecture.svg"
alt="AWS Architecture"
/>
</object>
<a href="architecture/01b-aws-architecture.svg" target="_blank"
>Open full size</a
>
</div>
<div class="diagram">
<h3>GCP Architecture (Production)</h3>
<object
type="image/svg+xml"
data="architecture/01c-gcp-architecture.svg"
>
<img
src="architecture/01c-gcp-architecture.svg"
alt="GCP Architecture"
/>
</object>
<a href="architecture/01c-gcp-architecture.svg" target="_blank"
>Open full size</a
>
</div>
</div>
<div class="legend">
<h3>Components</h3>
<ul>
<li>
<span class="color-box" style="background: #e8f4f8"></span>
Reverse Proxy (nginx)
</li>
<li>
<span class="color-box" style="background: #f0f8e8"></span>
Application Layer (Django Admin, GraphQL API, Timeline UI)
</li>
<li>
<span class="color-box" style="background: #fff8e8"></span>
Worker Layer (Celery local mode)
</li>
<li>
<span class="color-box" style="background: #fde8d0"></span>
AWS (Step Functions, Lambda)
</li>
<li>
<span class="color-box" style="background: #e8f0fd"></span>
GCP (Cloud Run Jobs + GCS)
</li>
<li>
<span class="color-box" style="background: #f8e8f0"></span>
Data Layer (PostgreSQL, Redis)
</li>
<li>
<span class="color-box" style="background: #f0f0f0"></span>
S3-compatible Storage (MinIO / AWS S3 / GCS)
</li>
</ul>
</div>
<h2 id="data-model">Data Model</h2>
<div class="diagram-container">
<div class="diagram">
<h3>Entity Relationships</h3>
<object
type="image/svg+xml"
data="architecture/02-data-model.svg"
>
<img
src="architecture/02-data-model.svg"
alt="Data Model"
/>
</object>
<a href="architecture/02-data-model.svg" target="_blank"
>Open full size</a
>
</div>
</div>
<div class="legend">
<h3>Entities</h3>
<ul>
<li>
<span class="color-box" style="background: #4a90d9"></span>
MediaAsset - Video/audio files with metadata
</li>
<li>
<span class="color-box" style="background: #50b050"></span>
TranscodePreset - Encoding configurations
</li>
<li>
<span class="color-box" style="background: #d9534f"></span>
TranscodeJob - Processing queue items
</li>
</ul>
</div>
<h2 id="job-flow">Job Flow</h2>
<div class="diagram-container">
<div class="diagram">
<h3>Job Lifecycle</h3>
<object
type="image/svg+xml"
data="architecture/03-job-flow.svg"
>
<img src="architecture/03-job-flow.svg" alt="Job Flow" />
</object>
<a href="architecture/03-job-flow.svg" target="_blank"
>Open full size</a
>
</div>
</div>
<div class="legend">
<h3>Job States</h3>
<ul>
<li>
<span class="color-box" style="background: #ffc107"></span>
PENDING - Waiting in queue
</li>
<li>
<span class="color-box" style="background: #17a2b8"></span>
PROCESSING - Worker executing
</li>
<li>
<span class="color-box" style="background: #28a745"></span>
COMPLETED - Success
</li>
<li>
<span class="color-box" style="background: #dc3545"></span>
FAILED - Error occurred
</li>
<li>
<span class="color-box" style="background: #6c757d"></span>
CANCELLED - User cancelled
</li>
</ul>
</div>
<h2 id="media-storage">Media Storage</h2>
<div class="diagram-container">
<p>
MPR separates media into <strong>input</strong> and
<strong>output</strong> paths, each independently configurable.
File paths are stored
<strong>relative to their respective root</strong> to ensure
portability between local development and cloud deployments (AWS
S3, etc.).
</p>
</div>
<div class="legend">
<h3>Input / Output Separation</h3>
<ul>
<li>
<span class="color-box" style="background: #4a90d9"></span>
<code>MEDIA_IN</code> - Source media files to process
</li>
<li>
<span class="color-box" style="background: #50b050"></span>
<code>MEDIA_OUT</code> - Transcoded/trimmed output files
</li>
</ul>
<p><strong>Why Relative Paths?</strong></p>
<ul>
<li>Portability: Same database works locally and in cloud</li>
<li>Flexibility: Easy to switch between storage backends</li>
<li>Simplicity: No need to update paths when migrating</li>
</ul>
</div>
<div class="legend">
<h3>Local Development</h3>
<pre><code>MEDIA_IN=/app/media/in
MEDIA_OUT=/app/media/out
/app/media/
├── in/ # Source files
│ ├── video1.mp4
│ └── subfolder/video3.mp4
└── out/ # Transcoded output
└── video1_h264.mp4</code></pre>
</div>
<div class="legend">
<h3>AWS/Cloud Deployment</h3>
<pre><code>MEDIA_IN=s3://source-bucket/media/
MEDIA_OUT=s3://output-bucket/transcoded/
MEDIA_BASE_URL=https://source-bucket.s3.amazonaws.com/media/</code></pre>
<p>
Database paths remain unchanged (already relative). Just upload
files to S3 and update environment variables.
</p>
</div>
<div class="legend">
<h3>API (GraphQL)</h3>
<p>
All client interactions go through GraphQL at
<code>/graphql</code>.
</p>
<ul>
<li>
<code>scanMediaFolder</code> - Scan S3 bucket for media
files
</li>
<li><code>createJob</code> - Create transcode/trim job</li>
<li>
<code>cancelJob / retryJob</code> - Job lifecycle management
</li>
<li>
<code>updateAsset / deleteAsset</code> - Asset management
</li>
</ul>
<p><strong>Supported File Types:</strong></p>
<p>
Video: mp4, mkv, avi, mov, webm, flv, wmv, m4v<br />
Audio: mp3, wav, flac, aac, ogg, m4a
</p>
</div>
<h2>Access Points</h2>
<pre><code># Add to /etc/hosts
127.0.0.1 mpr.local.ar
# URLs
http://mpr.local.ar/admin - Django Admin
http://mpr.local.ar/graphql - GraphiQL IDE
http://mpr.local.ar/ - Timeline UI</code></pre>
</body>
</html>

125
docs/media-storage.html Normal file
View File

@@ -0,0 +1,125 @@
<h1>Media Storage Architecture</h1>
<h2>Overview</h2>
<p>MPR separates media into <strong>input</strong> and <strong>output</strong> paths, each independently configurable. File paths are stored <strong>relative to their respective root</strong> to ensure portability between local development and cloud deployments (AWS S3, etc.).</p>
<h2>Storage Strategy</h2>
<h3>Input / Output Separation</h3>
<table>
<thead><tr><th>Env Var</th><th>Default</th><th>Purpose</th></tr></thead>
<tbody>
<tr><td><code>MEDIA_IN</code></td><td><code>/app/media/in</code></td><td>Source media files to process</td></tr>
<tr><td><code>MEDIA_OUT</code></td><td><code>/app/media/out</code></td><td>Transcoded/trimmed output files</td></tr>
</tbody>
</table>
<p>These can point to different locations or even different servers/buckets in production.</p>
<h3>File Path Storage</h3>
<ul>
<li><strong>Database</strong>: Stores only the relative path (e.g., <code>videos/sample.mp4</code>)</li>
<li><strong>Input Root</strong>: Configurable via <code>MEDIA_IN</code> env var</li>
<li><strong>Output Root</strong>: Configurable via <code>MEDIA_OUT</code> env var</li>
<li><strong>Serving</strong>: Base URL configurable via <code>MEDIA_BASE_URL</code> env var</li>
</ul>
<h3>Why Relative Paths?</h3>
<ol>
<li><strong>Portability</strong>: Same database works locally and in cloud</li>
<li><strong>Flexibility</strong>: Easy to switch between storage backends</li>
<li><strong>Simplicity</strong>: No need to update paths when migrating</li>
</ol>
<h2>Local Development</h2>
<h3>Configuration</h3>
<pre><code>MEDIA_IN=/app/media/in
MEDIA_OUT=/app/media/out</code></pre>
<h3>File Structure</h3>
<pre><code>/app/media/
├── in/                  # Source files
│   ├── video1.mp4
│   ├── video2.mp4
│   └── subfolder/
│       └── video3.mp4
└── out/                 # Transcoded output
    ├── video1_h264.mp4
    └── video2_trimmed.mp4</code></pre>
<h3>Database Storage</h3>
<pre><code># Source assets (scanned from media/in)
filename: video1.mp4
file_path: video1.mp4

filename: video3.mp4
file_path: subfolder/video3.mp4</code></pre>
<h3>URL Serving</h3>
<ul>
<li>Nginx serves input via <code>location /media/in { alias /app/media/in; }</code></li>
<li>Nginx serves output via <code>location /media/out { alias /app/media/out; }</code></li>
<li>Frontend accesses: <code>http://mpr.local.ar/media/in/video1.mp4</code></li>
<li>Video player: <code>&lt;video src="/media/in/video1.mp4" /&gt;</code></li>
</ul>
<h2>AWS/Cloud Deployment</h2>
<h3>S3 Configuration</h3>
<pre><code># Input and output can be different buckets/paths
MEDIA_IN=s3://source-bucket/media/
MEDIA_OUT=s3://output-bucket/transcoded/
MEDIA_BASE_URL=https://source-bucket.s3.amazonaws.com/media/</code></pre>
<h3>S3 Structure</h3>
<pre><code>s3://source-bucket/media/
├── video1.mp4
└── subfolder/
    └── video3.mp4

s3://output-bucket/transcoded/
├── video1_h264.mp4
└── video2_trimmed.mp4</code></pre>
<h3>Database Storage (Same!)</h3>
<pre><code>filename: video1.mp4
file_path: video1.mp4

filename: video3.mp4
file_path: subfolder/video3.mp4</code></pre>
<h2>API (GraphQL)</h2>
<p>All client interactions go through GraphQL at <code>/graphql</code>. The only remaining REST endpoint is the Lambda completion callback (<code>POST /api/jobs/{id}/callback</code>).</p>
<h3>Scan Media Folder</h3>
<pre><code>mutation { scanMediaFolder { found registered skipped } }</code></pre>
<p><strong>Behavior:</strong></p>
<ol>
<li>Recursively scans the <code>MEDIA_IN</code> directory</li>
<li>Finds all video/audio files (mp4, mkv, avi, mov, mp3, wav, etc.)</li>
<li>Stores paths <strong>relative to MEDIA_IN</strong></li>
<li>Skips already-registered files (by filename)</li>
<li>Returns a summary: <code>{ found, registered, skipped, files }</code></li>
</ol>
<h3>Create Job</h3>
<pre><code>mutation {
  createJob(input: {
    sourceAssetId: "uuid"
    presetId: "uuid"
    trimStart: 10.0
    trimEnd: 30.0
  }) { id status }
}</code></pre>
<p><strong>Behavior:</strong></p>
<ul>
<li>Server sets the output path using <code>MEDIA_OUT</code> + a generated filename</li>
<li>Output goes to the output directory, not alongside source files</li>
</ul>
<h2>Migration Guide</h2>
<h3>Moving from Local to S3</h3>
<ol>
<li>
<p><strong>Upload source files to S3:</strong></p>
<pre><code>aws s3 sync /app/media/in/ s3://source-bucket/media/
aws s3 sync /app/media/out/ s3://output-bucket/transcoded/</code></pre>
</li>
<li>
<p><strong>Update environment variables:</strong></p>
<pre><code>MEDIA_IN=s3://source-bucket/media/
MEDIA_OUT=s3://output-bucket/transcoded/
MEDIA_BASE_URL=https://source-bucket.s3.amazonaws.com/media/</code></pre>
</li>
<li>
<p><strong>Database paths remain unchanged</strong> (already relative)</p>
</li>
</ol>
<h2>Supported File Types</h2>
<p><strong>Video:</strong> <code>.mp4</code>, <code>.mkv</code>, <code>.avi</code>, <code>.mov</code>, <code>.webm</code>, <code>.flv</code>, <code>.wmv</code>, <code>.m4v</code>
<strong>Audio:</strong> <code>.mp3</code>, <code>.wav</code>, <code>.flac</code>, <code>.aac</code>, <code>.ogg</code>, <code>.m4a</code></p>

View File

@@ -16,10 +16,10 @@ Output formats:
- prisma: Prisma schema - prisma: Prisma schema
Usage: Usage:
python -m modelgen from-config -c config.json -o models.py python -m soleprint.station.tools.modelgen from-config -c config.json -o models.py
python -m modelgen from-schema -o models/ --targets pydantic,typescript python -m soleprint.station.tools.modelgen from-schema -o models/ --targets pydantic,typescript
python -m modelgen extract --source /path/to/django --targets pydantic python -m soleprint.station.tools.modelgen extract --source /path/to/django --targets pydantic
python -m modelgen list-formats python -m soleprint.station.tools.modelgen list-formats
""" """
__version__ = "0.2.0" __version__ = "0.2.0"

View File

@@ -16,10 +16,11 @@ Output formats:
- prisma: Prisma schema - prisma: Prisma schema
Usage: Usage:
python -m modelgen --help python -m soleprint.station.tools.modelgen --help
python -m modelgen from-config -c config.json -o models.py python -m soleprint.station.tools.modelgen from-config -c config.json -o models.py
python -m modelgen from-schema -o models/ --targets pydantic,typescript python -m soleprint.station.tools.modelgen from-schema -o models/ --targets pydantic,typescript
python -m modelgen extract --source /path/to/django --targets pydantic python -m soleprint.station.tools.modelgen extract --source /path/to/django --targets pydantic
python -m soleprint.station.tools.modelgen generate --config schema/modelgen.json
""" """
import argparse import argparse
@@ -177,6 +178,47 @@ def cmd_extract(args):
print("Done!") print("Done!")
def cmd_generate(args):
    """Run every generator target listed in a JSON config file.

    Exits with status 1 (after printing to stderr) when the config file or
    the schema folder it references does not exist. Unknown targets are
    warned about and skipped rather than aborting the run.
    """
    import json

    from .loader import load_schema

    cfg_file = Path(args.config)
    if not cfg_file.exists():
        print(f"Error: Config file not found: {cfg_file}", file=sys.stderr)
        sys.exit(1)

    with open(cfg_file) as fh:
        cfg = json.load(fh)

    # Paths in the config are taken relative to the current working directory.
    schema_dir = Path(cfg["schema"])
    if not schema_dir.exists():
        print(f"Error: Schema folder not found: {schema_dir}", file=sys.stderr)
        sys.exit(1)

    print(f"Loading schema: {schema_dir}")

    for entry in cfg["targets"]:
        target_name = entry["target"]
        out_path = Path(entry["output"])
        wanted = set(entry.get("include", []))
        renames = entry.get("name_map", {})

        if target_name not in GENERATORS:
            print(f"Warning: Unknown target '{target_name}', skipping", file=sys.stderr)
            continue

        # Reload the schema per target so each include filter applies independently.
        schema = load_schema(schema_dir, include=wanted or None)
        gen = GENERATORS[target_name](name_map=renames)
        print(f"Generating {target_name} to: {out_path}")
        gen.generate(schema, out_path)

    print("Done!")
def cmd_list_formats(args): def cmd_list_formats(args):
"""List available output formats.""" """List available output formats."""
print("Available output formats:") print("Available output formats:")
@@ -295,6 +337,21 @@ def main():
) )
extract_parser.set_defaults(func=cmd_extract) extract_parser.set_defaults(func=cmd_extract)
# generate command (config-driven multi-target)
gen_parser = subparsers.add_parser(
"generate",
help="Generate all targets from a JSON config file",
)
gen_parser.add_argument(
"--config",
"-c",
type=str,
required=True,
help="Path to generation config file (e.g., schema/modelgen.json)",
)
gen_parser.set_defaults(func=cmd_generate)
# list-formats command # list-formats command
formats_parser = subparsers.add_parser( formats_parser = subparsers.add_parser(
"list-formats", "list-formats",

View File

@@ -7,16 +7,17 @@ Supported generators:
- TypeScriptGenerator: TypeScript interfaces - TypeScriptGenerator: TypeScript interfaces
- ProtobufGenerator: Protocol Buffer definitions - ProtobufGenerator: Protocol Buffer definitions
- PrismaGenerator: Prisma schema - PrismaGenerator: Prisma schema
- StrawberryGenerator: Strawberry type/input/enum classes
""" """
from typing import Dict, Type from typing import Dict, Type
from .base import BaseGenerator from .base import BaseGenerator
from .django import DjangoGenerator from .django import DjangoGenerator
from .graphene import GrapheneGenerator
from .prisma import PrismaGenerator from .prisma import PrismaGenerator
from .protobuf import ProtobufGenerator from .protobuf import ProtobufGenerator
from .pydantic import PydanticGenerator from .pydantic import PydanticGenerator
from .strawberry import StrawberryGenerator
from .typescript import TypeScriptGenerator from .typescript import TypeScriptGenerator
# Registry of available generators # Registry of available generators
@@ -28,14 +29,14 @@ GENERATORS: Dict[str, Type[BaseGenerator]] = {
"protobuf": ProtobufGenerator, "protobuf": ProtobufGenerator,
"proto": ProtobufGenerator, # Alias "proto": ProtobufGenerator, # Alias
"prisma": PrismaGenerator, "prisma": PrismaGenerator,
"graphene": GrapheneGenerator, "strawberry": StrawberryGenerator,
} }
__all__ = [ __all__ = [
"BaseGenerator", "BaseGenerator",
"PydanticGenerator", "PydanticGenerator",
"DjangoGenerator", "DjangoGenerator",
"GrapheneGenerator", "StrawberryGenerator",
"TypeScriptGenerator", "TypeScriptGenerator",
"ProtobufGenerator", "ProtobufGenerator",
"PrismaGenerator", "PrismaGenerator",

View File

@@ -6,12 +6,19 @@ Abstract base class for all code generators.
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from pathlib import Path from pathlib import Path
from typing import Any from typing import Any, Dict
class BaseGenerator(ABC): class BaseGenerator(ABC):
"""Abstract base for code generators.""" """Abstract base for code generators."""
def __init__(self, name_map: Dict[str, str] = None):
# NOTE(review): the None default makes this implicitly Optional[Dict[str, str]] — consider annotating it explicitly.
# Stored as {} when omitted so map_name() can always call .get() safely.
self.name_map = name_map or {}
def map_name(self, name: str) -> str:
    """Return the configured override for a model name, or the name unchanged."""
    if name in self.name_map:
        return self.name_map[name]
    return name
@abstractmethod @abstractmethod
def generate(self, models: Any, output_path: Path) -> None: def generate(self, models: Any, output_path: Path) -> None:
"""Generate code for the given models to the specified path.""" """Generate code for the given models to the specified path."""

View File

@@ -224,7 +224,8 @@ class DjangoGenerator(BaseGenerator):
if default is not dc.MISSING and isinstance(default, Enum): if default is not dc.MISSING and isinstance(default, Enum):
extra.append(f"default={enum_name}.{default.name}") extra.append(f"default={enum_name}.{default.name}")
return DJANGO_TYPES["enum"].format( return DJANGO_TYPES["enum"].format(
enum_name=enum_name, opts=", " + ", ".join(extra) if extra else "" enum_name=enum_name,
opts=", " + ", ".join(extra) if extra else ""
) )
# Text fields (based on name heuristics) # Text fields (based on name heuristics)

View File

@@ -2,8 +2,12 @@
Pydantic Generator Pydantic Generator
Generates Pydantic BaseModel classes from model definitions. Generates Pydantic BaseModel classes from model definitions.
Supports two output modes:
- File output: flat models (backwards compatible)
- Directory output: CRUD variants (Create/Update/Response) per model
""" """
import dataclasses as dc
from enum import Enum from enum import Enum
from pathlib import Path from pathlib import Path
from typing import Any, List, get_type_hints from typing import Any, List, get_type_hints
@@ -13,6 +17,13 @@ from ..loader.schema import EnumDefinition, FieldDefinition, ModelDefinition
from ..types import PYDANTIC_RESOLVERS from ..types import PYDANTIC_RESOLVERS
from .base import BaseGenerator from .base import BaseGenerator
# Fields to skip per CRUD variant
# - Create: omits id/timestamps plus status/error_message (presumably server-managed — confirm)
# - Update: omits immutable id/timestamps; remaining fields are made Optional by the generator
# - Response: skips nothing — the full model is exposed
SKIP_FIELDS = {
"Create": {"id", "created_at", "updated_at", "status", "error_message"},
"Update": {"id", "created_at", "updated_at"},
"Response": set(),
}
class PydanticGenerator(BaseGenerator): class PydanticGenerator(BaseGenerator):
"""Generates Pydantic model files.""" """Generates Pydantic model files."""
@@ -21,52 +32,187 @@ class PydanticGenerator(BaseGenerator):
return ".py" return ".py"
def generate(self, models, output_path: Path) -> None: def generate(self, models, output_path: Path) -> None:
"""Generate Pydantic models to output_path.""" """Generate Pydantic models to output_path.
If output_path is a directory (or doesn't end in .py), generate
multi-file CRUD variants. Otherwise, generate flat models to a
single file.
"""
output_path = Path(output_path)
if output_path.suffix != ".py":
# Directory mode: CRUD variants
self._generate_crud_directory(models, output_path)
else:
# File mode: flat models (backwards compatible)
self._generate_flat_file(models, output_path)
def _generate_flat_file(self, models, output_path: Path) -> None:
"""Generate flat models to a single file (original behavior)."""
output_path.parent.mkdir(parents=True, exist_ok=True) output_path.parent.mkdir(parents=True, exist_ok=True)
# Detect input type and generate accordingly
if hasattr(models, "get_shared_component"): if hasattr(models, "get_shared_component"):
# ConfigLoader (soleprint config)
content = self._generate_from_config(models) content = self._generate_from_config(models)
elif hasattr(models, "models"): elif hasattr(models, "models"):
# SchemaLoader
content = self._generate_from_definitions( content = self._generate_from_definitions(
models.models, getattr(models, "enums", []) models.models, getattr(models, "enums", [])
) )
elif isinstance(models, tuple): elif isinstance(models, tuple):
# (models, enums) tuple from extractor
content = self._generate_from_definitions(models[0], models[1]) content = self._generate_from_definitions(models[0], models[1])
elif isinstance(models, list): elif isinstance(models, list):
# List of dataclasses (MPR style)
content = self._generate_from_dataclasses(models) content = self._generate_from_dataclasses(models)
else: else:
raise ValueError(f"Unsupported input type: {type(models)}") raise ValueError(f"Unsupported input type: {type(models)}")
output_path.write_text(content) output_path.write_text(content)
def _generate_crud_directory(self, models, output_dir: Path) -> None:
"""Generate CRUD variant files in a directory.

Emits one base.py (shared BaseSchema with ORM mode), one module per
model containing its inline enums and Create/Update/Response variants,
and an __init__.py re-exporting everything. Accepts either a
SchemaLoader-like object (.models/.enums) or a (models, enums) tuple.
"""
output_dir.mkdir(parents=True, exist_ok=True)
if hasattr(models, "models"):
model_defs = models.models
enum_defs = getattr(models, "enums", [])
elif isinstance(models, tuple):
model_defs = models[0]
enum_defs = models[1]
else:
raise ValueError(f"Unsupported input type for CRUD mode: {type(models)}")
# base.py — shared parent schema; from_attributes enables ORM-object loading
base_content = "\n".join([
'"""Pydantic Base Schema - GENERATED FILE"""',
"",
"from pydantic import BaseModel, ConfigDict",
"",
"",
"class BaseSchema(BaseModel):",
'    """Base schema with ORM mode."""',
"    model_config = ConfigDict(from_attributes=True)",
"",
])
(output_dir / "base.py").write_text(base_content)
# Per-model files; name_map may rename the public class (module is lowercased mapped name)
imports = ["from .base import BaseSchema"]
all_exports = ['"BaseSchema"']
for model_def in model_defs:
mapped = self.map_name(model_def.name)
module_name = mapped.lower()
lines = [
f'"""{model_def.name} Schemas - GENERATED FILE"""',
"",
"from datetime import datetime",
"from enum import Enum",
"from typing import Any, Dict, List, Optional",
"from uuid import UUID",
"",
"from .base import BaseSchema",
"",
]
# Inline enums used by this model (duplicated per module rather than shared)
model_enums = self._collect_model_enums(model_def, enum_defs)
for enum_def in model_enums:
lines.append("")
lines.extend(self._generate_enum(enum_def))
lines.append("")
# CRUD variants in fixed order: Create, Update, Response
for suffix in ["Create", "Update", "Response"]:
lines.append("")
lines.extend(self._generate_crud_model(model_def, mapped, suffix))
lines.append("")
content = "\n".join(lines)
(output_dir / f"{module_name}.py").write_text(content)
# Track imports/exports for the package __init__.py built below
imports.append(
f"from .{module_name} import {mapped}Create, {mapped}Update, {mapped}Response"
)
all_exports.extend([
f'"{mapped}Create"', f'"{mapped}Update"', f'"{mapped}Response"'
])
for enum_def in model_enums:
imports.append(f"from .{module_name} import {enum_def.name}")
all_exports.append(f'"{enum_def.name}"')
# __init__.py — re-export every generated schema and enum under __all__
init_content = "\n".join([
'"""API Schemas - GENERATED FILE"""',
"",
*imports,
"",
f"__all__ = [{', '.join(all_exports)}]",
"",
])
(output_dir / "__init__.py").write_text(init_content)
def _collect_model_enums(
    self, model_def: ModelDefinition, enum_defs: List[EnumDefinition]
) -> List[EnumDefinition]:
    """Return the subset of enum_defs whose Enum classes appear on this model's fields.

    Result preserves the ordering of enum_defs, not field order.
    """
    referenced = {
        base.__name__
        for base in (unwrap_optional(f.type_hint)[0] for f in model_def.fields)
        if isinstance(base, type) and issubclass(base, Enum)
    }
    matched = []
    for enum_def in enum_defs:
        if enum_def.name in referenced:
            matched.append(enum_def)
    return matched
def _generate_crud_model(
self, model_def: ModelDefinition, mapped_name: str, suffix: str
) -> List[str]:
"""Generate a single CRUD variant (Create/Update/Response).

Returns the class source as a list of lines. Fields listed in
SKIP_FIELDS for the given suffix (and underscore-prefixed fields)
are omitted; a field-less class body degrades to `pass`.
"""
class_name = f"{mapped_name}{suffix}"
# Unknown suffixes skip nothing (defensive default)
skip = SKIP_FIELDS.get(suffix, set())
lines = [
f"class {class_name}(BaseSchema):",
f'    """{class_name} schema."""',
]
has_fields = False
for field in model_def.fields:
if field.name.startswith("_") or field.name in skip:
continue
has_fields = True
py_type = self._resolve_type(field.type_hint, field.optional)
# Update variant: all fields optional (PATCH-style partial update)
if suffix == "Update" and "Optional" not in py_type:
py_type = f"Optional[{py_type}]"
# Optional fields get `= None`; detection is by substring on the rendered type
default = self._format_default(field.default, "Optional" in py_type)
lines.append(f"    {field.name}: {py_type}{default}")
if not has_fields:
lines.append("    pass")
return lines
# =========================================================================
# Flat file generation (original behavior)
# =========================================================================
def _generate_from_definitions( def _generate_from_definitions(
self, models: List[ModelDefinition], enums: List[EnumDefinition] self, models: List[ModelDefinition], enums: List[EnumDefinition]
) -> str: ) -> str:
"""Generate from ModelDefinition objects (schema/extract mode)."""
lines = self._generate_header() lines = self._generate_header()
# Generate enums
for enum_def in enums: for enum_def in enums:
lines.extend(self._generate_enum(enum_def)) lines.extend(self._generate_enum(enum_def))
lines.append("") lines.append("")
# Generate models
for model_def in models: for model_def in models:
lines.extend(self._generate_model_from_definition(model_def)) lines.extend(self._generate_model_from_definition(model_def))
lines.append("") lines.append("")
return "\n".join(lines) return "\n".join(lines)
def _generate_from_dataclasses(self, dataclasses: List[type]) -> str: def _generate_from_dataclasses(self, dataclasses: List[type]) -> str:
"""Generate from Python dataclasses (MPR style)."""
lines = self._generate_header() lines = self._generate_header()
# Collect and generate enums first
enums_generated = set() enums_generated = set()
for cls in dataclasses: for cls in dataclasses:
hints = get_type_hints(cls) hints = get_type_hints(cls)
@@ -77,16 +223,12 @@ class PydanticGenerator(BaseGenerator):
lines.extend(self._generate_enum_from_python(base)) lines.extend(self._generate_enum_from_python(base))
lines.append("") lines.append("")
enums_generated.add(base.__name__) enums_generated.add(base.__name__)
# Generate models
for cls in dataclasses: for cls in dataclasses:
lines.extend(self._generate_model_from_dataclass(cls)) lines.extend(self._generate_model_from_dataclass(cls))
lines.append("") lines.append("")
return "\n".join(lines) return "\n".join(lines)
def _generate_header(self) -> List[str]: def _generate_header(self) -> List[str]:
"""Generate file header."""
return [ return [
'"""', '"""',
"Pydantic Models - GENERATED FILE", "Pydantic Models - GENERATED FILE",
@@ -104,27 +246,23 @@ class PydanticGenerator(BaseGenerator):
] ]
def _generate_enum(self, enum_def: EnumDefinition) -> List[str]: def _generate_enum(self, enum_def: EnumDefinition) -> List[str]:
"""Generate Pydantic enum from EnumDefinition."""
lines = [f"class {enum_def.name}(str, Enum):"] lines = [f"class {enum_def.name}(str, Enum):"]
for name, value in enum_def.values: for name, value in enum_def.values:
lines.append(f' {name} = "{value}"') lines.append(f' {name} = "{value}"')
return lines return lines
def _generate_enum_from_python(self, enum_cls: type) -> List[str]: def _generate_enum_from_python(self, enum_cls: type) -> List[str]:
"""Generate Pydantic enum from Python Enum."""
lines = [f"class {enum_cls.__name__}(str, Enum):"] lines = [f"class {enum_cls.__name__}(str, Enum):"]
for member in enum_cls: for member in enum_cls:
lines.append(f' {member.name} = "{member.value}"') lines.append(f' {member.name} = "{member.value}"')
return lines return lines
def _generate_model_from_definition(self, model_def: ModelDefinition) -> List[str]: def _generate_model_from_definition(self, model_def: ModelDefinition) -> List[str]:
"""Generate Pydantic model from ModelDefinition."""
docstring = model_def.docstring or model_def.name docstring = model_def.docstring or model_def.name
lines = [ lines = [
f"class {model_def.name}(BaseModel):", f"class {model_def.name}(BaseModel):",
f' """{docstring.strip().split(chr(10))[0]}"""', f' """{docstring.strip().split(chr(10))[0]}"""',
] ]
if not model_def.fields: if not model_def.fields:
lines.append(" pass") lines.append(" pass")
else: else:
@@ -132,46 +270,34 @@ class PydanticGenerator(BaseGenerator):
py_type = self._resolve_type(field.type_hint, field.optional) py_type = self._resolve_type(field.type_hint, field.optional)
default = self._format_default(field.default, field.optional) default = self._format_default(field.default, field.optional)
lines.append(f" {field.name}: {py_type}{default}") lines.append(f" {field.name}: {py_type}{default}")
return lines return lines
def _generate_model_from_dataclass(self, cls: type) -> List[str]: def _generate_model_from_dataclass(self, cls: type) -> List[str]:
"""Generate Pydantic model from a dataclass."""
import dataclasses as dc
docstring = cls.__doc__ or cls.__name__ docstring = cls.__doc__ or cls.__name__
lines = [ lines = [
f"class {cls.__name__}(BaseModel):", f"class {cls.__name__}(BaseModel):",
f' """{docstring.strip().split(chr(10))[0]}"""', f' """{docstring.strip().split(chr(10))[0]}"""',
] ]
hints = get_type_hints(cls) hints = get_type_hints(cls)
fields = {f.name: f for f in dc.fields(cls)} fields = {f.name: f for f in dc.fields(cls)}
for name, type_hint in hints.items(): for name, type_hint in hints.items():
if name.startswith("_"): if name.startswith("_"):
continue continue
field = fields.get(name) field = fields.get(name)
default_val = dc.MISSING default_val = dc.MISSING
if field: if field:
if field.default is not dc.MISSING: if field.default is not dc.MISSING:
default_val = field.default default_val = field.default
py_type = self._resolve_type(type_hint, False) py_type = self._resolve_type(type_hint, False)
default = self._format_default(default_val, "Optional" in py_type) default = self._format_default(default_val, "Optional" in py_type)
lines.append(f" {name}: {py_type}{default}") lines.append(f" {name}: {py_type}{default}")
return lines return lines
def _resolve_type(self, type_hint: Any, optional: bool) -> str: def _resolve_type(self, type_hint: Any, optional: bool) -> str:
"""Resolve Python type to Pydantic type string."""
base, is_optional = unwrap_optional(type_hint) base, is_optional = unwrap_optional(type_hint)
optional = optional or is_optional optional = optional or is_optional
origin = get_origin_name(base) origin = get_origin_name(base)
type_name = get_type_name(base) type_name = get_type_name(base)
# Look up resolver
resolver = ( resolver = (
PYDANTIC_RESOLVERS.get(origin) PYDANTIC_RESOLVERS.get(origin)
or PYDANTIC_RESOLVERS.get(type_name) or PYDANTIC_RESOLVERS.get(type_name)
@@ -182,14 +308,10 @@ class PydanticGenerator(BaseGenerator):
else None else None
) )
) )
result = resolver(base) if resolver else "str" result = resolver(base) if resolver else "str"
return f"Optional[{result}]" if optional else result return f"Optional[{result}]" if optional else result
def _format_default(self, default: Any, optional: bool) -> str: def _format_default(self, default: Any, optional: bool) -> str:
"""Format default value for field."""
import dataclasses as dc
if optional: if optional:
return " = None" return " = None"
if default is dc.MISSING or default is None: if default is dc.MISSING or default is None:
@@ -204,7 +326,6 @@ class PydanticGenerator(BaseGenerator):
def _generate_from_config(self, config) -> str: def _generate_from_config(self, config) -> str:
"""Generate from ConfigLoader (soleprint config.json mode).""" """Generate from ConfigLoader (soleprint config.json mode)."""
# Get component names from config
config_comp = config.get_shared_component("config") config_comp = config.get_shared_component("config")
data_comp = config.get_shared_component("data") data_comp = config.get_shared_component("data")

View File

@@ -1,28 +1,29 @@
""" """
Graphene Generator Strawberry Generator
Generates graphene ObjectType and InputObjectType classes from model definitions. Generates strawberry type, input, and enum classes from model definitions.
Only generates type definitions queries, mutations, and resolvers are hand-written. Only generates type definitions queries, mutations, and resolvers are hand-written.
""" """
import dataclasses as dc
from enum import Enum from enum import Enum
from pathlib import Path from pathlib import Path
from typing import Any, List, get_type_hints from typing import Any, List, get_type_hints
from ..helpers import get_origin_name, get_type_name, unwrap_optional from ..helpers import get_origin_name, get_type_name, unwrap_optional
from ..loader.schema import EnumDefinition, FieldDefinition, ModelDefinition from ..loader.schema import EnumDefinition, FieldDefinition, ModelDefinition
from ..types import GRAPHENE_RESOLVERS from ..types import STRAWBERRY_RESOLVERS
from .base import BaseGenerator from .base import BaseGenerator
class GrapheneGenerator(BaseGenerator): class StrawberryGenerator(BaseGenerator):
"""Generates graphene type definition files.""" """Generates strawberry type definition files."""
def file_extension(self) -> str: def file_extension(self) -> str:
return ".py" return ".py"
def generate(self, models, output_path: Path) -> None: def generate(self, models, output_path: Path) -> None:
"""Generate graphene types to output_path.""" """Generate strawberry types to output_path."""
output_path.parent.mkdir(parents=True, exist_ok=True) output_path.parent.mkdir(parents=True, exist_ok=True)
if hasattr(models, "models"): if hasattr(models, "models"):
@@ -47,22 +48,18 @@ class GrapheneGenerator(BaseGenerator):
enums: List[EnumDefinition], enums: List[EnumDefinition],
api_models: List[ModelDefinition], api_models: List[ModelDefinition],
) -> str: ) -> str:
"""Generate from ModelDefinition objects."""
lines = self._generate_header() lines = self._generate_header()
# Generate enums as graphene.Enum
for enum_def in enums: for enum_def in enums:
lines.extend(self._generate_enum(enum_def)) lines.extend(self._generate_enum(enum_def))
lines.append("") lines.append("")
lines.append("") lines.append("")
# Generate domain models as ObjectType
for model_def in models: for model_def in models:
lines.extend(self._generate_object_type(model_def)) lines.extend(self._generate_object_type(model_def))
lines.append("") lines.append("")
lines.append("") lines.append("")
# Generate API models — request types as InputObjectType, others as ObjectType
for model_def in api_models: for model_def in api_models:
if model_def.name.endswith("Request"): if model_def.name.endswith("Request"):
lines.extend(self._generate_input_type(model_def)) lines.extend(self._generate_input_type(model_def))
@@ -74,7 +71,6 @@ class GrapheneGenerator(BaseGenerator):
return "\n".join(lines).rstrip() + "\n" return "\n".join(lines).rstrip() + "\n"
def _generate_from_dataclasses(self, dataclasses: List[type]) -> str: def _generate_from_dataclasses(self, dataclasses: List[type]) -> str:
"""Generate from Python dataclasses."""
lines = self._generate_header() lines = self._generate_header()
enums_generated = set() enums_generated = set()
@@ -99,37 +95,38 @@ class GrapheneGenerator(BaseGenerator):
def _generate_header(self) -> List[str]: def _generate_header(self) -> List[str]:
return [ return [
'"""', '"""',
"Graphene Types - GENERATED FILE", "Strawberry Types - GENERATED FILE",
"", "",
"Do not edit directly. Regenerate using modelgen.", "Do not edit directly. Regenerate using modelgen.",
'"""', '"""',
"", "",
"import graphene", "import strawberry",
"from enum import Enum",
"from typing import List, Optional",
"from uuid import UUID",
"from datetime import datetime",
"from strawberry.scalars import JSON",
"", "",
"", "",
] ]
def _generate_enum(self, enum_def: EnumDefinition) -> List[str]: def _generate_enum(self, enum_def: EnumDefinition) -> List[str]:
"""Generate graphene.Enum from EnumDefinition.""" lines = ["@strawberry.enum", f"class {enum_def.name}(Enum):"]
lines = [f"class {enum_def.name}(graphene.Enum):"]
for name, value in enum_def.values: for name, value in enum_def.values:
lines.append(f' {name} = "{value}"') lines.append(f' {name} = "{value}"')
return lines return lines
def _generate_enum_from_python(self, enum_cls: type) -> List[str]: def _generate_enum_from_python(self, enum_cls: type) -> List[str]:
"""Generate graphene.Enum from Python Enum.""" lines = ["@strawberry.enum", f"class {enum_cls.__name__}(Enum):"]
lines = [f"class {enum_cls.__name__}(graphene.Enum):"]
for member in enum_cls: for member in enum_cls:
lines.append(f' {member.name} = "{member.value}"') lines.append(f' {member.name} = "{member.value}"')
return lines return lines
def _generate_object_type(self, model_def: ModelDefinition) -> List[str]: def _generate_object_type(self, model_def: ModelDefinition) -> List[str]:
"""Generate graphene.ObjectType from ModelDefinition."""
name = model_def.name name = model_def.name
# Append Type suffix if not already present
type_name = f"{name}Type" if not name.endswith("Type") else name type_name = f"{name}Type" if not name.endswith("Type") else name
lines = [f"class {type_name}(graphene.ObjectType):"] lines = ["@strawberry.type", f"class {type_name}:"]
if model_def.docstring: if model_def.docstring:
doc = model_def.docstring.strip().split("\n")[0] doc = model_def.docstring.strip().split("\n")[0]
lines.append(f' """{doc}"""') lines.append(f' """{doc}"""')
@@ -139,23 +136,19 @@ class GrapheneGenerator(BaseGenerator):
lines.append(" pass") lines.append(" pass")
else: else:
for field in model_def.fields: for field in model_def.fields:
graphene_type = self._resolve_type(field.type_hint, field.optional) type_str = self._resolve_type(field.type_hint, optional=True)
lines.append(f" {field.name} = {graphene_type}") lines.append(f" {field.name}: {type_str} = None")
return lines return lines
def _generate_input_type(self, model_def: ModelDefinition) -> List[str]: def _generate_input_type(self, model_def: ModelDefinition) -> List[str]:
"""Generate graphene.InputObjectType from ModelDefinition."""
import dataclasses as dc
name = model_def.name name = model_def.name
# Convert FooRequest -> FooInput
if name.endswith("Request"): if name.endswith("Request"):
input_name = name[: -len("Request")] + "Input" input_name = name[: -len("Request")] + "Input"
else: else:
input_name = f"{name}Input" input_name = f"{name}Input"
lines = [f"class {input_name}(graphene.InputObjectType):"] lines = ["@strawberry.input", f"class {input_name}:"]
if model_def.docstring: if model_def.docstring:
doc = model_def.docstring.strip().split("\n")[0] doc = model_def.docstring.strip().split("\n")[0]
lines.append(f' """{doc}"""') lines.append(f' """{doc}"""')
@@ -164,73 +157,64 @@ class GrapheneGenerator(BaseGenerator):
if not model_def.fields: if not model_def.fields:
lines.append(" pass") lines.append(" pass")
else: else:
# Required fields first, then optional/defaulted
required = []
optional = []
for field in model_def.fields: for field in model_def.fields:
graphene_type = self._resolve_type(field.type_hint, field.optional)
# Required only if not optional AND no default value
has_default = field.default is not dc.MISSING has_default = field.default is not dc.MISSING
if not field.optional and not has_default: if not field.optional and not has_default:
graphene_type = self._make_required(graphene_type) required.append(field)
elif has_default and not field.optional: else:
graphene_type = self._add_default(graphene_type, field.default) optional.append(field)
lines.append(f" {field.name} = {graphene_type}")
for field in required:
type_str = self._resolve_type(field.type_hint, optional=False)
lines.append(f" {field.name}: {type_str}")
for field in optional:
has_default = field.default is not dc.MISSING
if has_default and not callable(field.default):
type_str = self._resolve_type(field.type_hint, optional=False)
lines.append(f" {field.name}: {type_str} = {field.default!r}")
else:
type_str = self._resolve_type(field.type_hint, optional=True)
lines.append(f" {field.name}: {type_str} = None")
return lines return lines
def _generate_object_type_from_dataclass(self, cls: type) -> List[str]: def _generate_object_type_from_dataclass(self, cls: type) -> List[str]:
"""Generate graphene.ObjectType from a dataclass."""
import dataclasses as dc
type_name = f"{cls.__name__}Type" type_name = f"{cls.__name__}Type"
lines = [f"class {type_name}(graphene.ObjectType):"] lines = ["@strawberry.type", f"class {type_name}:"]
hints = get_type_hints(cls) hints = get_type_hints(cls)
for name, type_hint in hints.items(): for name, type_hint in hints.items():
if name.startswith("_"): if name.startswith("_"):
continue continue
graphene_type = self._resolve_type(type_hint, False) type_str = self._resolve_type(type_hint, optional=True)
lines.append(f" {name} = {graphene_type}") lines.append(f" {name}: {type_str} = None")
return lines return lines
def _resolve_type(self, type_hint: Any, optional: bool) -> str: def _resolve_type(self, type_hint: Any, optional: bool) -> str:
"""Resolve Python type to graphene field call string.""" """Resolve Python type hint to a strawberry annotation string."""
base, is_optional = unwrap_optional(type_hint) base, is_optional = unwrap_optional(type_hint)
optional = optional or is_optional optional = optional or is_optional
origin = get_origin_name(base) origin = get_origin_name(base)
type_name = get_type_name(base) type_name = get_type_name(base)
# Look up resolver
resolver = ( resolver = (
GRAPHENE_RESOLVERS.get(origin) STRAWBERRY_RESOLVERS.get(origin)
or GRAPHENE_RESOLVERS.get(type_name) or STRAWBERRY_RESOLVERS.get(type_name)
or GRAPHENE_RESOLVERS.get(base) or STRAWBERRY_RESOLVERS.get(base)
or ( or (
GRAPHENE_RESOLVERS["enum"] STRAWBERRY_RESOLVERS["enum"]
if isinstance(base, type) and issubclass(base, Enum) if isinstance(base, type) and issubclass(base, Enum)
else None else None
) )
) )
result = resolver(base) if resolver else "graphene.String" inner = resolver(base) if resolver else "str"
# List types already have () syntax from resolver if optional:
if result.startswith("graphene.List("): return f"Optional[{inner}]"
return result return inner
# Scalar types: add () call
return f"{result}()"
def _make_required(self, field_str: str) -> str:
"""Add required=True to a graphene field."""
if field_str.endswith("()"):
return field_str[:-1] + "required=True)"
return field_str
def _add_default(self, field_str: str, default: Any) -> str:
"""Add default_value to a graphene field."""
if callable(default):
# default_factory — skip, graphene doesn't support factories
return field_str
if field_str.endswith("()"):
return field_str[:-1] + f"default_value={default!r})"
return field_str

View File

@@ -26,11 +26,10 @@ class TypeScriptGenerator(BaseGenerator):
# Handle different input types # Handle different input types
if hasattr(models, "models"): if hasattr(models, "models"):
# SchemaLoader # SchemaLoader — include api_models if present
all_models = models.models + getattr(models, "api_models", [])
content = self._generate_from_definitions( content = self._generate_from_definitions(
models.models, all_models, getattr(models, "enums", [])
getattr(models, "enums", []),
api_models=getattr(models, "api_models", []),
) )
elif isinstance(models, tuple): elif isinstance(models, tuple):
# (models, enums) tuple # (models, enums) tuple
@@ -44,10 +43,7 @@ class TypeScriptGenerator(BaseGenerator):
output_path.write_text(content) output_path.write_text(content)
def _generate_from_definitions( def _generate_from_definitions(
self, self, models: List[ModelDefinition], enums: List[EnumDefinition]
models: List[ModelDefinition],
enums: List[EnumDefinition],
api_models: List[ModelDefinition] = None,
) -> str: ) -> str:
"""Generate from ModelDefinition objects.""" """Generate from ModelDefinition objects."""
lines = self._generate_header() lines = self._generate_header()
@@ -63,14 +59,6 @@ class TypeScriptGenerator(BaseGenerator):
lines.extend(self._generate_interface_from_definition(model_def)) lines.extend(self._generate_interface_from_definition(model_def))
lines.append("") lines.append("")
# Generate API request/response interfaces
if api_models:
lines.append("// API request/response types")
lines.append("")
for model_def in api_models:
lines.extend(self._generate_interface_from_definition(model_def))
lines.append("")
return "\n".join(lines) return "\n".join(lines)
def _generate_from_dataclasses(self, dataclasses: List[type]) -> str: def _generate_from_dataclasses(self, dataclasses: List[type]) -> str:

View File

@@ -5,6 +5,7 @@ Loads Python dataclasses from a schema/ folder.
Expects the folder to have an __init__.py that exports: Expects the folder to have an __init__.py that exports:
- DATACLASSES: List of dataclass types to generate - DATACLASSES: List of dataclass types to generate
- ENUMS: List of Enum types to include - ENUMS: List of Enum types to include
- API_MODELS: (optional) List of API request/response types
- GRPC_MESSAGES: (optional) List of gRPC message types - GRPC_MESSAGES: (optional) List of gRPC message types
- GRPC_SERVICE: (optional) gRPC service definition dict - GRPC_SERVICE: (optional) gRPC service definition dict
""" """
@@ -88,7 +89,7 @@ class SchemaLoader:
for cls in dataclasses: for cls in dataclasses:
self.models.append(self._parse_dataclass(cls)) self.models.append(self._parse_dataclass(cls))
# Extract API_MODELS (TypeScript-only request/response types) # Extract API_MODELS (request/response types)
if load_all or "api" in include: if load_all or "api" in include:
api_models = getattr(module, "API_MODELS", []) api_models = getattr(module, "API_MODELS", [])
for cls in api_models: for cls in api_models:

View File

@@ -139,34 +139,34 @@ PRISMA_SPECIAL: dict[str, str] = {
} }
# ============================================================================= # =============================================================================
# Graphene Type Resolvers # Strawberry Type Resolvers
# ============================================================================= # =============================================================================
def _resolve_graphene_list(base: Any) -> str: def _resolve_strawberry_list(base: Any) -> str:
"""Resolve graphene List type.""" """Resolve strawberry List type annotation."""
args = get_args(base) args = get_args(base)
if args: if args:
inner = args[0] inner = args[0]
if inner is str: if inner is str:
return "graphene.List(graphene.String)" return "List[str]"
elif inner is int: elif inner is int:
return "graphene.List(graphene.Int)" return "List[int]"
elif inner is float: elif inner is float:
return "graphene.List(graphene.Float)" return "List[float]"
elif inner is bool: elif inner is bool:
return "graphene.List(graphene.Boolean)" return "List[bool]"
return "graphene.List(graphene.String)" return "List[str]"
GRAPHENE_RESOLVERS: dict[Any, Callable[[Any], str]] = { STRAWBERRY_RESOLVERS: dict[Any, Callable[[Any], str]] = {
str: lambda _: "graphene.String", str: lambda _: "str",
int: lambda _: "graphene.Int", int: lambda _: "int",
float: lambda _: "graphene.Float", float: lambda _: "float",
bool: lambda _: "graphene.Boolean", bool: lambda _: "bool",
"UUID": lambda _: "graphene.UUID", "UUID": lambda _: "UUID",
"datetime": lambda _: "graphene.DateTime", "datetime": lambda _: "datetime",
"dict": lambda _: "graphene.JSONString", "dict": lambda _: "JSON",
"list": _resolve_graphene_list, "list": _resolve_strawberry_list,
"enum": lambda base: f"graphene.String", # Enums exposed as strings in GQL "enum": lambda base: base.__name__,
} }

2
requirements-worker.txt Normal file
View File

@@ -0,0 +1,2 @@
-r requirements.txt
ffmpeg-python>=0.2.0

View File

@@ -12,19 +12,19 @@ pydantic>=2.5.0
celery[redis]>=5.3.0 celery[redis]>=5.3.0
redis>=5.0.0 redis>=5.0.0
# FFmpeg
ffmpeg-python>=0.2.0
# gRPC # gRPC
grpcio>=1.60.0 grpcio>=1.60.0
grpcio-tools>=1.60.0 grpcio-tools>=1.60.0
# AWS # AWS
boto3>=1.34.0 boto3>=1.34.0
requests>=2.31.0
# GCP (optional — only needed when MPR_EXECUTOR=gcp)
google-cloud-run>=0.10.0
# GraphQL # GraphQL
graphene>=3.3 strawberry-graphql[fastapi]>=0.311.0
starlette-graphene3>=0.6.0
# Testing # Testing
pytest>=7.4.0 pytest>=7.4.0

View File

@@ -1,718 +0,0 @@
#!/usr/bin/env python3
"""
MPR Model Generator
Generates framework-specific models from schema/models/:
- Django ORM models -> mpr/media_assets/models.py
- Pydantic schemas -> api/schemas/*.py
- TypeScript types -> ui/timeline/src/types.ts
- Protobuf -> grpc/protos/worker.proto
Usage:
python schema/generate.py [--django] [--pydantic] [--typescript] [--proto] [--all]
"""
import argparse
import dataclasses as dc
import subprocess
import sys
from enum import Enum
from pathlib import Path
from typing import Any, Callable, Union, get_args, get_origin, get_type_hints
PROJECT_ROOT = Path(__file__).parent.parent
sys.path.insert(0, str(PROJECT_ROOT))
from schema.models import API_MODELS, DATACLASSES, ENUMS, GRPC_MESSAGES, GRPC_SERVICE
# =============================================================================
# Type Dispatch Tables
# =============================================================================
# Maps a Python type (or a string tag for special cases) to a Django field
# template. Placeholders ({max_length}, {opts}, {default}, {enum_name}) are
# filled by resolve_django_type via str.format.
DJANGO_TYPES: dict[Any, str] = {
    str: "models.CharField(max_length={max_length}{opts})",
    int: "models.IntegerField({opts})",
    float: "models.FloatField({opts})",
    bool: "models.BooleanField(default={default})",
    "UUID": "models.UUIDField({opts})",
    "datetime": "models.DateTimeField({opts})",
    "dict": "models.JSONField(default=dict, blank=True)",
    "list": "models.JSONField(default=list, blank=True)",
    "text": "models.TextField(blank=True, default='')",
    "bigint": "models.BigIntegerField({opts})",
    "enum": "models.CharField(max_length=20, choices={enum_name}.choices{opts})",
}

# Field NAMES that always map to a fixed Django definition regardless of
# their annotated type; checked first in resolve_django_type.
DJANGO_SPECIAL: dict[str, str] = {
    "id": "models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)",
    "created_at": "models.DateTimeField(auto_now_add=True)",
    "updated_at": "models.DateTimeField(auto_now=True)",
}

# Type -> callable producing a Pydantic annotation string. Keys may be a
# Python type, an origin tag ('dict'/'list'), a type name ('UUID'), or 'enum'.
PYDANTIC_RESOLVERS: dict[Any, Callable[[Any], str]] = {
    str: lambda _: "str",
    int: lambda _: "int",
    float: lambda _: "float",
    bool: lambda _: "bool",
    "UUID": lambda _: "UUID",
    "datetime": lambda _: "datetime",
    "dict": lambda _: "Dict[str, Any]",
    "list": lambda base: f"List[{get_list_inner(base)}]",
    "enum": lambda base: base.__name__,
}

# Same keying scheme, but producing TypeScript type strings.
TS_RESOLVERS: dict[Any, Callable[[Any], str]] = {
    str: lambda _: "string",
    int: lambda _: "number",
    float: lambda _: "number",
    bool: lambda _: "boolean",
    "UUID": lambda _: "string",
    "datetime": lambda _: "string",
    "dict": lambda _: "Record<string, unknown>",
    # list resolver recurses one level into TS_RESOLVERS for the element
    # type, defaulting to 'string' for anything it does not know.
    "list": lambda base: (
        f"{TS_RESOLVERS.get(get_args(base)[0], lambda _: 'string')(None)}[]"
        if get_args(base)
        else "string[]"
    ),
    "enum": lambda base: base.__name__,
}

# Same keying scheme, producing proto3 field types. No UUID/datetime/enum
# entries here — unknowns fall back to "string" in resolve_proto_type.
PROTO_RESOLVERS: dict[Any, Callable[[Any], str]] = {
    str: lambda _: "string",
    int: lambda _: "int32",
    float: lambda _: "float",
    bool: lambda _: "bool",
    "list": lambda base: (
        f"repeated {PROTO_RESOLVERS.get(get_args(base)[0], lambda _: 'string')(None)}"
        if get_args(base)
        else "repeated string"
    ),
}
# =============================================================================
# Type Helpers
# =============================================================================
def unwrap_optional(type_hint: Any) -> tuple[Any, bool]:
    """Unwrap Optional[T] -> (T, True) or (T, False) if not optional.

    Any typing.Union is treated as "optional": the first non-None member is
    returned with True (str when every member is NoneType).
    """
    if get_origin(type_hint) is not Union:
        return (type_hint, False)
    non_none = [arg for arg in get_args(type_hint) if arg is not type(None)]
    first = non_none[0] if non_none else str
    return (first, True)
def get_origin_name(type_hint: Any) -> str | None:
"""Get origin type name: 'dict', 'list', or None."""
origin = get_origin(type_hint)
if origin is dict:
return "dict"
if origin is list:
return "list"
return None
def get_type_name(type_hint: Any) -> str | None:
"""Get type name for special types like UUID, datetime."""
if hasattr(type_hint, "__name__"):
return type_hint.__name__
return None
def get_list_inner(type_hint: Any) -> str:
    """Get inner type of List[T]; anything non-scalar falls back to 'str'."""
    scalar_names = {str: "str", int: "int", float: "float", bool: "bool"}
    args = get_args(type_hint)
    if not args:
        return "str"
    return scalar_names.get(args[0], "str")
def get_field_default(field: dc.Field) -> Any:
    """Get default value from dataclass field.

    Returns dc.MISSING when the field has no plain default (including
    fields declared with default_factory, whose ``default`` is MISSING).
    """
    # ``field.default`` is already dc.MISSING when absent, so the previous
    # if/else was redundant — both branches returned field.default.
    return field.default
def format_opts(optional: bool, extra: list[str] | None = None) -> str:
"""Format field options string."""
parts = []
if optional:
parts.append("null=True, blank=True")
if extra:
parts.extend(extra)
return ", ".join(parts)
# =============================================================================
# Django Generator
# =============================================================================
def resolve_django_type(name: str, type_hint: Any, default: Any) -> str:
    """Resolve Python type to Django field.

    Dispatch order is significant: special field names win over types,
    containers over scalars, and name-based heuristics (text/bigint) over
    the plain str/int mappings. Falls back to CharField(255).
    """
    # Special fields
    if name in DJANGO_SPECIAL:
        return DJANGO_SPECIAL[name]
    base, optional = unwrap_optional(type_hint)
    origin = get_origin_name(base)
    type_name = get_type_name(base)
    opts = format_opts(optional)
    # Container types — templates carry their own defaults, no opts applied
    if origin == "dict":
        return DJANGO_TYPES["dict"]
    if origin == "list":
        return DJANGO_TYPES["list"]
    # UUID / datetime
    if type_name == "UUID":
        return DJANGO_TYPES["UUID"].format(opts=opts)
    if type_name == "datetime":
        return DJANGO_TYPES["datetime"].format(opts=opts)
    # Enum — rendered as CharField with <Enum>.choices; dataclass enum
    # defaults become default=<Enum>.<MEMBER>
    if isinstance(base, type) and issubclass(base, Enum):
        enum_name = base.__name__
        extra = []
        if optional:
            extra.append("null=True, blank=True")
        if default is not dc.MISSING and isinstance(default, Enum):
            extra.append(f"default={enum_name}.{default.name}")
        return DJANGO_TYPES["enum"].format(
            enum_name=enum_name, opts=", " + ", ".join(extra) if extra else ""
        )
    # Text fields — substring match on the field name, not the type
    if base is str and any(x in name for x in ("message", "comments", "description")):
        return DJANGO_TYPES["text"]
    # BigInt fields — exact-name allowlist
    if base is int and name in ("file_size", "bitrate"):
        return DJANGO_TYPES["bigint"].format(opts=opts)
    # Basic types
    if base is str:
        # max_length heuristic: paths get 1000, filenames 500, else 255
        max_length = 1000 if "path" in name else 500 if "filename" in name else 255
        return DJANGO_TYPES[str].format(
            max_length=max_length, opts=", " + opts if opts else ""
        )
    if base is int:
        extra = [opts] if opts else []
        # callable defaults are default_factory — not representable here
        if default is not dc.MISSING and not callable(default):
            extra.append(f"default={default}")
        return DJANGO_TYPES[int].format(opts=", ".join(extra))
    if base is float:
        extra = [opts] if opts else []
        if default is not dc.MISSING and not callable(default):
            extra.append(f"default={default}")
        return DJANGO_TYPES[float].format(opts=", ".join(extra))
    if base is bool:
        default_val = default if default is not dc.MISSING else False
        return DJANGO_TYPES[bool].format(default=default_val)
    # Fallback
    return DJANGO_TYPES[str].format(max_length=255, opts=", " + opts if opts else "")
def generate_django_enum(enum_cls: type) -> list[str]:
    """Generate Django TextChoices enum lines from a Python Enum."""
    rows = [
        (m.name, m.value, m.name.replace("_", " ").title()) for m in enum_cls
    ]
    header = f"class {enum_cls.__name__}(models.TextChoices):"
    return [header] + [
        f'    {name} = "{value}", "{label}"' for name, value, label in rows
    ]
def generate_django_model(cls: type) -> list[str]:
    """Generate Django model lines from dataclass.

    Emits the class header with the dataclass docstring's first line, one
    field per annotated attribute, a Meta with created_at ordering, and a
    __str__ based on whichever of filename/name/id the model has.
    """
    # NOTE(review): leading whitespace inside the emitted literals below
    # looks collapsed in this view — confirm indentation against the
    # canonical file.
    lines = [
        f"class {cls.__name__}(models.Model):",
        # chr(10) == "\n": a backslash can't appear inside an f-string
        # expression pre-3.12, so the first docstring line is split via chr(10)
        f' """{(cls.__doc__ or cls.__name__).strip().split(chr(10))[0]}"""',
        "",
    ]
    hints = get_type_hints(cls)
    fields = {f.name: f for f in dc.fields(cls)}
    # Fields — private (underscore-prefixed) attributes are skipped
    for name, type_hint in hints.items():
        if name.startswith("_"):
            continue
        field = fields.get(name)
        default = get_field_default(field) if field else dc.MISSING
        django_field = resolve_django_type(name, type_hint, default)
        lines.append(f" {name} = {django_field}")
    # Meta and __str__
    lines.extend(
        [
            "",
            " class Meta:",
            ' ordering = ["-created_at"]',
            "",
            " def __str__(self):",
        ]
    )
    # __str__ preference: filename, then name, then the id
    if "filename" in hints:
        lines.append(" return self.filename")
    elif "name" in hints:
        lines.append(" return self.name")
    else:
        lines.append(" return str(self.id)")
    return lines
def generate_django() -> str:
    """Generate complete Django models file.

    Assembles a fixed header, then every enum in ENUMS as TextChoices
    (emitted first so models can reference them in choices=), then every
    dataclass in DATACLASSES as a model.
    """
    header = [
        '"""',
        "Django ORM Models - GENERATED FILE",
        "",
        "Do not edit directly. Modify schema/models/*.py and run:",
        " python schema/generate.py --django",
        '"""',
        "",
        "import uuid",
        "from django.db import models",
        "",
    ]
    # Generate enums first
    body = []
    for enum_cls in ENUMS:
        body.extend(generate_django_enum(enum_cls))
        body.extend(["", ""])
    # Generate models
    for cls in DATACLASSES:
        body.extend(generate_django_model(cls))
        body.extend(["", ""])
    return "\n".join(header + body)
# =============================================================================
# Pydantic Generator
# =============================================================================
def resolve_pydantic_type(type_hint: Any) -> str:
    """Resolve Python type to Pydantic type string.

    Wraps the result in Optional[...] when the hint was Optional. Unknown
    types degrade to "str".
    """
    base, optional = unwrap_optional(type_hint)
    origin = get_origin_name(base)
    type_name = get_type_name(base)
    # Look up resolver by origin, type name, base type, or enum — first hit
    # wins, mirroring the dispatch used by the other generators
    resolver = (
        PYDANTIC_RESOLVERS.get(origin)
        or PYDANTIC_RESOLVERS.get(type_name)
        or PYDANTIC_RESOLVERS.get(base)
        or (
            PYDANTIC_RESOLVERS["enum"]
            if isinstance(base, type) and issubclass(base, Enum)
            else None
        )
    )
    result = resolver(base) if resolver else "str"
    return f"Optional[{result}]" if optional else result
def generate_pydantic_schema(cls: type, suffix: str) -> list[str]:
    """Generate Pydantic schema lines from dataclass.

    suffix is one of "Create" / "Update" / "Response"; each variant skips a
    different set of server-managed fields, and Update forces every field
    optional.
    """
    # Strip domain prefixes so e.g. TranscodeJob -> JobCreate
    name = cls.__name__.replace("Transcode", "").replace("Media", "")
    class_name = f"{name}{suffix}"
    # Server-managed fields omitted per schema variant
    skip_fields = {
        "Create": {"id", "created_at", "updated_at", "status", "error_message"},
        "Update": {"id", "created_at", "updated_at"},
        "Response": set(),
    }
    # NOTE(review): leading whitespace inside the emitted literals below
    # looks collapsed in this view — confirm against the canonical file.
    lines = [
        f"class {class_name}(BaseSchema):",
        f' """{class_name} schema."""',
    ]
    hints = get_type_hints(cls)
    fields = {f.name: f for f in dc.fields(cls)}
    for name, type_hint in hints.items():  # NOTE: rebinds `name` from above
        if name.startswith("_") or name in skip_fields.get(suffix, set()):
            continue
        py_type = resolve_pydantic_type(type_hint)
        # Update schemas: all fields optional
        if suffix == "Update" and "Optional" not in py_type:
            py_type = f"Optional[{py_type}]"
        field = fields.get(name)
        default = get_field_default(field) if field else dc.MISSING
        if "Optional" in py_type:
            lines.append(f" {name}: {py_type} = None")
        elif default is not dc.MISSING and not callable(default):
            # Literal defaults only; callables are default_factory and
            # cannot be rendered as a simple `= value`
            if isinstance(default, str):
                lines.append(f' {name}: {py_type} = "{default}"')
            elif isinstance(default, Enum):
                lines.append(
                    f" {name}: {py_type} = {default.__class__.__name__}.{default.name}"
                )
            else:
                lines.append(f" {name}: {py_type} = {default!r}")
        else:
            lines.append(f" {name}: {py_type}")
    return lines
def generate_pydantic() -> dict[str, str]:
    """Generate all Pydantic schema files.

    Returns a mapping of filename -> file content: base.py, one module per
    dataclass in DATACLASSES (with its Create/Update/Response variants and,
    if present, its first enum), and an __init__.py re-exporting everything.
    """
    files = {}
    # base.py — shared BaseSchema with ORM mode enabled
    files["base.py"] = "\n".join(
        [
            '"""Pydantic Base Schema - GENERATED FILE"""',
            "",
            "from pydantic import BaseModel, ConfigDict",
            "",
            "",
            "class BaseSchema(BaseModel):",
            ' """Base schema with ORM mode."""',
            " model_config = ConfigDict(from_attributes=True)",
            "",
        ]
    )
    # Schema files per model — module name derived by stripping the
    # Transcode/Media prefixes and lowercasing (same rule as __init__.py)
    for cls in DATACLASSES:
        module_name = cls.__name__.replace("Transcode", "").replace("Media", "").lower()
        lines = [
            f'"""{cls.__name__} Schemas - GENERATED FILE"""',
            "",
            "from datetime import datetime",
            "from enum import Enum",
            "from typing import Any, Dict, List, Optional",
            "from uuid import UUID",
            "",
            "from .base import BaseSchema",
            "",
        ]
        # Add enum if present — only the FIRST enum-typed field is emitted
        # (note the break below)
        hints = get_type_hints(cls)
        for type_hint in hints.values():
            base, _ = unwrap_optional(type_hint)
            if isinstance(base, type) and issubclass(base, Enum):
                lines.extend(
                    [
                        "",
                        f"class {base.__name__}(str, Enum):",
                    ]
                )
                for m in base:
                    lines.append(f' {m.name} = "{m.value}"')
                lines.append("")
                break
        # Schemas
        for suffix in ["Create", "Update", "Response"]:
            lines.append("")
            lines.extend(generate_pydantic_schema(cls, suffix))
            lines.append("")
        files[f"{module_name}.py"] = "\n".join(lines)
    # __init__.py — re-export every schema class plus the per-model enum
    imports = ["from .base import BaseSchema"]
    all_exports = ['"BaseSchema"']
    for cls in DATACLASSES:
        name = cls.__name__.replace("Transcode", "").replace("Media", "")
        module = name.lower()
        imports.append(
            f"from .{module} import {name}Create, {name}Update, {name}Response"
        )
        all_exports.extend([f'"{name}Create"', f'"{name}Update"', f'"{name}Response"'])
        # Add enum export — first enum only, matching the module generation
        hints = get_type_hints(cls)
        for type_hint in hints.values():
            base, _ = unwrap_optional(type_hint)
            if isinstance(base, type) and issubclass(base, Enum):
                imports.append(f"from .{module} import {base.__name__}")
                all_exports.append(f'"{base.__name__}"')
                break
    files["__init__.py"] = "\n".join(
        [
            '"""API Schemas - GENERATED FILE"""',
            "",
            *imports,
            "",
            f"__all__ = [{', '.join(all_exports)}]",
            "",
        ]
    )
    return files
# =============================================================================
# TypeScript Generator
# =============================================================================
def resolve_ts_type(type_hint: Any) -> str:
    """Resolve Python type to TypeScript type string.

    Optional hints are rendered as a `| null` union. Unknown types degrade
    to "string".
    """
    base, optional = unwrap_optional(type_hint)
    origin = get_origin_name(base)
    type_name = get_type_name(base)
    # Look up resolver by origin, type name, base type, or enum — same
    # first-hit dispatch as resolve_pydantic_type
    resolver = (
        TS_RESOLVERS.get(origin)
        or TS_RESOLVERS.get(type_name)
        or TS_RESOLVERS.get(base)
        or (
            TS_RESOLVERS["enum"]
            if isinstance(base, type) and issubclass(base, Enum)
            else None
        )
    )
    result = resolver(base) if resolver else "string"
    return f"{result} | null" if optional else result
def generate_ts_interface(cls: type) -> list[str]:
    """Generate TypeScript interface lines from dataclass."""
    # One member line per public (non-underscore) annotated attribute.
    body = [
        f"  {name}: {resolve_ts_type(hint)};"
        for name, hint in get_type_hints(cls).items()
        if not name.startswith("_")
    ]
    return [f"export interface {cls.__name__} {{", *body, "}"]
def generate_typescript() -> str:
    """Generate complete TypeScript file.

    Emits enums as string-literal union types, then an interface for every
    dataclass in DATACLASSES, then interfaces for API_MODELS.
    """
    lines = [
        "/**",
        " * MPR TypeScript Types - GENERATED FILE",
        " *",
        " * Do not edit directly. Modify schema/models/*.py and run:",
        " * python schema/generate.py --typescript",
        " */",
        "",
    ]
    # Enums as union types, e.g. export type Status = "a" | "b";
    for enum in ENUMS:
        values = " | ".join(f'"{m.value}"' for m in enum)
        lines.append(f"export type {enum.__name__} = {values};")
        lines.append("")
    # Interfaces - domain models
    for cls in DATACLASSES:
        lines.extend(generate_ts_interface(cls))
        lines.append("")
    # Interfaces - API request/response models
    lines.append("// API Request/Response Types")
    lines.append("")
    for cls in API_MODELS:
        lines.extend(generate_ts_interface(cls))
        lines.append("")
    return "\n".join(lines)
# =============================================================================
# Proto Generator
# =============================================================================
def resolve_proto_type(type_hint: Any) -> tuple[str, bool]:
    """Resolve Python type to proto type. Returns (type, is_optional).

    is_optional is forced False for repeated fields, since proto3 does not
    allow the `optional` label on them. Unknown types degrade to "string".
    """
    base, optional = unwrap_optional(type_hint)
    origin = get_origin_name(base)
    # Look up resolver by origin or base type
    resolver = PROTO_RESOLVERS.get(origin) or PROTO_RESOLVERS.get(base)
    if resolver:
        result = resolver(base)
        is_repeated = result.startswith("repeated")
        return result, optional and not is_repeated
    return "string", optional
def generate_proto_message(cls: type) -> list[str]:
    """Generate proto message lines from dataclass.

    Field numbers are assigned by annotation order starting at 1.
    NOTE(review): reordering or removing dataclass fields renumbers the
    proto tags, which breaks wire compatibility — confirm this is
    acceptable for how these protos are consumed.
    """
    lines = [f"message {cls.__name__} {{"]
    hints = get_type_hints(cls)
    if not hints:
        lines.append(" // Empty")
    else:
        for i, (name, type_hint) in enumerate(hints.items(), 1):
            proto_type, optional = resolve_proto_type(type_hint)
            # `optional` label is invalid on repeated fields in proto3
            prefix = (
                "optional "
                if optional and not proto_type.startswith("repeated")
                else ""
            )
            lines.append(f" {prefix}{proto_type} {name} = {i};")
    lines.append("}")
    return lines
def generate_proto() -> str:
    """Generate complete proto file.

    Builds the proto3 header and service block from the GRPC_SERVICE dict
    (keys: package, name, methods), then one message per GRPC_MESSAGES
    entry.
    """
    lines = [
        "// MPR Worker Service - GENERATED FILE",
        "//",
        "// Do not edit directly. Modify schema/models/grpc.py and run:",
        "// python schema/generate.py --proto",
        "",
        'syntax = "proto3";',
        "",
        f"package {GRPC_SERVICE['package']};",
        "",
        f"service {GRPC_SERVICE['name']} {{",
    ]
    # Methods — each entry supplies name, request/response classes, and a
    # stream_response flag for server-streaming RPCs
    for m in GRPC_SERVICE["methods"]:
        req = m["request"].__name__
        resp = m["response"].__name__
        returns = f"stream {resp}" if m["stream_response"] else resp
        lines.append(f" rpc {m['name']}({req}) returns ({returns});")
    lines.extend(["}", ""])
    # Messages
    for cls in GRPC_MESSAGES:
        lines.extend(generate_proto_message(cls))
        lines.append("")
    return "\n".join(lines)
# =============================================================================
# Writers
# =============================================================================
def write_file(path: Path, content: str) -> None:
    """Write content to path, creating parent directories as needed.

    Writes UTF-8 explicitly: ``Path.write_text`` otherwise uses the
    locale's preferred encoding, so generated files could differ (or
    fail on non-ASCII content) across platforms.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(content, encoding="utf-8")
    print(f"  {path}")
def write_django(output_dir: Path) -> None:
    """Write the generated Django models module."""
    target = output_dir / "mpr" / "media_assets" / "models.py"
    write_file(target, generate_django())
def write_pydantic(output_dir: Path) -> None:
    """Write each generated Pydantic schema module under api/schemas/."""
    base = output_dir / "api" / "schemas"
    for name, body in generate_pydantic().items():
        write_file(base / name, body)
def write_typescript(output_dir: Path) -> None:
    """Write the generated TypeScript type definitions for the timeline UI."""
    target = output_dir / "ui" / "timeline" / "src" / "types.ts"
    write_file(target, generate_typescript())
def write_proto(output_dir: Path) -> None:
    """Write worker.proto and generate the Python gRPC stubs from it.

    Stub generation shells out to grpc_tools.protoc using the same
    interpreter that runs this script. On failure, protoc's stderr is
    printed; previously it was captured and silently discarded, which
    made failures undiagnosable.
    """
    proto_dir = output_dir / "grpc" / "protos"
    proto_path = proto_dir / "worker.proto"
    write_file(proto_path, generate_proto())

    # Generate Python stubs
    grpc_dir = output_dir / "grpc"
    result = subprocess.run(
        [
            sys.executable,
            "-m",
            "grpc_tools.protoc",
            f"-I{proto_dir}",
            f"--python_out={grpc_dir}",
            f"--grpc_python_out={grpc_dir}",
            str(proto_path),
        ],
        capture_output=True,
        text=True,
    )
    if result.returncode == 0:
        print(f"  {grpc_dir}/worker_pb2.py")
        print(f"  {grpc_dir}/worker_pb2_grpc.py")
    else:
        print("  Warning: grpc_tools failed - pip install grpcio-tools")
        # Surface protoc's own diagnostics instead of dropping them.
        if result.stderr:
            print(result.stderr.strip())
# =============================================================================
# Main
# =============================================================================
def main() -> None:
    """CLI entry point: parse target flags and run the selected writers."""
    parser = argparse.ArgumentParser(description="Generate from schema")
    parser.add_argument("--django", action="store_true")
    parser.add_argument("--pydantic", action="store_true")
    parser.add_argument("--typescript", action="store_true")
    parser.add_argument("--proto", action="store_true")
    parser.add_argument("--all", action="store_true")
    parser.add_argument("--output", type=Path, default=PROJECT_ROOT)
    args = parser.parse_args()

    # With no explicit target flag, generate everything.
    if not any((args.django, args.pydantic, args.typescript, args.proto, args.all)):
        args.all = True

    print(f"Generating to {args.output}\n")

    targets: list[tuple[bool, str, Callable]] = [
        (args.all or args.django, "Django", write_django),
        (args.all or args.pydantic, "Pydantic", write_pydantic),
        (args.all or args.typescript, "TypeScript", write_typescript),
        (args.all or args.proto, "Proto", write_proto),
    ]
    for enabled, label, writer in targets:
        if not enabled:
            continue
        print(f"{label}:")
        writer(args.output)
        print()

    print("Done!")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,2 @@
node_modules/
dist/

View File

@@ -1,5 +1,5 @@
/** /**
* API client for FastAPI backend * GraphQL API client
*/ */
import type { import type {
@@ -8,34 +8,51 @@ import type {
TranscodeJob, TranscodeJob,
CreateJobRequest, CreateJobRequest,
SystemStatus, SystemStatus,
WorkerStatus,
} from "./types"; } from "./types";
const API_BASE = "/api"; const GRAPHQL_URL = "/api/graphql";
async function request<T>(path: string, options?: RequestInit): Promise<T> { async function gql<T>(query: string, variables?: Record<string, unknown>): Promise<T> {
const response = await fetch(`${API_BASE}${path}`, { const response = await fetch(GRAPHQL_URL, {
headers: { method: "POST",
"Content-Type": "application/json", headers: { "Content-Type": "application/json" },
}, body: JSON.stringify({ query, variables }),
...options,
}); });
if (!response.ok) { const json = await response.json();
const error = await response.text();
throw new Error(`API error: ${response.status} - ${error}`); if (json.errors?.length) {
throw new Error(json.errors[0].message);
} }
return response.json(); return json.data as T;
} }
// Assets // Assets
export async function getAssets(): Promise<MediaAsset[]> { export async function getAssets(): Promise<MediaAsset[]> {
return request("/assets/"); const data = await gql<{ assets: MediaAsset[] }>(`
query {
assets {
id filename file_path status error_message file_size duration
video_codec audio_codec width height framerate bitrate
properties comments tags created_at updated_at
}
}
`);
return data.assets;
} }
export async function getAsset(id: string): Promise<MediaAsset> { export async function getAsset(id: string): Promise<MediaAsset> {
return request(`/assets/${id}`); const data = await gql<{ asset: MediaAsset }>(`
query($id: UUID!) {
asset(id: $id) {
id filename file_path status error_message file_size duration
video_codec audio_codec width height framerate bitrate
properties comments tags created_at updated_at
}
}
`, { id });
return data.asset;
} }
export async function scanMediaFolder(): Promise<{ export async function scanMediaFolder(): Promise<{
@@ -44,43 +61,95 @@ export async function scanMediaFolder(): Promise<{
skipped: number; skipped: number;
files: string[]; files: string[];
}> { }> {
return request("/assets/scan", { const data = await gql<{ scan_media_folder: { found: number; registered: number; skipped: number; files: string[] } }>(`
method: "POST", mutation {
}); scan_media_folder { found registered skipped files }
}
`);
return data.scan_media_folder;
} }
// Presets // Presets
export async function getPresets(): Promise<TranscodePreset[]> { export async function getPresets(): Promise<TranscodePreset[]> {
return request("/presets/"); const data = await gql<{ presets: TranscodePreset[] }>(`
query {
presets {
id name description is_builtin container
video_codec video_bitrate video_crf video_preset resolution framerate
audio_codec audio_bitrate audio_channels audio_samplerate
extra_args created_at updated_at
}
}
`);
return data.presets;
} }
// Jobs // Jobs
export async function getJobs(): Promise<TranscodeJob[]> { export async function getJobs(): Promise<TranscodeJob[]> {
return request("/jobs/"); const data = await gql<{ jobs: TranscodeJob[] }>(`
query {
jobs {
id source_asset_id preset_id preset_snapshot trim_start trim_end
output_filename output_path output_asset_id status progress
current_frame current_time speed error_message celery_task_id
execution_arn priority created_at started_at completed_at
}
}
`);
return data.jobs;
} }
export async function getJob(id: string): Promise<TranscodeJob> { export async function getJob(id: string): Promise<TranscodeJob> {
return request(`/jobs/${id}`); const data = await gql<{ job: TranscodeJob }>(`
query($id: UUID!) {
job(id: $id) {
id source_asset_id preset_id preset_snapshot trim_start trim_end
output_filename output_path output_asset_id status progress
current_frame current_time speed error_message celery_task_id
execution_arn priority created_at started_at completed_at
}
}
`, { id });
return data.job;
} }
export async function createJob(data: CreateJobRequest): Promise<TranscodeJob> { export async function createJob(req: CreateJobRequest): Promise<TranscodeJob> {
return request("/jobs/", { const data = await gql<{ create_job: TranscodeJob }>(`
method: "POST", mutation($input: CreateJobInput!) {
body: JSON.stringify(data), create_job(input: $input) {
id source_asset_id status output_filename progress created_at
}
}
`, {
input: {
source_asset_id: req.source_asset_id,
preset_id: req.preset_id,
trim_start: req.trim_start,
trim_end: req.trim_end,
output_filename: req.output_filename ?? null,
priority: req.priority ?? 0,
},
}); });
return data.create_job;
} }
export async function cancelJob(id: string): Promise<TranscodeJob> { export async function cancelJob(id: string): Promise<TranscodeJob> {
return request(`/jobs/${id}/cancel`, { const data = await gql<{ cancel_job: TranscodeJob }>(`
method: "POST", mutation($id: UUID!) {
}); cancel_job(id: $id) {
id status
}
}
`, { id });
return data.cancel_job;
} }
// System // System
export async function getSystemStatus(): Promise<SystemStatus> { export async function getSystemStatus(): Promise<SystemStatus> {
return request("/system/status"); const data = await gql<{ system_status: SystemStatus }>(`
} query {
system_status { status version }
export async function getWorkerStatus(): Promise<WorkerStatus> { }
return request("/system/worker"); `);
return data.system_status;
} }

Some files were not shown because too many files have changed in this diff Show More