Compare commits

...

5 Commits

Author SHA1 Message Date
013587d108 plug task enqueing properly 2026-02-06 10:49:05 -03:00
2cf6c89fbb ui video selector 2026-02-06 09:41:50 -03:00
daabd15c19 update docs 2026-02-06 09:23:36 -03:00
2e6ed4e37a scan media folder 2026-02-06 09:06:10 -03:00
68622bd6b1 fixed model names and generator 2026-02-06 08:51:35 -03:00
27 changed files with 1245 additions and 244 deletions

6
.gitignore vendored
View File

@@ -17,8 +17,10 @@ env/
*.pot *.pot
*.pyc *.pyc
db.sqlite3 db.sqlite3
media/* media/in/*
!media/.gitkeep !media/in/.gitkeep
media/out/*
!media/out/.gitkeep
# Node # Node
node_modules/ node_modules/

View File

@@ -76,23 +76,39 @@ docker compose exec django python manage.py createsuperuser
## Code Generation ## Code Generation
Models are defined in `schema/models/` and generate: Models are defined as dataclasses in `schema/models/` and generated via `modelgen`:
- Django ORM models - **Django ORM** models (`--include dataclasses,enums`)
- Pydantic schemas - **Pydantic** schemas (`--include dataclasses,enums`)
- TypeScript types - **TypeScript** types (`--include dataclasses,enums,api`)
- Protobuf definitions - **Protobuf** definitions (`--include grpc`)
Each target only gets the model groups it needs via the `--include` flag.
```bash ```bash
# Regenerate all # Regenerate all targets
python schema/generate.py --all bash ctrl/generate.sh
# Or specific targets
python schema/generate.py --django
python schema/generate.py --pydantic
python schema/generate.py --typescript
python schema/generate.py --proto
``` ```
## Media Storage
MPR separates media into **input** (`MEDIA_IN`) and **output** (`MEDIA_OUT`) paths, each independently configurable. File paths are stored relative for cloud portability.
### Local Development
- Source files: `/app/media/in/video.mp4`
- Output files: `/app/media/out/video_h264.mp4`
- Served via: `http://mpr.local.ar/media/in/video.mp4` (nginx alias)
### AWS/Cloud Deployment
Input and output can be different buckets/locations:
```bash
MEDIA_IN=s3://source-bucket/media/
MEDIA_OUT=s3://output-bucket/transcoded/
```
**Scan Endpoint**: `POST /api/assets/scan` recursively scans `MEDIA_IN` and registers new files with relative paths.
See [docs/media-storage.md](docs/media-storage.md) for full details.
## Project Structure ## Project Structure
``` ```
@@ -105,18 +121,20 @@ mpr/
├── ctrl/ # Docker & deployment ├── ctrl/ # Docker & deployment
│ ├── docker-compose.yml │ ├── docker-compose.yml
│ └── nginx.conf │ └── nginx.conf
├── docs/ # Architecture diagrams ├── media/
├── grpc/ # gRPC server & client │ ├── in/ # Source media files
│ └── out/ # Transcoded output
├── rpc/ # gRPC server & client
│ └── protos/ # Protobuf definitions (generated) │ └── protos/ # Protobuf definitions (generated)
├── mpr/ # Django project ├── mpr/ # Django project
│ └── media_assets/ # Django app │ └── media_assets/ # Django app
├── schema/ # Source of truth ├── schema/ # Source of truth
│ └── models/ # Dataclass definitions │ └── models/ # Dataclass definitions
├── ui/ # Frontend ├── task/ # Celery job execution
── timeline/ # React app ── executor.py # Executor abstraction
└── worker/ # Job execution │ └── tasks.py # Celery tasks
├── executor.py # Executor abstraction └── ui/ # Frontend
└── tasks.py # Celery tasks └── timeline/ # React app
``` ```
## Environment Variables ## Environment Variables
@@ -130,6 +148,10 @@ See `ctrl/.env.template` for all configuration options.
| `GRPC_HOST` | grpc | gRPC server hostname | | `GRPC_HOST` | grpc | gRPC server hostname |
| `GRPC_PORT` | 50051 | gRPC server port | | `GRPC_PORT` | 50051 | gRPC server port |
| `MPR_EXECUTOR` | local | Executor type (local/lambda) | | `MPR_EXECUTOR` | local | Executor type (local/lambda) |
| `MEDIA_IN` | /app/media/in | Source media files directory |
| `MEDIA_OUT` | /app/media/out | Transcoded output directory |
| `MEDIA_BASE_URL` | /media/ | Base URL for serving media (use S3 URL for cloud) |
| `VITE_ALLOWED_HOSTS` | - | Comma-separated allowed hosts for Vite dev server |
## License ## License

View File

@@ -30,10 +30,19 @@ def create_asset(data: AssetCreate):
if not path.exists(): if not path.exists():
raise HTTPException(status_code=400, detail="File not found") raise HTTPException(status_code=400, detail="File not found")
# Store path relative to media root
import os
media_root = Path(os.environ.get("MEDIA_IN", "/app/media/in"))
try:
rel_path = str(path.relative_to(media_root))
except ValueError:
rel_path = path.name
# Create asset # Create asset
asset = MediaAsset.objects.create( asset = MediaAsset.objects.create(
filename=data.filename or path.name, filename=data.filename or path.name,
file_path=str(path.absolute()), file_path=rel_path,
file_size=path.stat().st_size, file_size=path.stat().st_size,
) )
@@ -88,3 +97,68 @@ def update_asset(asset_id: UUID, data: AssetUpdate, asset=Depends(get_asset)):
def delete_asset(asset_id: UUID, asset=Depends(get_asset)): def delete_asset(asset_id: UUID, asset=Depends(get_asset)):
"""Delete an asset.""" """Delete an asset."""
asset.delete() asset.delete()
@router.post("/scan", response_model=dict)
def scan_media_folder():
"""
Scan the media folder for new video/audio files and register them as assets.
Returns a summary of files found and registered.
"""
import os
from pathlib import Path
from mpr.media_assets.models import MediaAsset
# Get media input folder from environment
media_root = os.environ.get("MEDIA_IN", "/app/media/in")
media_path = Path(media_root)
if not media_path.exists():
raise HTTPException(
status_code=500, detail=f"Media folder not found: {media_root}"
)
# Supported video/audio extensions
video_exts = {".mp4", ".mkv", ".avi", ".mov", ".webm", ".flv", ".wmv", ".m4v"}
audio_exts = {".mp3", ".wav", ".flac", ".aac", ".ogg", ".m4a"}
supported_exts = video_exts | audio_exts
# Get existing filenames to avoid duplicates
existing_filenames = set(MediaAsset.objects.values_list("filename", flat=True))
# Scan for media files
found_files = []
registered_files = []
skipped_files = []
for file_path in media_path.rglob("*"):
if file_path.is_file() and file_path.suffix.lower() in supported_exts:
found_files.append(str(file_path))
# Skip if already registered
if file_path.name in existing_filenames:
skipped_files.append(file_path.name)
continue
# Register new asset with path relative to media root
rel_path = str(file_path.relative_to(media_path))
try:
asset = MediaAsset.objects.create(
filename=file_path.name,
file_path=rel_path,
file_size=file_path.stat().st_size,
)
registered_files.append(file_path.name)
# TODO: Queue probe task to extract metadata
except Exception as e:
print(f"Error registering {file_path.name}: {e}")
return {
"found": len(found_files),
"registered": len(registered_files),
"skipped": len(skipped_files),
"files": registered_files,
}

View File

@@ -30,9 +30,6 @@ def create_job(data: JobCreate):
except MediaAsset.DoesNotExist: except MediaAsset.DoesNotExist:
raise HTTPException(status_code=404, detail="Source asset not found") raise HTTPException(status_code=404, detail="Source asset not found")
if source.status != "ready":
raise HTTPException(status_code=400, detail="Source asset is not ready")
# Get preset if specified # Get preset if specified
preset = None preset = None
preset_snapshot = {} preset_snapshot = {}
@@ -64,27 +61,48 @@ def create_job(data: JobCreate):
status_code=400, detail="Must specify preset_id or trim_start/trim_end" status_code=400, detail="Must specify preset_id or trim_start/trim_end"
) )
# Generate output filename # Generate output filename and path
output_filename = data.output_filename import os
if not output_filename:
from pathlib import Path from pathlib import Path
output_filename = data.output_filename
if not output_filename:
stem = Path(source.filename).stem stem = Path(source.filename).stem
ext = preset_snapshot.get("container", "mp4") if preset else "mp4" ext = preset_snapshot.get("container", "mp4") if preset else "mp4"
output_filename = f"{stem}_output.{ext}" output_filename = f"{stem}_output.{ext}"
media_out = os.environ.get("MEDIA_OUT", "/app/media/out")
output_path = str(Path(media_out) / output_filename)
media_in = os.environ.get("MEDIA_IN", "/app/media/in")
source_path = str(Path(media_in) / source.file_path)
# Create job # Create job
job = TranscodeJob.objects.create( job = TranscodeJob.objects.create(
source_asset=source, source_asset_id=source.id,
preset=preset, preset_id=preset.id if preset else None,
preset_snapshot=preset_snapshot, preset_snapshot=preset_snapshot,
trim_start=data.trim_start, trim_start=data.trim_start,
trim_end=data.trim_end, trim_end=data.trim_end,
output_filename=output_filename, output_filename=output_filename,
output_path=output_path,
priority=data.priority or 0, priority=data.priority or 0,
) )
# TODO: Submit job via gRPC # Dispatch to Celery
from task.tasks import run_transcode_job
result = run_transcode_job.delay(
job_id=str(job.id),
source_path=source_path,
output_path=output_path,
preset=preset_snapshot or None,
trim_start=data.trim_start,
trim_end=data.trim_end,
duration=source.duration,
)
job.celery_task_id = result.id
job.save(update_fields=["celery_task_id"])
return job return job

View File

@@ -17,26 +17,19 @@ class JobStatus(str, Enum):
class JobCreate(BaseSchema): class JobCreate(BaseSchema):
"""JobCreate schema.""" """Client-facing job creation request."""
source_asset_id: UUID source_asset_id: UUID
preset_id: Optional[UUID] = None preset_id: Optional[UUID] = None
preset_snapshot: Dict[str, Any]
trim_start: Optional[float] = None trim_start: Optional[float] = None
trim_end: Optional[float] = None trim_end: Optional[float] = None
output_filename: str = "" output_filename: Optional[str] = None
output_path: Optional[str] = None
output_asset_id: Optional[UUID] = None
progress: float = 0.0
current_frame: Optional[int] = None
current_time: Optional[float] = None
speed: Optional[str] = None
celery_task_id: Optional[str] = None
priority: int = 0 priority: int = 0
started_at: Optional[datetime] = None
completed_at: Optional[datetime] = None
class JobUpdate(BaseSchema): class JobUpdate(BaseSchema):
"""JobUpdate schema.""" """JobUpdate schema."""
source_asset_id: Optional[UUID] = None source_asset_id: Optional[UUID] = None
preset_id: Optional[UUID] = None preset_id: Optional[UUID] = None
preset_snapshot: Optional[Dict[str, Any]] = None preset_snapshot: Optional[Dict[str, Any]] = None
@@ -56,8 +49,10 @@ class JobUpdate(BaseSchema):
started_at: Optional[datetime] = None started_at: Optional[datetime] = None
completed_at: Optional[datetime] = None completed_at: Optional[datetime] = None
class JobResponse(BaseSchema): class JobResponse(BaseSchema):
"""JobResponse schema.""" """JobResponse schema."""
id: UUID id: UUID
source_asset_id: UUID source_asset_id: UUID
preset_id: Optional[UUID] = None preset_id: Optional[UUID] = None

View File

@@ -27,5 +27,9 @@ GRPC_HOST=grpc
GRPC_PORT=50051 GRPC_PORT=50051
GRPC_MAX_WORKERS=10 GRPC_MAX_WORKERS=10
# Media
MEDIA_IN=/app/media/in
MEDIA_OUT=/app/media/out
# Vite # Vite
VITE_ALLOWED_HOSTS=your-domain.local VITE_ALLOWED_HOSTS=your-domain.local

View File

@@ -5,6 +5,8 @@ x-common-env: &common-env
DEBUG: 1 DEBUG: 1
GRPC_HOST: grpc GRPC_HOST: grpc
GRPC_PORT: 50051 GRPC_PORT: 50051
MEDIA_IN: ${MEDIA_IN:-/app/media/in}
MEDIA_OUT: ${MEDIA_OUT:-/app/media/out}
x-healthcheck-defaults: &healthcheck-defaults x-healthcheck-defaults: &healthcheck-defaults
interval: 5s interval: 5s
@@ -119,7 +121,7 @@ services:
build: build:
context: .. context: ..
dockerfile: ctrl/Dockerfile dockerfile: ctrl/Dockerfile
command: celery -A mpr worker -l info -Q default -c 2 command: celery -A mpr worker -l info -Q transcode -c 2
environment: environment:
<<: *common-env <<: *common-env
MPR_EXECUTOR: local MPR_EXECUTOR: local

View File

@@ -8,29 +8,33 @@ cd "$(dirname "$0")/.."
echo "Generating models from schema/models..." echo "Generating models from schema/models..."
# Django ORM models # Django ORM models: domain models + enums
python -m modelgen from-schema \ python -m modelgen from-schema \
--schema schema/models \ --schema schema/models \
--output mpr/media_assets/models.py \ --output mpr/media_assets/models.py \
--targets django --targets django \
--include dataclasses,enums
# Pydantic schemas for FastAPI # Pydantic schemas for FastAPI: domain models + enums
python -m modelgen from-schema \ python -m modelgen from-schema \
--schema schema/models \ --schema schema/models \
--output api/schemas/models.py \ --output api/schemas/models.py \
--targets pydantic --targets pydantic \
--include dataclasses,enums
# TypeScript types for Timeline UI # TypeScript types for Timeline UI: domain models + enums + API types
python -m modelgen from-schema \ python -m modelgen from-schema \
--schema schema/models \ --schema schema/models \
--output ui/timeline/src/types.ts \ --output ui/timeline/src/types.ts \
--targets typescript --targets typescript \
--include dataclasses,enums,api
# Protobuf for gRPC # Protobuf for gRPC: gRPC messages + service
python -m modelgen from-schema \ python -m modelgen from-schema \
--schema schema/models \ --schema schema/models \
--output rpc/protos/worker.proto \ --output rpc/protos/worker.proto \
--targets proto --targets proto \
--include grpc
# Generate gRPC stubs from proto # Generate gRPC stubs from proto
echo "Generating gRPC stubs..." echo "Generating gRPC stubs..."

View File

@@ -67,9 +67,15 @@ http {
proxy_set_header Host $host; proxy_set_header Host $host;
} }
# Media files # Media files - input (source)
location /media { location /media/in {
alias /app/media; alias /app/media/in;
autoindex on;
}
# Media files - output (transcoded)
location /media/out {
alias /app/media/out;
autoindex on; autoindex on;
} }

150
docs/media-storage.md Normal file
View File

@@ -0,0 +1,150 @@
# Media Storage Architecture
## Overview
MPR separates media into **input** and **output** paths, each independently configurable. File paths are stored **relative to their respective root** to ensure portability between local development and cloud deployments (AWS S3, etc.).
## Storage Strategy
### Input / Output Separation
| Path | Env Var | Purpose |
|------|---------|---------|
| `MEDIA_IN` | `/app/media/in` | Source media files to process |
| `MEDIA_OUT` | `/app/media/out` | Transcoded/trimmed output files |
These can point to different locations or even different servers/buckets in production.
### File Path Storage
- **Database**: Stores only the relative path (e.g., `videos/sample.mp4`)
- **Input Root**: Configurable via `MEDIA_IN` env var
- **Output Root**: Configurable via `MEDIA_OUT` env var
- **Serving**: Base URL configurable via `MEDIA_BASE_URL` env var
### Why Relative Paths?
1. **Portability**: Same database works locally and in cloud
2. **Flexibility**: Easy to switch between storage backends
3. **Simplicity**: No need to update paths when migrating
## Local Development
### Configuration
```bash
MEDIA_IN=/app/media/in
MEDIA_OUT=/app/media/out
```
### File Structure
```
/app/media/
├── in/ # Source files
│ ├── video1.mp4
│ ├── video2.mp4
│ └── subfolder/
│ └── video3.mp4
└── out/ # Transcoded output
├── video1_h264.mp4
└── video2_trimmed.mp4
```
### Database Storage
```
# Source assets (scanned from media/in)
filename: video1.mp4
file_path: video1.mp4
filename: video3.mp4
file_path: subfolder/video3.mp4
```
### URL Serving
- Nginx serves input via `location /media/in { alias /app/media/in; }`
- Nginx serves output via `location /media/out { alias /app/media/out; }`
- Frontend accesses: `http://mpr.local.ar/media/in/video1.mp4`
- Video player: `<video src="/media/in/video1.mp4" />`
## AWS/Cloud Deployment
### S3 Configuration
```bash
# Input and output can be different buckets/paths
MEDIA_IN=s3://source-bucket/media/
MEDIA_OUT=s3://output-bucket/transcoded/
MEDIA_BASE_URL=https://source-bucket.s3.amazonaws.com/media/
```
### S3 Structure
```
s3://source-bucket/media/
├── video1.mp4
└── subfolder/
└── video3.mp4
s3://output-bucket/transcoded/
├── video1_h264.mp4
└── video2_trimmed.mp4
```
### Database Storage (Same!)
```
filename: video1.mp4
file_path: video1.mp4
filename: video3.mp4
file_path: subfolder/video3.mp4
```
## API Endpoints
### Scan Media Folder
```http
POST /api/assets/scan
```
**Behavior:**
1. Recursively scans `MEDIA_IN` directory
2. Finds all video/audio files (mp4, mkv, avi, mov, mp3, wav, etc.)
3. Stores paths **relative to MEDIA_IN**
4. Skips already-registered files (by filename)
5. Returns summary: `{ found, registered, skipped, files }`
### Create Job
```http
POST /api/jobs/
Content-Type: application/json
{
"source_asset_id": "uuid",
"preset_id": "uuid",
"trim_start": 10.0,
"trim_end": 30.0
}
```
**Behavior:**
- Server sets `output_path` using `MEDIA_OUT` + generated filename
- Output goes to the output directory, not alongside source files
## Migration Guide
### Moving from Local to S3
1. **Upload source files to S3:**
```bash
aws s3 sync /app/media/in/ s3://source-bucket/media/
aws s3 sync /app/media/out/ s3://output-bucket/transcoded/
```
2. **Update environment variables:**
```bash
MEDIA_IN=s3://source-bucket/media/
MEDIA_OUT=s3://output-bucket/transcoded/
MEDIA_BASE_URL=https://source-bucket.s3.amazonaws.com/media/
```
3. **Database paths remain unchanged** (already relative)
## Supported File Types
**Video:** `.mp4`, `.mkv`, `.avi`, `.mov`, `.webm`, `.flv`, `.wmv`, `.m4v`
**Audio:** `.mp3`, `.wav`, `.flac`, `.aac`, `.ogg`, `.m4a`

0
media/out/.gitkeep Normal file
View File

View File

@@ -72,10 +72,24 @@ def cmd_from_schema(args):
print("that exports DATACLASSES and ENUMS lists.", file=sys.stderr) print("that exports DATACLASSES and ENUMS lists.", file=sys.stderr)
sys.exit(1) sys.exit(1)
print(f"Loading schema: {schema_path}") # Parse include groups
schema = load_schema(schema_path) include = None
if args.include:
include = {g.strip() for g in args.include.split(",")}
print(f"Found {len(schema.models)} models, {len(schema.enums)} enums") print(f"Loading schema: {schema_path}")
schema = load_schema(schema_path, include=include)
loaded = []
if schema.models:
loaded.append(f"{len(schema.models)} models")
if schema.enums:
loaded.append(f"{len(schema.enums)} enums")
if schema.api_models:
loaded.append(f"{len(schema.api_models)} api models")
if schema.grpc_messages:
loaded.append(f"{len(schema.grpc_messages)} grpc messages")
print(f"Found {', '.join(loaded)}")
# Parse targets # Parse targets
targets = [t.strip() for t in args.targets.split(",")] targets = [t.strip() for t in args.targets.split(",")]
@@ -237,6 +251,12 @@ def main():
default="pydantic", default="pydantic",
help=f"Comma-separated output targets ({formats_str})", help=f"Comma-separated output targets ({formats_str})",
) )
schema_parser.add_argument(
"--include",
type=str,
default=None,
help="Comma-separated model groups to include (dataclasses,enums,api,grpc). Default: all.",
)
schema_parser.set_defaults(func=cmd_from_schema) schema_parser.set_defaults(func=cmd_from_schema)
# extract command # extract command

View File

@@ -217,13 +217,14 @@ class DjangoGenerator(BaseGenerator):
# Enum # Enum
if isinstance(base, type) and issubclass(base, Enum): if isinstance(base, type) and issubclass(base, Enum):
enum_name = base.__name__
extra = [] extra = []
if optional: if optional:
extra.append("null=True, blank=True") extra.append("null=True, blank=True")
if default is not dc.MISSING and isinstance(default, Enum): if default is not dc.MISSING and isinstance(default, Enum):
extra.append(f"default=Status.{default.name}") extra.append(f"default={enum_name}.{default.name}")
return DJANGO_TYPES["enum"].format( return DJANGO_TYPES["enum"].format(
opts=", " + ", ".join(extra) if extra else "" enum_name=enum_name, opts=", " + ", ".join(extra) if extra else ""
) )
# Text fields (based on name heuristics) # Text fields (based on name heuristics)

View File

@@ -28,7 +28,9 @@ class TypeScriptGenerator(BaseGenerator):
if hasattr(models, "models"): if hasattr(models, "models"):
# SchemaLoader # SchemaLoader
content = self._generate_from_definitions( content = self._generate_from_definitions(
models.models, getattr(models, "enums", []) models.models,
getattr(models, "enums", []),
api_models=getattr(models, "api_models", []),
) )
elif isinstance(models, tuple): elif isinstance(models, tuple):
# (models, enums) tuple # (models, enums) tuple
@@ -42,7 +44,10 @@ class TypeScriptGenerator(BaseGenerator):
output_path.write_text(content) output_path.write_text(content)
def _generate_from_definitions( def _generate_from_definitions(
self, models: List[ModelDefinition], enums: List[EnumDefinition] self,
models: List[ModelDefinition],
enums: List[EnumDefinition],
api_models: List[ModelDefinition] = None,
) -> str: ) -> str:
"""Generate from ModelDefinition objects.""" """Generate from ModelDefinition objects."""
lines = self._generate_header() lines = self._generate_header()
@@ -58,6 +63,14 @@ class TypeScriptGenerator(BaseGenerator):
lines.extend(self._generate_interface_from_definition(model_def)) lines.extend(self._generate_interface_from_definition(model_def))
lines.append("") lines.append("")
# Generate API request/response interfaces
if api_models:
lines.append("// API request/response types")
lines.append("")
for model_def in api_models:
lines.extend(self._generate_interface_from_definition(model_def))
lines.append("")
return "\n".join(lines) return "\n".join(lines)
def _generate_from_dataclasses(self, dataclasses: List[type]) -> str: def _generate_from_dataclasses(self, dataclasses: List[type]) -> str:

View File

@@ -60,12 +60,18 @@ class SchemaLoader:
def __init__(self, schema_path: Path): def __init__(self, schema_path: Path):
self.schema_path = Path(schema_path) self.schema_path = Path(schema_path)
self.models: List[ModelDefinition] = [] self.models: List[ModelDefinition] = []
self.api_models: List[ModelDefinition] = []
self.enums: List[EnumDefinition] = [] self.enums: List[EnumDefinition] = []
self.grpc_messages: List[ModelDefinition] = [] self.grpc_messages: List[ModelDefinition] = []
self.grpc_service: Optional[GrpcServiceDefinition] = None self.grpc_service: Optional[GrpcServiceDefinition] = None
def load(self) -> "SchemaLoader": def load(self, include: Optional[set] = None) -> "SchemaLoader":
"""Load schema definitions from the schema folder.""" """Load schema definitions from the schema folder.
Args:
include: Set of groups to load (dataclasses, enums, api, grpc).
None means load all groups.
"""
init_path = self.schema_path / "__init__.py" init_path = self.schema_path / "__init__.py"
if not init_path.exists(): if not init_path.exists():
@@ -74,22 +80,34 @@ class SchemaLoader:
# Import the schema module # Import the schema module
module = self._import_module(init_path) module = self._import_module(init_path)
load_all = include is None
# Extract DATACLASSES # Extract DATACLASSES
if load_all or "dataclasses" in include:
dataclasses = getattr(module, "DATACLASSES", []) dataclasses = getattr(module, "DATACLASSES", [])
for cls in dataclasses: for cls in dataclasses:
self.models.append(self._parse_dataclass(cls)) self.models.append(self._parse_dataclass(cls))
# Extract API_MODELS (TypeScript-only request/response types)
if load_all or "api" in include:
api_models = getattr(module, "API_MODELS", [])
for cls in api_models:
self.api_models.append(self._parse_dataclass(cls))
# Extract ENUMS # Extract ENUMS
if load_all or "enums" in include:
enums = getattr(module, "ENUMS", []) enums = getattr(module, "ENUMS", [])
for enum_cls in enums: for enum_cls in enums:
self.enums.append(self._parse_enum(enum_cls)) self.enums.append(self._parse_enum(enum_cls))
# Extract GRPC_MESSAGES (optional) # Extract GRPC_MESSAGES (optional)
if load_all or "grpc" in include:
grpc_messages = getattr(module, "GRPC_MESSAGES", []) grpc_messages = getattr(module, "GRPC_MESSAGES", [])
for cls in grpc_messages: for cls in grpc_messages:
self.grpc_messages.append(self._parse_dataclass(cls)) self.grpc_messages.append(self._parse_dataclass(cls))
# Extract GRPC_SERVICE (optional) # Extract GRPC_SERVICE (optional)
if load_all or "grpc" in include:
grpc_service = getattr(module, "GRPC_SERVICE", None) grpc_service = getattr(module, "GRPC_SERVICE", None)
if grpc_service: if grpc_service:
self.grpc_service = GrpcServiceDefinition( self.grpc_service = GrpcServiceDefinition(
@@ -163,7 +181,7 @@ class SchemaLoader:
return False return False
def load_schema(schema_path: str | Path) -> SchemaLoader: def load_schema(schema_path: str | Path, include: Optional[set] = None) -> SchemaLoader:
"""Load schema definitions from folder.""" """Load schema definitions from folder."""
loader = SchemaLoader(schema_path) loader = SchemaLoader(schema_path)
return loader.load() return loader.load(include=include)

View File

@@ -22,7 +22,7 @@ DJANGO_TYPES: dict[Any, str] = {
"list": "models.JSONField(default=list, blank=True)", "list": "models.JSONField(default=list, blank=True)",
"text": "models.TextField(blank=True, default='')", "text": "models.TextField(blank=True, default='')",
"bigint": "models.BigIntegerField({opts})", "bigint": "models.BigIntegerField({opts})",
"enum": "models.CharField(max_length=20, choices=Status.choices{opts})", "enum": "models.CharField(max_length=20, choices={enum_name}.choices{opts})",
} }
DJANGO_SPECIAL: dict[str, str] = { DJANGO_SPECIAL: dict[str, str] = {

View File

@@ -7,3 +7,4 @@ os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mpr.settings")
app = Celery("mpr") app = Celery("mpr")
app.config_from_object("django.conf:settings", namespace="CELERY") app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks() app.autodiscover_tasks()
app.autodiscover_tasks(["task"])

View File

@@ -25,7 +25,7 @@ class MediaAsset(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
filename = models.CharField(max_length=500) filename = models.CharField(max_length=500)
file_path = models.CharField(max_length=1000) file_path = models.CharField(max_length=1000)
status = models.CharField(max_length=20, choices=Status.choices, default=Status.PENDING) status = models.CharField(max_length=20, choices=AssetStatus.choices, default=AssetStatus.PENDING)
error_message = models.TextField(blank=True, default='') error_message = models.TextField(blank=True, default='')
file_size = models.BigIntegerField(null=True, blank=True) file_size = models.BigIntegerField(null=True, blank=True)
duration = models.FloatField(null=True, blank=True, default=None) duration = models.FloatField(null=True, blank=True, default=None)
@@ -89,7 +89,7 @@ class TranscodeJob(models.Model):
output_filename = models.CharField(max_length=500) output_filename = models.CharField(max_length=500)
output_path = models.CharField(max_length=1000, null=True, blank=True) output_path = models.CharField(max_length=1000, null=True, blank=True)
output_asset_id = models.UUIDField(null=True, blank=True) output_asset_id = models.UUIDField(null=True, blank=True)
status = models.CharField(max_length=20, choices=Status.choices, default=Status.PENDING) status = models.CharField(max_length=20, choices=JobStatus.choices, default=JobStatus.PENDING)
progress = models.FloatField(default=0.0) progress = models.FloatField(default=0.0)
current_frame = models.IntegerField(null=True, blank=True, default=None) current_frame = models.IntegerField(null=True, blank=True, default=None)
current_time = models.FloatField(null=True, blank=True, default=None) current_time = models.FloatField(null=True, blank=True, default=None)

View File

@@ -201,7 +201,7 @@ def update_job_progress(
""" """
Update job progress (called from worker tasks). Update job progress (called from worker tasks).
This updates the in-memory state that StreamProgress reads from. Updates both the in-memory gRPC state and the Django database.
""" """
if job_id in _active_jobs: if job_id in _active_jobs:
_active_jobs[job_id].update( _active_jobs[job_id].update(
@@ -215,6 +215,36 @@ def update_job_progress(
} }
) )
# Update Django database
try:
from django.utils import timezone
from mpr.media_assets.models import TranscodeJob
update_fields = ["progress", "current_frame", "current_time", "speed", "status"]
updates = {
"progress": progress,
"current_frame": current_frame,
"current_time": current_time,
"speed": str(speed),
"status": status,
}
if error:
updates["error_message"] = error
update_fields.append("error_message")
if status == "processing":
updates["started_at"] = timezone.now()
update_fields.append("started_at")
elif status in ("completed", "failed"):
updates["completed_at"] = timezone.now()
update_fields.append("completed_at")
TranscodeJob.objects.filter(id=job_id).update(**updates)
except Exception as e:
logger.warning(f"Failed to update job {job_id} in DB: {e}")
def serve(port: int = None, celery_app=None) -> grpc.Server: def serve(port: int = None, celery_app=None) -> grpc.Server:
""" """

View File

@@ -17,7 +17,7 @@ logger = logging.getLogger(__name__)
MEDIA_ROOT = os.environ.get("MEDIA_ROOT", "/app/media") MEDIA_ROOT = os.environ.get("MEDIA_ROOT", "/app/media")
@shared_task(bind=True, max_retries=3, default_retry_delay=60) @shared_task(bind=True, queue="transcode", max_retries=3, default_retry_delay=60)
def run_transcode_job( def run_transcode_job(
self, self,
job_id: str, job_id: str,

View File

@@ -5,7 +5,8 @@
} }
body { body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif; font-family:
-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
background: #1a1a1a; background: #1a1a1a;
color: #e0e0e0; color: #e0e0e0;
} }
@@ -46,16 +47,91 @@ body {
background: #202020; background: #202020;
border-right: 1px solid #333; border-right: 1px solid #333;
overflow-y: auto; overflow-y: auto;
display: flex;
flex-direction: column;
} }
.sidebar h2 { .sidebar-section {
border-bottom: 1px solid #333;
}
.sidebar-section:first-child {
flex: 1;
min-height: 0;
overflow-y: auto;
}
.sidebar-count {
font-size: 0.7rem;
background: #333;
color: #888;
padding: 0.125rem 0.375rem;
border-radius: 8px;
}
.sidebar-list {
max-height: 200px;
overflow-y: auto;
}
.sidebar-empty {
padding: 0.5rem 1rem;
font-size: 0.8rem;
color: #555;
}
.output-item {
display: block;
padding: 0.5rem 1rem;
font-size: 0.8rem;
color: #10b981;
text-decoration: none;
border-bottom: 1px solid #2a2a2a;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.output-item:hover {
background: #2a2a2a;
}
.sidebar-header {
padding: 1rem; padding: 1rem;
display: flex;
justify-content: space-between;
align-items: center;
gap: 0.5rem;
}
.sidebar-header h2 {
font-size: 0.875rem; font-size: 0.875rem;
text-transform: uppercase; text-transform: uppercase;
letter-spacing: 0.05em; letter-spacing: 0.05em;
color: #888; color: #888;
} }
.scan-button {
padding: 0.375rem 0.75rem;
font-size: 0.75rem;
background: #3b82f6;
color: white;
border: none;
border-radius: 4px;
cursor: pointer;
transition: background 0.2s;
}
.scan-button:hover:not(:disabled) {
background: #2563eb;
}
.scan-button:disabled {
background: #4b5563;
cursor: not-allowed;
opacity: 0.6;
}
.asset-list { .asset-list {
list-style: none; list-style: none;
} }
@@ -148,41 +224,221 @@ body {
} }
.timeline-container { .timeline-container {
height: 120px;
background: #252525; background: #252525;
border-top: 1px solid #333; border-top: 1px solid #333;
padding: 0.75rem 1rem;
} }
.timeline-placeholder { /* Timeline component */
.timeline {
user-select: none;
}
.timeline-times {
display: flex; display: flex;
align-items: center; justify-content: space-between;
justify-content: center; font-size: 0.75rem;
height: 100%; color: #aaa;
color: #666; margin-bottom: 0.5rem;
font-variant-numeric: tabular-nums;
} }
.info { .timeline-track {
padding: 1rem; position: relative;
height: 40px;
background: #333;
border-radius: 4px;
cursor: pointer;
overflow: hidden;
}
.timeline-dim {
position: absolute;
top: 0;
height: 100%;
background: rgba(0, 0, 0, 0.5);
pointer-events: none;
}
.timeline-selection {
position: absolute;
top: 0;
height: 100%;
background: rgba(59, 130, 246, 0.15);
pointer-events: none;
}
.timeline-playhead {
position: absolute;
top: 0;
width: 2px;
height: 100%;
background: #fff;
pointer-events: none;
transform: translateX(-1px);
z-index: 2;
}
.timeline-handle {
position: absolute;
top: 0;
width: 12px;
height: 100%;
cursor: ew-resize;
transform: translateX(-6px);
z-index: 3;
border-radius: 2px;
transition: background 0.1s;
}
.timeline-handle::after {
content: "";
position: absolute;
top: 0;
left: 5px;
width: 2px;
height: 100%;
background: #3b82f6;
}
.timeline-handle:hover,
.timeline-handle.dragging {
background: rgba(59, 130, 246, 0.3);
}
.timeline-handle.dragging {
cursor: grabbing;
}
.timeline-duration {
display: flex;
justify-content: space-between;
font-size: 0.625rem;
color: #666;
margin-top: 0.25rem;
}
/* Job panel */
.job-panel {
padding: 0.75rem 1rem;
background: #202020; background: #202020;
border-top: 1px solid #333; border-top: 1px solid #333;
} }
.info h3 { .job-controls {
margin-bottom: 0.5rem; display: flex;
font-size: 1rem; gap: 0.5rem;
align-items: center;
} }
.info dl { .preset-select {
display: grid; flex: 1;
grid-template-columns: auto 1fr; padding: 0.375rem 0.5rem;
gap: 0.25rem 1rem; font-size: 0.8rem;
font-size: 0.875rem; background: #333;
}
.info dt {
color: #888;
}
.info dd {
color: #e0e0e0; color: #e0e0e0;
border: 1px solid #444;
border-radius: 4px;
cursor: pointer;
}
.preset-select:focus {
outline: none;
border-color: #3b82f6;
}
.enqueue-button {
padding: 0.375rem 1rem;
font-size: 0.8rem;
background: #10b981;
color: #000;
border: none;
border-radius: 4px;
cursor: pointer;
font-weight: 500;
white-space: nowrap;
transition: background 0.2s;
}
.enqueue-button:hover:not(:disabled) {
background: #059669;
}
.enqueue-button:disabled {
background: #4b5563;
color: #888;
cursor: not-allowed;
}
/* Job items */
.job-item {
padding: 0.5rem 1rem;
border-bottom: 1px solid #2a2a2a;
font-size: 0.8rem;
}
.job-item-header {
display: flex;
justify-content: space-between;
align-items: center;
}
.job-filename {
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
color: #ccc;
}
.job-status {
font-size: 0.7rem;
padding: 0.125rem 0.375rem;
border-radius: 3px;
text-transform: uppercase;
font-weight: 500;
flex-shrink: 0;
margin-left: 0.5rem;
}
.job-status.pending {
background: #f59e0b;
color: #000;
}
.job-status.processing {
background: #3b82f6;
color: #fff;
}
.job-status.completed {
background: #10b981;
color: #000;
}
.job-status.failed {
background: #ef4444;
color: #fff;
}
.job-status.cancelled {
background: #6b7280;
color: #fff;
}
.job-progress-bar {
height: 4px;
background: #444;
border-radius: 2px;
margin-top: 0.375rem;
overflow: hidden;
}
.job-progress-fill {
height: 100%;
background: #3b82f6;
border-radius: 2px;
transition: width 0.3s;
} }

View File

@@ -1,14 +1,25 @@
import { useState, useEffect } from 'react' import { useState, useEffect, useRef, useCallback } from "react";
import { getAssets, getSystemStatus } from './api' import { getAssets, getJobs, getSystemStatus, scanMediaFolder } from "./api";
import type { MediaAsset, SystemStatus } from './types' import type { MediaAsset, TranscodeJob, SystemStatus } from "./types";
import './App.css' import Timeline from "./Timeline";
import JobPanel from "./JobPanel";
import "./App.css";
function App() { function App() {
const [assets, setAssets] = useState<MediaAsset[]>([]) const [assets, setAssets] = useState<MediaAsset[]>([]);
const [status, setStatus] = useState<SystemStatus | null>(null) const [jobs, setJobs] = useState<TranscodeJob[]>([]);
const [selectedAsset, setSelectedAsset] = useState<MediaAsset | null>(null) const [status, setStatus] = useState<SystemStatus | null>(null);
const [loading, setLoading] = useState(true) const [selectedAsset, setSelectedAsset] = useState<MediaAsset | null>(null);
const [error, setError] = useState<string | null>(null) const [loading, setLoading] = useState(true);
const [error, setError] = useState<string | null>(null);
const [scanning, setScanning] = useState(false);
// Video sync state
const videoRef = useRef<HTMLVideoElement>(null);
const [currentTime, setCurrentTime] = useState(0);
const [duration, setDuration] = useState(0);
const [trimStart, setTrimStart] = useState(0);
const [trimEnd, setTrimEnd] = useState(0);
useEffect(() => { useEffect(() => {
async function load() { async function load() {
@@ -16,25 +27,99 @@ function App() {
const [assetsData, statusData] = await Promise.all([ const [assetsData, statusData] = await Promise.all([
getAssets(), getAssets(),
getSystemStatus(), getSystemStatus(),
]) ]);
setAssets(assetsData) setAssets(
setStatus(statusData) assetsData.sort((a, b) => a.filename.localeCompare(b.filename)),
);
setStatus(statusData);
} catch (e) { } catch (e) {
setError(e instanceof Error ? e.message : 'Failed to load') setError(e instanceof Error ? e.message : "Failed to load");
} finally { } finally {
setLoading(false) setLoading(false);
} }
} }
load() load();
}, []) }, []);
if (loading) { // Poll jobs
return <div className="loading">Loading...</div> useEffect(() => {
let active = true;
const fetchJobs = () => {
getJobs()
.then((data) => {
if (active) setJobs(data);
})
.catch(console.error);
};
fetchJobs();
const interval = setInterval(fetchJobs, 3000);
return () => {
active = false;
clearInterval(interval);
};
}, []);
// Reset trim state when asset changes
useEffect(() => {
setTrimStart(0);
setTrimEnd(0);
setCurrentTime(0);
setDuration(0);
}, [selectedAsset?.id]);
const handleTimeUpdate = useCallback(() => {
if (videoRef.current) setCurrentTime(videoRef.current.currentTime);
}, []);
const handleLoadedMetadata = useCallback(() => {
if (videoRef.current) {
const dur = videoRef.current.duration;
setDuration(dur);
setTrimEnd(dur);
}
}, []);
const handleSeek = useCallback((time: number) => {
if (videoRef.current) {
videoRef.current.currentTime = time;
setCurrentTime(time);
}
}, []);
const handleTrimChange = useCallback((start: number, end: number) => {
setTrimStart(start);
setTrimEnd(end);
}, []);
async function handleScan() {
setScanning(true);
setError(null);
try {
const result = await scanMediaFolder();
alert(
`Scan complete!\nFound: ${result.found}\nRegistered: ${result.registered}\nSkipped: ${result.skipped}`,
);
const assetsData = await getAssets();
setAssets(
assetsData.sort((a, b) => a.filename.localeCompare(b.filename)),
);
} catch (e) {
setError(e instanceof Error ? e.message : "Scan failed");
} finally {
setScanning(false);
}
} }
if (error) { const refreshJobs = async () => {
return <div className="error">Error: {error}</div> const data = await getJobs();
} setJobs(data);
};
const assetJobs = jobs.filter((j) => j.source_asset_id === selectedAsset?.id);
const completedJobs = jobs.filter((j) => j.status === "completed");
if (loading) return <div className="loading">Loading...</div>;
if (error) return <div className="error">Error: {error}</div>;
return ( return (
<div className="app"> <div className="app">
@@ -49,21 +134,88 @@ function App() {
<div className="layout"> <div className="layout">
<aside className="sidebar"> <aside className="sidebar">
<div className="sidebar-section">
<div className="sidebar-header">
<h2>Assets</h2> <h2>Assets</h2>
<button
onClick={handleScan}
disabled={scanning}
className="scan-button"
>
{scanning ? "Scanning..." : "Scan Folder"}
</button>
</div>
<ul className="asset-list"> <ul className="asset-list">
{assets.map((asset) => ( {assets.map((asset) => (
<li <li
key={asset.id} key={asset.id}
className={selectedAsset?.id === asset.id ? 'selected' : ''} className={selectedAsset?.id === asset.id ? "selected" : ""}
onClick={() => setSelectedAsset(asset)} onClick={() => setSelectedAsset(asset)}
title={asset.filename}
> >
<span className="filename">{asset.filename}</span> <span className="filename">{asset.filename}</span>
<span className={`status-badge ${asset.status}`}>
{asset.status}
</span>
</li> </li>
))} ))}
</ul> </ul>
</div>
<div className="sidebar-section">
<div className="sidebar-header">
<h2>Jobs</h2>
<span className="sidebar-count">{jobs.length}</span>
</div>
<div className="sidebar-list">
{jobs.length === 0 ? (
<div className="sidebar-empty">No jobs</div>
) : (
jobs.map((job) => (
<div key={job.id} className="job-item">
<div className="job-item-header">
<span className="job-filename">
{job.output_filename}
</span>
<span className={`job-status ${job.status}`}>
{job.status}
</span>
</div>
{job.status === "processing" && (
<div className="job-progress-bar">
<div
className="job-progress-fill"
style={{ width: `${job.progress}%` }}
/>
</div>
)}
</div>
))
)}
</div>
</div>
<div className="sidebar-section">
<div className="sidebar-header">
<h2>Output</h2>
<span className="sidebar-count">{completedJobs.length}</span>
</div>
<div className="sidebar-list">
{completedJobs.length === 0 ? (
<div className="sidebar-empty">No output files</div>
) : (
completedJobs.map((job) => (
<a
key={job.id}
className="output-item"
href={`/media/out/${job.output_filename}`}
target="_blank"
rel="noreferrer"
title={job.output_filename}
>
<span className="filename">{job.output_filename}</span>
</a>
))
)}
</div>
</div>
</aside> </aside>
<main className="main"> <main className="main">
@@ -71,29 +223,29 @@ function App() {
<div className="editor"> <div className="editor">
<div className="video-container"> <div className="video-container">
<video <video
ref={videoRef}
controls controls
src={`/media/${selectedAsset.file_path}`} src={`/media/in/${selectedAsset.file_path}`}
onTimeUpdate={handleTimeUpdate}
onLoadedMetadata={handleLoadedMetadata}
/> />
</div> </div>
<div className="timeline-container"> <div className="timeline-container">
{/* Timeline component will go here */} <Timeline
<div className="timeline-placeholder"> duration={duration}
Timeline: {selectedAsset.duration?.toFixed(1)}s currentTime={currentTime}
</div> trimStart={trimStart}
</div> trimEnd={trimEnd}
<div className="info"> onTrimChange={handleTrimChange}
<h3>{selectedAsset.filename}</h3> onSeek={handleSeek}
<dl> />
<dt>Duration</dt>
<dd>{selectedAsset.duration?.toFixed(2)}s</dd>
<dt>Resolution</dt>
<dd>{selectedAsset.width}x{selectedAsset.height}</dd>
<dt>Video</dt>
<dd>{selectedAsset.video_codec}</dd>
<dt>Audio</dt>
<dd>{selectedAsset.audio_codec}</dd>
</dl>
</div> </div>
<JobPanel
asset={selectedAsset}
trimStart={trimStart}
trimEnd={trimEnd}
onJobCreated={refreshJobs}
/>
</div> </div>
) : ( ) : (
<div className="empty">Select an asset to begin</div> <div className="empty">Select an asset to begin</div>
@@ -101,7 +253,7 @@ function App() {
</main> </main>
</div> </div>
</div> </div>
) );
} }
export default App export default App;

View File

@@ -0,0 +1,79 @@
import { useState, useEffect } from "react";
import { getPresets, createJob } from "./api";
import type { MediaAsset, TranscodePreset } from "./types";
/** Props for {@link JobPanel}. */
interface JobPanelProps {
  /** Asset the job will read from. */
  asset: MediaAsset;
  /** Trim in-point in seconds (0 = start of asset). */
  trimStart: number;
  /** Trim out-point in seconds. */
  trimEnd: number;
  /** Called after a job is successfully created, so the parent can refresh its job list. */
  onJobCreated: () => void;
}
/**
 * Preset picker + enqueue button for the currently selected asset.
 *
 * A job can be submitted when either a preset is selected (transcode) or
 * the trim range cuts something off the asset (stream-copy trim). With
 * neither, the button is disabled and shows a prompt label.
 */
export default function JobPanel({
  asset,
  trimStart,
  trimEnd,
  onJobCreated,
}: JobPanelProps) {
  const [presets, setPresets] = useState<TranscodePreset[]>([]);
  // "" means "No preset (trim only)" — the placeholder <option> below.
  const [selectedPresetId, setSelectedPresetId] = useState<string>("");
  const [submitting, setSubmitting] = useState(false);

  // Load presets once on mount.
  // NOTE(review): no cancellation — if the request resolves after unmount,
  // setPresets fires on an unmounted component; harmless but worth confirming.
  useEffect(() => {
    getPresets().then(setPresets).catch(console.error);
  }, []);

  // A trim is "real" when it cuts from either end. trimEnd is compared
  // against the asset's probed duration — assumes that matches the <video>
  // element duration driving the trim UI; small float drift here flips
  // hasTrim. TODO confirm.
  const hasTrim =
    trimStart > 0 || (asset.duration != null && trimEnd < asset.duration);
  const hasPreset = selectedPresetId !== "";
  const canSubmit = hasTrim || hasPreset;

  const buttonLabel = hasPreset
    ? "Transcode"
    : hasTrim
      ? "Trim (Copy)"
      : "Select trim or preset";

  // Enqueue the job. Trim bounds are sent only when a real trim exists;
  // an empty preset selection is sent as null (trim-only job).
  async function handleSubmit() {
    setSubmitting(true);
    try {
      await createJob({
        source_asset_id: asset.id,
        preset_id: selectedPresetId || null,
        trim_start: hasTrim ? trimStart : null,
        trim_end: hasTrim ? trimEnd : null,
      });
      onJobCreated();
    } catch (e) {
      alert(e instanceof Error ? e.message : "Failed to create job");
    } finally {
      setSubmitting(false);
    }
  }

  return (
    <div className="job-panel">
      <div className="job-controls">
        <select
          value={selectedPresetId}
          onChange={(e) => setSelectedPresetId(e.target.value)}
          className="preset-select"
        >
          <option value="">No preset (trim only)</option>
          {presets.map((p) => (
            <option key={p.id} value={p.id}>
              {p.name}
            </option>
          ))}
        </select>
        <button
          onClick={handleSubmit}
          disabled={!canSubmit || submitting}
          className="enqueue-button"
        >
          {submitting ? "Submitting..." : buttonLabel}
        </button>
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,121 @@
import { useRef, useCallback, useState, useEffect } from "react";
/** Props for {@link Timeline}. Fully controlled: all state lives in the parent. */
interface TimelineProps {
  /** Total media duration in seconds (0 until metadata loads). */
  duration: number;
  /** Current playback position in seconds. */
  currentTime: number;
  /** Trim in-point in seconds. */
  trimStart: number;
  /** Trim out-point in seconds. */
  trimEnd: number;
  /** Called with the new (start, end) while a handle is dragged. */
  onTrimChange: (start: number, end: number) => void;
  /** Called with the target time when the user clicks the track. */
  onSeek: (time: number) => void;
}
/**
 * Format a time in seconds as "M:SS.t" — minutes, zero-padded seconds,
 * and tenths of a second. E.g. 65.25 -> "1:05.2".
 *
 * Non-finite or negative input renders as "0:00.0" instead of
 * "NaN:NaN.NaN" — a <video> element's duration is NaN before metadata
 * loads and can be Infinity for live streams.
 */
function formatTime(seconds: number): string {
  if (!Number.isFinite(seconds) || seconds < 0) return "0:00.0";
  const minutes = Math.floor(seconds / 60);
  const secs = Math.floor(seconds % 60);
  // Tenths of a second, not milliseconds: one digit after the dot.
  const tenths = Math.floor((seconds % 1) * 10);
  return `${minutes}:${secs.toString().padStart(2, "0")}.${tenths}`;
}
/**
 * Scrubber/trim bar for the selected video.
 *
 * Renders draggable in/out handles, dimmed regions outside the selection,
 * a playhead, and time readouts. Purely controlled — trim and playhead
 * state live in the parent and flow back via onTrimChange/onSeek.
 */
export default function Timeline({
  duration,
  currentTime,
  trimStart,
  trimEnd,
  onTrimChange,
  onSeek,
}: TimelineProps) {
  const trackRef = useRef<HTMLDivElement>(null);
  // Which handle is currently being dragged; null when idle.
  const [dragging, setDragging] = useState<"in" | "out" | null>(null);

  // Map a time in seconds to a percentage of track width (0 while duration unknown).
  const timeToPercent = (t: number) => (duration > 0 ? (t / duration) * 100 : 0);

  // Map a mouse X coordinate to a time, clamped to [0, duration].
  const positionToTime = useCallback(
    (clientX: number) => {
      const track = trackRef.current;
      if (!track || duration <= 0) return 0;
      const rect = track.getBoundingClientRect();
      const ratio = Math.max(0, Math.min(1, (clientX - rect.left) / rect.width));
      return ratio * duration;
    },
    [duration],
  );

  // Clicking the bare track seeks. Ignored mid-drag so the click generated
  // by releasing a handle does not also jump the playhead.
  const handleTrackClick = (e: React.MouseEvent) => {
    if (dragging) return;
    onSeek(positionToTime(e.clientX));
  };

  const handleMouseDown = (handle: "in" | "out") => (e: React.MouseEvent) => {
    e.stopPropagation(); // keep the track's click-to-seek from firing
    setDragging(handle);
  };

  // While a handle is held, listen at the document level so the drag keeps
  // tracking when the cursor leaves the track element; listeners are torn
  // down on mouseup (dragging -> null re-runs this effect's cleanup).
  useEffect(() => {
    if (!dragging) return;
    const minGap = 0.1; // seconds; prevents the selection from collapsing
    const handleMove = (e: MouseEvent) => {
      const time = positionToTime(e.clientX);
      if (dragging === "in") {
        // NOTE(review): if trimEnd < minGap (e.g. before metadata loads),
        // this clamp can yield a slightly negative trimStart — confirm the
        // parent never enables dragging in that state.
        onTrimChange(Math.min(time, trimEnd - minGap), trimEnd);
      } else {
        onTrimChange(trimStart, Math.max(time, trimStart + minGap));
      }
    };
    const handleUp = () => setDragging(null);
    document.addEventListener("mousemove", handleMove);
    document.addEventListener("mouseup", handleUp);
    return () => {
      document.removeEventListener("mousemove", handleMove);
      document.removeEventListener("mouseup", handleUp);
    };
  }, [dragging, trimStart, trimEnd, positionToTime, onTrimChange]);

  const inPct = timeToPercent(trimStart);
  const outPct = timeToPercent(trimEnd);
  const playheadPct = timeToPercent(currentTime);
  const selectionDuration = trimEnd - trimStart;

  return (
    <div className="timeline">
      <div className="timeline-times">
        <span>In: {formatTime(trimStart)}</span>
        <span>Selection: {formatTime(selectionDuration)}</span>
        <span>Out: {formatTime(trimEnd)}</span>
      </div>
      <div className="timeline-track" ref={trackRef} onClick={handleTrackClick}>
        {/* Dimmed regions outside the [in, out] selection */}
        <div className="timeline-dim" style={{ left: 0, width: `${inPct}%` }} />
        <div className="timeline-dim" style={{ left: `${outPct}%`, width: `${100 - outPct}%` }} />
        {/* Selection highlight */}
        <div
          className="timeline-selection"
          style={{ left: `${inPct}%`, width: `${outPct - inPct}%` }}
        />
        {/* Playhead */}
        <div className="timeline-playhead" style={{ left: `${playheadPct}%` }} />
        {/* Drag handles for the in/out points */}
        <div
          className={`timeline-handle timeline-handle-in ${dragging === "in" ? "dragging" : ""}`}
          style={{ left: `${inPct}%` }}
          onMouseDown={handleMouseDown("in")}
        />
        <div
          className={`timeline-handle timeline-handle-out ${dragging === "out" ? "dragging" : ""}`}
          style={{ left: `${outPct}%` }}
          onMouseDown={handleMouseDown("out")}
        />
      </div>
      <div className="timeline-duration">
        <span>0:00</span>
        <span>{formatTime(duration)}</span>
      </div>
    </div>
  );
}

View File

@@ -38,6 +38,17 @@ export async function getAsset(id: string): Promise<MediaAsset> {
return request(`/assets/${id}`); return request(`/assets/${id}`);
} }
/**
 * Ask the backend to scan the media input folder and register any new
 * files as assets. Resolves with the scan summary: counts of files
 * found / registered / skipped, plus the list of file paths seen.
 */
export async function scanMediaFolder(): Promise<{
  found: number;
  registered: number;
  skipped: number;
  files: string[];
}> {
  const init = { method: "POST" };
  return request("/assets/scan", init);
}
// Presets // Presets
export async function getPresets(): Promise<TranscodePreset[]> { export async function getPresets(): Promise<TranscodePreset[]> {
return request("/presets/"); return request("/presets/");

View File

@@ -71,3 +71,25 @@ export interface TranscodeJob {
started_at: string | null; started_at: string | null;
completed_at: string | null; completed_at: string | null;
} }
// API request/response types

/**
 * Payload for creating a transcode job.
 *
 * `output_filename` is optional: the visible caller (JobPanel's
 * handleSubmit) builds this payload without it, so requiring the field
 * would be a type error there — when absent/null the server presumably
 * derives the output name itself (verify against the jobs endpoint).
 */
export interface CreateJobRequest {
  /** Id of the source MediaAsset to read from. */
  source_asset_id: string;
  /** Preset to apply; null means no preset (trim-only / stream copy). */
  preset_id: string | null;
  /** Trim in-point in seconds; null when no trim is requested. */
  trim_start: number | null;
  /** Trim out-point in seconds; null when no trim is requested. */
  trim_end: number | null;
  /** Output name override; omitted or null lets the server choose. */
  output_filename?: string | null;
}

/** Response shape of the system status endpoint. */
export interface SystemStatus {
  status: string;
  version: string;
}

/** Reported state of the transcode worker. */
export interface WorkerStatus {
  available: boolean;
  active_jobs: number;
  supported_codecs: string[];
  gpu_available: boolean;
}