plug task enqueuing properly

This commit is contained in:
2026-02-06 10:49:05 -03:00
parent 2cf6c89fbb
commit 013587d108
20 changed files with 413 additions and 356 deletions

6
.gitignore vendored
View File

@@ -17,8 +17,10 @@ env/
*.pot
*.pyc
db.sqlite3
media/*
!media/.gitkeep
media/in/*
!media/in/.gitkeep
media/out/*
!media/out/.gitkeep
# Node
node_modules/

View File

@@ -76,43 +76,38 @@ docker compose exec django python manage.py createsuperuser
## Code Generation
Models are defined in `schema/models/` and generate:
- Django ORM models
- Pydantic schemas
- TypeScript types
- Protobuf definitions
Models are defined as dataclasses in `schema/models/` and generated via `modelgen`:
- **Django ORM** models (`--include dataclasses,enums`)
- **Pydantic** schemas (`--include dataclasses,enums`)
- **TypeScript** types (`--include dataclasses,enums,api`)
- **Protobuf** definitions (`--include grpc`)
Each target only gets the model groups it needs via the `--include` flag.
```bash
# Regenerate all
python schema/generate.py --all
# Or specific targets
python schema/generate.py --django
python schema/generate.py --pydantic
python schema/generate.py --typescript
python schema/generate.py --proto
# Regenerate all targets
bash ctrl/generate.sh
```
## Media Storage
MPR stores media file paths **relative to the media root** for cloud portability.
MPR separates media into **input** (`MEDIA_IN`) and **output** (`MEDIA_OUT`) paths, each independently configurable. File paths are stored relative for cloud portability.
### Local Development
- Files: `/app/media/video.mp4`
- Stored path: `video.mp4`
- Served via: `http://mpr.local.ar/media/video.mp4` (nginx alias)
- Source files: `/app/media/in/video.mp4`
- Output files: `/app/media/out/video_h264.mp4`
- Served via: `http://mpr.local.ar/media/in/video.mp4` (nginx alias)
### AWS/Cloud Deployment
For S3 or cloud storage, set `MEDIA_BASE_URL`:
Input and output can be different buckets/locations:
```bash
MEDIA_BASE_URL=https://bucket.s3.amazonaws.com/
MEDIA_IN=s3://source-bucket/media/
MEDIA_OUT=s3://output-bucket/transcoded/
```
- Files: S3 bucket
- Stored path: `video.mp4` (same relative path)
- Served via: `https://bucket.s3.amazonaws.com/video.mp4`
**Scan Endpoint**: `POST /api/assets/scan` recursively scans `MEDIA_IN` and registers new files with relative paths.
**Scan Endpoint**: `POST /api/assets/scan` recursively scans the media folder and registers new files with relative paths.
See [docs/media-storage.md](docs/media-storage.md) for full details.
## Project Structure
@@ -126,7 +121,9 @@ mpr/
├── ctrl/ # Docker & deployment
│ ├── docker-compose.yml
│ └── nginx.conf
├── media/ # Media files (local storage)
├── media/
│ ├── in/ # Source media files
│ └── out/ # Transcoded output
├── rpc/ # gRPC server & client
│ └── protos/ # Protobuf definitions (generated)
├── mpr/ # Django project
@@ -151,7 +148,8 @@ See `ctrl/.env.template` for all configuration options.
| `GRPC_HOST` | grpc | gRPC server hostname |
| `GRPC_PORT` | 50051 | gRPC server port |
| `MPR_EXECUTOR` | local | Executor type (local/lambda) |
| `MEDIA_ROOT` | /app/media | Media files directory |
| `MEDIA_IN` | /app/media/in | Source media files directory |
| `MEDIA_OUT` | /app/media/out | Transcoded output directory |
| `MEDIA_BASE_URL` | /media/ | Base URL for serving media (use S3 URL for cloud) |
| `VITE_ALLOWED_HOSTS` | - | Comma-separated allowed hosts for Vite dev server |

View File

@@ -33,7 +33,7 @@ def create_asset(data: AssetCreate):
# Store path relative to media root
import os
media_root = Path(os.environ.get("MEDIA_ROOT", "/app/media"))
media_root = Path(os.environ.get("MEDIA_IN", "/app/media/in"))
try:
rel_path = str(path.relative_to(media_root))
except ValueError:
@@ -111,8 +111,8 @@ def scan_media_folder():
from mpr.media_assets.models import MediaAsset
# Get media root from environment
media_root = os.environ.get("MEDIA_ROOT", "/app/media")
# Get media input folder from environment
media_root = os.environ.get("MEDIA_IN", "/app/media/in")
media_path = Path(media_root)
if not media_path.exists():

View File

@@ -30,9 +30,6 @@ def create_job(data: JobCreate):
except MediaAsset.DoesNotExist:
raise HTTPException(status_code=404, detail="Source asset not found")
if source.status != "ready":
raise HTTPException(status_code=400, detail="Source asset is not ready")
# Get preset if specified
preset = None
preset_snapshot = {}
@@ -64,27 +61,48 @@ def create_job(data: JobCreate):
status_code=400, detail="Must specify preset_id or trim_start/trim_end"
)
# Generate output filename
output_filename = data.output_filename
if not output_filename:
# Generate output filename and path
import os
from pathlib import Path
output_filename = data.output_filename
if not output_filename:
stem = Path(source.filename).stem
ext = preset_snapshot.get("container", "mp4") if preset else "mp4"
output_filename = f"{stem}_output.{ext}"
media_out = os.environ.get("MEDIA_OUT", "/app/media/out")
output_path = str(Path(media_out) / output_filename)
media_in = os.environ.get("MEDIA_IN", "/app/media/in")
source_path = str(Path(media_in) / source.file_path)
# Create job
job = TranscodeJob.objects.create(
source_asset=source,
preset=preset,
source_asset_id=source.id,
preset_id=preset.id if preset else None,
preset_snapshot=preset_snapshot,
trim_start=data.trim_start,
trim_end=data.trim_end,
output_filename=output_filename,
output_path=output_path,
priority=data.priority or 0,
)
# TODO: Submit job via gRPC
# Dispatch to Celery
from task.tasks import run_transcode_job
result = run_transcode_job.delay(
job_id=str(job.id),
source_path=source_path,
output_path=output_path,
preset=preset_snapshot or None,
trim_start=data.trim_start,
trim_end=data.trim_end,
duration=source.duration,
)
job.celery_task_id = result.id
job.save(update_fields=["celery_task_id"])
return job

View File

@@ -17,26 +17,19 @@ class JobStatus(str, Enum):
class JobCreate(BaseSchema):
"""JobCreate schema."""
"""Client-facing job creation request."""
source_asset_id: UUID
preset_id: Optional[UUID] = None
preset_snapshot: Dict[str, Any]
trim_start: Optional[float] = None
trim_end: Optional[float] = None
output_filename: str = ""
output_path: Optional[str] = None
output_asset_id: Optional[UUID] = None
progress: float = 0.0
current_frame: Optional[int] = None
current_time: Optional[float] = None
speed: Optional[str] = None
celery_task_id: Optional[str] = None
output_filename: Optional[str] = None
priority: int = 0
started_at: Optional[datetime] = None
completed_at: Optional[datetime] = None
class JobUpdate(BaseSchema):
"""JobUpdate schema."""
source_asset_id: Optional[UUID] = None
preset_id: Optional[UUID] = None
preset_snapshot: Optional[Dict[str, Any]] = None
@@ -56,8 +49,10 @@ class JobUpdate(BaseSchema):
started_at: Optional[datetime] = None
completed_at: Optional[datetime] = None
class JobResponse(BaseSchema):
"""JobResponse schema."""
id: UUID
source_asset_id: UUID
preset_id: Optional[UUID] = None

View File

@@ -27,5 +27,9 @@ GRPC_HOST=grpc
GRPC_PORT=50051
GRPC_MAX_WORKERS=10
# Media
MEDIA_IN=/app/media/in
MEDIA_OUT=/app/media/out
# Vite
VITE_ALLOWED_HOSTS=your-domain.local

View File

@@ -5,6 +5,8 @@ x-common-env: &common-env
DEBUG: 1
GRPC_HOST: grpc
GRPC_PORT: 50051
MEDIA_IN: ${MEDIA_IN:-/app/media/in}
MEDIA_OUT: ${MEDIA_OUT:-/app/media/out}
x-healthcheck-defaults: &healthcheck-defaults
interval: 5s
@@ -119,7 +121,7 @@ services:
build:
context: ..
dockerfile: ctrl/Dockerfile
command: celery -A mpr worker -l info -Q default -c 2
command: celery -A mpr worker -l info -Q transcode -c 2
environment:
<<: *common-env
MPR_EXECUTOR: local

View File

@@ -8,29 +8,33 @@ cd "$(dirname "$0")/.."
echo "Generating models from schema/models..."
# Django ORM models
# Django ORM models: domain models + enums
python -m modelgen from-schema \
--schema schema/models \
--output mpr/media_assets/models.py \
--targets django
--targets django \
--include dataclasses,enums
# Pydantic schemas for FastAPI
# Pydantic schemas for FastAPI: domain models + enums
python -m modelgen from-schema \
--schema schema/models \
--output api/schemas/models.py \
--targets pydantic
--targets pydantic \
--include dataclasses,enums
# TypeScript types for Timeline UI
# TypeScript types for Timeline UI: domain models + enums + API types
python -m modelgen from-schema \
--schema schema/models \
--output ui/timeline/src/types.ts \
--targets typescript
--targets typescript \
--include dataclasses,enums,api
# Protobuf for gRPC
# Protobuf for gRPC: gRPC messages + service
python -m modelgen from-schema \
--schema schema/models \
--output rpc/protos/worker.proto \
--targets proto
--targets proto \
--include grpc
# Generate gRPC stubs from proto
echo "Generating gRPC stubs..."

View File

@@ -67,9 +67,15 @@ http {
proxy_set_header Host $host;
}
# Media files
location /media {
alias /app/media;
# Media files - input (source)
location /media/in {
alias /app/media/in;
autoindex on;
}
# Media files - output (transcoded)
location /media/out {
alias /app/media/out;
autoindex on;
}

View File

@@ -2,13 +2,23 @@
## Overview
MPR stores media file paths **relative to the media root** to ensure portability between local development and cloud deployments (AWS S3, etc.).
MPR separates media into **input** and **output** paths, each independently configurable. File paths are stored **relative to their respective root** to ensure portability between local development and cloud deployments (AWS S3, etc.).
## Storage Strategy
### Input / Output Separation
| Path | Env Var | Purpose |
|------|---------|---------|
| `MEDIA_IN` | `/app/media/in` | Source media files to process |
| `MEDIA_OUT` | `/app/media/out` | Transcoded/trimmed output files |
These can point to different locations or even different servers/buckets in production.
### File Path Storage
- **Database**: Stores only the relative path (e.g., `videos/sample.mp4`)
- **Media Root**: Configurable base directory via `MEDIA_ROOT` env var
- **Input Root**: Configurable via `MEDIA_IN` env var
- **Output Root**: Configurable via `MEDIA_OUT` env var
- **Serving**: Base URL configurable via `MEDIA_BASE_URL` env var
### Why Relative Paths?
@@ -20,20 +30,26 @@ MPR stores media file paths **relative to the media root** to ensure portability
### Configuration
```bash
MEDIA_ROOT=/app/media
MEDIA_IN=/app/media/in
MEDIA_OUT=/app/media/out
```
### File Structure
```
/app/media/
├── video1.mp4
├── video2.mp4
└── subfolder/
└── video3.mp4
├── in/ # Source files
│   ├── video1.mp4
│   ├── video2.mp4
│   └── subfolder/
│       └── video3.mp4
└── out/ # Transcoded output
├── video1_h264.mp4
└── video2_trimmed.mp4
```
### Database Storage
```
# Source assets (scanned from media/in)
filename: video1.mp4
file_path: video1.mp4
@@ -42,25 +58,31 @@ file_path: subfolder/video3.mp4
```
### URL Serving
- Nginx serves via `location /media { alias /app/media; }`
- Frontend accesses: `http://mpr.local.ar/media/video1.mp4`
- Video player: `<video src="/media/video1.mp4" />`
- Nginx serves input via `location /media/in { alias /app/media/in; }`
- Nginx serves output via `location /media/out { alias /app/media/out; }`
- Frontend accesses: `http://mpr.local.ar/media/in/video1.mp4`
- Video player: `<video src="/media/in/video1.mp4" />`
## AWS/Cloud Deployment
### S3 Configuration
```bash
MEDIA_ROOT=s3://my-bucket/media/
MEDIA_BASE_URL=https://my-bucket.s3.amazonaws.com/media/
# Input and output can be different buckets/paths
MEDIA_IN=s3://source-bucket/media/
MEDIA_OUT=s3://output-bucket/transcoded/
MEDIA_BASE_URL=https://source-bucket.s3.amazonaws.com/media/
```
### S3 Structure
```
s3://my-bucket/media/
s3://source-bucket/media/
├── video1.mp4
├── video2.mp4
└── subfolder/
└── video3.mp4
s3://output-bucket/transcoded/
├── video1_h264.mp4
└── video2_trimmed.mp4
```
### Database Storage (Same!)
@@ -72,10 +94,6 @@ filename: video3.mp4
file_path: subfolder/video3.mp4
```
### URL Serving
- Frontend prepends `MEDIA_BASE_URL`: `https://my-bucket.s3.amazonaws.com/media/video1.mp4`
- Video player: `<video src="https://my-bucket.s3.amazonaws.com/media/video1.mp4" />`
## API Endpoints
### Scan Media Folder
@@ -84,126 +102,49 @@ POST /api/assets/scan
```
**Behavior:**
1. Recursively scans `MEDIA_ROOT` directory
1. Recursively scans `MEDIA_IN` directory
2. Finds all video/audio files (mp4, mkv, avi, mov, mp3, wav, etc.)
3. Stores paths **relative to MEDIA_ROOT**
3. Stores paths **relative to MEDIA_IN**
4. Skips already-registered files (by filename)
5. Returns summary: `{ found, registered, skipped, files }`
**Example:**
```bash
curl -X POST http://mpr.local.ar/api/assets/scan
```
```json
{
"found": 15,
"registered": 12,
"skipped": 3,
"files": ["video1.mp4", "video2.mp4", ...]
}
```
### Create Asset
### Create Job
```http
POST /api/assets/
POST /api/jobs/
Content-Type: application/json
{
"file_path": "/app/media/video.mp4",
"filename": "video.mp4"
"source_asset_id": "uuid",
"preset_id": "uuid",
"trim_start": 10.0,
"trim_end": 30.0
}
```
**Behavior:**
- Validates file exists
- Converts absolute path to relative (relative to `MEDIA_ROOT`)
- Stores relative path in database
- Server sets `output_path` using `MEDIA_OUT` + generated filename
- Output goes to the output directory, not alongside source files
## Migration Guide
### Moving from Local to S3
1. **Upload files to S3:**
1. **Upload source files to S3:**
```bash
aws s3 sync /app/media/ s3://my-bucket/media/
aws s3 sync /app/media/in/ s3://source-bucket/media/
aws s3 sync /app/media/out/ s3://output-bucket/transcoded/
```
2. **Update environment variables:**
```bash
MEDIA_ROOT=s3://my-bucket/media/
MEDIA_BASE_URL=https://my-bucket.s3.amazonaws.com/media/
MEDIA_IN=s3://source-bucket/media/
MEDIA_OUT=s3://output-bucket/transcoded/
MEDIA_BASE_URL=https://source-bucket.s3.amazonaws.com/media/
```
3. **Database paths remain unchanged** (already relative)
4. **Update frontend** to use `MEDIA_BASE_URL` from config
### Moving from S3 to Local
1. **Download files from S3:**
```bash
aws s3 sync s3://my-bucket/media/ /app/media/
```
2. **Update environment variables:**
```bash
MEDIA_ROOT=/app/media
# Remove MEDIA_BASE_URL or set to /media/
```
3. **Database paths remain unchanged** (already relative)
## Implementation Details
### Backend (FastAPI)
**File path normalization** (`api/routes/assets.py`):
```python
import os
from pathlib import Path
media_root = Path(os.environ.get("MEDIA_ROOT", "/app/media"))
# Convert absolute to relative
file_path = Path("/app/media/subfolder/video.mp4")
rel_path = str(file_path.relative_to(media_root))
# Result: "subfolder/video.mp4"
```
### Frontend (React)
**Current implementation:**
```typescript
<video src={`/media/${asset.file_path}`} />
```
**Future cloud implementation:**
```typescript
const MEDIA_BASE_URL = import.meta.env.VITE_MEDIA_BASE_URL || '/media/';
<video src={`${MEDIA_BASE_URL}${asset.file_path}`} />
```
## Supported File Types
**Video formats:**
- `.mp4`, `.mkv`, `.avi`, `.mov`, `.webm`, `.flv`, `.wmv`, `.m4v`
**Audio formats:**
- `.mp3`, `.wav`, `.flac`, `.aac`, `.ogg`, `.m4a`
## Best Practices
1. **Always use relative paths** when storing file references
2. **Use environment variables** for media root and base URL
3. **Validate file existence** before creating assets
4. **Scan periodically** to discover new files
5. **Use presigned URLs** for S3 private buckets (TODO: implement)
## Future Enhancements
- [ ] S3 presigned URL generation for private buckets
- [ ] CloudFront CDN integration
- [ ] Multi-region S3 replication
- [ ] Automatic metadata extraction on upload
- [ ] Thumbnail generation and storage
**Video:** `.mp4`, `.mkv`, `.avi`, `.mov`, `.webm`, `.flv`, `.wmv`, `.m4v`
**Audio:** `.mp3`, `.wav`, `.flac`, `.aac`, `.ogg`, `.m4a`

0
media/out/.gitkeep Normal file
View File

View File

@@ -72,10 +72,24 @@ def cmd_from_schema(args):
print("that exports DATACLASSES and ENUMS lists.", file=sys.stderr)
sys.exit(1)
print(f"Loading schema: {schema_path}")
schema = load_schema(schema_path)
# Parse include groups
include = None
if args.include:
include = {g.strip() for g in args.include.split(",")}
print(f"Found {len(schema.models)} models, {len(schema.enums)} enums")
print(f"Loading schema: {schema_path}")
schema = load_schema(schema_path, include=include)
loaded = []
if schema.models:
loaded.append(f"{len(schema.models)} models")
if schema.enums:
loaded.append(f"{len(schema.enums)} enums")
if schema.api_models:
loaded.append(f"{len(schema.api_models)} api models")
if schema.grpc_messages:
loaded.append(f"{len(schema.grpc_messages)} grpc messages")
print(f"Found {', '.join(loaded)}")
# Parse targets
targets = [t.strip() for t in args.targets.split(",")]
@@ -237,6 +251,12 @@ def main():
default="pydantic",
help=f"Comma-separated output targets ({formats_str})",
)
schema_parser.add_argument(
"--include",
type=str,
default=None,
help="Comma-separated model groups to include (dataclasses,enums,api,grpc). Default: all.",
)
schema_parser.set_defaults(func=cmd_from_schema)
# extract command

View File

@@ -65,8 +65,13 @@ class SchemaLoader:
self.grpc_messages: List[ModelDefinition] = []
self.grpc_service: Optional[GrpcServiceDefinition] = None
def load(self) -> "SchemaLoader":
"""Load schema definitions from the schema folder."""
def load(self, include: Optional[set] = None) -> "SchemaLoader":
"""Load schema definitions from the schema folder.
Args:
include: Set of groups to load (dataclasses, enums, api, grpc).
None means load all groups.
"""
init_path = self.schema_path / "__init__.py"
if not init_path.exists():
@@ -75,27 +80,34 @@ class SchemaLoader:
# Import the schema module
module = self._import_module(init_path)
load_all = include is None
# Extract DATACLASSES
if load_all or "dataclasses" in include:
dataclasses = getattr(module, "DATACLASSES", [])
for cls in dataclasses:
self.models.append(self._parse_dataclass(cls))
# Extract API_MODELS (TypeScript-only request/response types)
if load_all or "api" in include:
api_models = getattr(module, "API_MODELS", [])
for cls in api_models:
self.api_models.append(self._parse_dataclass(cls))
# Extract ENUMS
if load_all or "enums" in include:
enums = getattr(module, "ENUMS", [])
for enum_cls in enums:
self.enums.append(self._parse_enum(enum_cls))
# Extract GRPC_MESSAGES (optional)
if load_all or "grpc" in include:
grpc_messages = getattr(module, "GRPC_MESSAGES", [])
for cls in grpc_messages:
self.grpc_messages.append(self._parse_dataclass(cls))
# Extract GRPC_SERVICE (optional)
if load_all or "grpc" in include:
grpc_service = getattr(module, "GRPC_SERVICE", None)
if grpc_service:
self.grpc_service = GrpcServiceDefinition(
@@ -169,7 +181,7 @@ class SchemaLoader:
return False
def load_schema(schema_path: str | Path) -> SchemaLoader:
def load_schema(schema_path: str | Path, include: Optional[set] = None) -> SchemaLoader:
"""Load schema definitions from folder."""
loader = SchemaLoader(schema_path)
return loader.load()
return loader.load(include=include)

View File

@@ -7,3 +7,4 @@ os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mpr.settings")
app = Celery("mpr")
app.config_from_object("django.conf:settings", namespace="CELERY")
app.autodiscover_tasks()
app.autodiscover_tasks(["task"])

View File

@@ -201,7 +201,7 @@ def update_job_progress(
"""
Update job progress (called from worker tasks).
This updates the in-memory state that StreamProgress reads from.
Updates both the in-memory gRPC state and the Django database.
"""
if job_id in _active_jobs:
_active_jobs[job_id].update(
@@ -215,6 +215,36 @@ def update_job_progress(
}
)
# Update Django database
try:
from django.utils import timezone
from mpr.media_assets.models import TranscodeJob
update_fields = ["progress", "current_frame", "current_time", "speed", "status"]
updates = {
"progress": progress,
"current_frame": current_frame,
"current_time": current_time,
"speed": str(speed),
"status": status,
}
if error:
updates["error_message"] = error
update_fields.append("error_message")
if status == "processing":
updates["started_at"] = timezone.now()
update_fields.append("started_at")
elif status in ("completed", "failed"):
updates["completed_at"] = timezone.now()
update_fields.append("completed_at")
TranscodeJob.objects.filter(id=job_id).update(**updates)
except Exception as e:
logger.warning(f"Failed to update job {job_id} in DB: {e}")
def serve(port: int = None, celery_app=None) -> grpc.Server:
"""

View File

@@ -17,7 +17,7 @@ logger = logging.getLogger(__name__)
MEDIA_ROOT = os.environ.get("MEDIA_ROOT", "/app/media")
@shared_task(bind=True, max_retries=3, default_retry_delay=60)
@shared_task(bind=True, queue="transcode", max_retries=3, default_retry_delay=60)
def run_transcode_job(
self,
job_id: str,

View File

@@ -47,6 +47,53 @@ body {
background: #202020;
border-right: 1px solid #333;
overflow-y: auto;
display: flex;
flex-direction: column;
}
.sidebar-section {
border-bottom: 1px solid #333;
}
.sidebar-section:first-child {
flex: 1;
min-height: 0;
overflow-y: auto;
}
.sidebar-count {
font-size: 0.7rem;
background: #333;
color: #888;
padding: 0.125rem 0.375rem;
border-radius: 8px;
}
.sidebar-list {
max-height: 200px;
overflow-y: auto;
}
.sidebar-empty {
padding: 0.5rem 1rem;
font-size: 0.8rem;
color: #555;
}
.output-item {
display: block;
padding: 0.5rem 1rem;
font-size: 0.8rem;
color: #10b981;
text-decoration: none;
border-bottom: 1px solid #2a2a2a;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.output-item:hover {
background: #2a2a2a;
}
.sidebar-header {
@@ -325,27 +372,11 @@ body {
cursor: not-allowed;
}
/* Job list */
.job-list {
margin-top: 0.75rem;
border-top: 1px solid #333;
padding-top: 0.5rem;
}
.job-list h3 {
font-size: 0.75rem;
text-transform: uppercase;
letter-spacing: 0.05em;
color: #888;
margin-bottom: 0.5rem;
}
/* Job items */
.job-item {
padding: 0.5rem;
background: #2a2a2a;
border-radius: 4px;
margin-bottom: 0.375rem;
padding: 0.5rem 1rem;
border-bottom: 1px solid #2a2a2a;
font-size: 0.8rem;
}
@@ -411,25 +442,3 @@ body {
border-radius: 2px;
transition: width 0.3s;
}
.job-cancel {
margin-top: 0.375rem;
padding: 0.125rem 0.5rem;
font-size: 0.7rem;
background: transparent;
color: #888;
border: 1px solid #555;
border-radius: 3px;
cursor: pointer;
}
.job-cancel:hover {
color: #ef4444;
border-color: #ef4444;
}
.job-error {
margin-top: 0.25rem;
font-size: 0.7rem;
color: #ef4444;
}

View File

@@ -1,12 +1,13 @@
import { useState, useEffect, useRef, useCallback } from "react";
import { getAssets, getSystemStatus, scanMediaFolder } from "./api";
import type { MediaAsset, SystemStatus } from "./types";
import { getAssets, getJobs, getSystemStatus, scanMediaFolder } from "./api";
import type { MediaAsset, TranscodeJob, SystemStatus } from "./types";
import Timeline from "./Timeline";
import JobPanel from "./JobPanel";
import "./App.css";
function App() {
const [assets, setAssets] = useState<MediaAsset[]>([]);
const [jobs, setJobs] = useState<TranscodeJob[]>([]);
const [status, setStatus] = useState<SystemStatus | null>(null);
const [selectedAsset, setSelectedAsset] = useState<MediaAsset | null>(null);
const [loading, setLoading] = useState(true);
@@ -40,6 +41,24 @@ function App() {
load();
}, []);
// Poll jobs
useEffect(() => {
let active = true;
const fetchJobs = () => {
getJobs()
.then((data) => {
if (active) setJobs(data);
})
.catch(console.error);
};
fetchJobs();
const interval = setInterval(fetchJobs, 3000);
return () => {
active = false;
clearInterval(interval);
};
}, []);
// Reset trim state when asset changes
useEffect(() => {
setTrimStart(0);
@@ -48,11 +67,8 @@ function App() {
setDuration(0);
}, [selectedAsset?.id]);
// Video event handlers
const handleTimeUpdate = useCallback(() => {
if (videoRef.current) {
setCurrentTime(videoRef.current.currentTime);
}
if (videoRef.current) setCurrentTime(videoRef.current.currentTime);
}, []);
const handleLoadedMetadata = useCallback(() => {
@@ -83,7 +99,6 @@ function App() {
alert(
`Scan complete!\nFound: ${result.found}\nRegistered: ${result.registered}\nSkipped: ${result.skipped}`,
);
const assetsData = await getAssets();
setAssets(
assetsData.sort((a, b) => a.filename.localeCompare(b.filename)),
@@ -95,13 +110,16 @@ function App() {
}
}
if (loading) {
return <div className="loading">Loading...</div>;
}
const refreshJobs = async () => {
const data = await getJobs();
setJobs(data);
};
if (error) {
return <div className="error">Error: {error}</div>;
}
const assetJobs = jobs.filter((j) => j.source_asset_id === selectedAsset?.id);
const completedJobs = jobs.filter((j) => j.status === "completed");
if (loading) return <div className="loading">Loading...</div>;
if (error) return <div className="error">Error: {error}</div>;
return (
<div className="app">
@@ -116,6 +134,7 @@ function App() {
<div className="layout">
<aside className="sidebar">
<div className="sidebar-section">
<div className="sidebar-header">
<h2>Assets</h2>
<button
@@ -138,6 +157,65 @@ function App() {
</li>
))}
</ul>
</div>
<div className="sidebar-section">
<div className="sidebar-header">
<h2>Jobs</h2>
<span className="sidebar-count">{jobs.length}</span>
</div>
<div className="sidebar-list">
{jobs.length === 0 ? (
<div className="sidebar-empty">No jobs</div>
) : (
jobs.map((job) => (
<div key={job.id} className="job-item">
<div className="job-item-header">
<span className="job-filename">
{job.output_filename}
</span>
<span className={`job-status ${job.status}`}>
{job.status}
</span>
</div>
{job.status === "processing" && (
<div className="job-progress-bar">
<div
className="job-progress-fill"
style={{ width: `${job.progress}%` }}
/>
</div>
)}
</div>
))
)}
</div>
</div>
<div className="sidebar-section">
<div className="sidebar-header">
<h2>Output</h2>
<span className="sidebar-count">{completedJobs.length}</span>
</div>
<div className="sidebar-list">
{completedJobs.length === 0 ? (
<div className="sidebar-empty">No output files</div>
) : (
completedJobs.map((job) => (
<a
key={job.id}
className="output-item"
href={`/media/out/${job.output_filename}`}
target="_blank"
rel="noreferrer"
title={job.output_filename}
>
<span className="filename">{job.output_filename}</span>
</a>
))
)}
</div>
</div>
</aside>
<main className="main">
@@ -147,7 +225,7 @@ function App() {
<video
ref={videoRef}
controls
src={`/media/${selectedAsset.file_path}`}
src={`/media/in/${selectedAsset.file_path}`}
onTimeUpdate={handleTimeUpdate}
onLoadedMetadata={handleLoadedMetadata}
/>
@@ -166,6 +244,7 @@ function App() {
asset={selectedAsset}
trimStart={trimStart}
trimEnd={trimEnd}
onJobCreated={refreshJobs}
/>
</div>
) : (

View File

@@ -1,49 +1,30 @@
import { useState, useEffect } from "react";
import { getPresets, getJobs, createJob, cancelJob } from "./api";
import type { MediaAsset, TranscodePreset, TranscodeJob } from "./types";
import { getPresets, createJob } from "./api";
import type { MediaAsset, TranscodePreset } from "./types";
interface JobPanelProps {
asset: MediaAsset;
trimStart: number;
trimEnd: number;
onJobCreated: () => void;
}
export default function JobPanel({ asset, trimStart, trimEnd }: JobPanelProps) {
export default function JobPanel({
asset,
trimStart,
trimEnd,
onJobCreated,
}: JobPanelProps) {
const [presets, setPresets] = useState<TranscodePreset[]>([]);
const [jobs, setJobs] = useState<TranscodeJob[]>([]);
const [selectedPresetId, setSelectedPresetId] = useState<string>("");
const [submitting, setSubmitting] = useState(false);
// Load presets on mount
useEffect(() => {
getPresets().then(setPresets).catch(console.error);
}, []);
// Poll jobs for this asset
useEffect(() => {
let active = true;
const fetchJobs = () => {
getJobs()
.then((allJobs) => {
if (active) {
setJobs(
allJobs.filter((j) => j.source_asset_id === asset.id),
);
}
})
.catch(console.error);
};
fetchJobs();
const interval = setInterval(fetchJobs, 3000);
return () => {
active = false;
clearInterval(interval);
};
}, [asset.id]);
const hasTrim = trimStart > 0 || (asset.duration != null && trimEnd < asset.duration);
const hasTrim =
trimStart > 0 || (asset.duration != null && trimEnd < asset.duration);
const hasPreset = selectedPresetId !== "";
const canSubmit = hasTrim || hasPreset;
@@ -62,9 +43,7 @@ export default function JobPanel({ asset, trimStart, trimEnd }: JobPanelProps) {
trim_start: hasTrim ? trimStart : null,
trim_end: hasTrim ? trimEnd : null,
});
// Refresh jobs immediately
const allJobs = await getJobs();
setJobs(allJobs.filter((j) => j.source_asset_id === asset.id));
onJobCreated();
} catch (e) {
alert(e instanceof Error ? e.message : "Failed to create job");
} finally {
@@ -72,16 +51,6 @@ export default function JobPanel({ asset, trimStart, trimEnd }: JobPanelProps) {
}
}
async function handleCancel(jobId: string) {
try {
await cancelJob(jobId);
const allJobs = await getJobs();
setJobs(allJobs.filter((j) => j.source_asset_id === asset.id));
} catch (e) {
console.error("Cancel failed:", e);
}
}
return (
<div className="job-panel">
<div className="job-controls">
@@ -105,39 +74,6 @@ export default function JobPanel({ asset, trimStart, trimEnd }: JobPanelProps) {
{submitting ? "Submitting..." : buttonLabel}
</button>
</div>
{jobs.length > 0 && (
<div className="job-list">
<h3>Jobs</h3>
{jobs.map((job) => (
<div key={job.id} className="job-item">
<div className="job-item-header">
<span className="job-filename">{job.output_filename}</span>
<span className={`job-status ${job.status}`}>{job.status}</span>
</div>
{job.status === "processing" && (
<div className="job-progress-bar">
<div
className="job-progress-fill"
style={{ width: `${job.progress}%` }}
/>
</div>
)}
{(job.status === "pending" || job.status === "processing") && (
<button
className="job-cancel"
onClick={() => handleCancel(job.id)}
>
Cancel
</button>
)}
{job.status === "failed" && job.error_message && (
<div className="job-error">{job.error_message}</div>
)}
</div>
))}
</div>
)}
</div>
);
}