This commit is contained in:
2026-03-23 11:13:30 -03:00
parent 8186bb5fe6
commit 71fd0510de
34 changed files with 1373 additions and 104 deletions

View File

@@ -40,7 +40,7 @@ app = FastAPI(
# CORS # CORS
app.add_middleware( app.add_middleware(
CORSMiddleware, CORSMiddleware,
allow_origins=["http://mpr.local.ar", "http://localhost:5173"], allow_origins=["http://mpr.local.ar", "http://k8s.mpr.local.ar", "http://localhost:5173"],
allow_credentials=True, allow_credentials=True,
allow_methods=["*"], allow_methods=["*"],
allow_headers=["*"], allow_headers=["*"],
@@ -57,6 +57,11 @@ app.include_router(chunker_router)
app.include_router(detect_router) app.include_router(detect_router)
@app.get("/health")
def health():
return {"status": "ok"}
@app.get("/") @app.get("/")
def root(): def root():
"""API root.""" """API root."""

View File

@@ -1,11 +1,13 @@
FROM python:3.11-slim FROM python:3.11-slim
RUN pip install --no-cache-dir uv
WORKDIR /app WORKDIR /app
COPY requirements.txt . COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt RUN uv pip install --system --no-cache -r requirements.txt
# No COPY . . — code is volume-mounted in dev (..:/app) # Copy code into image (k8s uses this, docker-compose volume-mounts over it)
# This image only provides the Python runtime + dependencies COPY . .
CMD ["python", "admin/manage.py", "runserver", "0.0.0.0:8000"] CMD ["python", "admin/manage.py", "runserver", "0.0.0.0:8000"]

View File

@@ -1,5 +1,7 @@
FROM python:3.11-slim FROM python:3.11-slim
RUN pip install --no-cache-dir uv
RUN apt-get update && apt-get install -y \ RUN apt-get update && apt-get install -y \
ffmpeg \ ffmpeg \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
@@ -7,9 +9,9 @@ RUN apt-get update && apt-get install -y \
WORKDIR /app WORKDIR /app
COPY requirements.txt requirements-worker.txt ./ COPY requirements.txt requirements-worker.txt ./
RUN pip install --no-cache-dir -r requirements-worker.txt RUN uv pip install --system --no-cache -r requirements-worker.txt
# No COPY . . — code is volume-mounted in dev (..:/app) # Copy code into image (k8s uses this, docker-compose volume-mounts over it)
# This image only provides Python runtime + FFmpeg + dependencies COPY . .
CMD ["celery", "-A", "admin.mpr", "worker", "--loglevel=info"] CMD ["celery", "-A", "admin.mpr", "worker", "--loglevel=info"]

43
ctrl/Tiltfile Normal file
View File

@@ -0,0 +1,43 @@
# MPR — Tilt development environment
# Usage: cd ctrl && tilt up
# Cluster: kind (name: mpr)

# Refuse to deploy to anything but the local kind cluster.
allow_k8s_contexts('kind-mpr')

# Apply k8s manifests via kustomize (dev overlay)
k8s_yaml(kustomize('k8s/overlays/dev'))

# --- Images — reuse existing Dockerfiles ---

# FastAPI (Python backend). Build context is the repo root; live_update syncs
# the whole tree into /app so uvicorn --reload picks up edits without a rebuild.
# NOTE(review): Tilt resolves 'dockerfile' relative to this Tiltfile (ctrl/);
# if the backend Dockerfile lives at the repo root this should be
# '../Dockerfile' — confirm which path exists.
docker_build(
    'mpr-fastapi',
    context='..',
    dockerfile='Dockerfile',
    live_update=[
        sync('..', '/app'),
    ],
)

# Detection UI (Vue 3) — sync only the files the Vite dev server watches.
docker_build(
    'mpr-detection',
    context='../ui/detection-app',
    dockerfile='../ui/detection-app/Dockerfile',
    live_update=[
        sync('../ui/detection-app/src', '/app/src'),
        sync('../ui/detection-app/index.html', '/app/index.html'),
        sync('../ui/detection-app/vite.config.ts', '/app/vite.config.ts'),
    ],
)

# Framework changes trigger a full rebuild (live_update can't reach outside context)
watch_file('../ui/framework/src')

# --- Resources ---
k8s_resource('redis')
# Backend waits for redis; gateway waits for both upstreams and is the single
# forwarded entry point into the cluster.
k8s_resource('fastapi', resource_deps=['redis'])
k8s_resource('detection-ui')
k8s_resource('gateway', resource_deps=['fastapi', 'detection-ui'],
             port_forwards=['8080:8080'])

View File

@@ -0,0 +1,11 @@
# Shared, non-secret configuration for all MPR services in the cluster.
apiVersion: v1
kind: ConfigMap
metadata:
  name: mpr-config
  namespace: mpr
data:
  # Broker/cache URL resolved via the in-cluster `redis` Service.
  REDIS_URL: redis://redis:6379/0
  # "1" presumably enables backend debug behavior — dev-only; verify before
  # reusing this ConfigMap in a prod overlay.
  DEBUG: "1"
  # Listen ports for each service; deployments consume these via envFrom.
  FASTAPI_PORT: "8702"
  DETECTION_UI_PORT: "5175"
  GATEWAY_PORT: "8080"

View File

@@ -0,0 +1,44 @@
# Detection UI (Vue/Vite dev server) — one replica, fronted by the gateway.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: detection-ui
  namespace: mpr
spec:
  replicas: 1
  selector:
    matchLabels:
      app: detection-ui
  template:
    metadata:
      labels:
        app: detection-ui
    spec:
      containers:
        - name: detection-ui
          image: mpr-detection
          ports:
            - containerPort: 5175
          envFrom:
            - configMapRef:
                name: mpr-config
          env:
            # Presumably consumed by the Vite dev server's allowedHosts so it
            # accepts the Host header the gateway forwards — confirm in
            # vite.config.ts.
            - name: VITE_ALLOWED_HOSTS
              value: "k8s.mpr.local.ar"
          resources:
            requests:
              memory: 64Mi
              cpu: 50m
            limits:
              memory: 256Mi
---
apiVersion: v1
kind: Service
metadata:
  name: detection-ui
  namespace: mpr
spec:
  selector:
    app: detection-ui
  ports:
    - port: 5175
      targetPort: 5175

View File

@@ -0,0 +1,48 @@
# FastAPI backend — dev-mode uvicorn with --reload (pairs with Tilt live_update).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: fastapi
  namespace: mpr
spec:
  replicas: 1
  selector:
    matchLabels:
      app: fastapi
  template:
    metadata:
      labels:
        app: fastapi
    spec:
      containers:
        - name: fastapi
          image: mpr-fastapi
          # sh -c so $FASTAPI_PORT (from mpr-config via envFrom) is expanded.
          command: ["sh", "-c", "uvicorn core.api.main:app --host 0.0.0.0 --port $FASTAPI_PORT --reload"]
          ports:
            # NOTE(review): hard-coded 8702 here and in the probe must stay in
            # sync with FASTAPI_PORT in the ConfigMap.
            - containerPort: 8702
          envFrom:
            - configMapRef:
                name: mpr-config
          # Probes the /health endpoint added to the app in this commit.
          readinessProbe:
            httpGet:
              path: /health
              port: 8702
            initialDelaySeconds: 5
            periodSeconds: 10
          resources:
            requests:
              memory: 128Mi
              cpu: 100m
            limits:
              memory: 512Mi
---
apiVersion: v1
kind: Service
metadata:
  name: fastapi
  namespace: mpr
spec:
  selector:
    app: fastapi
  ports:
    - port: 8702
      targetPort: 8702

128
ctrl/k8s/base/gateway.yaml Normal file
View File

@@ -0,0 +1,128 @@
# Envoy edge gateway: single entry point that routes /api/* to FastAPI and
# everything else to the detection UI.
apiVersion: v1
kind: ConfigMap
metadata:
  name: envoy-gateway-config
  namespace: mpr
data:
  envoy.yaml: |
    static_resources:
      listeners:
        - name: http
          address:
            socket_address:
              address: 0.0.0.0
              port_value: 8080
          filter_chains:
            - filters:
                - name: envoy.filters.network.http_connection_manager
                  typed_config:
                    "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
                    stat_prefix: ingress
                    codec_type: AUTO
                    route_config:
                      name: local_routes
                      virtual_hosts:
                        - name: mpr
                          domains: ["k8s.mpr.local.ar", "*"]
                          routes:
                            # SSE — long timeout; must be listed before the
                            # generic /api/ route (Envoy picks the first match).
                            # FIX: strip the /api prefix here too — the generic
                            # /api/ route below rewrites it away, so the backend
                            # serves /detect/stream/..., not /api/detect/stream/...
                            - match:
                                prefix: "/api/detect/stream/"
                              route:
                                cluster: fastapi
                                prefix_rewrite: "/detect/stream/"
                                timeout: 3600s
                                idle_timeout: 3600s
                            # FastAPI — strip /api/ prefix
                            - match:
                                prefix: "/api/"
                              route:
                                cluster: fastapi
                                prefix_rewrite: "/"
                            # Detection UI
                            - match:
                                prefix: "/detection/"
                              route:
                                cluster: detection-ui
                            # Default — anything else is served by the UI.
                            - match:
                                prefix: "/"
                              route:
                                cluster: detection-ui
                                prefix_rewrite: "/detection/"
                    http_filters:
                      - name: envoy.filters.http.router
                        typed_config:
                          "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
      clusters:
        - name: fastapi
          connect_timeout: 5s
          # STRICT_DNS re-resolves the Service name, tracking pod churn.
          type: STRICT_DNS
          lb_policy: ROUND_ROBIN
          load_assignment:
            cluster_name: fastapi
            endpoints:
              - lb_endpoints:
                  - endpoint:
                      address:
                        socket_address:
                          address: fastapi
                          port_value: 8702
        - name: detection-ui
          connect_timeout: 5s
          type: STRICT_DNS
          lb_policy: ROUND_ROBIN
          load_assignment:
            cluster_name: detection-ui
            endpoints:
              - lb_endpoints:
                  - endpoint:
                      address:
                        socket_address:
                          address: detection-ui
                          port_value: 5175
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gateway
  namespace: mpr
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gateway
  template:
    metadata:
      labels:
        app: gateway
    spec:
      containers:
        - name: envoy
          # NOTE(review): v1.28-latest is a floating tag; pin a patch version
          # or digest for reproducible deploys.
          image: envoyproxy/envoy:v1.28-latest
          ports:
            - containerPort: 8080
          volumeMounts:
            # Envoy reads /etc/envoy/envoy.yaml by default.
            - name: config
              mountPath: /etc/envoy
          resources:
            requests:
              memory: 64Mi
              cpu: 50m
            limits:
              memory: 256Mi
      volumes:
        - name: config
          configMap:
            name: envoy-gateway-config
---
apiVersion: v1
kind: Service
metadata:
  name: gateway
  namespace: mpr
spec:
  selector:
    app: gateway
  ports:
    - port: 80
      targetPort: 8080

View File

@@ -0,0 +1,12 @@
# Base kustomization — the overlays under ../overlays layer Service-type
# patches on top of these resources.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# Applied to every resource that doesn't set its own namespace.
namespace: mpr
resources:
  - namespace.yaml
  - configmap.yaml
  - redis.yaml
  - fastapi.yaml
  - detection-ui.yaml
  - gateway.yaml

View File

@@ -0,0 +1,4 @@
# All MPR workloads live in this namespace.
apiVersion: v1
kind: Namespace
metadata:
  name: mpr

43
ctrl/k8s/base/redis.yaml Normal file
View File

@@ -0,0 +1,43 @@
# Redis — broker/cache for the backend. Single replica, no persistence
# configured (ephemeral dev data).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  namespace: mpr
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
        - name: redis
          image: redis:7-alpine
          ports:
            - containerPort: 6379
          # Ready once redis answers PING (returns PONG).
          readinessProbe:
            exec:
              command: ["redis-cli", "ping"]
            initialDelaySeconds: 2
            periodSeconds: 5
          resources:
            requests:
              memory: 64Mi
              cpu: 50m
            limits:
              memory: 256Mi
---
apiVersion: v1
kind: Service
metadata:
  name: redis
  namespace: mpr
spec:
  selector:
    app: redis
  ports:
    - port: 6379
      targetPort: 6379

View File

@@ -0,0 +1,38 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: detection-ui
namespace: mpr
spec:
replicas: 1
selector:
matchLabels:
app: detection-ui
template:
metadata:
labels:
app: detection-ui
spec:
containers:
- name: detection-ui
image: mpr-detection
ports:
- containerPort: 5175
resources:
requests:
memory: 64Mi
cpu: 50m
limits:
memory: 256Mi
---
apiVersion: v1
kind: Service
metadata:
name: detection-ui
namespace: mpr
spec:
selector:
app: detection-ui
ports:
- port: 5175
targetPort: 5175

View File

@@ -0,0 +1,52 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: fastapi
namespace: mpr
spec:
replicas: 1
selector:
matchLabels:
app: fastapi
template:
metadata:
labels:
app: fastapi
spec:
containers:
- name: fastapi
image: mpr-fastapi
command: ["uvicorn", "core.api.main:app", "--host", "0.0.0.0", "--port", "8702", "--reload"]
ports:
- containerPort: 8702
env:
- name: REDIS_URL
value: redis://redis:6379/0
- name: DJANGO_ALLOW_ASYNC_UNSAFE
value: "true"
- name: DEBUG
value: "1"
readinessProbe:
httpGet:
path: /health
port: 8702
initialDelaySeconds: 5
periodSeconds: 10
resources:
requests:
memory: 128Mi
cpu: 100m
limits:
memory: 512Mi
---
apiVersion: v1
kind: Service
metadata:
name: fastapi
namespace: mpr
spec:
selector:
app: fastapi
ports:
- port: 8702
targetPort: 8702

View File

@@ -0,0 +1,128 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: envoy-gateway-config
namespace: mpr
data:
envoy.yaml: |
static_resources:
listeners:
- name: http
address:
socket_address:
address: 0.0.0.0
port_value: 8080
filter_chains:
- filters:
- name: envoy.filters.network.http_connection_manager
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager
stat_prefix: ingress
codec_type: AUTO
route_config:
name: local_routes
virtual_hosts:
- name: mpr
domains: ["k8s.mpr.local.ar", "*"]
routes:
# SSE — disable buffering
- match:
prefix: "/api/detect/stream/"
route:
cluster: fastapi
timeout: 3600s
idle_timeout: 3600s
# FastAPI — strip /api/ prefix
- match:
prefix: "/api/"
route:
cluster: fastapi
prefix_rewrite: "/"
# Detection UI
- match:
prefix: "/detection/"
route:
cluster: detection-ui
# Default — detection UI
- match:
prefix: "/"
route:
cluster: detection-ui
prefix_rewrite: "/detection/"
http_filters:
- name: envoy.filters.http.router
typed_config:
"@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
clusters:
- name: fastapi
connect_timeout: 5s
type: STRICT_DNS
lb_policy: ROUND_ROBIN
load_assignment:
cluster_name: fastapi
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: fastapi
port_value: 8702
- name: detection-ui
connect_timeout: 5s
type: STRICT_DNS
lb_policy: ROUND_ROBIN
load_assignment:
cluster_name: detection-ui
endpoints:
- lb_endpoints:
- endpoint:
address:
socket_address:
address: detection-ui
port_value: 5175
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: gateway
namespace: mpr
spec:
replicas: 1
selector:
matchLabels:
app: gateway
template:
metadata:
labels:
app: gateway
spec:
containers:
- name: envoy
image: envoyproxy/envoy:v1.28-latest
ports:
- containerPort: 8080
volumeMounts:
- name: config
mountPath: /etc/envoy
resources:
requests:
memory: 64Mi
cpu: 50m
limits:
memory: 256Mi
volumes:
- name: config
configMap:
name: envoy-gateway-config
---
apiVersion: v1
kind: Service
metadata:
name: gateway
namespace: mpr
spec:
selector:
app: gateway
ports:
- port: 80
targetPort: 8080

View File

@@ -0,0 +1,11 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: mpr
resources:
- namespace.yaml
- redis.yaml
- fastapi.yaml
- detection-ui.yaml
- gateway.yaml

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: mpr

View File

@@ -0,0 +1,43 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis
namespace: mpr
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- name: redis
image: redis:7-alpine
ports:
- containerPort: 6379
readinessProbe:
exec:
command: ["redis-cli", "ping"]
initialDelaySeconds: 2
periodSeconds: 5
resources:
requests:
memory: 64Mi
cpu: 50m
limits:
memory: 256Mi
---
apiVersion: v1
kind: Service
metadata:
name: redis
namespace: mpr
spec:
selector:
app: redis
ports:
- port: 6379
targetPort: 6379

View File

@@ -0,0 +1,20 @@
# Prod overlay — expose the gateway through a cloud LoadBalancer (AWS NLB,
# internal scheme). Everything else comes from the shared base.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ../../base
patches:
  # Gateway as cloud LoadBalancer
  - target:
      kind: Service
      name: gateway
    patch: |
      - op: replace
        path: /spec/type
        value: LoadBalancer
      - op: add
        path: /metadata/annotations
        value:
          service.beta.kubernetes.io/aws-load-balancer-type: nlb
          service.beta.kubernetes.io/aws-load-balancer-scheme: internal

View File

@@ -0,0 +1,30 @@
# Dev overlay — NodePorts so the kind cluster's extraPortMappings
# (kind-config.yaml: 30080→host :80, 30379→host :6382) can reach them.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ../../base
patches:
  # Gateway as NodePort for local access
  - target:
      kind: Service
      name: gateway
    patch: |
      - op: replace
        path: /spec/type
        value: NodePort
      - op: add
        path: /spec/ports/0/nodePort
        value: 30080
  # Redis as NodePort for redis-cli access from host
  - target:
      kind: Service
      name: redis
    patch: |
      - op: replace
        path: /spec/type
        value: NodePort
      - op: add
        path: /spec/ports/0/nodePort
        value: 30379

View File

@@ -0,0 +1,15 @@
# LAN overlay — relies on MetalLB being installed in the cluster to assign
# the LoadBalancer a LAN-routable IP.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ../../base
patches:
  # Gateway as LoadBalancer — MetalLB assigns a LAN IP
  - target:
      kind: Service
      name: gateway
    patch: |
      - op: replace
        path: /spec/type
        value: LoadBalancer

15
ctrl/k8s/kind-config.yaml Normal file
View File

@@ -0,0 +1,15 @@
# kind cluster definition for local dev. NodePorts from the dev overlay are
# published on the host via extraPortMappings.
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
name: mpr
nodes:
  - role: control-plane
    extraPortMappings:
      # Gateway → http://k8s.mpr.local.ar (bind to 127.0.0.2 to avoid conflict
      # with docker-compose on 127.0.0.1:80)
      - containerPort: 30080
        hostPort: 80
        listenAddress: "127.0.0.2"
        protocol: TCP
      # Redis (NodePort 30379) → host redis-cli on port 6382.
      # FIX: bind to loopback — redis runs without auth, and the stated use is
      # "access from host", so it must not listen on LAN-facing interfaces
      # (the kind default is 0.0.0.0).
      - containerPort: 30379
        hostPort: 6382
        listenAddress: "127.0.0.1"
        protocol: TCP

View File

@@ -0,0 +1,20 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
# Gateway as cloud LoadBalancer
- target:
kind: Service
name: gateway
patch: |
- op: replace
path: /spec/type
value: LoadBalancer
- op: add
path: /metadata/annotations
value:
service.beta.kubernetes.io/aws-load-balancer-type: nlb
service.beta.kubernetes.io/aws-load-balancer-scheme: internal

View File

@@ -0,0 +1,30 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
# Gateway as NodePort for local access
- target:
kind: Service
name: gateway
patch: |
- op: replace
path: /spec/type
value: NodePort
- op: add
path: /spec/ports/0/nodePort
value: 30080
# Redis as NodePort for redis-cli access from host
- target:
kind: Service
name: redis
patch: |
- op: replace
path: /spec/type
value: NodePort
- op: add
path: /spec/ports/0/nodePort
value: 30379

View File

@@ -0,0 +1,15 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
# Gateway as LoadBalancer — MetalLB assigns a LAN IP
- target:
kind: Service
name: gateway
patch: |
- op: replace
path: /spec/type
value: LoadBalancer

View File

@@ -0,0 +1,23 @@
from .base import (
ContentTypeProfile,
BrandDictionary,
CropContext,
DetectionConfig,
FrameExtractionConfig,
OCRConfig,
ResolverConfig,
SceneFilterConfig,
)
from .soccer import SoccerBroadcastProfile
__all__ = [
"ContentTypeProfile",
"BrandDictionary",
"CropContext",
"DetectionConfig",
"FrameExtractionConfig",
"OCRConfig",
"ResolverConfig",
"SceneFilterConfig",
"SoccerBroadcastProfile",
]

71
detect/profiles/base.py Normal file
View File

@@ -0,0 +1,71 @@
"""
ContentTypeProfile protocol and config dataclasses.
The pipeline graph is fixed — what varies per content type is configuration
and hooks. Each profile provides stage configs, a brand dictionary,
VLM prompt templates, and an aggregation strategy.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Protocol
from detect.models import BrandDetection, DetectionReport
@dataclass
class FrameExtractionConfig:
    """How frames are sampled from the source video."""

    # Frames sampled per second of footage.
    fps: float = 2.0
    # Hard upper bound on total frames, regardless of video length.
    max_frames: int = 500


@dataclass
class SceneFilterConfig:
    """Near-duplicate frame filtering settings."""

    # Max Hamming distance between frame hashes for two frames to count as the
    # same scene — presumably a perceptual hash; confirm in the filter stage.
    hamming_threshold: int = 8
    # Set False to pass every sampled frame through unfiltered.
    enabled: bool = True


@dataclass
class DetectionConfig:
    """Object-detector stage settings."""

    # Detector weights file (Ultralytics YOLOv8 naming).
    model_name: str = "yolov8n.pt"
    # Minimum detection confidence.
    confidence_threshold: float = 0.3
    # Class labels the detector should report.
    target_classes: list[str] = field(default_factory=lambda: ["logo", "text"])


@dataclass
class OCRConfig:
    """OCR stage settings."""

    # Language codes handed to the OCR engine.
    languages: list[str] = field(default_factory=lambda: ["en"])
    # Minimum OCR confidence.
    min_confidence: float = 0.5


@dataclass
class ResolverConfig:
    """Brand-name resolution settings."""

    # Minimum fuzzy-match score for an OCR string to resolve to a canonical
    # brand — presumably a 0-100 scale (rapidfuzz-style); confirm in resolver.
    fuzzy_threshold: int = 75


@dataclass
class BrandDictionary:
    """Maps canonical brand name → list of known aliases/spellings."""

    brands: dict[str, list[str]] = field(default_factory=dict)


@dataclass
class CropContext:
    """Everything the VLM prompt builder knows about one cropped region."""

    # Encoded crop image bytes.
    image: bytes
    # Text found near the crop, if any.
    surrounding_text: str = ""
    # Human-readable location hint (e.g. "top-center"), if any.
    position_hint: str = ""
class ContentTypeProfile(Protocol):
    """Structural interface every content-type profile must satisfy.

    The pipeline's stage graph is fixed; implementations vary only in the
    stage configurations, brand dictionary, VLM prompt, and aggregation
    strategy they supply.
    """

    # Machine-readable profile identifier (e.g. "soccer_broadcast").
    name: str

    def frame_extraction_config(self) -> FrameExtractionConfig: ...
    def scene_filter_config(self) -> SceneFilterConfig: ...
    def detection_config(self) -> DetectionConfig: ...
    def ocr_config(self) -> OCRConfig: ...
    def brand_dictionary(self) -> BrandDictionary: ...
    def resolver_config(self) -> ResolverConfig: ...
    def vlm_prompt(self, crop_context: CropContext) -> str: ...
    def aggregate(self, detections: list[BrandDetection]) -> DetectionReport: ...
    def auxiliary_detections(self, source: str) -> list[BrandDetection]: ...

92
detect/profiles/soccer.py Normal file
View File

@@ -0,0 +1,92 @@
"""Soccer broadcast profile — pitch hoardings, kits, scoreboards."""
from __future__ import annotations
from detect.models import BrandDetection, BrandStats, DetectionReport, PipelineStats
from .base import (
BrandDictionary,
CropContext,
DetectionConfig,
FrameExtractionConfig,
OCRConfig,
ResolverConfig,
SceneFilterConfig,
)
class SoccerBroadcastProfile:
    """Profile for soccer broadcasts: pitch hoardings, kits, scoreboards."""

    name = "soccer_broadcast"

    def frame_extraction_config(self) -> FrameExtractionConfig:
        # 2 fps suffices for slow-moving hoardings; the cap bounds long matches.
        return FrameExtractionConfig(fps=2.0, max_frames=500)

    def scene_filter_config(self) -> SceneFilterConfig:
        return SceneFilterConfig(enabled=True, hamming_threshold=8)

    def detection_config(self) -> DetectionConfig:
        wanted = ["logo", "text", "banner", "scoreboard"]
        return DetectionConfig(
            model_name="yolov8n.pt",
            confidence_threshold=0.3,
            target_classes=wanted,
        )

    def ocr_config(self) -> OCRConfig:
        # English + Spanish covers most broadcast sponsor text.
        return OCRConfig(languages=["en", "es"], min_confidence=0.5)

    def brand_dictionary(self) -> BrandDictionary:
        """Canonical sponsor names with the alias spellings OCR may produce."""
        aliases = {
            "Nike": ["nike", "NIKE", "swoosh"],
            "Adidas": ["adidas", "ADIDAS", "adi"],
            "Puma": ["puma", "PUMA"],
            "Emirates": ["emirates", "fly emirates", "EMIRATES"],
            "Coca-Cola": ["coca-cola", "coca cola", "coke", "COCA-COLA"],
            "Pepsi": ["pepsi", "PEPSI"],
            "Mastercard": ["mastercard", "MASTERCARD"],
            "Heineken": ["heineken", "HEINEKEN"],
            "Santander": ["santander", "SANTANDER"],
            "Gazprom": ["gazprom", "GAZPROM"],
            "Qatar Airways": ["qatar airways", "QATAR AIRWAYS"],
            "Lay's": ["lays", "lay's", "LAYS", "LAY'S"],
        }
        return BrandDictionary(brands=aliases)

    def resolver_config(self) -> ResolverConfig:
        return ResolverConfig(fuzzy_threshold=75)

    def vlm_prompt(self, crop_context: CropContext) -> str:
        """Build the VLM prompt for one crop, folding in optional context."""
        hint = ""
        if crop_context.position_hint:
            hint = f" Position: {crop_context.position_hint}."
        text = ""
        if crop_context.surrounding_text:
            text = f" Nearby text: '{crop_context.surrounding_text}'."
        return (
            f"Identify the brand or sponsor visible in this cropped region "
            f"from a soccer broadcast.{hint}{text} "
            f"Respond with: brand, confidence (0-1), reasoning."
        )

    def aggregate(self, detections: list[BrandDetection]) -> DetectionReport:
        """Fold raw detections into per-brand stats plus a time-sorted timeline."""
        per_brand: dict[str, BrandStats] = {}
        for det in detections:
            stats = per_brand.setdefault(det.brand, BrandStats())
            stats.total_appearances += 1
            stats.total_screen_time += det.duration
            n = stats.total_appearances
            # Incremental running mean — single pass over the detections.
            stats.avg_confidence = (stats.avg_confidence * (n - 1) + det.confidence) / n
            if stats.first_seen == 0.0 or det.timestamp < stats.first_seen:
                stats.first_seen = det.timestamp
            stats.last_seen = max(stats.last_seen, det.timestamp)
        # Report-level source/duration are filled in by the caller.
        return DetectionReport(
            video_source="",
            content_type=self.name,
            duration_seconds=0.0,
            brands=per_brand,
            timeline=sorted(detections, key=lambda det: det.timestamp),
            pipeline_stats=PipelineStats(),
        )

    def auxiliary_detections(self, source: str) -> list[BrandDetection]:
        """No auxiliary signal sources for soccer broadcasts yet."""
        return []

108
detect/profiles/stubs.py Normal file
View File

@@ -0,0 +1,108 @@
"""Stub profiles — interfaces defined, not yet implemented."""
from __future__ import annotations
from detect.models import BrandDetection, DetectionReport
from .base import (
BrandDictionary,
CropContext,
DetectionConfig,
FrameExtractionConfig,
OCRConfig,
ResolverConfig,
SceneFilterConfig,
)
class _UnimplementedProfile:
    """Shared raising behavior for profiles that are interface-only so far.

    Deduplicates the three stub classes below: every ContentTypeProfile hook
    raises NotImplementedError until a real implementation lands.
    """

    def frame_extraction_config(self) -> FrameExtractionConfig:
        raise NotImplementedError

    def scene_filter_config(self) -> SceneFilterConfig:
        raise NotImplementedError

    def detection_config(self) -> DetectionConfig:
        raise NotImplementedError

    def ocr_config(self) -> OCRConfig:
        raise NotImplementedError

    def brand_dictionary(self) -> BrandDictionary:
        raise NotImplementedError

    def resolver_config(self) -> ResolverConfig:
        raise NotImplementedError

    def vlm_prompt(self, crop_context: CropContext) -> str:
        raise NotImplementedError

    def aggregate(self, detections: list[BrandDetection]) -> DetectionReport:
        raise NotImplementedError

    def auxiliary_detections(self, source: str) -> list[BrandDetection]:
        raise NotImplementedError


class NewsBroadcastProfile(_UnimplementedProfile):
    """Stub — news broadcast profile (interface defined, not implemented)."""

    name = "news_broadcast"


class AdvertisingProfile(_UnimplementedProfile):
    """Stub — advertising profile (interface defined, not implemented)."""

    name = "advertising"


class TranscriptProfile(_UnimplementedProfile):
    """Stub — transcript profile (interface defined, not implemented)."""

    name = "transcript"

View File

@@ -0,0 +1,73 @@
"""Tests for ContentTypeProfile implementations."""
import pytest
from detect.models import BrandDetection
from detect.profiles.base import ContentTypeProfile, CropContext
from detect.profiles.soccer import SoccerBroadcastProfile
from detect.profiles.stubs import AdvertisingProfile, NewsBroadcastProfile, TranscriptProfile
def test_soccer_satisfies_protocol():
    # Annotating with the Protocol type lets a static checker verify
    # structural conformance; at runtime this just checks the name.
    profile: ContentTypeProfile = SoccerBroadcastProfile()
    assert profile.name == "soccer_broadcast"


def test_soccer_frame_extraction_config():
    cfg = SoccerBroadcastProfile().frame_extraction_config()
    assert cfg.fps > 0
    assert cfg.max_frames > 0


def test_soccer_detection_config():
    cfg = SoccerBroadcastProfile().detection_config()
    assert 0 < cfg.confidence_threshold < 1
    assert len(cfg.target_classes) > 0


def test_soccer_brand_dictionary_non_empty():
    # Every canonical brand needs at least one alias for the resolver to match.
    bd = SoccerBroadcastProfile().brand_dictionary()
    assert len(bd.brands) > 0
    for canonical, aliases in bd.brands.items():
        assert len(aliases) > 0


def test_soccer_vlm_prompt():
    # Surrounding text and position hint must be folded into the prompt.
    ctx = CropContext(image=b"fake", surrounding_text="Emirates", position_hint="top-center")
    prompt = SoccerBroadcastProfile().vlm_prompt(ctx)
    assert "brand" in prompt.lower()
    assert "Emirates" in prompt


def test_soccer_aggregate_empty():
    # Aggregating nothing yields an empty but well-formed report.
    report = SoccerBroadcastProfile().aggregate([])
    assert len(report.brands) == 0
    assert len(report.timeline) == 0


def test_soccer_aggregate_groups():
    detections = [
        BrandDetection(brand="Nike", timestamp=1.0, duration=0.5, confidence=0.9, source="ocr"),
        BrandDetection(brand="Nike", timestamp=2.0, duration=0.5, confidence=0.8, source="ocr"),
        BrandDetection(brand="Adidas", timestamp=3.0, duration=0.5, confidence=0.7, source="logo_match"),
    ]
    report = SoccerBroadcastProfile().aggregate(detections)
    assert "Nike" in report.brands
    assert "Adidas" in report.brands
    assert report.brands["Nike"].total_appearances == 2
    assert report.brands["Adidas"].total_appearances == 1
    # Timeline must come back sorted by timestamp.
    assert report.timeline == sorted(report.timeline, key=lambda d: d.timestamp)


def test_soccer_auxiliary_returns_empty():
    assert SoccerBroadcastProfile().auxiliary_detections("test.mp4") == []


@pytest.mark.parametrize("stub_cls", [NewsBroadcastProfile, AdvertisingProfile, TranscriptProfile])
def test_stubs_raise(stub_cls):
    # Stubs expose the interface but raise until implemented.
    stub = stub_cls()
    assert isinstance(stub.name, str)
    with pytest.raises(NotImplementedError):
        stub.frame_extraction_config()
    with pytest.raises(NotImplementedError):
        stub.brand_dictionary()

View File

@@ -1,12 +1,13 @@
<script setup lang="ts"> <script setup lang="ts">
import { ref } from 'vue' import { ref } from 'vue'
import { SSEDataSource } from 'mpr-ui-framework' import { SSEDataSource, Panel, LayoutGrid } from 'mpr-ui-framework'
import 'mpr-ui-framework/src/tokens.css'
import type { LogEvent, StatsUpdate } from './types/sse-contract' import type { LogEvent, StatsUpdate } from './types/sse-contract'
const jobId = ref(new URLSearchParams(window.location.search).get('job') || 'test-job') const jobId = ref(new URLSearchParams(window.location.search).get('job') || 'test-job')
const logs = ref<LogEvent[]>([]) const logs = ref<LogEvent[]>([])
const stats = ref<StatsUpdate | null>(null) const stats = ref<StatsUpdate | null>(null)
const status = ref('idle') const status = ref<'idle' | 'live' | 'processing' | 'error'>('idle')
const source = new SSEDataSource({ const source = new SSEDataSource({
id: 'detect-stream', id: 'detect-stream',
@@ -23,8 +24,13 @@ source.on<StatsUpdate>('stats_update', (e) => {
stats.value = e stats.value = e
}) })
// Expose status reactively const statusMap: Record<string, 'idle' | 'live' | 'processing' | 'error'> = {
const checkStatus = () => { status.value = source.status.value } idle: 'idle',
connecting: 'processing',
live: 'live',
error: 'error',
}
const checkStatus = () => { status.value = statusMap[source.status.value] ?? 'idle' }
setInterval(checkStatus, 500) setInterval(checkStatus, 500)
source.connect() source.connect()
@@ -34,145 +40,118 @@ source.connect()
<div class="app"> <div class="app">
<header> <header>
<h1>Detection Pipeline</h1> <h1>Detection Pipeline</h1>
<span class="status" :class="status">{{ status }}</span> <span class="status-badge" :class="status">{{ status }}</span>
<span class="job-id">job: {{ jobId }}</span> <span class="job-id">job: {{ jobId }}</span>
</header> </header>
<section class="stats" v-if="stats"> <LayoutGrid :columns="2" :rows="1" gap="var(--space-2)">
<div class="stat"> <Panel title="Stats" :status="status">
<span class="label">Frames</span> <div class="stats" v-if="stats">
<span class="value">{{ stats.frames_extracted }}</span> <div class="stat" v-for="s in [
</div> { label: 'Frames', value: stats.frames_extracted },
<div class="stat"> { label: 'After filter', value: stats.frames_after_scene_filter },
<span class="label">After filter</span> { label: 'Regions', value: stats.regions_detected },
<span class="value">{{ stats.frames_after_scene_filter }}</span> { label: 'OCR resolved', value: stats.regions_resolved_by_ocr },
</div> { label: 'Cloud calls', value: stats.cloud_llm_calls },
<div class="stat"> { label: 'Cost', value: `$${stats.estimated_cloud_cost_usd.toFixed(4)}` },
<span class="label">Regions</span> ]" :key="s.label">
<span class="value">{{ stats.regions_detected }}</span> <span class="label">{{ s.label }}</span>
</div> <span class="value">{{ s.value }}</span>
<div class="stat"> </div>
<span class="label">OCR resolved</span>
<span class="value">{{ stats.regions_resolved_by_ocr }}</span>
</div>
<div class="stat">
<span class="label">Cloud calls</span>
<span class="value">{{ stats.cloud_llm_calls }}</span>
</div>
<div class="stat">
<span class="label">Cost</span>
<span class="value">${{ stats.estimated_cloud_cost_usd.toFixed(4) }}</span>
</div>
</section>
<section class="logs">
<h2>Log</h2>
<div class="log-scroll">
<div v-for="(log, i) in logs" :key="i" class="log-line" :class="log.level.toLowerCase()">
<span class="ts">{{ log.ts }}</span>
<span class="level">{{ log.level }}</span>
<span class="stage">{{ log.stage }}</span>
<span class="msg">{{ log.msg }}</span>
</div> </div>
<div v-if="logs.length === 0" class="empty">Waiting for events...</div> <div v-else class="empty">Waiting for stats...</div>
</div> </Panel>
</section>
<Panel title="Log" :status="status">
<div class="log-scroll">
<div v-for="(log, i) in logs" :key="i" class="log-line" :class="log.level.toLowerCase()">
<span class="ts">{{ log.ts }}</span>
<span class="level">{{ log.level }}</span>
<span class="stage">{{ log.stage }}</span>
<span class="msg">{{ log.msg }}</span>
</div>
<div v-if="logs.length === 0" class="empty">Waiting for events...</div>
</div>
</Panel>
</LayoutGrid>
</div> </div>
</template> </template>
<style> <style>
:root {
--bg: #0d0d0f;
--surface: #16161a;
--border: #2e2e38;
--text: #e8e8f0;
--dim: #555568;
--green: #3ecf8e;
--blue: #4f9cf9;
--amber: #f5a623;
--red: #f06565;
}
* { margin: 0; padding: 0; box-sizing: border-box; } * { margin: 0; padding: 0; box-sizing: border-box; }
body { body {
background: var(--bg); background: var(--surface-0);
color: var(--text); color: var(--text-primary);
font-family: 'JetBrains Mono', 'Fira Code', monospace; font-family: var(--font-mono);
font-size: 13px; font-size: var(--font-size-base);
} }
.app { .app {
max-width: 1200px; height: 100vh;
margin: 0 auto; display: flex;
padding: 16px; flex-direction: column;
padding: var(--space-4);
gap: var(--space-2);
} }
header { header {
display: flex; display: flex;
align-items: center; align-items: center;
gap: 16px; gap: var(--space-4);
padding: 12px 0; padding: var(--space-3) 0;
border-bottom: 1px solid var(--border); border-bottom: var(--panel-border);
margin-bottom: 16px; flex-shrink: 0;
} }
header h1 { font-size: 15px; font-weight: 600; } header h1 { font-size: var(--font-size-lg); font-weight: 600; }
.status { .status-badge {
padding: 2px 8px; padding: 2px var(--space-2);
border-radius: 4px; border-radius: 4px;
font-size: 11px; font-size: var(--font-size-sm);
text-transform: uppercase; text-transform: uppercase;
} }
.status.idle { background: var(--dim); } .status-badge.idle { background: var(--status-idle); }
.status.connecting { background: var(--blue); color: #000; } .status-badge.processing { background: var(--status-processing); color: #000; }
.status.live { background: var(--green); color: #000; } .status-badge.live { background: var(--status-live); color: #000; }
.status.error { background: var(--red); color: #000; } .status-badge.error { background: var(--status-error); color: #000; }
.job-id { color: var(--dim); font-size: 11px; margin-left: auto; } .job-id { color: var(--text-dim); font-size: var(--font-size-sm); margin-left: auto; }
.stats { .stats {
display: grid; display: grid;
grid-template-columns: repeat(auto-fit, minmax(140px, 1fr)); grid-template-columns: repeat(3, 1fr);
gap: 8px; gap: var(--space-2);
margin-bottom: 16px;
} }
.stat { .stat {
background: var(--surface); background: var(--surface-2);
border: 1px solid var(--border); border-radius: var(--panel-radius);
border-radius: 6px; padding: var(--space-3);
padding: 12px;
} }
.stat .label { display: block; color: var(--dim); font-size: 11px; margin-bottom: 4px; } .stat .label { display: block; color: var(--text-dim); font-size: var(--font-size-sm); margin-bottom: var(--space-1); }
.stat .value { font-size: 20px; font-weight: 600; } .stat .value { font-size: 20px; font-weight: 600; }
.logs h2 { font-size: 13px; margin-bottom: 8px; color: var(--dim); }
.log-scroll { .log-scroll {
background: var(--surface);
border: 1px solid var(--border);
border-radius: 6px;
padding: 8px;
max-height: 500px; max-height: 500px;
overflow-y: auto; overflow-y: auto;
} }
.log-line { .log-line {
display: flex; display: flex;
gap: 8px; gap: var(--space-2);
padding: 2px 0; padding: 2px 0;
font-size: 12px; font-size: 12px;
line-height: 1.5; line-height: 1.5;
} }
.log-line .ts { color: var(--dim); min-width: 80px; } .log-line .ts { color: var(--text-dim); min-width: 80px; }
.log-line .level { min-width: 56px; font-weight: 600; } .log-line .level { min-width: 56px; font-weight: 600; }
.log-line .stage { color: var(--blue); min-width: 120px; } .log-line .stage { color: var(--status-processing); min-width: 120px; }
.log-line.info .level { color: var(--green); } .log-line.info .level { color: var(--status-live); }
.log-line.warning .level { color: var(--amber); } .log-line.warning .level { color: var(--status-escalating); }
.log-line.error .level { color: var(--red); } .log-line.error .level { color: var(--status-error); }
.log-line.debug .level { color: var(--dim); } .log-line.debug .level { color: var(--text-dim); }
.empty { color: var(--dim); padding: 20px; text-align: center; } .empty { color: var(--text-dim); padding: var(--space-6); text-align: center; }
</style> </style>

View File

@@ -0,0 +1,32 @@
<script setup lang="ts">
// LayoutGrid — equal-cell CSS grid wrapper whose geometry is driven by props.
// Declared props are exposed to the template directly by <script setup>,
// so no local binding is needed.
withDefaults(
  defineProps<{
    columns?: number
    rows?: number
    gap?: string
  }>(),
  {
    columns: 2,
    rows: 2,
    gap: 'var(--space-2)',
  },
)
</script>
<template>
  <div
    class="layout-grid"
    :style="{
      gap,
      gridTemplateRows: `repeat(${rows}, 1fr)`,
      gridTemplateColumns: `repeat(${columns}, 1fr)`,
    }"
  >
    <slot />
  </div>
</template>
<style scoped>
/* Fill the parent; track sizing comes entirely from the inline style above. */
.layout-grid {
  height: 100%;
  width: 100%;
  display: grid;
}
</style>

View File

@@ -0,0 +1,79 @@
<script setup lang="ts">
// Panel — standard chrome for a dashboard tile: a header bar with title and
// colored status dot, a scrolling default slot, and an absolutely-positioned
// overlay slot layered above the body.
const props = defineProps<{
  title: string
  status?: 'idle' | 'live' | 'processing' | 'error'
}>()
</script>
<template>
  <div class="panel">
    <div class="panel-header">
      <span class="panel-title">{{ props.title }}</span>
      <!-- An omitted status renders as the neutral 'idle' dot. -->
      <span class="panel-status" :class="props.status ?? 'idle'" />
    </div>
    <div class="panel-body">
      <slot />
    </div>
    <div class="panel-overlay">
      <slot name="overlay" />
    </div>
  </div>
</template>
<style scoped>
.panel {
  display: flex;
  flex-direction: column;
  position: relative;
  overflow: hidden;
  border: var(--panel-border);
  border-radius: var(--panel-radius);
  background: var(--surface-1);
}
.panel-header {
  flex-shrink: 0;
  display: flex;
  align-items: center;
  gap: var(--space-2);
  padding: 0 var(--space-3);
  height: var(--panel-header-height);
  border-bottom: var(--panel-border);
  background: var(--surface-2);
}
.panel-title {
  color: var(--text-secondary);
  font-family: var(--font-ui);
  font-size: var(--font-size-sm);
  font-weight: 600;
  letter-spacing: 0.04em;
  text-transform: uppercase;
}
/* The dot is pushed to the far right of the header row. */
.panel-status {
  margin-left: auto;
  width: 8px;
  height: 8px;
  border-radius: 50%;
}
.panel-status.idle { background: var(--status-idle); }
.panel-status.live { background: var(--status-live); }
.panel-status.processing { background: var(--status-processing); }
.panel-status.error { background: var(--status-error); }
.panel-body {
  flex: 1;
  padding: var(--space-2);
  overflow: auto;
}
/* Overlay covers everything below the header. It ignores pointer events
   itself so the body stays interactive, while its children opt back in. */
.panel-overlay {
  position: absolute;
  inset: var(--panel-header-height) 0 0 0;
  pointer-events: none;
}
.panel-overlay > :deep(*) {
  pointer-events: auto;
}
</style>

View File

@@ -3,3 +3,7 @@ export { DataSource, type DataSourceStatus } from './datasources/DataSource'
export { SSEDataSource } from './datasources/SSEDataSource' export { SSEDataSource } from './datasources/SSEDataSource'
export { StaticDataSource } from './datasources/StaticDataSource' export { StaticDataSource } from './datasources/StaticDataSource'
export { useDataSource } from './composables/useDataSource' export { useDataSource } from './composables/useDataSource'
// Components
export { default as Panel } from './components/Panel.vue'
export { default as LayoutGrid } from './components/LayoutGrid.vue'

View File

@@ -0,0 +1,45 @@
/* Framework design tokens — retheme the whole UI by replacing this file. */
:root {
  /* Spacing scale on a 4px base unit. */
  --space-1: 4px;
  --space-2: 8px;
  --space-3: 12px;
  --space-4: 16px;
  --space-6: 24px;
  --space-8: 32px;
  /* Surfaces — dark theme; numbered from deepest background (0) to most
     elevated (3). --border separates adjacent surfaces. */
  --surface-0: #0d0d0f;
  --surface-1: #16161a;
  --surface-2: #1e1e24;
  --surface-3: #26262f;
  --border: #2e2e38;
  --text-primary: #e8e8f0;
  --text-secondary: #8888a0;
  --text-dim: #555568;
  /* Status colors (idle dot reuses the dim text tone). */
  --status-idle: #555568;
  --status-live: #3ecf8e;
  --status-processing: #4f9cf9;
  --status-escalating: #f5a623;
  --status-error: #f06565;
  /* Confidence color scale, low → high (reuses the status palette). */
  --conf-low: #f06565;
  --conf-mid: #f5a623;
  --conf-high: #3ecf8e;
  /* Typography: mono for data/log output, UI face for chrome. */
  --font-mono: 'JetBrains Mono', 'Fira Code', monospace;
  --font-ui: 'Inter', system-ui, sans-serif;
  --font-size-sm: 11px;
  --font-size-base: 13px;
  --font-size-lg: 15px;
  /* Panel chrome shared by every tile. */
  --panel-radius: 6px;
  --panel-border: 1px solid var(--border);
  --panel-header-height: 36px;
}