phase 4
This commit is contained in:
68
detect/emit.py
Normal file
68
detect/emit.py
Normal file
@@ -0,0 +1,68 @@
|
||||
"""
|
||||
Event emission helpers for detection pipeline stages.
|
||||
|
||||
Single place that knows how to build event payloads.
|
||||
Stages call these instead of constructing dicts or dataclasses directly.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import dataclasses
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from detect.events import push_detect_event
|
||||
from detect.models import PipelineStats
|
||||
|
||||
|
||||
def log(job_id: str | None, stage: str, level: str, msg: str) -> None:
|
||||
"""Emit a log event."""
|
||||
if not job_id:
|
||||
return
|
||||
payload = {
|
||||
"level": level,
|
||||
"stage": stage,
|
||||
"msg": msg,
|
||||
"ts": datetime.now(timezone.utc).isoformat(),
|
||||
}
|
||||
push_detect_event(job_id, "log", payload)
|
||||
|
||||
|
||||
def stats(job_id: str | None, **kwargs) -> None:
|
||||
"""Emit a stats_update event. Pass only the fields that changed."""
|
||||
if not job_id:
|
||||
return
|
||||
s = PipelineStats(**kwargs)
|
||||
push_detect_event(job_id, "stats_update", dataclasses.asdict(s))
|
||||
|
||||
|
||||
def detection(
|
||||
job_id: str | None,
|
||||
brand: str,
|
||||
confidence: float,
|
||||
source: str,
|
||||
timestamp: float,
|
||||
duration: float = 0.0,
|
||||
content_type: str = "",
|
||||
frame_ref: int | None = None,
|
||||
) -> None:
|
||||
"""Emit a brand detection event."""
|
||||
if not job_id:
|
||||
return
|
||||
payload = {
|
||||
"brand": brand,
|
||||
"confidence": confidence,
|
||||
"source": source,
|
||||
"timestamp": timestamp,
|
||||
"duration": duration,
|
||||
"content_type": content_type,
|
||||
"frame_ref": frame_ref,
|
||||
}
|
||||
push_detect_event(job_id, "detection", payload)
|
||||
|
||||
|
||||
def job_complete(job_id: str | None, report: dict) -> None:
|
||||
"""Emit a job_complete event with the final report."""
|
||||
if not job_id:
|
||||
return
|
||||
payload = {"job_id": job_id, "report": report}
|
||||
push_detect_event(job_id, "job_complete", payload)
|
||||
@@ -1,25 +1,41 @@
|
||||
"""
|
||||
Stage 1 — Frame Extraction
|
||||
|
||||
Extracts frames from a video at a configurable FPS using FFmpeg.
|
||||
Extracts frames from a video at a configurable FPS using the core ffmpeg module.
|
||||
Emits log + stats_update SSE events as it works.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import subprocess
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import ffmpeg
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
|
||||
from core.ffmpeg.probe import probe_file
|
||||
from detect.events import push_detect_event
|
||||
from detect import emit
|
||||
from detect.models import Frame
|
||||
from detect.profiles.base import FrameExtractionConfig
|
||||
|
||||
|
||||
def _load_frames(tmpdir: Path, fps: float) -> list[Frame]:
    """Load the extracted frame_*.jpg files from *tmpdir* as Frame objects.

    Files are consumed in sorted (sequence) order; each frame's timestamp
    is derived from its index and the extraction *fps*.
    """
    return [
        Frame(
            sequence=index,
            chunk_id=0,
            timestamp=index / fps,
            image=np.array(Image.open(path)),
        )
        for index, path in enumerate(sorted(tmpdir.glob("frame_*.jpg")))
    ]
|
||||
|
||||
|
||||
def extract_frames(
    video_path: str,
    config: FrameExtractionConfig,
    job_id: str | None = None,
) -> list[Frame]:
    """
    Extract frames from video at the configured FPS.

    Uses ffmpeg-python to build the extraction pipeline,
    outputs JPEG files to a temp dir, then loads as numpy arrays.

    Args:
        video_path: Path to the input video file.
        config: FPS and max-frames settings for the extraction.
        job_id: Optional job id for SSE event emission (no events when None).

    Raises:
        RuntimeError: If the FFmpeg invocation fails.
    """
    probe = probe_file(video_path)
    duration = probe.duration or 0.0

    emit.log(job_id, "FrameExtractor", "INFO",
             f"Starting extraction: {Path(video_path).name} "
             f"({duration:.1f}s, {probe.width}x{probe.height}, fps={config.fps})")

    with tempfile.TemporaryDirectory() as tmpdir:
        # FFmpeg writes numbered JPEGs here; the dir is deleted after loading.
        pattern = str(Path(tmpdir) / "frame_%06d.jpg")

        stream = (
            ffmpeg
            .input(video_path)
            .filter("fps", fps=config.fps)
            .output(pattern, qscale=2, frames=config.max_frames)
            .overwrite_output()
        )

        try:
            stream.run(capture_stdout=True, capture_stderr=True, quiet=True)
        except ffmpeg.Error as e:
            stderr = e.stderr.decode() if e.stderr else "unknown error"
            emit.log(job_id, "FrameExtractor", "ERROR", f"FFmpeg failed: {stderr[:200]}")
            raise RuntimeError(f"FFmpeg failed: {stderr}") from e

        # Must happen inside the context manager, before tmpdir is removed.
        frames = _load_frames(Path(tmpdir), config.fps)

    emit.log(job_id, "FrameExtractor", "INFO", f"Extracted {len(frames)} frames")
    emit.stats(job_id, frames_extracted=len(frames))

    return frames
||||
76
detect/stages/scene_filter.py
Normal file
76
detect/stages/scene_filter.py
Normal file
@@ -0,0 +1,76 @@
|
||||
"""
|
||||
Stage 2 — Scene Filter
|
||||
|
||||
Removes near-duplicate frames using perceptual hashing (pHash).
|
||||
Frames with a hamming distance below the threshold are considered
|
||||
duplicates and dropped. This dramatically reduces work for downstream
|
||||
CV stages without losing unique visual content.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import imagehash
|
||||
from PIL import Image
|
||||
|
||||
from detect import emit
|
||||
from detect.models import Frame
|
||||
from detect.profiles.base import SceneFilterConfig
|
||||
|
||||
|
||||
def _compute_hashes(frames: list[Frame]) -> list[imagehash.ImageHash]:
    """Compute a perceptual hash (pHash) for every frame.

    Side effect: records the hash (string form) on each frame's
    ``perceptual_hash`` attribute. Returns the hashes aligned 1:1
    with the input frames.
    """
    hashes: list[imagehash.ImageHash] = []
    for frame in frames:
        phash = imagehash.phash(Image.fromarray(frame.image))
        frame.perceptual_hash = str(phash)
        hashes.append(phash)
    return hashes
|
||||
|
||||
|
||||
def _dedup(frames: list[Frame], hashes: list[imagehash.ImageHash], threshold: int) -> list[Frame]:
|
||||
"""Greedy dedup: keep a frame if it's sufficiently different from all kept frames."""
|
||||
kept = [frames[0]]
|
||||
kept_hashes = [hashes[0]]
|
||||
|
||||
for i in range(1, len(frames)):
|
||||
is_duplicate = any(hashes[i] - kh < threshold for kh in kept_hashes)
|
||||
if not is_duplicate:
|
||||
kept.append(frames[i])
|
||||
kept_hashes.append(hashes[i])
|
||||
|
||||
return kept
|
||||
|
||||
|
||||
def scene_filter(
    frames: list[Frame],
    config: SceneFilterConfig,
    job_id: str | None = None,
) -> list[Frame]:
    """
    Filter near-duplicate frames based on perceptual hash distance.

    Keeps the first frame in each group of similar frames. Returns a new
    list — the input list is not mutated (each frame's perceptual_hash
    attribute is filled in as a side effect of hashing, however).
    """
    if not config.enabled:
        emit.log(job_id, "SceneFilter", "INFO",
                 "Scene filter disabled, passing all frames through")
        return frames

    total = len(frames)
    if total == 0:
        return []

    emit.log(job_id, "SceneFilter", "INFO",
             f"Filtering {total} frames (hamming_threshold={config.hamming_threshold})")

    survivors = _dedup(frames, _compute_hashes(frames), config.hamming_threshold)

    removed = total - len(survivors)
    reduction = removed / total * 100  # total > 0 is guaranteed above

    emit.log(job_id, "SceneFilter", "INFO",
             f"Kept {len(survivors)} frames, dropped {removed} ({reduction:.0f}% reduction)")
    emit.stats(job_id, frames_extracted=total, frames_after_scene_filter=len(survivors))

    return survivors
|
||||
@@ -10,12 +10,16 @@ Opens: http://mpr.local.ar/detection/?job=<JOB_ID>
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import logging
|
||||
import random
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
|
||||
import redis
|
||||
|
||||
logging.basicConfig(level=logging.INFO, format="%(levelname)-7s %(name)s — %(message)s")
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
STAGES = ["FrameExtractor", "SceneFilter", "YOLODetector", "OCRStage", "BrandResolver", "VLMLocal", "Aggregator"]
|
||||
LEVELS = ["INFO", "INFO", "INFO", "INFO", "WARNING", "DEBUG", "ERROR"] # weighted toward INFO
|
||||
MESSAGES = {
|
||||
@@ -70,9 +74,9 @@ def main():
|
||||
r = redis.Redis(port=args.port, decode_responses=True)
|
||||
key = f"detect_events:{args.job}"
|
||||
|
||||
print(f"Pushing {args.count} log events to {key} (redis port {args.port})")
|
||||
print(f"Open: http://mpr.local.ar/detection/?job={args.job}")
|
||||
print()
|
||||
logger.info("Pushing %d log events to %s (redis port %d)", args.count, key, args.port)
|
||||
logger.info("Open: http://mpr.local.ar/detection/?job=%s", args.job)
|
||||
input("\nPress Enter to start...")
|
||||
|
||||
for i in range(args.count):
|
||||
stage = random.choice(STAGES)
|
||||
@@ -88,10 +92,10 @@ def main():
|
||||
}
|
||||
|
||||
r.rpush(key, json.dumps(event))
|
||||
print(f" {level:7s} {stage:16s} {msg[:60]}")
|
||||
logger.log(getattr(logging, level, logging.INFO), "[%s] %s", stage, msg)
|
||||
time.sleep(args.delay)
|
||||
|
||||
print(f"\nDone. {args.count} events pushed.")
|
||||
logger.info("Done. %d events pushed.", args.count)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
@@ -10,11 +10,15 @@ Opens: http://mpr.local.ar/detection/?job=<JOB_ID>
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
|
||||
import redis
|
||||
|
||||
logging.basicConfig(level=logging.INFO, format="%(levelname)-7s %(name)s — %(message)s")
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def ts():
|
||||
return datetime.now(timezone.utc).isoformat()
|
||||
@@ -25,7 +29,7 @@ def push(r, key, event):
|
||||
r.rpush(key, json.dumps(event))
|
||||
etype = event["event"]
|
||||
detail = event.get("msg", event.get("stage", ""))
|
||||
print(f" [{etype:14s}] {detail}")
|
||||
logger.info("[%s] %s", etype, detail)
|
||||
return event
|
||||
|
||||
|
||||
@@ -39,12 +43,11 @@ def main():
|
||||
r = redis.Redis(port=args.port, decode_responses=True)
|
||||
key = f"detect_events:{args.job}"
|
||||
|
||||
# Clear previous events for this job
|
||||
r.delete(key)
|
||||
|
||||
print(f"Simulating pipeline run → {key}")
|
||||
print(f"Open: http://mpr.local.ar/detection/?job={args.job}")
|
||||
print()
|
||||
logger.info("Simulating pipeline run → %s", key)
|
||||
logger.info("Open: http://mpr.local.ar/detection/?job=%s", args.job)
|
||||
input("\nPress Enter to start...")
|
||||
|
||||
delay = args.delay
|
||||
|
||||
@@ -171,7 +174,7 @@ def main():
|
||||
},
|
||||
}})
|
||||
|
||||
print(f"\nPipeline simulation complete.")
|
||||
logger.info("Pipeline simulation complete.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
60
tests/detect/manual/run_extract_filter.py
Normal file
60
tests/detect/manual/run_extract_filter.py
Normal file
@@ -0,0 +1,60 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Run FrameExtractor → SceneFilter on a real video and push events to Redis.
|
||||
|
||||
Usage:
|
||||
python tests/detect/manual/run_extract_filter.py [--job JOB_ID] [--port PORT]
|
||||
|
||||
Opens: http://mpr.local.ar/detection/?job=<JOB_ID>
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
|
||||
# Parse args early so we can set REDIS_URL before imports
parser = argparse.ArgumentParser()
parser.add_argument("--job", default="extract-filter-test")
parser.add_argument("--port", type=int, default=6382)
args = parser.parse_args()

# The detect package reads REDIS_URL at import time, so it must be set first.
os.environ["REDIS_URL"] = f"redis://localhost:{args.port}/0"
logging.basicConfig(level=logging.INFO, format="%(levelname)-7s %(name)s — %(message)s")

# Allow running from the repo root without installing the package.
sys.path.insert(0, ".")

from detect.profiles.soccer import SoccerBroadcastProfile
from detect.stages.frame_extractor import extract_frames
from detect.stages.scene_filter import scene_filter

logger = logging.getLogger(__name__)

# NOTE(review): hard-coded sample chunk — assumes this media file exists locally.
VIDEO = "media/out/chunks/95043d50-4df6-4ac8-bbd5-2ba873117c6e/chunk_0000.mp4"
|
||||
|
||||
|
||||
def main():
    """Run FrameExtractor then SceneFilter over the sample video, emitting SSE events."""
    profile = SoccerBroadcastProfile()

    logger.info("Job: %s", args.job)
    logger.info("Open: http://mpr.local.ar/detection/?job=%s", args.job)
    input("\nPress Enter to start...")

    # Stage 1: Extract frames (capped so a manual run stays quick)
    fe_config = profile.frame_extraction_config()
    fe_config.max_frames = 30
    logger.info("Extracting frames (fps=%s, max=%d)...", fe_config.fps, fe_config.max_frames)
    frames = extract_frames(VIDEO, fe_config, job_id=args.job)
    logger.info(" → %d frames extracted", len(frames))

    # Stage 2: Scene filter
    sf_config = profile.scene_filter_config()
    logger.info("Filtering scenes (hamming_threshold=%d)...", sf_config.hamming_threshold)
    survivors = scene_filter(frames, sf_config, job_id=args.job)
    logger.info(" → %d frames kept (%d dropped)", len(survivors), len(frames) - len(survivors))

    logger.info("Done.")


if __name__ == "__main__":
    main()
|
||||
34
tests/detect/manual/test_frame_extractor_e2e.py
Normal file
34
tests/detect/manual/test_frame_extractor_e2e.py
Normal file
@@ -0,0 +1,34 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
End-to-end test: run FrameExtractor and verify SSE events are emitted.
|
||||
|
||||
Usage:
|
||||
python tests/detect/manual/test_frame_extractor_e2e.py
|
||||
|
||||
Requires Redis running. Events appear at: http://mpr.local.ar/detection/?job=e2e-test
|
||||
"""
|
||||
|
||||
import logging
import sys

# Allow running from the repo root without installing the package.
sys.path.insert(0, ".")

from detect.profiles.soccer import SoccerBroadcastProfile
from detect.stages.frame_extractor import extract_frames

logger = logging.getLogger(__name__)

# NOTE(review): hard-coded sample chunk — assumes this media file exists locally.
VIDEO = "media/out/chunks/95043d50-4df6-4ac8-bbd5-2ba873117c6e/chunk_0000.mp4"
JOB_ID = "e2e-test"

profile = SoccerBroadcastProfile()
config = profile.frame_extraction_config()
config.max_frames = 20  # keep the manual run quick

logger.info("Extracting frames from %s (fps=%s, max=%d)", VIDEO, config.fps, config.max_frames)
logger.info("Open: http://mpr.local.ar/detection/?job=%s", JOB_ID)
input("\nPress Enter to start...")

frames = extract_frames(VIDEO, config, job_id=JOB_ID)
logger.info("Done: %d frames extracted", len(frames))
logger.info("Open http://mpr.local.ar/detection/?job=%s to see the events", JOB_ID)
|
||||
@@ -61,7 +61,7 @@ def test_extract_frames_with_events(monkeypatch):
|
||||
def mock_push(job_id, event_type, data):
|
||||
events.append((job_id, event_type, data))
|
||||
|
||||
monkeypatch.setattr("detect.stages.frame_extractor.push_detect_event", mock_push)
|
||||
monkeypatch.setattr("detect.emit.push_detect_event", mock_push)
|
||||
|
||||
video = _get_sample_video()
|
||||
config = FrameExtractionConfig(fps=1, max_frames=5)
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
"""
|
||||
End-to-end test: run FrameExtractor and verify SSE events are emitted.
|
||||
|
||||
Usage (manual):
|
||||
python tests/detect/test_frame_extractor_e2e.py
|
||||
|
||||
Requires Redis running on localhost:6381.
|
||||
Push events will appear at: http://mpr.local.ar/detection/?job=e2e-test
|
||||
"""
|
||||
|
||||
import sys
|
||||
sys.path.insert(0, ".")
|
||||
|
||||
from detect.profiles.soccer import SoccerBroadcastProfile
|
||||
from detect.stages.frame_extractor import extract_frames
|
||||
|
||||
VIDEO = "media/out/chunks/95043d50-4df6-4ac8-bbd5-2ba873117c6e/chunk_0000.mp4"
|
||||
JOB_ID = "e2e-test"
|
||||
|
||||
profile = SoccerBroadcastProfile()
|
||||
config = profile.frame_extraction_config()
|
||||
config.max_frames = 20 # keep it quick
|
||||
|
||||
print(f"Extracting frames from {VIDEO} (fps={config.fps}, max={config.max_frames})")
|
||||
frames = extract_frames(VIDEO, config, job_id=JOB_ID)
|
||||
print(f"Done: {len(frames)} frames extracted")
|
||||
print(f"Open http://mpr.local.ar/detection/?job={JOB_ID} to see the events")
|
||||
84
tests/detect/test_scene_filter.py
Normal file
84
tests/detect/test_scene_filter.py
Normal file
@@ -0,0 +1,84 @@
|
||||
"""Tests for SceneFilter stage."""
|
||||
|
||||
import numpy as np
|
||||
import pytest
|
||||
|
||||
from detect.models import Frame
|
||||
from detect.profiles.base import SceneFilterConfig
|
||||
from detect.stages.scene_filter import scene_filter
|
||||
|
||||
|
||||
def _make_frame(seq: int, color: tuple[int, int, int] = (128, 128, 128)) -> Frame:
    """Build a 64x64 solid-color test Frame at half-second spacing."""
    pixels = np.full((64, 64, 3), color, dtype=np.uint8)
    return Frame(sequence=seq, chunk_id=0, timestamp=seq * 0.5, image=pixels)
|
||||
|
||||
|
||||
def test_identical_frames_deduped():
    """Ten identical frames collapse to just the first one."""
    frames = [_make_frame(n) for n in range(10)]
    kept = scene_filter(frames, SceneFilterConfig(hamming_threshold=8))

    # All identical → only the first frame survives
    assert len(kept) == 1
    assert kept[0].sequence == 0
|
||||
|
||||
|
||||
def _make_noisy_frame(seq: int, seed: int) -> Frame:
    """Build a 64x64 random-noise Frame — visually unique per seed."""
    noise = np.random.RandomState(seed).randint(0, 256, (64, 64, 3), dtype=np.uint8)
    return Frame(sequence=seq, chunk_id=0, timestamp=seq * 0.5, image=noise)
|
||||
|
||||
|
||||
def test_different_frames_kept():
    """Visually distinct noise frames should mostly survive filtering."""
    frames = [_make_noisy_frame(n, seed=n * 1000) for n in range(5)]
    kept = scene_filter(frames, SceneFilterConfig(hamming_threshold=8))

    # Random noise frames are visually distinct → most should survive
    assert len(kept) >= 3
|
||||
|
||||
|
||||
def test_disabled_passes_all():
    """With the stage disabled, every frame passes through untouched."""
    frames = [_make_frame(n) for n in range(5)]
    assert len(scene_filter(frames, SceneFilterConfig(enabled=False))) == 5
|
||||
|
||||
|
||||
def test_empty_input():
    """An empty frame list yields an empty result, not an error."""
    assert scene_filter([], SceneFilterConfig(hamming_threshold=8)) == []
|
||||
|
||||
|
||||
def test_single_frame():
    """A lone frame is always kept."""
    only = [_make_frame(0)]
    assert len(scene_filter(only, SceneFilterConfig(hamming_threshold=8))) == 1
|
||||
|
||||
|
||||
def test_hashes_populated():
    """scene_filter records a perceptual hash on every input frame."""
    frames = [_make_frame(n, color=(n * 50, 100, 200)) for n in range(3)]
    scene_filter(frames, SceneFilterConfig(hamming_threshold=8))

    assert all(f.perceptual_hash != "" for f in frames)
|
||||
|
||||
|
||||
def test_events_emitted(monkeypatch):
    """Running with a job_id pushes both log and stats_update events."""
    captured = []
    monkeypatch.setattr("detect.emit.push_detect_event",
                        lambda job_id, etype, data: captured.append((etype, data)))

    scene_filter([_make_frame(n) for n in range(5)],
                 SceneFilterConfig(hamming_threshold=8), job_id="test-job")

    seen = {etype for etype, _ in captured}
    assert "log" in seen
    assert "stats_update" in seen
|
||||
@@ -9,6 +9,7 @@ COPY framework/ ./framework/
|
||||
COPY detection-app/ ./detection-app/
|
||||
|
||||
WORKDIR /ui/detection-app
|
||||
ENV CI=true
|
||||
RUN pnpm install
|
||||
|
||||
EXPOSE 5175
|
||||
|
||||
@@ -3,6 +3,7 @@ import { ref } from 'vue'
|
||||
import { SSEDataSource, Panel, LayoutGrid } from 'mpr-ui-framework'
|
||||
import 'mpr-ui-framework/src/tokens.css'
|
||||
import LogPanel from './panels/LogPanel.vue'
|
||||
import FunnelPanel from './panels/FunnelPanel.vue'
|
||||
import type { StatsUpdate } from './types/sse-contract'
|
||||
|
||||
const jobId = ref(new URLSearchParams(window.location.search).get('job') || 'test-job')
|
||||
@@ -39,7 +40,7 @@ source.connect()
|
||||
<span class="job-id">job: {{ jobId }}</span>
|
||||
</header>
|
||||
|
||||
<LayoutGrid :columns="2" :rows="1" gap="var(--space-2)">
|
||||
<LayoutGrid :columns="2" :rows="2" gap="var(--space-2)">
|
||||
<Panel title="Stats" :status="status">
|
||||
<div class="stats" v-if="stats">
|
||||
<div class="stat" v-for="s in [
|
||||
@@ -57,6 +58,8 @@ source.connect()
|
||||
<div v-else class="empty">Waiting for stats...</div>
|
||||
</Panel>
|
||||
|
||||
<FunnelPanel :source="source" :status="status" />
|
||||
|
||||
<LogPanel :source="source" :status="status" />
|
||||
</LayoutGrid>
|
||||
</div>
|
||||
|
||||
56
ui/detection-app/src/panels/FunnelPanel.vue
Normal file
56
ui/detection-app/src/panels/FunnelPanel.vue
Normal file
@@ -0,0 +1,56 @@
|
||||
<script setup lang="ts">
// Processing-funnel panel: accumulates stats_update SSE snapshots over the
// panel's lifetime and renders them as a filled time series
// (frames → after-filter → regions → OCR-resolved).
import { ref, computed } from 'vue'
import { Panel } from 'mpr-ui-framework'
import TimeSeriesRenderer from 'mpr-ui-framework/src/renderers/TimeSeriesRenderer.vue'
import type { DataSource } from 'mpr-ui-framework'
import type { StatsUpdate } from '../types/sse-contract'

const props = defineProps<{
  source: DataSource
  status?: 'idle' | 'live' | 'processing' | 'error'
}>()

// Accumulate stats snapshots over time
const snapshots = ref<{ ts: number; stats: StatsUpdate }[]>([])
// Chart x-axis is seconds relative to component setup time.
const startTime = Date.now() / 1000

// NOTE(review): this subscription is never explicitly torn down — assumes the
// DataSource drops listeners when it is disposed; confirm unmount behavior.
props.source.on<StatsUpdate>('stats_update', (e) => {
  snapshots.value.push({ ts: Date.now() / 1000 - startTime, stats: e })
})

// Order here must match the row order produced by chartData below.
const series = [
  { label: 'Frames', color: '#4f9cf9' },
  { label: 'After filter', color: '#3ecf8e' },
  { label: 'Regions', color: '#f5a623' },
  { label: 'OCR resolved', color: '#a78bfa' },
]

// uPlot AlignedData layout: [timestamps, series1, series2, ...].
const chartData = computed(() => {
  const timestamps = snapshots.value.map((s) => s.ts)
  const frames = snapshots.value.map((s) => s.stats.frames_extracted)
  const filtered = snapshots.value.map((s) => s.stats.frames_after_scene_filter)
  const regions = snapshots.value.map((s) => s.stats.regions_detected)
  const ocr = snapshots.value.map((s) => s.stats.regions_resolved_by_ocr)
  return [timestamps, frames, filtered, regions, ocr] as const
})
</script>

<template>
  <Panel title="Processing Funnel" :status="status">
    <!-- Render the chart only once at least one snapshot has arrived -->
    <TimeSeriesRenderer
      v-if="snapshots.length > 0"
      :series="series"
      :data="chartData"
      :stacked="true"
    />
    <div v-else class="empty">Waiting for stats...</div>
  </Panel>
</template>

<style scoped>
.empty {
  color: var(--text-dim);
  padding: var(--space-6);
  text-align: center;
}
</style>
|
||||
@@ -11,7 +11,8 @@
|
||||
},
|
||||
"dependencies": {
|
||||
"vue": "^3.5",
|
||||
"pinia": "^2.2"
|
||||
"pinia": "^2.2",
|
||||
"uplot": "^1.6"
|
||||
},
|
||||
"devDependencies": {
|
||||
"typescript": "^5.6",
|
||||
|
||||
8
ui/framework/pnpm-lock.yaml
generated
8
ui/framework/pnpm-lock.yaml
generated
@@ -11,6 +11,9 @@ importers:
|
||||
pinia:
|
||||
specifier: ^2.2
|
||||
version: 2.3.1(typescript@5.9.3)(vue@3.5.30(typescript@5.9.3))
|
||||
uplot:
|
||||
specifier: ^1.6
|
||||
version: 1.6.32
|
||||
vue:
|
||||
specifier: ^3.5
|
||||
version: 3.5.30(typescript@5.9.3)
|
||||
@@ -748,6 +751,9 @@ packages:
|
||||
engines: {node: '>=14.17'}
|
||||
hasBin: true
|
||||
|
||||
uplot@1.6.32:
|
||||
resolution: {integrity: sha512-KIMVnG68zvu5XXUbC4LQEPnhwOxBuLyW1AHtpm6IKTXImkbLgkMy+jabjLgSLMasNuGGzQm/ep3tOkyTxpiQIw==}
|
||||
|
||||
vite-node@2.1.9:
|
||||
resolution: {integrity: sha512-AM9aQ/IPrW/6ENLQg3AGY4K1N2TGZdR5e4gu/MmmR2xR3Ll1+dib+nook92g4TV3PXVyeyxdWwtaCAiUL0hMxA==}
|
||||
engines: {node: ^18.0.0 || >=20.0.0}
|
||||
@@ -1460,6 +1466,8 @@ snapshots:
|
||||
|
||||
typescript@5.9.3: {}
|
||||
|
||||
uplot@1.6.32: {}
|
||||
|
||||
vite-node@2.1.9:
|
||||
dependencies:
|
||||
cac: 6.7.14
|
||||
|
||||
@@ -10,3 +10,4 @@ export { default as LayoutGrid } from './components/LayoutGrid.vue'
|
||||
|
||||
// Renderers
|
||||
export { default as LogRenderer } from './renderers/LogRenderer.vue'
|
||||
export { default as TimeSeriesRenderer } from './renderers/TimeSeriesRenderer.vue'
|
||||
|
||||
101
ui/framework/src/renderers/TimeSeriesRenderer.vue
Normal file
101
ui/framework/src/renderers/TimeSeriesRenderer.vue
Normal file
@@ -0,0 +1,101 @@
|
||||
<script setup lang="ts">
// Generic uPlot-backed time-series renderer. The chart is created on mount
// (or lazily when data first arrives), updated in place on data changes, and
// resized to track its container via ResizeObserver.
import { ref, onMounted, onUnmounted, watch, nextTick } from 'vue'
import uPlot from 'uplot'
import 'uplot/dist/uPlot.min.css'

// One entry per plotted series (excluding the leading time column).
export interface TimeSeriesSeries {
  label: string
  color: string
}

const props = withDefaults(defineProps<{
  /** Array of series configs (label + color) */
  series: TimeSeriesSeries[]
  /** Data: [timestamps[], series1[], series2[], ...] */
  data: uPlot.AlignedData
  /** Chart title (optional) */
  title?: string
  /** Stacked area mode */
  stacked?: boolean
}>(), {
  stacked: false,
})

const container = ref<HTMLElement | null>(null)
// Current uPlot instance; null until the chart has been created.
let chart: uPlot | null = null

// Translate props into uPlot options, sized from the container when available.
function buildOpts(): uPlot.Options {
  const seriesOpts: uPlot.Series[] = [
    { label: 'Time' }, // x-axis column
    ...props.series.map((s) => ({
      label: s.label,
      stroke: s.color,
      // NOTE(review): "stacked" only adds a translucent fill ('40' is a hex
      // alpha suffix); values are not summed into true stacked areas — confirm intent.
      fill: props.stacked ? s.color + '40' : undefined,
      width: 2,
    })),
  ]

  return {
    // Fallback dimensions cover the pre-layout case before first measure.
    width: container.value?.clientWidth ?? 400,
    height: container.value?.clientHeight ?? 200,
    series: seriesOpts,
    axes: [
      { stroke: '#555568', grid: { stroke: '#2e2e3822' } },
      { stroke: '#555568', grid: { stroke: '#2e2e3822' } },
    ],
    cursor: { show: true },
    legend: { show: true },
  }
}

// Build (or rebuild) the chart inside the container element.
function createChart() {
  if (!container.value) return
  if (chart) chart.destroy() // drop any previous instance before re-creating
  chart = new uPlot(buildOpts(), props.data, container.value)
}

// Keep the canvas matched to the container's current size.
function resize() {
  if (!chart || !container.value) return
  chart.setSize({
    width: container.value.clientWidth,
    height: container.value.clientHeight,
  })
}

// New data: update in place when the chart exists; otherwise create it lazily
// (covers data arriving before the mount tick has run createChart).
watch(() => props.data, (newData) => {
  if (chart) {
    chart.setData(newData)
  } else {
    nextTick(createChart)
  }
}, { deep: true })

onMounted(() => {
  nextTick(createChart)
  // Observe the container so the chart tracks layout changes.
  const observer = new ResizeObserver(resize)
  if (container.value) observer.observe(container.value)
  // Registering onUnmounted inside onMounted keeps observer + chart cleanup
  // scoped to the instances created above.
  onUnmounted(() => {
    observer.disconnect()
    chart?.destroy()
    chart = null
  })
})
</script>

<template>
  <div ref="container" class="timeseries-renderer" />
</template>

<style scoped>
.timeseries-renderer {
  width: 100%;
  height: 100%;
  min-height: 150px;
}

.timeseries-renderer :deep(.u-legend) {
  font-family: var(--font-mono);
  font-size: var(--font-size-sm);
  color: var(--text-secondary);
}
</style>
|
||||
Reference in New Issue
Block a user