commit dfa3c12514 (parent 95246c5452)
Date: 2026-03-26 01:30:26 -03:00
8 changed files with 740 additions and 44 deletions


@@ -20,6 +20,7 @@ from detect.stages.scene_filter import scene_filter
from detect.stages.yolo_detector import detect_objects
from detect.stages.ocr_stage import run_ocr
from detect.stages.brand_resolver import resolve_brands
from detect.tracing import trace_node, flush as flush_traces

INFERENCE_URL = os.environ.get("INFERENCE_URL")  # None = local mode
@@ -66,9 +67,11 @@ def _emit_transition(state: DetectState, node: str, status: str):
def node_extract_frames(state: DetectState) -> dict:
    _emit_transition(state, "extract_frames", "running")
    with trace_node(state, "extract_frames") as span:
        profile = _get_profile(state)
        config = profile.frame_extraction_config()
        frames = extract_frames(state["video_path"], config, job_id=state.get("job_id"))
        span.set_output({"frames_extracted": len(frames)})
        _emit_transition(state, "extract_frames", "done")
        return {"frames": frames, "stats": PipelineStats(frames_extracted=len(frames))}
@@ -77,10 +80,12 @@ def node_extract_frames(state: DetectState) -> dict:
def node_filter_scenes(state: DetectState) -> dict:
    _emit_transition(state, "filter_scenes", "running")
    with trace_node(state, "filter_scenes") as span:
        profile = _get_profile(state)
        config = profile.scene_filter_config()
        frames = state.get("frames", [])
        kept = scene_filter(frames, config, job_id=state.get("job_id"))
        span.set_output({"frames_in": len(frames), "frames_kept": len(kept)})

        stats = state.get("stats", PipelineStats())
        stats.frames_after_scene_filter = len(kept)
@@ -92,15 +97,18 @@ def node_filter_scenes(state: DetectState) -> dict:
def node_detect_objects(state: DetectState) -> dict:
    _emit_transition(state, "detect_objects", "running")
    with trace_node(state, "detect_objects") as span:
        profile = _get_profile(state)
        config = profile.detection_config()
        frames = state.get("filtered_frames", [])
        job_id = state.get("job_id")
        all_boxes = detect_objects(frames, config, inference_url=INFERENCE_URL, job_id=job_id)
        total_regions = sum(len(boxes) for boxes in all_boxes.values())
        span.set_output({"frames": len(frames), "regions_detected": total_regions})

        stats = state.get("stats", PipelineStats())
        stats.regions_detected = total_regions

        _emit_transition(state, "detect_objects", "done")
        return {"boxes_by_frame": all_boxes, "stats": stats}
@@ -109,6 +117,7 @@ def node_detect_objects(state: DetectState) -> dict:
def node_run_ocr(state: DetectState) -> dict:
    _emit_transition(state, "run_ocr", "running")
    with trace_node(state, "run_ocr") as span:
        profile = _get_profile(state)
        config = profile.ocr_config()
        frames = state.get("filtered_frames", [])
@@ -116,6 +125,7 @@ def node_run_ocr(state: DetectState) -> dict:
        job_id = state.get("job_id")
        candidates = run_ocr(frames, boxes, config, inference_url=INFERENCE_URL, job_id=job_id)
        span.set_output({"regions_in": sum(len(b) for b in boxes.values()), "text_candidates": len(candidates)})

        stats = state.get("stats", PipelineStats())
        stats.regions_resolved_by_ocr = len(candidates)
@@ -127,6 +137,7 @@ def node_run_ocr(state: DetectState) -> dict:
def node_match_brands(state: DetectState) -> dict:
    _emit_transition(state, "match_brands", "running")
    with trace_node(state, "match_brands") as span:
        profile = _get_profile(state)
        dictionary = profile.brand_dictionary()
        resolver_config = profile.resolver_config()
@@ -137,6 +148,7 @@ def node_match_brands(state: DetectState) -> dict:
            candidates, dictionary, resolver_config,
            content_type=profile.name, job_id=job_id,
        )
        span.set_output({"matched": len(matched), "unresolved": len(unresolved)})
        _emit_transition(state, "match_brands", "done")
        return {"detections": matched, "unresolved_candidates": unresolved}
@@ -144,24 +156,33 @@ def node_match_brands(state: DetectState) -> dict:
def node_escalate_vlm(state: DetectState) -> dict:
    _emit_transition(state, "escalate_vlm", "running")
    with trace_node(state, "escalate_vlm") as span:
        job_id = state.get("job_id")
        emit.log(job_id, "VLMLocal", "INFO", "Stub: VLM escalation not yet implemented")
        span.set_output({"stub": True})
        _emit_transition(state, "escalate_vlm", "done")
        return {}


def node_escalate_cloud(state: DetectState) -> dict:
    _emit_transition(state, "escalate_cloud", "running")
    with trace_node(state, "escalate_cloud") as span:
        job_id = state.get("job_id")
        emit.log(job_id, "CloudLLM", "INFO", "Stub: cloud LLM escalation not yet implemented")
        span.set_output({"stub": True})
        _emit_transition(state, "escalate_cloud", "done")
        return {}


def node_compile_report(state: DetectState) -> dict:
    _emit_transition(state, "compile_report", "running")
    with trace_node(state, "compile_report") as span:
        job_id = state.get("job_id")
        profile = _get_profile(state)
        detections = state.get("detections", [])
        report = profile.aggregate(detections)
@@ -174,7 +195,9 @@ def node_compile_report(state: DetectState) -> dict:
            "content_type": report.content_type,
            "brands": {k: {"total_appearances": v.total_appearances} for k, v in report.brands.items()},
        })
        span.set_output({"brands": len(report.brands), "detections": len(report.timeline)})
        flush_traces()
        _emit_transition(state, "compile_report", "done")
        return {"report": report}

detect/tracing.py (new file)

@@ -0,0 +1,131 @@
"""
Langfuse tracing for the detection pipeline.
Provides span helpers that graph nodes use to record timing, frame counts,
and stage-level metadata. The Langfuse client is optional — if not configured
(no LANGFUSE_SECRET_KEY), tracing is a no-op.
Usage in graph nodes:
from detect.tracing import trace_node
def node_extract_frames(state):
with trace_node(state, "extract_frames") as span:
...
span.set_output({"frames": len(frames)})
return {...}
"""
from __future__ import annotations
import logging
import os
import time
from contextlib import contextmanager
from dataclasses import dataclass, field
logger = logging.getLogger(__name__)
_client = None
_enabled: bool | None = None
def _get_client():
"""Lazy-init Langfuse client. Returns None if not configured."""
global _client, _enabled
if _enabled is False:
return None
if _client is not None:
return _client
secret = os.environ.get("LANGFUSE_SECRET_KEY", "")
if not secret:
_enabled = False
logger.info("Langfuse not configured (no LANGFUSE_SECRET_KEY), tracing disabled")
return None
try:
from langfuse import Langfuse
_client = Langfuse()
_enabled = True
logger.info("Langfuse tracing enabled")
return _client
except Exception as e:
_enabled = False
logger.warning("Langfuse init failed: %s — tracing disabled", e)
return None
@dataclass
class SpanContext:
"""Wraps a Langfuse span with convenience methods."""
_span: object | None = None
_start: float = field(default_factory=time.monotonic)
metadata: dict = field(default_factory=dict)
def set_output(self, output: dict) -> None:
self.metadata.update(output)
def set_error(self, error: str) -> None:
self.metadata["error"] = error
def _finish(self, status: str = "ok") -> None:
elapsed = time.monotonic() - self._start
self.metadata["duration_seconds"] = round(elapsed, 3)
self.metadata["status"] = status
if self._span is not None:
try:
self._span.update(
output=self.metadata,
level="ERROR" if status == "error" else "DEFAULT",
)
self._span.end()
except Exception as e:
logger.debug("Failed to end Langfuse span: %s", e)
@contextmanager
def trace_node(state: dict, node_name: str):
"""
Context manager that creates a Langfuse span for a pipeline node.
Usage:
with trace_node(state, "extract_frames") as span:
frames = do_work()
span.set_output({"frames": len(frames)})
"""
job_id = state.get("job_id", "unknown")
profile = state.get("profile_name", "")
client = _get_client()
span_obj = None
if client is not None:
try:
trace = client.trace(
name=f"detect:{job_id}",
session_id=job_id,
metadata={"profile": profile},
)
span_obj = trace.span(
name=node_name,
input={"job_id": job_id, "profile": profile},
)
except Exception as e:
logger.debug("Failed to create Langfuse span: %s", e)
ctx = SpanContext(_span=span_obj)
try:
yield ctx
ctx._finish("ok")
except Exception:
ctx._finish("error")
raise
def flush():
"""Flush pending Langfuse events. Call at pipeline end."""
if _client is not None:
try:
_client.flush()
except Exception as e:
logger.debug("Langfuse flush failed: %s", e)


@@ -26,6 +26,9 @@ google-cloud-run>=0.10.0

# GraphQL
strawberry-graphql[fastapi]>=0.311.0

# Observability
langfuse>=2.0.0

# Testing
pytest>=7.4.0
pytest-django>=4.7.0
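detect/tracing.py only gates on LANGFUSE_SECRET_KEY; the other variables below follow the Langfuse client's standard configuration and are an assumption here, shown with placeholder values:

# Sketch: environment needed to turn tracing on (placeholder values).
# detect/tracing.py checks LANGFUSE_SECRET_KEY explicitly; the public key
# and host are read by the Langfuse client itself (assumed standard SDK config).
import os

os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."  # placeholder
os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."  # placeholder
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"  # or a self-hosted URL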

tests/detect/manual/test_timeline_cost.py (new file)

@@ -0,0 +1,196 @@
#!/usr/bin/env python3
"""
Push detection + stats events to test TimelinePanel and CostStatsPanel.

Simulates a pipeline run with detections spread across video time, escalation
events, and accumulating cost — exercises both new phase 8 panels.

Usage:
    python tests/detect/manual/test_timeline_cost.py [--job JOB_ID] [--port PORT] [--delay SECS]

Opens: http://mpr.local.ar/detection/?job=<JOB_ID>
"""
import argparse
import json
import logging
import time
from datetime import datetime, timezone

import redis

logging.basicConfig(level=logging.INFO, format="%(levelname)-7s %(name)s %(message)s")
logger = logging.getLogger(__name__)

NODES = ["extract_frames", "filter_scenes", "detect_objects", "run_ocr",
         "match_brands", "escalate_vlm", "escalate_cloud", "compile_report"]

# Detections spread across video time with different sources:
# (brand, confidence, source, timestamp_seconds, duration_seconds)
DETECTIONS = [
    ("Nike", 0.97, "ocr", 2.0, 0.5),
    ("Nike", 0.95, "ocr", 4.5, 1.0),
    ("Emirates", 0.92, "ocr", 5.0, 2.0),
    ("Adidas", 0.89, "ocr", 8.0, 0.5),
    ("Nike", 0.94, "ocr", 12.0, 1.5),
    ("Coca-Cola", 0.85, "ocr", 15.0, 0.5),
    ("Emirates", 0.88, "ocr", 18.0, 2.0),
    ("Adidas", 0.91, "ocr", 22.0, 1.0),
    ("Mastercard", 0.78, "local_vlm", 25.0, 0.5),
    ("Nike", 0.96, "ocr", 28.0, 1.0),
    ("Emirates", 0.90, "ocr", 32.0, 2.0),
    ("Heineken", 0.72, "cloud_llm", 35.0, 0.5),
    ("Coca-Cola", 0.87, "ocr", 38.0, 0.5),
    ("Nike", 0.93, "ocr", 42.0, 1.5),
    ("Unknown", 0.65, "cloud_llm", 45.0, 0.5),
    ("Adidas", 0.90, "ocr", 48.0, 1.0),
    ("Emirates", 0.91, "ocr", 52.0, 2.0),
    ("Nike", 0.95, "ocr", 55.0, 1.0),
]


def ts():
    return datetime.now(timezone.utc).isoformat()


def push(r, key, event):
    event["ts"] = event.get("ts", ts())
    r.rpush(key, json.dumps(event))
    return event


def push_graph(r, key, active_node, status, delay):
    nodes = []
    for n in NODES:
        if n == active_node:
            nodes.append({"id": n, "status": status})
        elif NODES.index(n) < NODES.index(active_node):
            nodes.append({"id": n, "status": "done"})
        else:
            nodes.append({"id": n, "status": "pending"})
    push(r, key, {"event": "graph_update", "nodes": nodes})
    time.sleep(delay)


def push_stats(r, key, **overrides):
    base = {
        "event": "stats_update",
        "frames_extracted": 0, "frames_after_scene_filter": 0,
        "regions_detected": 0, "regions_resolved_by_ocr": 0,
        "regions_escalated_to_local_vlm": 0, "regions_escalated_to_cloud_llm": 0,
        "cloud_llm_calls": 0, "processing_time_seconds": 0, "estimated_cloud_cost_usd": 0,
    }
    base.update(overrides)
    push(r, key, base)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--job", default="timeline-cost-test")
    parser.add_argument("--port", type=int, default=6382)
    parser.add_argument("--delay", type=float, default=0.4)
    args = parser.parse_args()

    r = redis.Redis(port=args.port, decode_responses=True)
    key = f"detect_events:{args.job}"
    r.delete(key)

    logger.info("Pushing %d detections to %s", len(DETECTIONS), key)
    logger.info("Open: http://mpr.local.ar/detection/?job=%s", args.job)
    input("\nPress Enter to start...")
    delay = args.delay

    # Pipeline stages with progressive stats
    push_graph(r, key, "extract_frames", "running", delay)
    push_stats(r, key, frames_extracted=120, processing_time_seconds=3.2)
    push_graph(r, key, "extract_frames", "done", delay)

    push_graph(r, key, "filter_scenes", "running", delay)
    push_stats(r, key, frames_extracted=120, frames_after_scene_filter=45, processing_time_seconds=5.1)
    push_graph(r, key, "filter_scenes", "done", delay)

    push_graph(r, key, "detect_objects", "running", delay)
    push_stats(r, key, frames_extracted=120, frames_after_scene_filter=45,
               regions_detected=38, processing_time_seconds=12.4)
    push_graph(r, key, "detect_objects", "done", delay)

    push_graph(r, key, "run_ocr", "running", delay)
    push_stats(r, key, frames_extracted=120, frames_after_scene_filter=45,
               regions_detected=38, regions_resolved_by_ocr=28, processing_time_seconds=18.7)
    push_graph(r, key, "run_ocr", "done", delay)

    # Brand matching — push detections one by one
    push_graph(r, key, "match_brands", "running", delay)
    for i, (brand, conf, source, timestamp, duration) in enumerate(DETECTIONS):
        if source != "ocr":
            continue
        push(r, key, {"event": "detection",
                      "brand": brand, "confidence": conf, "source": source,
                      "timestamp": timestamp, "duration": duration,
                      "content_type": "soccer_broadcast", "frame_ref": i * 3})
        logger.info("[%d] %s %.2f %s t=%.1fs", i + 1, brand, conf, source, timestamp)
        time.sleep(delay * 0.3)
    push_graph(r, key, "match_brands", "done", delay)

    # VLM escalation
    push_graph(r, key, "escalate_vlm", "running", delay)
    push(r, key, {"event": "log", "level": "INFO", "stage": "VLMLocal",
                  "msg": "Processing 3 unresolved crops with moondream2"})
    time.sleep(delay)
    for i, (brand, conf, source, timestamp, duration) in enumerate(DETECTIONS):
        if source != "local_vlm":
            continue
        push(r, key, {"event": "detection",
                      "brand": brand, "confidence": conf, "source": source,
                      "timestamp": timestamp, "duration": duration,
                      "content_type": "soccer_broadcast", "frame_ref": i * 3})
        logger.info("[vlm] %s %.2f t=%.1fs", brand, conf, timestamp)
        time.sleep(delay * 0.3)
    push_stats(r, key, frames_extracted=120, frames_after_scene_filter=45,
               regions_detected=38, regions_resolved_by_ocr=28,
               regions_escalated_to_local_vlm=3, processing_time_seconds=25.1,
               estimated_cloud_cost_usd=0)
    push_graph(r, key, "escalate_vlm", "done", delay)

    # Cloud escalation
    push_graph(r, key, "escalate_cloud", "running", delay)
    for i, (brand, conf, source, timestamp, duration) in enumerate(DETECTIONS):
        if source != "cloud_llm":
            continue
        push(r, key, {"event": "detection",
                      "brand": brand, "confidence": conf, "source": source,
                      "timestamp": timestamp, "duration": duration,
                      "content_type": "soccer_broadcast", "frame_ref": i * 3})
        logger.info("[cloud] %s %.2f t=%.1fs", brand, conf, timestamp)
        time.sleep(delay * 0.3)
    push_stats(r, key, frames_extracted=120, frames_after_scene_filter=45,
               regions_detected=38, regions_resolved_by_ocr=28,
               regions_escalated_to_local_vlm=3, regions_escalated_to_cloud_llm=2,
               cloud_llm_calls=2, processing_time_seconds=31.4,
               estimated_cloud_cost_usd=0.0042)
    push_graph(r, key, "escalate_cloud", "done", delay)

    # Report
    push_graph(r, key, "compile_report", "running", delay)
    push(r, key, {"event": "log", "level": "INFO", "stage": "Aggregator",
                  "msg": f"Report: {len(set(d[0] for d in DETECTIONS))} brands, {len(DETECTIONS)} detections"})
    push_graph(r, key, "compile_report", "done", delay)

    push(r, key, {"event": "job_complete", "job_id": args.job, "report": {
        "video_source": "soccer_clip.mp4",
        "content_type": "soccer_broadcast",
        "duration_seconds": 60.0,
    }})

    logger.info("Done. Check Timeline (brand bars over time) and Cost & Stats panels.")


if __name__ == "__main__":
    main()
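A quick way to verify what the script pushed, as a hypothetical check that is not part of the commit:

# Hypothetical sanity check: read the pushed events back from Redis and
# tally them by event type (key and port match the script's defaults).
import json
from collections import Counter

import redis

r = redis.Redis(port=6382, decode_responses=True)
raw = r.lrange("detect_events:timeline-cost-test", 0, -1)
print(Counter(json.loads(e)["event"] for e in raw))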


@@ -0,0 +1,34 @@
"""Tests for Langfuse tracing — works without Langfuse configured (no-op mode)."""
import pytest
from detect.tracing import trace_node, SpanContext, flush
def test_trace_node_noop():
"""Without LANGFUSE_SECRET_KEY, tracing is a no-op but doesn't error."""
state = {"job_id": "test-job", "profile_name": "soccer_broadcast"}
with trace_node(state, "extract_frames") as span:
assert isinstance(span, SpanContext)
span.set_output({"frames": 42})
assert span.metadata["frames"] == 42
assert span.metadata["status"] == "ok"
assert "duration_seconds" in span.metadata
def test_trace_node_error():
"""Span records error status on exception."""
state = {"job_id": "test-job"}
with pytest.raises(ValueError):
with trace_node(state, "bad_node") as span:
raise ValueError("boom")
assert span.metadata["status"] == "error"
def test_flush_noop():
"""Flush works when Langfuse is not configured."""
flush()
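One possible hardening, sketched here rather than part of the commit: pin no-op mode regardless of the developer's environment. Since the module caches its enabled state, the globals are reset too.

# Hypothetical extra test: force no-op mode even if the developer has
# Langfuse configured, by clearing the env var and the module's cache.
import detect.tracing as tracing


def test_trace_node_noop_forced(monkeypatch):
    monkeypatch.delenv("LANGFUSE_SECRET_KEY", raising=False)
    monkeypatch.setattr(tracing, "_client", None)
    monkeypatch.setattr(tracing, "_enabled", None)
    with trace_node({"job_id": "t"}, "extract_frames") as span:
        span.set_output({"ok": True})
    assert span.metadata["status"] == "ok"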


@@ -7,6 +7,8 @@ import FunnelPanel from './panels/FunnelPanel.vue'
import PipelineGraphPanel from './panels/PipelineGraphPanel.vue'
import FramePanel from './panels/FramePanel.vue'
import BrandTablePanel from './panels/BrandTablePanel.vue'
import TimelinePanel from './panels/TimelinePanel.vue'
import CostStatsPanel from './panels/CostStatsPanel.vue'
import type { StatsUpdate } from './types/sse-contract'

const jobId = ref(new URLSearchParams(window.location.search).get('job') || 'test-job')
@@ -69,6 +71,10 @@ source.connect()
      <BrandTablePanel :source="source" :status="status" />

      <TimelinePanel :source="source" :status="status" />
      <CostStatsPanel :source="source" :status="status" />

      <LogPanel :source="source" :status="status" />
    </LayoutGrid>
  </div>

panels/CostStatsPanel.vue (new file)

@@ -0,0 +1,123 @@
<script setup lang="ts">
import { ref, computed } from 'vue'
import { Panel } from 'mpr-ui-framework'
import type { DataSource } from 'mpr-ui-framework'
import type { StatsUpdate, Detection } from '../types/sse-contract'

const props = defineProps<{
  source: DataSource
  status?: 'idle' | 'live' | 'processing' | 'error'
}>()

const stats = ref<StatsUpdate | null>(null)
const detectionCount = ref(0)
const confidenceSum = ref(0)

props.source.on<StatsUpdate>('stats_update', (e) => {
  stats.value = e
})

props.source.on<Detection>('detection', (e) => {
  detectionCount.value++
  confidenceSum.value += e.confidence
})

const avgConfidence = computed(() => {
  if (detectionCount.value === 0) return 0
  return confidenceSum.value / detectionCount.value
})

const escalationRatio = computed(() => {
  if (!stats.value || stats.value.regions_detected === 0) return 0
  return (stats.value.regions_escalated_to_local_vlm + stats.value.regions_escalated_to_cloud_llm)
    / stats.value.regions_detected
})

interface Metric {
  label: string
  value: string
  sub?: string
  color?: string
}

const metrics = computed<Metric[]>(() => {
  if (!stats.value) return []
  const s = stats.value
  return [
    {
      label: 'Cloud cost',
      value: `$${s.estimated_cloud_cost_usd.toFixed(4)}`,
      sub: `${s.cloud_llm_calls} calls`,
      color: s.estimated_cloud_cost_usd > 0.01 ? '#e05252' : '#3ecf8e',
    },
    {
      label: 'Escalation ratio',
      value: `${(escalationRatio.value * 100).toFixed(1)}%`,
      sub: `${s.regions_escalated_to_local_vlm + s.regions_escalated_to_cloud_llm} / ${s.regions_detected} regions`,
      color: escalationRatio.value > 0.3 ? '#f5a623' : '#3ecf8e',
    },
    {
      label: 'Avg confidence',
      value: `${(avgConfidence.value * 100).toFixed(1)}%`,
      sub: `${detectionCount.value} detections`,
      color: avgConfidence.value > 0.8 ? '#3ecf8e' : '#f5a623',
    },
    {
      label: 'Processing time',
      value: `${s.processing_time_seconds.toFixed(1)}s`,
    },
  ]
})
</script>

<template>
  <Panel title="Cost & Stats" :status="status">
    <div class="cost-stats" v-if="stats">
      <div class="metric" v-for="m in metrics" :key="m.label">
        <span class="label">{{ m.label }}</span>
        <span class="value" :style="m.color ? { color: m.color } : {}">{{ m.value }}</span>
        <span class="sub" v-if="m.sub">{{ m.sub }}</span>
      </div>
    </div>
    <div v-else class="empty">Waiting for stats...</div>
  </Panel>
</template>

<style scoped>
.cost-stats {
  display: grid;
  grid-template-columns: 1fr 1fr;
  gap: var(--space-3);
  padding: var(--space-3);
}
.metric {
  background: var(--surface-2);
  border-radius: var(--panel-radius);
  padding: var(--space-3);
  display: flex;
  flex-direction: column;
  gap: var(--space-1);
}
.label {
  font-size: var(--font-size-sm);
  color: var(--text-dim);
}
.value {
  font-size: 22px;
  font-weight: 600;
}
.sub {
  font-size: 11px;
  color: var(--text-dim);
}
.empty {
  color: var(--text-dim);
  padding: var(--space-6);
  text-align: center;
}
</style>
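The panel's derived metrics are plain ratios; a Python sketch of the same math, handy for cross-checking displayed values against pipeline stats (field names as in the stats_update events above):

# Sketch: CostStatsPanel's escalation-ratio math, mirrored in Python.
def escalation_ratio(stats: dict) -> float:
    if stats.get("regions_detected", 0) == 0:
        return 0.0
    escalated = (stats["regions_escalated_to_local_vlm"]
                 + stats["regions_escalated_to_cloud_llm"])
    return escalated / stats["regions_detected"]

# With the manual test script's final stats: (3 + 2) / 38
stats = {"regions_detected": 38,
         "regions_escalated_to_local_vlm": 3,
         "regions_escalated_to_cloud_llm": 2}
print(f"{escalation_ratio(stats):.1%}")  # 13.2%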

panels/TimelinePanel.vue (new file)

@@ -0,0 +1,180 @@
<script setup lang="ts">
import { ref, computed } from 'vue'
import { Panel } from 'mpr-ui-framework'
import type { DataSource } from 'mpr-ui-framework'
import type { Detection } from '../types/sse-contract'

const props = defineProps<{
  source: DataSource
  status?: 'idle' | 'live' | 'processing' | 'error'
}>()

interface TimelineEntry {
  brand: string
  timestamp: number
  duration: number
  confidence: number
  source: string
}

const entries = ref<TimelineEntry[]>([])

props.source.on<Detection>('detection', (e) => {
  entries.value.push({
    brand: e.brand,
    timestamp: e.timestamp,
    duration: e.duration || 0.5,
    confidence: e.confidence,
    source: e.source,
  })
})

// One row per unique brand, sorted by first appearance
const brands = computed(() => {
  const seen = new Map<string, number>()
  for (const e of entries.value) {
    if (!seen.has(e.brand)) seen.set(e.brand, e.timestamp)
  }
  return [...seen.entries()]
    .sort((a, b) => a[1] - b[1])
    .map(([brand]) => brand)
})

const maxTime = computed(() => {
  if (entries.value.length === 0) return 60
  return Math.max(...entries.value.map((e) => e.timestamp + e.duration)) * 1.1
})

const sourceColor: Record<string, string> = {
  ocr: '#3ecf8e',
  local_vlm: '#f5a623',
  cloud_llm: '#e05252',
  logo_match: '#4f9cf9',
}

function barStyle(entry: TimelineEntry) {
  const left = (entry.timestamp / maxTime.value) * 100
  const width = Math.max((entry.duration / maxTime.value) * 100, 1)
  const color = sourceColor[entry.source] || '#a78bfa'
  const opacity = 0.4 + entry.confidence * 0.6
  return {
    left: `${left}%`,
    width: `${width}%`,
    background: color,
    opacity,
  }
}
</script>

<template>
  <Panel title="Detection Timeline" :status="status">
    <div class="timeline" v-if="brands.length > 0">
      <div class="row" v-for="brand in brands" :key="brand">
        <span class="brand-label">{{ brand }}</span>
        <div class="track">
          <div
            v-for="(entry, i) in entries.filter((e) => e.brand === brand)"
            :key="i"
            class="bar"
            :style="barStyle(entry)"
            :title="`${entry.brand} — ${entry.source} (${(entry.confidence * 100).toFixed(0)}%) @ ${entry.timestamp.toFixed(1)}s`"
          />
        </div>
      </div>
      <div class="time-axis">
        <span>0s</span>
        <span>{{ (maxTime / 2).toFixed(0) }}s</span>
        <span>{{ maxTime.toFixed(0) }}s</span>
      </div>
      <div class="legend">
        <span v-for="(color, source) in sourceColor" :key="source" class="legend-item">
          <span class="legend-dot" :style="{ background: color }" />
          {{ source }}
        </span>
      </div>
    </div>
    <div v-else class="empty">Waiting for detections...</div>
  </Panel>
</template>

<style scoped>
.timeline {
  padding: var(--space-2);
  display: flex;
  flex-direction: column;
  gap: var(--space-1);
  height: 100%;
  overflow-y: auto;
}
.row {
  display: flex;
  align-items: center;
  gap: var(--space-2);
  height: 24px;
}
.brand-label {
  width: 100px;
  flex-shrink: 0;
  font-size: var(--font-size-sm);
  color: var(--text-secondary);
  text-align: right;
  overflow: hidden;
  text-overflow: ellipsis;
  white-space: nowrap;
}
.track {
  flex: 1;
  position: relative;
  height: 16px;
  background: var(--surface-2);
  border-radius: 3px;
}
.bar {
  position: absolute;
  top: 2px;
  height: 12px;
  border-radius: 2px;
  min-width: 4px;
}
.time-axis {
  display: flex;
  justify-content: space-between;
  padding-left: 108px;
  font-size: 10px;
  color: var(--text-dim);
  margin-top: var(--space-1);
}
.legend {
  display: flex;
  gap: var(--space-3);
  padding-left: 108px;
  margin-top: var(--space-2);
  font-size: var(--font-size-sm);
  color: var(--text-dim);
}
.legend-item {
  display: flex;
  align-items: center;
  gap: 4px;
}
.legend-dot {
  width: 8px;
  height: 8px;
  border-radius: 50%;
  display: inline-block;
}
.empty {
  color: var(--text-dim);
  padding: var(--space-6);
  text-align: center;
}
</style>
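The bar geometry is straightforward arithmetic; a Python sketch of the mapping barStyle performs:

# Sketch: TimelinePanel's bar placement. left/width are percentages of the
# padded time axis (last detection end * 1.1), width floored at 1%, and
# opacity scales with confidence (0.4 .. 1.0).
def bar_style(timestamp: float, duration: float, confidence: float, max_time: float) -> dict:
    left = timestamp / max_time * 100
    width = max(duration / max_time * 100, 1.0)
    opacity = 0.4 + confidence * 0.6
    return {"left": f"{left:.1f}%", "width": f"{width:.1f}%", "opacity": round(opacity, 2)}

# Last detection in the manual script: Nike at t=55.0s for 1.0s, conf 0.95
max_time = (55.0 + 1.0) * 1.1  # 61.6s
print(bar_style(55.0, 1.0, 0.95, max_time))
# {'left': '89.3%', 'width': '1.6%', 'opacity': 0.97}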