This commit is contained in:
2026-03-23 15:18:23 -03:00
parent 5ed876d694
commit b57da622cb
17 changed files with 554 additions and 103 deletions

68
detect/emit.py Normal file
View File

@@ -0,0 +1,68 @@
"""
Event emission helpers for detection pipeline stages.
Single place that knows how to build event payloads.
Stages call these instead of constructing dicts or dataclasses directly.
"""
from __future__ import annotations
import dataclasses
from datetime import datetime, timezone
from detect.events import push_detect_event
from detect.models import PipelineStats
def log(job_id: str | None, stage: str, level: str, msg: str) -> None:
    """Emit a log event for *stage* at *level*; a falsy job_id is a no-op."""
    if not job_id:
        return
    push_detect_event(job_id, "log", {
        "level": level,
        "stage": stage,
        "msg": msg,
        "ts": datetime.now(timezone.utc).isoformat(),
    })
def stats(job_id: str | None, **kwargs) -> None:
    """Emit a stats_update event. Pass only the fields that changed.

    Unspecified fields take PipelineStats defaults; a falsy job_id is a no-op.
    """
    if not job_id:
        return
    snapshot = PipelineStats(**kwargs)
    push_detect_event(job_id, "stats_update", dataclasses.asdict(snapshot))
def detection(
    job_id: str | None,
    brand: str,
    confidence: float,
    source: str,
    timestamp: float,
    duration: float = 0.0,
    content_type: str = "",
    frame_ref: int | None = None,
) -> None:
    """Emit a brand detection event; a falsy job_id is a no-op."""
    if not job_id:
        return
    push_detect_event(
        job_id,
        "detection",
        {
            "brand": brand,
            "confidence": confidence,
            "source": source,
            "timestamp": timestamp,
            "duration": duration,
            "content_type": content_type,
            "frame_ref": frame_ref,
        },
    )
def job_complete(job_id: str | None, report: dict) -> None:
    """Emit a job_complete event carrying the final report; no-op without a job_id."""
    if not job_id:
        return
    push_detect_event(job_id, "job_complete", {"job_id": job_id, "report": report})

View File

@@ -1,25 +1,41 @@
""" """
Stage 1 — Frame Extraction Stage 1 — Frame Extraction
Extracts frames from a video at a configurable FPS using FFmpeg. Extracts frames from a video at a configurable FPS using the core ffmpeg module.
Emits log + stats_update SSE events as it works. Emits log + stats_update SSE events as it works.
""" """
from __future__ import annotations from __future__ import annotations
import subprocess
import tempfile import tempfile
from pathlib import Path from pathlib import Path
import ffmpeg
import numpy as np import numpy as np
from PIL import Image from PIL import Image
from core.ffmpeg.probe import probe_file from core.ffmpeg.probe import probe_file
from detect.events import push_detect_event from detect import emit
from detect.models import Frame from detect.models import Frame
from detect.profiles.base import FrameExtractionConfig from detect.profiles.base import FrameExtractionConfig
def _load_frames(tmpdir: Path, fps: float) -> list[Frame]:
"""Load extracted JPEG files into Frame objects."""
frame_files = sorted(tmpdir.glob("frame_*.jpg"))
frames = []
for i, fpath in enumerate(frame_files):
img = Image.open(fpath)
frame = Frame(
sequence=i,
chunk_id=0,
timestamp=i / fps,
image=np.array(img),
)
frames.append(frame)
return frames
def extract_frames( def extract_frames(
video_path: str, video_path: str,
config: FrameExtractionConfig, config: FrameExtractionConfig,
@@ -28,75 +44,37 @@ def extract_frames(
""" """
Extract frames from video at the configured FPS. Extract frames from video at the configured FPS.
Uses FFmpeg to decode frames as raw images, then loads them Uses ffmpeg-python to build the extraction pipeline,
as numpy arrays. Caps at config.max_frames. outputs JPEG files to a temp dir, then loads as numpy arrays.
""" """
probe = probe_file(video_path) probe = probe_file(video_path)
duration = probe.duration or 0.0 duration = probe.duration or 0.0
if job_id: emit.log(job_id, "FrameExtractor", "INFO",
push_detect_event(job_id, "log", { f"Starting extraction: {Path(video_path).name} "
"level": "INFO", f"({duration:.1f}s, {probe.width}x{probe.height}, fps={config.fps})")
"stage": "FrameExtractor",
"msg": f"Starting extraction: {Path(video_path).name} "
f"({duration:.1f}s, {probe.width}x{probe.height}, fps={config.fps})",
})
frames: list[Frame] = []
with tempfile.TemporaryDirectory() as tmpdir: with tempfile.TemporaryDirectory() as tmpdir:
pattern = str(Path(tmpdir) / "frame_%06d.jpg") pattern = str(Path(tmpdir) / "frame_%06d.jpg")
cmd = [ stream = (
"ffmpeg", "-i", video_path, ffmpeg
"-vf", f"fps={config.fps}", .input(video_path)
"-q:v", "2", .filter("fps", fps=config.fps)
"-frames:v", str(config.max_frames), .output(pattern, qscale=2, frames=config.max_frames)
pattern, .overwrite_output()
"-y", "-loglevel", "warning", )
]
result = subprocess.run(cmd, capture_output=True, text=True) try:
stream.run(capture_stdout=True, capture_stderr=True, quiet=True)
except ffmpeg.Error as e:
stderr = e.stderr.decode() if e.stderr else "unknown error"
emit.log(job_id, "FrameExtractor", "ERROR", f"FFmpeg failed: {stderr[:200]}")
raise RuntimeError(f"FFmpeg failed: {stderr}") from e
if result.returncode != 0: frames = _load_frames(Path(tmpdir), config.fps)
if job_id:
push_detect_event(job_id, "log", {
"level": "ERROR",
"stage": "FrameExtractor",
"msg": f"FFmpeg failed: {result.stderr[:200]}",
})
raise RuntimeError(f"FFmpeg failed: {result.stderr}")
frame_files = sorted(Path(tmpdir).glob("frame_*.jpg")) emit.log(job_id, "FrameExtractor", "INFO", f"Extracted {len(frames)} frames")
emit.stats(job_id, frames_extracted=len(frames))
for i, fpath in enumerate(frame_files):
img = Image.open(fpath)
arr = np.array(img)
timestamp = i / config.fps
frames.append(Frame(
sequence=i,
chunk_id=0,
timestamp=timestamp,
image=arr,
))
if job_id:
push_detect_event(job_id, "log", {
"level": "INFO",
"stage": "FrameExtractor",
"msg": f"Extracted {len(frames)} frames",
})
push_detect_event(job_id, "stats_update", {
"frames_extracted": len(frames),
"frames_after_scene_filter": 0,
"regions_detected": 0,
"regions_resolved_by_ocr": 0,
"regions_escalated_to_local_vlm": 0,
"regions_escalated_to_cloud_llm": 0,
"cloud_llm_calls": 0,
"processing_time_seconds": 0.0,
"estimated_cloud_cost_usd": 0.0,
})
return frames return frames

View File

@@ -0,0 +1,76 @@
"""
Stage 2 — Scene Filter
Removes near-duplicate frames using perceptual hashing (pHash).
Frames with a hamming distance below the threshold are considered
duplicates and dropped. This dramatically reduces work for downstream
CV stages without losing unique visual content.
"""
from __future__ import annotations
import imagehash
from PIL import Image
from detect import emit
from detect.models import Frame
from detect.profiles.base import SceneFilterConfig
def _compute_hashes(frames: list[Frame]) -> list[imagehash.ImageHash]:
    """Compute a perceptual hash per frame, stamping its string form onto the frame."""
    result: list[imagehash.ImageHash] = []
    for frame in frames:
        phash = imagehash.phash(Image.fromarray(frame.image))
        frame.perceptual_hash = str(phash)
        result.append(phash)
    return result
def _dedup(frames: list[Frame], hashes: list[imagehash.ImageHash], threshold: int) -> list[Frame]:
"""Greedy dedup: keep a frame if it's sufficiently different from all kept frames."""
kept = [frames[0]]
kept_hashes = [hashes[0]]
for i in range(1, len(frames)):
is_duplicate = any(hashes[i] - kh < threshold for kh in kept_hashes)
if not is_duplicate:
kept.append(frames[i])
kept_hashes.append(hashes[i])
return kept
def scene_filter(
    frames: list[Frame],
    config: SceneFilterConfig,
    job_id: str | None = None,
) -> list[Frame]:
    """
    Filter near-duplicate frames based on perceptual hash distance.

    Keeps the first frame in each group of similar frames.

    Always returns a new list; the Frame objects themselves are shared with
    the input.  Note: when the filter actually runs, hashing populates each
    input frame's ``perceptual_hash`` attribute in place as a side effect.

    Args:
        frames: Frames to filter, in order.
        config: Scene-filter settings (enabled flag + hamming threshold).
        job_id: Optional job id for SSE event emission; no events when None.

    Returns:
        The surviving frames, in their original order.
    """
    if not config.enabled:
        emit.log(job_id, "SceneFilter", "INFO", "Scene filter disabled, passing all frames through")
        # Copy so every code path honors the "returns a new list" contract.
        return list(frames)
    if not frames:
        return []
    emit.log(job_id, "SceneFilter", "INFO",
             f"Filtering {len(frames)} frames (hamming_threshold={config.hamming_threshold})")
    hashes = _compute_hashes(frames)
    kept = _dedup(frames, hashes, config.hamming_threshold)
    dropped = len(frames) - len(kept)
    pct = dropped / len(frames) * 100  # frames is guaranteed non-empty here
    emit.log(job_id, "SceneFilter", "INFO",
             f"Kept {len(kept)} frames, dropped {dropped} ({pct:.0f}% reduction)")
    emit.stats(job_id, frames_extracted=len(frames), frames_after_scene_filter=len(kept))
    return kept

View File

@@ -10,12 +10,16 @@ Opens: http://mpr.local.ar/detection/?job=<JOB_ID>
import argparse import argparse
import json import json
import logging
import random import random
import time import time
from datetime import datetime, timezone from datetime import datetime, timezone
import redis import redis
logging.basicConfig(level=logging.INFO, format="%(levelname)-7s %(name)s%(message)s")
logger = logging.getLogger(__name__)
STAGES = ["FrameExtractor", "SceneFilter", "YOLODetector", "OCRStage", "BrandResolver", "VLMLocal", "Aggregator"] STAGES = ["FrameExtractor", "SceneFilter", "YOLODetector", "OCRStage", "BrandResolver", "VLMLocal", "Aggregator"]
LEVELS = ["INFO", "INFO", "INFO", "INFO", "WARNING", "DEBUG", "ERROR"] # weighted toward INFO LEVELS = ["INFO", "INFO", "INFO", "INFO", "WARNING", "DEBUG", "ERROR"] # weighted toward INFO
MESSAGES = { MESSAGES = {
@@ -70,9 +74,9 @@ def main():
r = redis.Redis(port=args.port, decode_responses=True) r = redis.Redis(port=args.port, decode_responses=True)
key = f"detect_events:{args.job}" key = f"detect_events:{args.job}"
print(f"Pushing {args.count} log events to {key} (redis port {args.port})") logger.info("Pushing %d log events to %s (redis port %d)", args.count, key, args.port)
print(f"Open: http://mpr.local.ar/detection/?job={args.job}") logger.info("Open: http://mpr.local.ar/detection/?job=%s", args.job)
print() input("\nPress Enter to start...")
for i in range(args.count): for i in range(args.count):
stage = random.choice(STAGES) stage = random.choice(STAGES)
@@ -88,10 +92,10 @@ def main():
} }
r.rpush(key, json.dumps(event)) r.rpush(key, json.dumps(event))
print(f" {level:7s} {stage:16s} {msg[:60]}") logger.log(getattr(logging, level, logging.INFO), "[%s] %s", stage, msg)
time.sleep(args.delay) time.sleep(args.delay)
print(f"\nDone. {args.count} events pushed.") logger.info("Done. %d events pushed.", args.count)
if __name__ == "__main__": if __name__ == "__main__":

View File

@@ -10,11 +10,15 @@ Opens: http://mpr.local.ar/detection/?job=<JOB_ID>
import argparse import argparse
import json import json
import logging
import time import time
from datetime import datetime, timezone from datetime import datetime, timezone
import redis import redis
logging.basicConfig(level=logging.INFO, format="%(levelname)-7s %(name)s%(message)s")
logger = logging.getLogger(__name__)
def ts(): def ts():
return datetime.now(timezone.utc).isoformat() return datetime.now(timezone.utc).isoformat()
@@ -25,7 +29,7 @@ def push(r, key, event):
r.rpush(key, json.dumps(event)) r.rpush(key, json.dumps(event))
etype = event["event"] etype = event["event"]
detail = event.get("msg", event.get("stage", "")) detail = event.get("msg", event.get("stage", ""))
print(f" [{etype:14s}] {detail}") logger.info("[%s] %s", etype, detail)
return event return event
@@ -39,12 +43,11 @@ def main():
r = redis.Redis(port=args.port, decode_responses=True) r = redis.Redis(port=args.port, decode_responses=True)
key = f"detect_events:{args.job}" key = f"detect_events:{args.job}"
# Clear previous events for this job
r.delete(key) r.delete(key)
print(f"Simulating pipeline run → {key}") logger.info("Simulating pipeline run → %s", key)
print(f"Open: http://mpr.local.ar/detection/?job={args.job}") logger.info("Open: http://mpr.local.ar/detection/?job=%s", args.job)
print() input("\nPress Enter to start...")
delay = args.delay delay = args.delay
@@ -171,7 +174,7 @@ def main():
}, },
}}) }})
print(f"\nPipeline simulation complete.") logger.info("Pipeline simulation complete.")
if __name__ == "__main__": if __name__ == "__main__":

View File

@@ -0,0 +1,60 @@
#!/usr/bin/env python3
"""
Run FrameExtractor → SceneFilter on a real video and push events to Redis.
Usage:
python tests/detect/manual/run_extract_filter.py [--job JOB_ID] [--port PORT]
Opens: http://mpr.local.ar/detection/?job=<JOB_ID>
"""
import argparse
import logging
import os
import sys

# Parse args early so we can set REDIS_URL before imports
# (the detect.* modules presumably read REDIS_URL at import time -- TODO
# confirm; that is why argument parsing happens before they are loaded).
parser = argparse.ArgumentParser()
parser.add_argument("--job", default="extract-filter-test")
parser.add_argument("--port", type=int, default=6382)
args = parser.parse_args()
# Point the pipeline at the local Redis instance selected via --port.
os.environ["REDIS_URL"] = f"redis://localhost:{args.port}/0"
logging.basicConfig(level=logging.INFO, format="%(levelname)-7s %(name)s%(message)s")
# Allow running from the repo root without installing the package.
sys.path.insert(0, ".")
from detect.profiles.soccer import SoccerBroadcastProfile
from detect.stages.frame_extractor import extract_frames
from detect.stages.scene_filter import scene_filter
logger = logging.getLogger(__name__)
# Hard-coded sample chunk used by this manual smoke test.
VIDEO = "media/out/chunks/95043d50-4df6-4ac8-bbd5-2ba873117c6e/chunk_0000.mp4"
def main():
    """Run frame extraction then scene filtering on VIDEO, pushing events to Redis."""
    profile = SoccerBroadcastProfile()
    logger.info("Job: %s", args.job)
    logger.info("Open: http://mpr.local.ar/detection/?job=%s", args.job)
    input("\nPress Enter to start...")

    # Stage 1: frame extraction (capped so the manual run stays quick)
    fx_cfg = profile.frame_extraction_config()
    fx_cfg.max_frames = 30
    logger.info("Extracting frames (fps=%s, max=%d)...", fx_cfg.fps, fx_cfg.max_frames)
    extracted = extract_frames(VIDEO, fx_cfg, job_id=args.job)
    logger.info("%d frames extracted", len(extracted))

    # Stage 2: perceptual-hash scene filter
    sf_cfg = profile.scene_filter_config()
    logger.info("Filtering scenes (hamming_threshold=%d)...", sf_cfg.hamming_threshold)
    survivors = scene_filter(extracted, sf_cfg, job_id=args.job)
    logger.info("%d frames kept (%d dropped)", len(survivors), len(extracted) - len(survivors))
    logger.info("Done.")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,34 @@
#!/usr/bin/env python3
"""
End-to-end test: run FrameExtractor and verify SSE events are emitted.
Usage:
python tests/detect/manual/test_frame_extractor_e2e.py
Requires Redis running. Events appear at: http://mpr.local.ar/detection/?job=e2e-test
"""
import logging
import sys

# Allow running from the repo root without installing the package.
sys.path.insert(0, ".")

from detect.profiles.soccer import SoccerBroadcastProfile
from detect.stages.frame_extractor import extract_frames

# Without basicConfig the root logger has no handler, so logger.info() output
# is swallowed (the last-resort handler only shows WARNING and above).
# Format matches the other manual scripts in tests/detect/manual/.
logging.basicConfig(level=logging.INFO, format="%(levelname)-7s %(name)s%(message)s")
logger = logging.getLogger(__name__)

# Hard-coded sample chunk used by this manual e2e check.
VIDEO = "media/out/chunks/95043d50-4df6-4ac8-bbd5-2ba873117c6e/chunk_0000.mp4"
JOB_ID = "e2e-test"

profile = SoccerBroadcastProfile()
config = profile.frame_extraction_config()
config.max_frames = 20  # keep the manual run quick

logger.info("Extracting frames from %s (fps=%s, max=%d)", VIDEO, config.fps, config.max_frames)
logger.info("Open: http://mpr.local.ar/detection/?job=%s", JOB_ID)
input("\nPress Enter to start...")

frames = extract_frames(VIDEO, config, job_id=JOB_ID)
logger.info("Done: %d frames extracted", len(frames))
logger.info("Open http://mpr.local.ar/detection/?job=%s to see the events", JOB_ID)

View File

@@ -61,7 +61,7 @@ def test_extract_frames_with_events(monkeypatch):
def mock_push(job_id, event_type, data): def mock_push(job_id, event_type, data):
events.append((job_id, event_type, data)) events.append((job_id, event_type, data))
monkeypatch.setattr("detect.stages.frame_extractor.push_detect_event", mock_push) monkeypatch.setattr("detect.emit.push_detect_event", mock_push)
video = _get_sample_video() video = _get_sample_video()
config = FrameExtractionConfig(fps=1, max_frames=5) config = FrameExtractionConfig(fps=1, max_frames=5)

View File

@@ -1,27 +0,0 @@
"""
End-to-end test: run FrameExtractor and verify SSE events are emitted.
Usage (manual):
python tests/detect/test_frame_extractor_e2e.py
Requires Redis running on localhost:6381.
Push events will appear at: http://mpr.local.ar/detection/?job=e2e-test
"""
import sys
sys.path.insert(0, ".")
from detect.profiles.soccer import SoccerBroadcastProfile
from detect.stages.frame_extractor import extract_frames
VIDEO = "media/out/chunks/95043d50-4df6-4ac8-bbd5-2ba873117c6e/chunk_0000.mp4"
JOB_ID = "e2e-test"
profile = SoccerBroadcastProfile()
config = profile.frame_extraction_config()
config.max_frames = 20 # keep it quick
print(f"Extracting frames from {VIDEO} (fps={config.fps}, max={config.max_frames})")
frames = extract_frames(VIDEO, config, job_id=JOB_ID)
print(f"Done: {len(frames)} frames extracted")
print(f"Open http://mpr.local.ar/detection/?job={JOB_ID} to see the events")

View File

@@ -0,0 +1,84 @@
"""Tests for SceneFilter stage."""
import numpy as np
import pytest
from detect.models import Frame
from detect.profiles.base import SceneFilterConfig
from detect.stages.scene_filter import scene_filter
def _make_frame(seq: int, color: tuple[int, int, int] = (128, 128, 128)) -> Frame:
    """Build a uniform solid-color 64x64 RGB test frame."""
    pixels = np.full((64, 64, 3), color, dtype=np.uint8)
    return Frame(sequence=seq, chunk_id=0, timestamp=0.5 * seq, image=pixels)
def test_identical_frames_deduped():
    """Ten identical frames collapse to just the first one."""
    batch = [_make_frame(i) for i in range(10)]
    survivors = scene_filter(batch, SceneFilterConfig(hamming_threshold=8))
    # All identical -> only the first frame survives.
    assert len(survivors) == 1
    assert survivors[0].sequence == 0
def _make_noisy_frame(seq: int, seed: int) -> Frame:
    """Build a frame of seeded random noise so each one is visually unique."""
    noise = np.random.RandomState(seed).randint(0, 256, (64, 64, 3), dtype=np.uint8)
    return Frame(sequence=seq, chunk_id=0, timestamp=0.5 * seq, image=noise)
def test_different_frames_kept():
    """Visually distinct noise frames should mostly survive filtering."""
    batch = [_make_noisy_frame(i, seed=1000 * i) for i in range(5)]
    survivors = scene_filter(batch, SceneFilterConfig(hamming_threshold=8))
    assert len(survivors) >= 3
def test_disabled_passes_all():
    """A disabled filter passes every frame through."""
    batch = [_make_frame(i) for i in range(5)]
    assert len(scene_filter(batch, SceneFilterConfig(enabled=False))) == 5
def test_empty_input():
    """An empty frame list yields an empty result."""
    assert scene_filter([], SceneFilterConfig(hamming_threshold=8)) == []
def test_single_frame():
    """A lone frame is always kept."""
    survivors = scene_filter([_make_frame(0)], SceneFilterConfig(hamming_threshold=8))
    assert len(survivors) == 1
def test_hashes_populated():
    """Running the filter stamps a perceptual hash onto every input frame."""
    batch = [_make_frame(i, color=(50 * i, 100, 200)) for i in range(3)]
    scene_filter(batch, SceneFilterConfig(hamming_threshold=8))
    assert all(f.perceptual_hash != "" for f in batch)
def test_events_emitted(monkeypatch):
    """With a job_id, the stage emits both log and stats_update events."""
    captured = []
    monkeypatch.setattr(
        "detect.emit.push_detect_event",
        lambda job_id, etype, data: captured.append((etype, data)),
    )
    scene_filter([_make_frame(i) for i in range(5)],
                 SceneFilterConfig(hamming_threshold=8), job_id="test-job")
    seen = {etype for etype, _ in captured}
    assert "log" in seen
    assert "stats_update" in seen

View File

@@ -9,6 +9,7 @@ COPY framework/ ./framework/
COPY detection-app/ ./detection-app/ COPY detection-app/ ./detection-app/
WORKDIR /ui/detection-app WORKDIR /ui/detection-app
ENV CI=true
RUN pnpm install RUN pnpm install
EXPOSE 5175 EXPOSE 5175

View File

@@ -3,6 +3,7 @@ import { ref } from 'vue'
import { SSEDataSource, Panel, LayoutGrid } from 'mpr-ui-framework' import { SSEDataSource, Panel, LayoutGrid } from 'mpr-ui-framework'
import 'mpr-ui-framework/src/tokens.css' import 'mpr-ui-framework/src/tokens.css'
import LogPanel from './panels/LogPanel.vue' import LogPanel from './panels/LogPanel.vue'
import FunnelPanel from './panels/FunnelPanel.vue'
import type { StatsUpdate } from './types/sse-contract' import type { StatsUpdate } from './types/sse-contract'
const jobId = ref(new URLSearchParams(window.location.search).get('job') || 'test-job') const jobId = ref(new URLSearchParams(window.location.search).get('job') || 'test-job')
@@ -39,7 +40,7 @@ source.connect()
<span class="job-id">job: {{ jobId }}</span> <span class="job-id">job: {{ jobId }}</span>
</header> </header>
<LayoutGrid :columns="2" :rows="1" gap="var(--space-2)"> <LayoutGrid :columns="2" :rows="2" gap="var(--space-2)">
<Panel title="Stats" :status="status"> <Panel title="Stats" :status="status">
<div class="stats" v-if="stats"> <div class="stats" v-if="stats">
<div class="stat" v-for="s in [ <div class="stat" v-for="s in [
@@ -57,6 +58,8 @@ source.connect()
<div v-else class="empty">Waiting for stats...</div> <div v-else class="empty">Waiting for stats...</div>
</Panel> </Panel>
<FunnelPanel :source="source" :status="status" />
<LogPanel :source="source" :status="status" /> <LogPanel :source="source" :status="status" />
</LayoutGrid> </LayoutGrid>
</div> </div>

View File

@@ -0,0 +1,56 @@
<script setup lang="ts">
import { ref, computed } from 'vue'
import { Panel } from 'mpr-ui-framework'
import TimeSeriesRenderer from 'mpr-ui-framework/src/renderers/TimeSeriesRenderer.vue'
import type { DataSource } from 'mpr-ui-framework'
import type { StatsUpdate } from '../types/sse-contract'
const props = defineProps<{
source: DataSource
status?: 'idle' | 'live' | 'processing' | 'error'
}>()
// Accumulate stats snapshots over time
const snapshots = ref<{ ts: number; stats: StatsUpdate }[]>([])
const startTime = Date.now() / 1000
props.source.on<StatsUpdate>('stats_update', (e) => {
snapshots.value.push({ ts: Date.now() / 1000 - startTime, stats: e })
})
const series = [
{ label: 'Frames', color: '#4f9cf9' },
{ label: 'After filter', color: '#3ecf8e' },
{ label: 'Regions', color: '#f5a623' },
{ label: 'OCR resolved', color: '#a78bfa' },
]
const chartData = computed(() => {
const timestamps = snapshots.value.map((s) => s.ts)
const frames = snapshots.value.map((s) => s.stats.frames_extracted)
const filtered = snapshots.value.map((s) => s.stats.frames_after_scene_filter)
const regions = snapshots.value.map((s) => s.stats.regions_detected)
const ocr = snapshots.value.map((s) => s.stats.regions_resolved_by_ocr)
return [timestamps, frames, filtered, regions, ocr] as const
})
</script>
<template>
<Panel title="Processing Funnel" :status="status">
<TimeSeriesRenderer
v-if="snapshots.length > 0"
:series="series"
:data="chartData"
:stacked="true"
/>
<div v-else class="empty">Waiting for stats...</div>
</Panel>
</template>
<style scoped>
.empty {
color: var(--text-dim);
padding: var(--space-6);
text-align: center;
}
</style>

View File

@@ -11,7 +11,8 @@
}, },
"dependencies": { "dependencies": {
"vue": "^3.5", "vue": "^3.5",
"pinia": "^2.2" "pinia": "^2.2",
"uplot": "^1.6"
}, },
"devDependencies": { "devDependencies": {
"typescript": "^5.6", "typescript": "^5.6",

View File

@@ -11,6 +11,9 @@ importers:
pinia: pinia:
specifier: ^2.2 specifier: ^2.2
version: 2.3.1(typescript@5.9.3)(vue@3.5.30(typescript@5.9.3)) version: 2.3.1(typescript@5.9.3)(vue@3.5.30(typescript@5.9.3))
uplot:
specifier: ^1.6
version: 1.6.32
vue: vue:
specifier: ^3.5 specifier: ^3.5
version: 3.5.30(typescript@5.9.3) version: 3.5.30(typescript@5.9.3)
@@ -748,6 +751,9 @@ packages:
engines: {node: '>=14.17'} engines: {node: '>=14.17'}
hasBin: true hasBin: true
uplot@1.6.32:
resolution: {integrity: sha512-KIMVnG68zvu5XXUbC4LQEPnhwOxBuLyW1AHtpm6IKTXImkbLgkMy+jabjLgSLMasNuGGzQm/ep3tOkyTxpiQIw==}
vite-node@2.1.9: vite-node@2.1.9:
resolution: {integrity: sha512-AM9aQ/IPrW/6ENLQg3AGY4K1N2TGZdR5e4gu/MmmR2xR3Ll1+dib+nook92g4TV3PXVyeyxdWwtaCAiUL0hMxA==} resolution: {integrity: sha512-AM9aQ/IPrW/6ENLQg3AGY4K1N2TGZdR5e4gu/MmmR2xR3Ll1+dib+nook92g4TV3PXVyeyxdWwtaCAiUL0hMxA==}
engines: {node: ^18.0.0 || >=20.0.0} engines: {node: ^18.0.0 || >=20.0.0}
@@ -1460,6 +1466,8 @@ snapshots:
typescript@5.9.3: {} typescript@5.9.3: {}
uplot@1.6.32: {}
vite-node@2.1.9: vite-node@2.1.9:
dependencies: dependencies:
cac: 6.7.14 cac: 6.7.14

View File

@@ -10,3 +10,4 @@ export { default as LayoutGrid } from './components/LayoutGrid.vue'
// Renderers // Renderers
export { default as LogRenderer } from './renderers/LogRenderer.vue' export { default as LogRenderer } from './renderers/LogRenderer.vue'
export { default as TimeSeriesRenderer } from './renderers/TimeSeriesRenderer.vue'

View File

@@ -0,0 +1,101 @@
<script setup lang="ts">
import { ref, onMounted, onUnmounted, watch, nextTick } from 'vue'
import uPlot from 'uplot'
import 'uplot/dist/uPlot.min.css'
export interface TimeSeriesSeries {
label: string
color: string
}
const props = withDefaults(defineProps<{
/** Array of series configs (label + color) */
series: TimeSeriesSeries[]
/** Data: [timestamps[], series1[], series2[], ...] */
data: uPlot.AlignedData
/** Chart title (optional) */
title?: string
/** Stacked area mode */
stacked?: boolean
}>(), {
stacked: false,
})
const container = ref<HTMLElement | null>(null)
let chart: uPlot | null = null
function buildOpts(): uPlot.Options {
const seriesOpts: uPlot.Series[] = [
{ label: 'Time' },
...props.series.map((s) => ({
label: s.label,
stroke: s.color,
fill: props.stacked ? s.color + '40' : undefined,
width: 2,
})),
]
return {
width: container.value?.clientWidth ?? 400,
height: container.value?.clientHeight ?? 200,
series: seriesOpts,
axes: [
{ stroke: '#555568', grid: { stroke: '#2e2e3822' } },
{ stroke: '#555568', grid: { stroke: '#2e2e3822' } },
],
cursor: { show: true },
legend: { show: true },
}
}
function createChart() {
if (!container.value) return
if (chart) chart.destroy()
chart = new uPlot(buildOpts(), props.data, container.value)
}
function resize() {
if (!chart || !container.value) return
chart.setSize({
width: container.value.clientWidth,
height: container.value.clientHeight,
})
}
watch(() => props.data, (newData) => {
if (chart) {
chart.setData(newData)
} else {
nextTick(createChart)
}
}, { deep: true })
onMounted(() => {
nextTick(createChart)
const observer = new ResizeObserver(resize)
if (container.value) observer.observe(container.value)
onUnmounted(() => {
observer.disconnect()
chart?.destroy()
chart = null
})
})
</script>
<template>
<div ref="container" class="timeseries-renderer" />
</template>
<style scoped>
.timeseries-renderer {
width: 100%;
height: 100%;
min-height: 150px;
}
.timeseries-renderer :deep(.u-legend) {
font-family: var(--font-mono);
font-size: var(--font-size-sm);
color: var(--text-secondary);
}
</style>