compare view

2026-03-30 13:05:28 -03:00
parent aac27b8504
commit 55e83e4203
23 changed files with 1321 additions and 201 deletions

View File

@@ -30,12 +30,21 @@ class ConfigUpdate(BaseModel):
     preprocessing: dict | None = None

+class StageOutputHintInfo(BaseModel):
+    key: str
+    type: str
+    label: str = ""
+    default_opacity: float = 0.5
+    src_format: str = "png"
+
 class StageConfigInfo(BaseModel):
     name: str
     label: str
     description: str
     category: str
     config_fields: list[dict]
+    output_hints: list[StageOutputHintInfo] = []
     reads: list[str]
     writes: list[str]
@@ -121,6 +130,13 @@ def _stage_to_info(stage) -> StageConfigInfo:
             }
             for f in stage.config_fields
         ],
+        output_hints=[
+            StageOutputHintInfo(
+                key=h.key, type=h.type, label=h.label,
+                default_opacity=h.default_opacity, src_format=h.src_format,
+            )
+            for h in getattr(stage, "output_hints", [])
+        ],
         reads=stage.io.reads,
         writes=stage.io.writes,
     )
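For context, this is roughly how a client would consume the new output_hints field. A minimal sketch, not part of the commit — the stage-registry endpoint path and host are assumptions (the route decorator sits outside this hunk), and only the model fields defined above are used:

import httpx

# Assumed host/path for the stage-registry endpoint.
resp = httpx.get("http://localhost:8000/api/detect/stages")
resp.raise_for_status()
for stage in resp.json():
    # Each StageConfigInfo now carries output_hints (defaults to an empty list).
    for hint in stage.get("output_hints", []):
        # hint["type"] is "boxes_by_frame" or "overlay"; overlays carry a default opacity.
        print(stage["name"], hint["key"], hint["type"], hint["default_opacity"])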

View File

@@ -41,34 +41,24 @@ class ScenarioInfo(BaseModel):
 class ReplayRequest(BaseModel):
-    timeline_id: str
+    job_id: str
     start_stage: str
     config_overrides: dict | None = None

 class ReplayResponse(BaseModel):
     status: str
-    timeline_id: str
+    job_id: str
+    replay_job_id: str
     start_stage: str
     detections: int = 0
     brands_found: int = 0

-class RetryRequest(BaseModel):
-    timeline_id: str
-    config_overrides: dict | None = None
-    start_stage: str = "escalate_vlm"
-    schedule_seconds: float | None = None  # delay before execution (off-peak)
-
-class RetryResponse(BaseModel):
-    status: str
-    task_id: str
-    timeline_id: str
-
 class ReplaySingleStageRequest(BaseModel):
-    timeline_id: str
+    job_id: str
     stage: str
     frame_refs: list[int] | None = None
     config_overrides: dict | None = None
@@ -103,16 +93,24 @@ class ReplaySingleStageResponse(BaseModel):
 # --- Endpoints ---

 @router.get("/checkpoints/{timeline_id}")
-def list_checkpoints(timeline_id: str) -> list[CheckpointInfo]:
-    """List available checkpoint stages for a job."""
-    from core.detect.checkpoint import list_checkpoints as _list
+def list_checkpoints_endpoint(timeline_id: str) -> list[CheckpointInfo]:
+    """List available checkpoint stages for a timeline."""
+    from core.detect.checkpoint.storage import get_checkpoints_for_timeline
     try:
-        stages = _list(timeline_id)
+        checkpoints = get_checkpoints_for_timeline(timeline_id)
     except Exception as e:
-        raise HTTPException(status_code=404, detail=f"No checkpoints for job {timeline_id}: {e}")
-    result = [CheckpointInfo(stage=s) for s in stages]
+        raise HTTPException(status_code=404, detail=f"No checkpoints for timeline {timeline_id}: {e}")
+    result = [
+        CheckpointInfo(
+            stage=c["stage_name"],
+            is_scenario=c.get("is_scenario", False),
+            scenario_label=c.get("scenario_label", ""),
+        )
+        for c in checkpoints
+        if c["stage_name"]
+    ]
     return result
@@ -211,11 +209,11 @@ def list_scenarios_endpoint():
 @router.post("/replay", response_model=ReplayResponse)
 def replay(req: ReplayRequest):
     """Replay pipeline from a specific stage with optional config overrides."""
-    from core.detect.checkpoint import replay_from
+    from core.detect.checkpoint.replay import replay_from
     try:
         result = replay_from(
-            timeline_id=req.timeline_id,
+            job_id=req.job_id,
             start_stage=req.start_stage,
             config_overrides=req.config_overrides,
         )
@@ -230,7 +228,8 @@ def replay(req: ReplayRequest):
     response = ReplayResponse(
         status="completed",
-        timeline_id=req.timeline_id,
+        job_id=req.job_id,
+        replay_job_id=result.get("job_id", ""),
         start_stage=req.start_stage,
         detections=len(detections),
         brands_found=brands_found,
@@ -238,29 +237,6 @@ def replay(req: ReplayRequest):
     return response

-@router.post("/retry", response_model=RetryResponse)
-def retry(req: RetryRequest):
-    """Queue an async retry of unresolved candidates with different config."""
-    from core.detect.checkpoint.tasks import retry_candidates
-    kwargs = {
-        "timeline_id": req.timeline_id,
-        "config_overrides": req.config_overrides,
-        "start_stage": req.start_stage,
-    }
-    if req.schedule_seconds:
-        task = retry_candidates.apply_async(kwargs=kwargs, countdown=req.schedule_seconds)
-    else:
-        task = retry_candidates.delay(**kwargs)
-    response = RetryResponse(
-        status="queued",
-        task_id=task.id,
-        timeline_id=req.timeline_id,
-    )
-    return response
-
 @router.post("/replay-stage", response_model=ReplaySingleStageResponse)
 def replay_single_stage(req: ReplaySingleStageRequest):
@@ -269,7 +245,7 @@ def replay_single_stage(req: ReplaySingleStageRequest):
     try:
         result = _replay(
-            timeline_id=req.timeline_id,
+            job_id=req.job_id,
             stage=req.stage,
             frame_refs=req.frame_refs,
             config_overrides=req.config_overrides,
@@ -324,6 +300,151 @@ def _gpu_url() -> str:
     return url.rstrip("/")

+# --- Overlay cache — save/load debug overlay images ---
+
+class SaveOverlaysRequest(BaseModel):
+    timeline_id: str
+    job_id: str
+    stage: str
+    seq: int
+    overlays: dict[str, str]  # {overlay_key: base64_png}
+
+
+@router.post("/overlays")
+def save_overlays_endpoint(req: SaveOverlaysRequest):
+    """Save debug overlay images to blob storage cache."""
+    from core.detect.checkpoint.frames import save_overlays
+    save_overlays(req.timeline_id, req.job_id, req.stage, req.seq, req.overlays)
+    return {"status": "saved", "count": len(req.overlays)}
+
+
+@router.get("/overlays/{timeline_id}/{job_id}/{stage}/{seq}")
+def load_overlays_endpoint(timeline_id: str, job_id: str, stage: str, seq: int):
+    """Load cached debug overlay images."""
+    from core.detect.checkpoint.frames import load_overlays
+    overlays = load_overlays(timeline_id, job_id, stage, seq)
+    return {"overlays": overlays or {}}
+
+
+def _generate_debug_overlays(job_id: str, stage: str, frame) -> dict[str, str] | None:
+    """Generate debug overlay images for a single frame."""
+    import os
+    inference_url = os.environ.get("INFERENCE_URL")
+
+    if stage == "detect_edges":
+        from core.detect.profile import get_profile, get_stage_config
+        from core.detect.stages.models import RegionAnalysisConfig
+        from core.db.connection import get_session
+        from core.db.job import get_job
+        from uuid import UUID
+
+        with get_session() as session:
+            job = get_job(session, UUID(job_id))
+            if not job:
+                return None
+            profile = get_profile(job.profile_name)
+        config = RegionAnalysisConfig(**get_stage_config(profile, "detect_edges"))
+
+        if inference_url:
+            from core.detect.inference import InferenceClient
+            client = InferenceClient(base_url=inference_url, job_id=job_id)
+            dr = client.detect_edges_debug(
+                image=frame.image,
+                edge_canny_low=config.edge_canny_low,
+                edge_canny_high=config.edge_canny_high,
+                edge_hough_threshold=config.edge_hough_threshold,
+                edge_hough_min_length=config.edge_hough_min_length,
+                edge_hough_max_gap=config.edge_hough_max_gap,
+                edge_pair_max_distance=config.edge_pair_max_distance,
+                edge_pair_min_distance=config.edge_pair_min_distance,
+            )
+            return {
+                "edge_overlay_b64": dr.edge_overlay_b64,
+                "lines_overlay_b64": dr.lines_overlay_b64,
+            }
+        else:
+            from core.detect.stages.edge_detector import _load_cv_edges
+            edges_mod = _load_cv_edges()
+            dr = edges_mod.detect_edges_debug(
+                frame.image,
+                canny_low=config.edge_canny_low,
+                canny_high=config.edge_canny_high,
+                hough_threshold=config.edge_hough_threshold,
+                hough_min_length=config.edge_hough_min_length,
+                hough_max_gap=config.edge_hough_max_gap,
+                pair_max_distance=config.edge_pair_max_distance,
+                pair_min_distance=config.edge_pair_min_distance,
+            )
+            return {
+                "edge_overlay_b64": dr["edge_overlay_b64"],
+                "lines_overlay_b64": dr["lines_overlay_b64"],
+            }
+
+    elif stage == "field_segmentation":
+        from core.detect.profile import get_profile, get_stage_config
+        from core.detect.stages.models import FieldSegmentationConfig
+        from core.db.connection import get_session
+        from core.db.job import get_job
+        from uuid import UUID
+
+        with get_session() as session:
+            job = get_job(session, UUID(job_id))
+            if not job:
+                return None
+            profile = get_profile(job.profile_name)
+        config = FieldSegmentationConfig(**get_stage_config(profile, "field_segmentation"))
+
+        if inference_url:
+            import httpx, json, base64, io
+            from PIL import Image
+            import numpy as np
+            buf = io.BytesIO()
+            Image.fromarray(frame.image).save(buf, format="JPEG", quality=85)
+            img_b64 = base64.b64encode(buf.getvalue()).decode()
+            resp = httpx.post(
+                f"{inference_url.rstrip('/')}/segment_field/debug",
+                json={
+                    "image_b64": img_b64,
+                    "hue_low": config.hue_low,
+                    "hue_high": config.hue_high,
+                    "sat_low": config.sat_low,
+                    "sat_high": config.sat_high,
+                    "val_low": config.val_low,
+                    "val_high": config.val_high,
+                    "morph_kernel": config.morph_kernel,
+                    "min_area_ratio": config.min_area_ratio,
+                },
+                timeout=30.0,
+            )
+            if resp.status_code == 200:
+                data = resp.json()
+                return {"mask_overlay_b64": data.get("mask_b64", "")}
+        return None
+
+    return None
+
+
+@router.get("/overlays/{timeline_id}/{job_id}/{stage}")
+def list_overlay_frames_endpoint(timeline_id: str, job_id: str, stage: str):
+    """List frame sequences that have cached overlays."""
+    from core.detect.checkpoint.frames import list_overlay_frames
+    seqs = list_overlay_frames(timeline_id, job_id, stage)
+    return {"frames": seqs}
+
+
+# --- GPU proxy — thin passthrough to inference server for interactive editor ---
 @router.post("/gpu/detect_edges")
 async def gpu_detect_edges(request: Request):
     """Proxy to GPU inference server — browser can't reach it directly."""

View File

@@ -147,6 +147,108 @@ def load_cached_frames_b64(timeline_id: str) -> list[dict]:
     return result

+
+# ---------------------------------------------------------------------------
+# Debug overlay storage — per job/stage/frame
+# ---------------------------------------------------------------------------
+
+def _overlay_prefix(timeline_id: str, job_id: str, stage: str) -> str:
+    return f"{CACHE_PREFIX}/{timeline_id}/overlays/{job_id}/{stage}/"
+
+
+def _overlay_key(timeline_id: str, job_id: str, stage: str, seq: int, name: str) -> str:
+    return f"{CACHE_PREFIX}/{timeline_id}/overlays/{job_id}/{stage}/{seq}_{name}.png"
+
+
+def save_overlays(
+    timeline_id: str,
+    job_id: str,
+    stage: str,
+    seq: int,
+    overlays: dict[str, str],
+):
+    """
+    Save debug overlay images (base64 PNG) to blob storage.
+
+    overlays: {overlay_key: base64_png_string}
+    e.g. {"edge_overlay_b64": "iVBOR...", "lines_overlay_b64": "iVBOR..."}
+    """
+    from core.storage.s3 import upload_file
+    import tempfile
+
+    for name, b64_data in overlays.items():
+        key = _overlay_key(timeline_id, job_id, stage, seq, name)
+        raw = base64.b64decode(b64_data)
+        with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmp:
+            tmp.write(raw)
+            tmp_path = tmp.name
+        try:
+            upload_file(tmp_path, BUCKET, key)
+        finally:
+            os.unlink(tmp_path)
+
+    logger.info("Saved %d overlays for timeline %s job %s stage %s frame %d",
+                len(overlays), timeline_id, job_id, stage, seq)
+
+
+def load_overlays(
+    timeline_id: str,
+    job_id: str,
+    stage: str,
+    seq: int,
+) -> dict[str, str] | None:
+    """
+    Load debug overlay images from blob storage as base64 strings.
+
+    Returns {overlay_key: base64_png_string} or None if no overlays cached.
+    """
+    from core.storage.s3 import list_objects, download_to_temp
+
+    prefix = _overlay_prefix(timeline_id, job_id, stage)
+    seq_prefix = f"{seq}_"
+    objects = list_objects(BUCKET, prefix)
+
+    overlays = {}
+    for obj in objects:
+        filename = obj["key"].rsplit("/", 1)[-1]
+        if not filename.startswith(seq_prefix):
+            continue
+        name = filename[len(seq_prefix):].replace(".png", "")
+        tmp_path = download_to_temp(BUCKET, obj["key"])
+        try:
+            with open(tmp_path, "rb") as f:
+                overlays[name] = base64.b64encode(f.read()).decode()
+        finally:
+            os.unlink(tmp_path)
+
+    return overlays if overlays else None
+
+
+def list_overlay_frames(
+    timeline_id: str,
+    job_id: str,
+    stage: str,
+) -> list[int]:
+    """List frame sequences that have cached overlays."""
+    from core.storage.s3 import list_objects
+
+    prefix = _overlay_prefix(timeline_id, job_id, stage)
+    objects = list_objects(BUCKET, prefix)
+    seqs = set()
+    for obj in objects:
+        filename = obj["key"].rsplit("/", 1)[-1]
+        seq_str = filename.split("_")[0]
+        try:
+            seqs.add(int(seq_str))
+        except ValueError:
+            continue
+    return sorted(seqs)
+
+
 def clear_cache(timeline_id: str):
     """Delete the frame cache for a timeline."""
     from core.storage.s3 import delete_objects
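The helpers above pin down a predictable key layout. A small sketch mirroring the convention — cache_prefix stands in for the module's CACHE_PREFIX constant, and the IDs are placeholders:

def overlay_key(cache_prefix: str, timeline_id: str, job_id: str,
                stage: str, seq: int, name: str) -> str:
    # Mirrors _overlay_key() above.
    return f"{cache_prefix}/{timeline_id}/overlays/{job_id}/{stage}/{seq}_{name}.png"

assert overlay_key("detect-cache", "tl-123", "job-456", "detect_edges", 0, "edge_overlay_b64") == \
    "detect-cache/tl-123/overlays/job-456/detect_edges/0_edge_overlay_b64.png"

# load_overlays() filters on the f"{seq}_" filename prefix, so frame 1 ("1_...")
# never matches frame 10 ("10_..."): the underscore is part of the prefix.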

View File

@@ -1,32 +1,88 @@
 """
 Pipeline replay — re-run from any stage with different config.

-Loads a checkpoint, applies config overrides, builds a subgraph
-starting from the target stage, and invokes it.
+Loads stage outputs from DB, frames from timeline cache,
+reconstitutes state, and runs from a target stage onward.
+Creates a new Job (run_type=REPLAY) for each replay invocation.
 """
 from __future__ import annotations

 import logging
+import os
 import uuid

 from core.detect import emit
-
-# TODO: migrate to Timeline/Branch/Checkpoint model
-# These old functions no longer exist — replay needs rework
-def _not_migrated(*args, **kwargs):
-    raise NotImplementedError("Replay not yet migrated to Timeline/Branch/Checkpoint model")
-
-load_checkpoint = _not_migrated
-list_checkpoints = _not_migrated
-
-from core.detect.graph import NODES, build_graph
+from core.detect.graph import NODES, get_pipeline
+from core.detect.graph.runner import PipelineRunner

 logger = logging.getLogger(__name__)

-# OverrideProfile removed — config overrides are now handled by dict merging
-# in _load_profile() (nodes.py) and replay_single_stage (below).
+
+def _build_state_for_replay(
+    job_id: str,
+    up_to_stage: str,
+) -> dict:
+    """
+    Reconstitute pipeline state from a completed job's stage outputs,
+    up to (but not including) the target stage.
+
+    Loads frames from timeline cache + stage outputs from DB.
+    """
+    from .storage import load_stage_outputs_for_job, get_checkpoints_for_job
+    from .frames import load_cached_frames
+    from core.db.connection import get_session
+    from core.db.job import get_job
+
+    # Load the job to get timeline_id and profile
+    with get_session() as session:
+        job = get_job(session, uuid.UUID(job_id))
+        if not job:
+            raise ValueError(f"Job not found: {job_id}")
+        timeline_id = str(job.timeline_id) if job.timeline_id else ""
+    if not timeline_id:
+        raise ValueError(f"Job {job_id} has no timeline")
+
+    # Load frames from timeline cache
+    frames = load_cached_frames(timeline_id)
+    if not frames:
+        raise ValueError(f"No cached frames for timeline {timeline_id}. Run the pipeline first.")
+
+    # Load all stage outputs for this job
+    all_outputs = load_stage_outputs_for_job(job_id)
+
+    # Build state with envelope + frames
+    state = {
+        "job_id": job_id,
+        "timeline_id": timeline_id,
+        "video_path": job.video_path,
+        "profile_name": job.profile_name,
+        "source_asset_id": str(job.source_asset_id),
+        "frames": frames,
+        "config_overrides": {},
+    }
+
+    # Apply stage outputs in pipeline order, up to the target stage
+    target_idx = NODES.index(up_to_stage)
+    for stage_name in NODES[:target_idx]:
+        output = all_outputs.get(stage_name)
+        if output:
+            # Stage outputs contain serialized data — merge into state
+            # The stage registry's deserialize_fn can reconstitute if needed
+            for key, value in output.items():
+                state[key] = value
+
+    # Filtered frames: reconstruct from sequence list if present
+    filtered_seqs = state.get("filtered_frame_sequences")
+    if filtered_seqs:
+        seq_set = set(filtered_seqs)
+        state["filtered_frames"] = [f for f in frames if f.sequence in seq_set]
+    elif "filtered_frames" not in state:
+        state["filtered_frames"] = frames
+
+    return state
+

 def replay_from(
@@ -38,49 +94,60 @@ def replay_from(
     """
     Replay the pipeline from a specific stage.

-    Loads the checkpoint from the stage immediately before start_stage,
-    applies config overrides, and runs the subgraph from start_stage onward.
+    Loads state from the original job's stage outputs up to start_stage,
+    applies config overrides, and runs from start_stage onward.
+    Creates a new Job (run_type=REPLAY).

     Returns the final state dict.
     """
     if start_stage not in NODES:
         raise ValueError(f"Unknown stage: {start_stage!r}. Options: {NODES}")
     start_idx = NODES.index(start_stage)

-    # Load checkpoint from the stage before start_stage
     if start_idx == 0:
         raise ValueError("Cannot replay from the first stage — just run the full pipeline")

-    previous_stage = NODES[start_idx - 1]
-    available = list_checkpoints(job_id)
-    if previous_stage not in available:
-        raise ValueError(
-            f"No checkpoint for stage {previous_stage!r} (job {job_id}). "
-            f"Available: {available}"
-        )
-
-    logger.info("Replaying job %s from %s (loading checkpoint: %s)",
-                job_id, start_stage, previous_stage)
-    state = load_checkpoint(job_id, previous_stage)
+    logger.info("Replaying job %s from %s", job_id, start_stage)
+    state = _build_state_for_replay(job_id, start_stage)

     # Apply config overrides
     if config_overrides:
         state["config_overrides"] = config_overrides

+    # Create replay job
+    from core.db.connection import get_session
+    from core.db.job import create_job, get_job
+
+    with get_session() as session:
+        original = get_job(session, uuid.UUID(job_id))
+        replay_job = create_job(
+            session,
+            source_asset_id=original.source_asset_id,
+            video_path=original.video_path,
+            timeline_id=original.timeline_id,
+            profile_name=original.profile_name,
+            run_type="replay",
+            parent_id=original.id,
+            config_overrides=config_overrides,
+        )
+        replay_job_id = str(replay_job.id)
+
+    # Update state with new job ID
+    state["job_id"] = replay_job_id
+
     # Set run context for SSE events
-    run_id = str(uuid.uuid4())[:8]
     emit.set_run_context(
-        run_id=run_id,
+        run_id=replay_job_id,
         parent_job_id=job_id,
         run_type="replay",
     )

-    # Build subgraph starting from start_stage
-    graph = build_graph(checkpoint=checkpoint, start_from=start_stage)
-    pipeline = graph.compile()
+    # Run from start_stage onward
+    pipeline = get_pipeline(
+        checkpoint=checkpoint,
+        profile_name=state["profile_name"],
+        start_from=start_stage,
+    )

     try:
         result = pipeline.invoke(state)
@@ -102,12 +169,6 @@ def replay_single_stage(
     Fast path for interactive parameter tuning — runs only the target stage
     function, not the full pipeline tail. Returns the stage output directly.
-
-    When debug=True and stage is detect_edges, returns additional overlay
-    data (Canny edges, Hough lines) for visual feedback in the editor.
-
-    For detect_edges: returns {"edge_regions_by_frame": {seq: [box, ...]}}
-    With debug=True, also returns {"debug": {seq: {edge_overlay_b64, lines_overlay_b64, ...}}}
     """
     if stage not in NODES:
         raise ValueError(f"Unknown stage: {stage!r}. Options: {NODES}")
@@ -116,19 +177,9 @@ def replay_single_stage(
     if stage_idx == 0:
         raise ValueError("Cannot replay the first stage — just run the full pipeline")

-    previous_stage = NODES[stage_idx - 1]
-    available = list_checkpoints(job_id)
-    if previous_stage not in available:
-        raise ValueError(
-            f"No checkpoint for stage {previous_stage!r} (job {job_id}). "
-            f"Available: {available}"
-        )
-
-    logger.info("Single-stage replay: job %s, stage %s (loading checkpoint: %s, debug=%s)",
-                job_id, stage, previous_stage, debug)
-    state = load_checkpoint(job_id, previous_stage)
+    logger.info("Single-stage replay: job %s, stage %s (debug=%s)", job_id, stage, debug)
+    state = _build_state_for_replay(job_id, stage)

     # Build profile with overrides
     from core.detect.profile import get_profile, get_stage_config
@@ -142,9 +193,17 @@ def replay_single_stage(
         merged_configs[sname] = soverrides
     profile = {**profile, "configs": merged_configs}

-    # Run the stage function directly (not through the graph)
+    # Subset frames if requested
+    frames = state.get("filtered_frames", state.get("frames", []))
+    if frame_refs:
+        ref_set = set(frame_refs)
+        frames = [f for f in frames if f.sequence in ref_set]
+
+    # Run the specific stage
     if stage == "detect_edges":
-        return _replay_detect_edges(state, profile, frame_refs, job_id, debug)
+        return _replay_detect_edges(state, profile, frames, job_id, debug)
+    elif stage == "field_segmentation":
+        return _replay_field_segmentation(state, profile, frames, job_id, debug)
     else:
         raise ValueError(
             f"Single-stage replay not yet implemented for {stage!r}. "
@@ -155,35 +214,28 @@ def replay_single_stage(
 def _replay_detect_edges(
     state: dict,
     profile,
-    frame_refs: list[int] | None,
+    frames: list,
     job_id: str,
     debug: bool,
 ) -> dict:
     """Run edge detection on checkpoint frames, optionally with debug overlays."""
-    import os
     from core.detect.stages.edge_detector import detect_edge_regions
     from core.detect.profile import get_stage_config
     from core.detect.stages.models import RegionAnalysisConfig

     config = RegionAnalysisConfig(**get_stage_config(profile, "detect_edges"))
-    frames = state.get("filtered_frames", [])
-    if frame_refs:
-        ref_set = set(frame_refs)
-        frames = [f for f in frames if f.sequence in ref_set]
     inference_url = os.environ.get("INFERENCE_URL")
+    field_masks = state.get("field_masks", {})

-    # Normal run — always needed for the boxes
     result = detect_edge_regions(
         frames=frames,
         config=config,
         inference_url=inference_url,
         job_id=job_id,
+        field_masks=field_masks,
     )
     output = {"edge_regions_by_frame": result}

-    # Debug overlays — call debug endpoint (remote) or local debug function
     if debug and frames:
         debug_data = {}
         if inference_url:
@@ -207,7 +259,6 @@ def _replay_detect_edges(
                 "pair_count": dr.pair_count,
             }
         else:
-            # Local mode — import GPU module directly
             from core.detect.stages.edge_detector import _load_cv_edges
             edges_mod = _load_cv_edges()
             for frame in frames:
@@ -230,3 +281,27 @@ def _replay_detect_edges(
         output["debug"] = debug_data

     return output
+
+
+def _replay_field_segmentation(
+    state: dict,
+    profile,
+    frames: list,
+    job_id: str,
+    debug: bool,
+) -> dict:
+    """Run field segmentation on checkpoint frames."""
+    from core.detect.stages.field_segmentation import run_field_segmentation
+    from core.detect.profile import get_stage_config
+    from core.detect.stages.models import FieldSegmentationConfig
+
+    config = FieldSegmentationConfig(**get_stage_config(profile, "field_segmentation"))
+    inference_url = os.environ.get("INFERENCE_URL")
+
+    result = run_field_segmentation(
+        frames=frames,
+        config=config,
+        inference_url=inference_url,
+        job_id=job_id,
+    )
+    return result
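A hedged usage sketch for the reworked replay entry point — the job UUID and override values are placeholders; the keyword signature and the returned state's job_id field follow the code above:

from core.detect.checkpoint.replay import replay_from

# Re-run from detect_edges with tighter Canny thresholds. A new Job with
# run_type="replay" is created; the state carries its ID, not the original's.
final_state = replay_from(
    job_id="0b1e6c2a-0000-0000-0000-000000000000",  # placeholder UUID of a completed job
    start_stage="detect_edges",
    config_overrides={"detect_edges": {"edge_canny_low": 80, "edge_canny_high": 200}},
)
print(final_state["job_id"])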

View File

@@ -39,13 +39,21 @@ def checkpoint_after_stage(job_id: str, stage_name: str, state: dict, result: di
         return

     from .storage import save_checkpoint, save_stage_output
-    from core.detect.stages.base import _REGISTRY
+    from core.detect.stages.base import _REGISTRY, _LEGACY_REGISTRY

     merged = {**state, **result}

     # Serialize stage output using the stage's serialize_fn if available
+    # Check new-style registry first, then legacy (some stages are in both)
+    serialize_fn = None
     stage_cls = _REGISTRY.get(stage_name)
-    serialize_fn = getattr(getattr(stage_cls, "definition", None), "serialize_fn", None)
+    if stage_cls:
+        serialize_fn = getattr(getattr(stage_cls, "definition", None), "serialize_fn", None)
+    if not serialize_fn:
+        legacy = _LEGACY_REGISTRY.get(stage_name)
+        if legacy:
+            serialize_fn = legacy.serialize_fn
     if serialize_fn:
         output_json = serialize_fn(merged, job_id)
     else:

View File

@@ -146,6 +146,7 @@ def node_field_segmentation(state: DetectState) -> dict:
     _emit(state, "field_segmentation", "done")
     return {
         "field_masks": result["field_masks"],
+        "field_mask_overlays": result.get("field_mask_overlays", {}),
         "field_boundaries": result["field_boundaries"],
         "field_coverage": result["field_coverage"],
     }

View File

@@ -24,7 +24,7 @@ from PIL import Image
 from core.detect import emit
 from core.detect.models import BoundingBox, Frame
 from core.detect.stages.base import Stage
-from core.detect.stages.models import StageDefinition, StageConfigField, StageIO
+from core.detect.stages.models import StageDefinition, StageConfigField, StageIO, StageOutputHint

 logger = logging.getLogger(__name__)
@@ -42,7 +42,6 @@ class EdgeDetectionStage(Stage):
             writes=["edge_regions_by_frame"],
         ),
         config_fields=[
-            StageConfigField(name="enabled", type="bool", default=True, description="Enable edge detection"),
             StageConfigField(name="edge_canny_low", type="int", default=50, description="Canny low threshold", min=0, max=255),
             StageConfigField(name="edge_canny_high", type="int", default=150, description="Canny high threshold", min=0, max=255),
             StageConfigField(name="edge_hough_threshold", type="int", default=80, description="Hough accumulator threshold", min=1, max=500),
@@ -51,6 +50,11 @@ class EdgeDetectionStage(Stage):
            StageConfigField(name="edge_pair_max_distance", type="int", default=200, description="Max distance between line pair (px)", min=10, max=500),
            StageConfigField(name="edge_pair_min_distance", type="int", default=15, description="Min distance between line pair (px)", min=5, max=200),
        ],
+        output_hints=[
+            StageOutputHint(key="edge_regions_by_frame", type="boxes_by_frame", label="Edge regions"),
+            StageOutputHint(key="edge_overlay_b64", type="overlay", label="Canny edges", default_opacity=0.25),
+            StageOutputHint(key="lines_overlay_b64", type="overlay", label="Hough lines", default_opacity=0.25),
+        ],
         tracks_element="edge_region",
     )
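The overlay hints above correspond to the Canny and Hough intermediates the stage can emit. For orientation only, a classical-CV sketch of that pairing using the defaults visible in the config fields (50/150 Canny, Hough threshold 80); the minLineLength/maxLineGap values are placeholders since those defaults sit outside this hunk, and the real implementation lives in the inference module:

import cv2
import numpy as np

def detect_line_segments(gray: np.ndarray) -> list[tuple]:
    # Canny edge map with the stage's default 50/150 thresholds.
    edges = cv2.Canny(gray, 50, 150)
    # Probabilistic Hough transform with the default accumulator threshold (80).
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=80,
                            minLineLength=50, maxLineGap=10)  # placeholder min/gap
    return [tuple(seg[0]) for seg in lines] if lines is not None else []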

View File

@@ -24,6 +24,7 @@ from core.detect.stages.models import (
     StageConfigField,
     StageDefinition,
     StageIO,
+    StageOutputHint,
 )

 logger = logging.getLogger(__name__)
@@ -41,7 +42,6 @@ class FieldSegmentationStage(Stage):
             writes=["field_mask"],
         ),
         config_fields=[
-            StageConfigField(name="enabled", type="bool", default=True, description="Enable field segmentation"),
             StageConfigField(name="hue_low", type="int", default=30, description="HSV hue lower bound", min=0, max=180),
             StageConfigField(name="hue_high", type="int", default=85, description="HSV hue upper bound", min=0, max=180),
             StageConfigField(name="sat_low", type="int", default=30, description="HSV saturation lower bound", min=0, max=255),
@@ -51,6 +51,9 @@ class FieldSegmentationStage(Stage):
            StageConfigField(name="morph_kernel", type="int", default=15, description="Morphology kernel size", min=3, max=51),
            StageConfigField(name="min_area_ratio", type="float", default=0.05, description="Min contour area as fraction of frame", min=0.01, max=0.5),
        ],
+        output_hints=[
+            StageOutputHint(key="mask_overlay_b64", type="overlay", label="Field mask", default_opacity=0.5, src_format="png"),
+        ],
     )
@@ -102,6 +105,7 @@ def run_field_segmentation(
     client = InferenceClient(base_url=url, job_id=job_id or "", log_level=_run_log_level)

     field_masks = {}
+    field_mask_overlays = {}
     field_boundaries = {}
     field_coverage = {}
@@ -126,6 +130,7 @@ def run_field_segmentation(
         mask_b64 = resp.get("mask_b64", "")
         if mask_b64:
             field_masks[frame.sequence] = _decode_mask_b64(mask_b64)
+            field_mask_overlays[frame.sequence] = mask_b64
         field_boundaries[frame.sequence] = resp.get("boundary", [])
         field_coverage[frame.sequence] = resp.get("coverage", 0.0)
@@ -136,6 +141,7 @@ def run_field_segmentation(
     return {
         "field_masks": field_masks,
+        "field_mask_overlays": field_mask_overlays,
         "field_boundaries": field_boundaries,
         "field_coverage": field_coverage,
     }
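The actual segmentation runs on the inference server (the client call above); for orientation, a self-contained sketch of the HSV green-mask technique these config fields parameterize, using the defaults shown (hue 30-85, sat/val 30-255, 15px kernel, 5% min area). This is an illustration, not the server's implementation:

import cv2
import numpy as np

def green_field_mask(frame_bgr: np.ndarray) -> np.ndarray:
    # Threshold in HSV space (OpenCV hue range is 0-180).
    hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
    lower = np.array([30, 30, 30], dtype=np.uint8)
    upper = np.array([85, 255, 255], dtype=np.uint8)
    mask = cv2.inRange(hsv, lower, upper)
    # Morphological close with the default 15px kernel fills small holes.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    # Keep only contours covering >= min_area_ratio (default 0.05) of the frame.
    h, w = mask.shape
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    keep = np.zeros_like(mask)
    for c in contours:
        if cv2.contourArea(c) >= 0.05 * h * w:
            cv2.drawContours(keep, [c], -1, 255, thickness=cv2.FILLED)
    return keep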

View File

@@ -27,6 +27,14 @@ class StageIO(BaseModel):
     writes: List[str] = Field(default_factory=list)
     optional_reads: List[str] = Field(default_factory=list)

+class StageOutputHint(BaseModel):
+    """How to render a stage output in the compare/editor views."""
+    key: str
+    type: str
+    label: str = ""
+    default_opacity: float = 0.5
+    src_format: str = "png"
+
 class StageDefinition(BaseModel):
     """Complete metadata for a pipeline stage."""
     name: str
@@ -35,6 +43,7 @@ class StageDefinition(BaseModel):
     category: str = "detection"
     io: StageIO
     config_fields: List[StageConfigField] = Field(default_factory=list)
+    output_hints: List[StageOutputHint] = Field(default_factory=list)
     tracks_element: Optional[str] = None

 class FrameExtractionConfig(BaseModel):

View File

@@ -1,4 +1,4 @@
-"""Registration for CV analysis stages: edge detection."""
+"""Registration for CV analysis stages: edge detection, field segmentation."""
 from core.detect.stages.models import StageDefinition, StageIO, StageConfigField
 from core.detect.stages.base import register_stage
@@ -20,6 +20,24 @@ def _deser_regions(data: dict, job_id: str) -> dict:
     return {"edge_regions_by_frame": regions}

+def _ser_field_seg(state: dict, job_id: str) -> dict:
+    """Serialize field segmentation — boundaries + coverage + mask overlays."""
+    boundaries = state.get("field_boundaries", {})
+    coverage = state.get("field_coverage", {})
+    mask_overlays = state.get("field_mask_overlays", {})
+    return {
+        "field_boundaries": {str(k): v for k, v in boundaries.items()},
+        "field_coverage": {str(k): v for k, v in coverage.items()},
+        "mask_overlays_by_frame": {str(k): v for k, v in mask_overlays.items()},
+    }
+
+def _deser_field_seg(data: dict, job_id: str) -> dict:
+    boundaries = {int(k): v for k, v in data.get("field_boundaries", {}).items()}
+    coverage = {int(k): v for k, v in data.get("field_coverage", {}).items()}
+    return {"field_boundaries": boundaries, "field_coverage": coverage}
+
 def register():
     edge_detection = StageDefinition(
         name="detect_edges",
@@ -31,7 +49,6 @@ def register():
             writes=["edge_regions_by_frame"],
         ),
         config_fields=[
-            StageConfigField(name="enabled", type="bool", default=True, description="Enable region analysis"),
             StageConfigField(name="edge_canny_low", type="int", default=50, description="Canny low threshold", min=0, max=255),
             StageConfigField(name="edge_canny_high", type="int", default=150, description="Canny high threshold", min=0, max=255),
             StageConfigField(name="edge_hough_threshold", type="int", default=80, description="Hough accumulator threshold", min=1, max=500),
@@ -42,3 +59,25 @@ def register():
         ],
     )
     register_stage(edge_detection, serialize_fn=_ser_regions, deserialize_fn=_deser_regions)
+
+    field_seg = StageDefinition(
+        name="field_segmentation",
+        label="Field Segmentation",
+        description="HSV green mask — detect pitch boundaries",
+        category="cv_analysis",
+        io=StageIO(
+            reads=["filtered_frames"],
+            writes=["field_mask"],
+        ),
+        config_fields=[
+            StageConfigField(name="hue_low", type="int", default=30, description="HSV hue lower bound", min=0, max=180),
+            StageConfigField(name="hue_high", type="int", default=85, description="HSV hue upper bound", min=0, max=180),
+            StageConfigField(name="sat_low", type="int", default=30, description="HSV saturation lower bound", min=0, max=255),
+            StageConfigField(name="sat_high", type="int", default=255, description="HSV saturation upper bound", min=0, max=255),
+            StageConfigField(name="val_low", type="int", default=30, description="HSV value lower bound", min=0, max=255),
+            StageConfigField(name="val_high", type="int", default=255, description="HSV value upper bound", min=0, max=255),
+            StageConfigField(name="morph_kernel", type="int", default=15, description="Morphology kernel size", min=3, max=51),
+            StageConfigField(name="min_area_ratio", type="float", default=0.05, description="Min contour area", min=0.01, max=0.5),
+        ],
+    )
+    register_stage(field_seg, serialize_fn=_ser_field_seg, deserialize_fn=_deser_field_seg)
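A quick round trip through the new serializers (assuming they are imported from this module): frame-sequence keys are stringified for JSON on save and restored to ints on load, while mask overlays are serialized for display but not read back into pipeline state:

state = {
    "field_boundaries": {0: [[10, 20], [30, 40]]},
    "field_coverage": {0: 0.42},
    "field_mask_overlays": {0: "iVBOR..."},  # truncated base64 placeholder
}
data = _ser_field_seg(state, job_id="job-456")
assert data["field_boundaries"] == {"0": [[10, 20], [30, 40]]}   # str keys for JSON

restored = _deser_field_seg(data, job_id="job-456")
assert restored["field_coverage"] == {0: 0.42}                   # int keys again
assert "mask_overlays_by_frame" not in restored                  # display-only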

View File

@@ -60,7 +60,6 @@ def register():
         io=StageIO(reads=["frames"], writes=["filtered_frames"]),
         config_fields=[
             StageConfigField(name="hamming_threshold", type="int", default=8, description="Hamming distance threshold", min=0, max=64),
-            StageConfigField(name="enabled", type="bool", default=True, description="Enable scene filtering"),
         ],
     )
     register_stage(scene_filter, serialize_fn=_ser_filter, deserialize_fn=_deser_filter)

View File

@@ -35,6 +35,16 @@ class StageIO:
     optional_reads: List[str] = field(default_factory=list)

+@dataclass
+class StageOutputHint:
+    """How to render a stage output in the compare/editor views."""
+    key: str                      # key in the stage output JSON, e.g. "edge_regions_by_frame"
+    type: str                     # "boxes_by_frame" | "overlay"
+    label: str = ""
+    default_opacity: float = 0.5
+    src_format: str = "png"       # for overlays
+
 @dataclass
 class StageDefinition:
     """Complete metadata for a pipeline stage."""
@@ -44,6 +54,7 @@ class StageDefinition:
     category: str = "detection"
     io: StageIO = field(default_factory=StageIO)
     config_fields: List[StageConfigField] = field(default_factory=list)
+    output_hints: List[StageOutputHint] = field(default_factory=list)
     tracks_element: Optional[str] = None
@@ -139,6 +150,7 @@ class PipelineConfig:
 STAGE_VIEWS = [
     StageConfigField,
     StageIO,
+    StageOutputHint,
     StageDefinition,
     FrameExtractionConfig,
     SceneFilterConfig,

View File

@@ -2,8 +2,8 @@
 """
 Seed a scenario from a video chunk.

-Creates a Timeline (frames in MinIO) + Branch + Checkpoint marked
-as a scenario. No pipeline, no Redis, no SSE.
+Creates a Timeline + caches frames + Checkpoint marked as scenario.
+No pipeline, no Redis, no SSE.

 Prerequisites:
 - Postgres reachable (Kind NodePort or local)
@@ -14,7 +14,7 @@ Usage:
     python tests/detect/manual/seed_scenario.py --video media/mpr/out/chunks/.../chunk_0001.mp4

 Then open:
-    http://mpr.local.ar/detection/?job=<TIMELINE_ID>#/editor/detect_edges
+    http://mpr.local.ar/detection/?job=<JOB_ID>#/editor/detect_edges
 """
 from __future__ import annotations
@@ -110,36 +110,41 @@ def main():
     frames = extract_frames_ffmpeg(video_path, args.fps, args.max_frames)
     logger.info("Extracted %d frames", len(frames))

-    # Create timeline + branch + checkpoint
-    from core.detect.checkpoint.storage import create_timeline, save_stage_output
+    # Create timeline
+    from core.detect.checkpoint.storage import (
+        create_timeline, save_checkpoint, update_timeline_status,
+    )
+    from core.detect.checkpoint.frames import cache_frames

-    timeline_id, branch_id = create_timeline(
-        source_video=video_path,
+    timeline_id = create_timeline(
+        chunk_paths=[video_path],
         profile_name="soccer_broadcast",
-        frames=frames,
+        name=args.label,
         fps=args.fps,
     )

-    # Mark as scenario
-    from core.db import get_latest_checkpoint
-    from core.db.connection import get_session
-    with get_session() as session:
-        checkpoint = get_latest_checkpoint(session, branch_id)
-        if checkpoint:
-            checkpoint.is_scenario = True
-            checkpoint.scenario_label = args.label
-            session.commit()
+    # Cache frames on the timeline
+    cache_frames(timeline_id, frames)
+    update_timeline_status(timeline_id, "cached", frame_count=len(frames))
+
+    # Create scenario checkpoint
+    checkpoint_id = save_checkpoint(
+        timeline_id=timeline_id,
+        stage_name="extract_frames",
+        is_scenario=True,
+        scenario_label=args.label,
+        stats={"frames_extracted": len(frames)},
+    )

     logger.info("")
     logger.info("Scenario created:")
     logger.info("  Timeline:   %s", timeline_id)
-    logger.info("  Branch:     %s", branch_id)
+    logger.info("  Checkpoint: %s", checkpoint_id)
     logger.info("  Label:      %s", args.label)
     logger.info("  Frames:     %d", len(frames))
     logger.info("")
-    logger.info("Open in editor:")
-    logger.info("  http://mpr.local.ar/detection/?job=%s#/editor/detect_edges", timeline_id)
+    logger.info("Open in source selector to see the timeline:")
+    logger.info("  http://mpr.local.ar/detection/")

 if __name__ == "__main__":

View File

@@ -10,7 +10,9 @@ import BrandTablePanel from './panels/BrandTablePanel.vue'
 import TimelinePanel from './panels/TimelinePanel.vue'
 import CostStatsPanel from './panels/CostStatsPanel.vue'
 import SourceSelector from './panels/SourceSelector.vue'
+import CompareView from './panels/CompareView.vue'
 import StageConfig from './components/StageConfig.vue'
+import OverlayControls from './components/OverlayControls.vue'
 import FrameStrip from './components/FrameStrip.vue'
 import { usePipelineStore } from './stores/pipeline'
 import { useSSEConnection } from './composables/useSSEConnection'
@@ -41,9 +43,14 @@ const {
 // Editor overlays + CV result accumulation
 const {
   editorOverlays, editorBoxes,
-  updateDisplayForFrame, onReplayResult, setActiveStage,
+  updateDisplayForFrame, onReplayResult: _onReplayResult, setActiveStage,
+  saveOverlaysToCache,
 } = useEditorState(currentFrameRef)

+function onReplayResult(result: any) {
+  _onReplayResult(result)
+}
+
 // Set active stage when editor opens
 watch(() => pipeline.editorStage, (stage) => {
   if (stage) setActiveStage(stage)
@@ -56,6 +63,13 @@ function setCheckpointFrame(index: number) {
   if (frame) updateDisplayForFrame(frame.seq)
 }

+// Save overlays to S3 cache then close editor
+function closeEditorWithSave() {
+  saveOverlaysToCache(pipeline.timelineId, pipeline.jobId)
+  // Small delay to let the save requests fire before navigation
+  setTimeout(() => pipeline.closeEditor(), 100)
+}
+
 // Wire job start to clear log panel
 function onJobStarted(newJobId: string, opts?: { pauseAfterStage?: boolean }) {
   logPanel.value?.clear()
@@ -74,6 +88,11 @@ function onJobStarted(newJobId: string, opts?: { pauseAfterStage?: boolean }) {
       <path d="M2 4h4l2 2h6v8H2V4z"/><path d="M2 4V2h12v2"/>
     </svg>
   </button>
+  <button class="header-btn" title="Compare jobs" @click="pipeline.openCompare()">
+    <svg width="14" height="14" viewBox="0 0 16 16" fill="none" stroke="currentColor" stroke-width="1.5">
+      <rect x="1" y="3" width="6" height="10" rx="1"/><rect x="9" y="3" width="6" height="10" rx="1"/><path d="M8 5v6"/>
+    </svg>
+  </button>

   <!-- Transport controls visible when a pipeline is running -->
   <div v-if="sseConnected && (status === 'live' || status === 'processing' || paused)" class="transport">
@@ -210,21 +229,8 @@ function onJobStarted(newJobId: string, opts?: { pauseAfterStage?: boolean }) {
   />
   <div class="editor-bottom">
-    <div class="overlay-controls">
-      <label v-for="(overlay, idx) in editorOverlays" :key="idx" class="overlay-toggle">
-        <input type="checkbox" v-model="overlay.visible" />
-        <span class="overlay-label">{{ overlay.label }}</span>
-        <input
-          type="range"
-          min="0" max="1" step="0.05"
-          :value="overlay.opacity ?? 0.5"
-          @input="(e: Event) => overlay.opacity = Number((e.target as HTMLInputElement).value)"
-          class="opacity-slider"
-        />
-        <span class="opacity-value">{{ Math.round((overlay.opacity ?? 0.5) * 100) }}%</span>
-      </label>
-    </div>
-    <button class="editor-close" @click="pipeline.closeEditor()">✕ Close</button>
+    <OverlayControls :overlays="editorOverlays" />
+    <button class="editor-close" @click="closeEditorWithSave()">✕ Close</button>
   </div>
 </div>
@@ -242,6 +248,9 @@ function onJobStarted(newJobId: string, opts?: { pauseAfterStage?: boolean }) {
   <!-- === SOURCE SELECTOR MODE === -->
   <SourceSelector v-else-if="pipeline.layoutMode === 'source_selector'" @job-started="(id: string, opts: any) => onJobStarted(id, opts)" />

+  <!-- === COMPARE MODE === -->
+  <CompareView v-else-if="pipeline.layoutMode === 'compare'" />
+
 </template>
 </SplitPane>
@@ -457,41 +466,6 @@ header h1 { font-size: var(--font-size-lg); font-weight: 600; }
   font-size: var(--font-size-sm);
 }

-.overlay-controls {
-  display: flex;
-  gap: var(--space-4);
-  padding: var(--space-2) var(--space-3);
-  align-items: center;
-  height: 100%;
-}
-.overlay-toggle {
-  display: flex;
-  align-items: center;
-  gap: var(--space-2);
-  font-size: var(--font-size-sm);
-  color: var(--text-secondary);
-  cursor: pointer;
-}
-.overlay-toggle input[type="checkbox"] {
-  accent-color: #00bcd4;
-}
-.overlay-label {
-  min-width: 80px;
-}
-.opacity-slider {
-  width: 80px;
-  height: 3px;
-}
-.opacity-value {
-  font-size: 10px;
-  color: var(--text-dim);
-  min-width: 30px;
-}
-
 /* Source selector */
 .source-selector {

View File

@@ -0,0 +1,62 @@
+<script setup lang="ts">
+import type { FrameOverlay } from 'mpr-ui-framework'
+
+defineProps<{
+  overlays: FrameOverlay[]
+}>()
+</script>
+
+<template>
+  <div class="overlay-controls">
+    <label v-for="(overlay, idx) in overlays" :key="idx" class="overlay-toggle">
+      <input type="checkbox" v-model="overlay.visible" />
+      <span class="overlay-label">{{ overlay.label }}</span>
+      <input
+        type="range"
+        min="0" max="1" step="0.05"
+        :value="overlay.opacity ?? 0.5"
+        @input="(e: Event) => overlay.opacity = Number((e.target as HTMLInputElement).value)"
+        class="opacity-slider"
+      />
+      <span class="opacity-value">{{ Math.round((overlay.opacity ?? 0.5) * 100) }}%</span>
+    </label>
+  </div>
+</template>
+
+<style scoped>
+.overlay-controls {
+  display: flex;
+  gap: var(--space-4);
+  padding: var(--space-2) var(--space-3);
+  align-items: center;
+  height: 100%;
+}
+.overlay-toggle {
+  display: flex;
+  align-items: center;
+  gap: var(--space-2);
+  font-size: var(--font-size-sm);
+  color: var(--text-secondary);
+  cursor: pointer;
+}
+.overlay-toggle input[type="checkbox"] {
+  accent-color: #00bcd4;
+}
+.overlay-label {
+  min-width: 50px;
+}
+.opacity-slider {
+  width: 80px;
+  height: 3px;
+}
+.opacity-value {
+  font-size: 10px;
+  color: var(--text-dim);
+  min-width: 30px;
+}
+</style>

View File

@@ -71,10 +71,11 @@ export function useCheckpointLoader(
   const { stages, checkpointStageFor } = useStageRegistry()

   // Auto-load checkpoint when entering editor mode.
+  // Also watches timelineId — it resolves async on page load.
   watch(
-    () => [pipeline.layoutMode, pipeline.editorStage, jobId.value, stages.value.length] as const,
+    () => [pipeline.layoutMode, pipeline.editorStage, jobId.value, stages.value.length, pipeline.timelineId] as const,
     ([mode, stage, job]) => {
-      if (mode === 'bbox_editor' && stage && job) {
+      if (mode === 'bbox_editor' && stage && job && pipeline.timelineId) {
         const cpStage = checkpointStageFor(stage)
         if (cpStage) {
           loadCheckpoint(job, cpStage)

View File

@@ -69,7 +69,7 @@ export function useEditorState(currentFrameRef: Ref<number | null>) {
     }
   }

-  function onReplayResult(result: StageResult) {
+  function onReplayResult(result: StageResult, jobId?: string) {
     if (result.frameWidth && result.frameHeight) {
       frameDimensions.value = { w: result.frameWidth, h: result.frameHeight }
     }
@@ -115,6 +115,28 @@ export function useEditorState(currentFrameRef: Ref<number | null>) {
     }))
   }

+  async function saveOverlaysToCache(timelineId: string, jobId: string) {
+    const stage = activeStage.value
+    if (!stage) return
+    const entries = Object.entries(allFrameOverlays.value)
+    if (entries.length === 0) return
+    for (const [seqStr, overlayMap] of entries) {
+      fetch('/api/detect/overlays', {
+        method: 'POST',
+        headers: { 'Content-Type': 'application/json' },
+        body: JSON.stringify({
+          timeline_id: timelineId,
+          job_id: jobId,
+          stage,
+          seq: Number(seqStr),
+          overlays: overlayMap,
+        }),
+      }).catch(() => {})
+    }
+  }
+
   function resetEditorState() {
     activeStage.value = null
     allFrameRegions.value = {}
@@ -136,6 +158,7 @@ export function useEditorState(currentFrameRef: Ref<number | null>) {
     updateDisplayForFrame,
     onReplayResult,
     setActiveStage,
+    saveOverlaysToCache,
     resetEditorState,
   }
 }

View File

@@ -32,6 +32,9 @@ export function useHashRouter() {
     if (configMatch) {
       return { mode: 'stage_editor', stage: configMatch[1] }
     }
+    if (path === 'compare') {
+      return { mode: 'compare', stage: null as string | null }
+    }
     return { mode: 'normal', stage: null as string | null }
   }
@@ -49,6 +52,8 @@ export function useHashRouter() {
       hash = `#/config/${pipeline.editorStage}`
     } else if (pipeline.layoutMode === 'source_selector') {
       hash = '#/source'
+    } else if (pipeline.layoutMode === 'compare') {
+      hash = '#/compare'
     }
     if (window.location.hash !== hash) {
       window.history.pushState(null, '', hash)

View File

@@ -28,6 +28,19 @@ export function useSSEConnection() {
     pipeline.openSourceSelector()
   }

+  // Resolve timeline_id from job on init
+  if (jobParam) {
+    pipeline.setJob(jobParam)
+    fetch(`/api/detect/jobs/${jobParam}`)
+      .then(r => r.ok ? r.json() : null)
+      .then(data => {
+        if (data?.timeline_id) {
+          pipeline.setTimelineId(data.timeline_id)
+        }
+      })
+      .catch(() => {})
+  }
+
   const source = new SSEDataSource({
     id: 'detect-stream',
     url: jobId.value ? `/api/detect/stream/${jobId.value}` : '',
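The init fetch above assumes the jobs endpoint exposes a timeline_id field. A hedged spot check of that contract from Python (host and job ID are placeholders):

import httpx

resp = httpx.get("http://localhost:8000/api/detect/jobs/0b1e6c2a-0000-0000-0000-000000000000")
if resp.status_code == 200:
    # May be null for jobs without a timeline — the store simply skips setTimelineId then.
    print("timeline:", resp.json().get("timeline_id"))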

View File

@@ -11,12 +11,21 @@ export interface StageConfigField {
   options: string[] | null
 }

+export interface StageOutputHint {
+  key: string
+  type: string // "boxes_by_frame" | "overlay"
+  label: string
+  default_opacity: number
+  src_format: string
+}
+
 export interface StageInfo {
   name: string
   label: string
   description: string
   category: string
   config_fields: StageConfigField[]
+  output_hints: StageOutputHint[]
   reads: string[]
   writes: string[]
 }

View File

@@ -0,0 +1,573 @@
<script setup lang="ts">
import { ref, computed, watch, onMounted } from 'vue'
import { Panel, SplitPane, FrameRenderer } from 'mpr-ui-framework'
import type { FrameBBox, FrameOverlay } from 'mpr-ui-framework'
import type { Job, Checkpoint } from '@common/types/generated'
import { usePipelineStore } from '../stores/pipeline'
import { useStageRegistry } from '../composables/useStageRegistry'
import type { StageOutputHint } from '../composables/useStageRegistry'
import FrameStrip from '../components/FrameStrip.vue'
import OverlayControls from '../components/OverlayControls.vue'
import { getOverlays } from '../composables/useOverlayCache'
const pipeline = usePipelineStore()
const { editableStages, getStage } = useStageRegistry()
function isEditable(stage: string): boolean {
return editableStages.value.includes(stage)
}
function openEditor(jobId: string, stage: string) {
// Navigate to editor for this job + stage
const url = new URL(window.location.href)
url.searchParams.set('job', jobId)
url.hash = `#/editor/${stage}`
window.location.href = url.toString()
}
interface JobDetail {
id: string
timeline_id: string | null
profile_name: string
status: string
config_overrides: Record<string, unknown>
checkpoints: { stage_name: string; stats: Record<string, unknown> }[]
stage_outputs: Record<string, Record<string, unknown>>
}
interface CheckpointFrame {
seq: number
timestamp: number
jpeg_b64: string
}
// Two sides
const jobA = ref<JobDetail | null>(null)
const jobB = ref<JobDetail | null>(null)
// Available jobs for this timeline
const jobs = ref<Job[]>([])
// Shared frames from timeline cache
const frames = ref<CheckpointFrame[]>([])
const frameIndex = ref(0)
// Only show stages that have visual editors
const stages = computed(() => {
if (!jobA.value) return []
const editable = new Set(editableStages.value)
return jobA.value.checkpoints
.filter(c => c.stage_name && editable.has(c.stage_name))
.map(c => c.stage_name)
})
// Persist selections in localStorage
const STORAGE_KEY = 'mpr:compare'
function loadSaved(): { jobA?: string; jobB?: string; stage?: string } {
try {
const raw = localStorage.getItem(STORAGE_KEY)
return raw ? JSON.parse(raw) : {}
} catch { return {} }
}
function saveSelection() {
const data = {
jobA: jobA.value?.id ?? '',
jobB: jobB.value?.id ?? '',
stage: selectedStage.value,
}
localStorage.setItem(STORAGE_KEY, JSON.stringify(data))
}
const saved = loadSaved()
const selectedStage = ref(saved.stage ?? '')
// Current frame image
const currentFrame = computed(() => frames.value[frameIndex.value] ?? null)
const currentImage = computed(() => currentFrame.value?.jpeg_b64 ?? '')
const currentSeq = computed(() => currentFrame.value?.seq ?? 0)
// Generic box extraction from stage outputs using output_hints
function boxesForJob(job: JobDetail | null): FrameBBox[] {
if (!job || !selectedStage.value) return []
const output = job.stage_outputs[selectedStage.value]
if (!output) return []
const stage = getStage(selectedStage.value)
if (!stage) return []
const boxHints = stage.output_hints.filter(h => h.type === 'boxes_by_frame')
const allBoxes: FrameBBox[] = []
for (const hint of boxHints) {
const regions = output[hint.key] as Record<string, unknown[]> | undefined
if (!regions) continue
const seq = String(currentSeq.value)
const boxes = regions[seq] as { x: number; y: number; w: number; h: number; confidence: number; label: string }[] | undefined
if (boxes) {
for (const b of boxes) {
allBoxes.push({
x: b.x, y: b.y, w: b.w, h: b.h,
confidence: b.confidence,
label: b.label || '',
})
}
}
}
return allBoxes
}
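// A boxes_by_frame output is assumed to map frame seq to a box list, e.g.
// { "42": [{ x: 0.1, y: 0.2, w: 0.3, h: 0.1, confidence: 0.9, label: 'logo' }] }
// (illustrative values; coordinate units depend on the producing stage).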
const boxesA = computed(() => boxesForJob(jobA.value))
const boxesB = computed(() => boxesForJob(jobB.value))
// Debug overlays from S3 cache — driven by stage registry output_hints
const overlaysA = ref<FrameOverlay[]>([])
const overlaysB = ref<FrameOverlay[]>([])
function overlayHintsForStage(): StageOutputHint[] {
const stage = getStage(selectedStage.value)
if (!stage) return []
return stage.output_hints.filter(h => h.type === 'overlay')
}
async function loadOverlaysForFrame(
job: JobDetail | null,
seq: number,
target: typeof overlaysA,
) {
if (!job || !selectedStage.value || !pipeline.timelineId) {
target.value = []
return
}
const hints = overlayHintsForStage()
if (hints.length === 0) {
target.value = []
return
}
// First check stage outputs for inline overlay data (e.g. mask_overlays_by_frame)
const stageOutput = job.stage_outputs[selectedStage.value] ?? {}
const prev = new Map(target.value.map(o => [o.label, o]))
// Try inline overlays from stage output (keyed by frame seq)
let foundInline = false
const result: FrameOverlay[] = []
for (const h of hints) {
const existing = prev.get(h.label)
// Convention: overlay data in the stage output is keyed by frame seq under a
// key derived from the hint key with a _by_frame suffix.
// Try common patterns: mask_overlays_by_frame, etc.
const possibleKeys = [
`${h.key.replace('_b64', '')}_by_frame`,
`${h.key.replace('_b64', 's')}_by_frame`,
`mask_overlays_by_frame`,
]
let src = ''
for (const k of possibleKeys) {
const byFrame = stageOutput[k] as Record<string, string> | undefined
if (byFrame && byFrame[String(seq)]) {
src = byFrame[String(seq)]
foundInline = true
break
}
}
result.push({
src,
label: h.label,
visible: existing?.visible ?? true,
opacity: existing?.opacity ?? h.default_opacity,
srcFormat: h.src_format as 'jpeg' | 'png',
})
}
if (foundInline) {
target.value = result
return
}
// Fall back to S3 overlay cache
try {
const resp = await fetch(
`/api/detect/overlays/${pipeline.timelineId}/${job.id}/${selectedStage.value}/${seq}`
)
const data = resp.ok ? await resp.json() : {}
const cached = data.overlays ?? {}
target.value = hints.map(h => {
const existing = prev.get(h.label)
return {
src: cached[h.key] ?? '',
label: h.label,
visible: existing?.visible ?? true,
opacity: existing?.opacity ?? h.default_opacity,
srcFormat: h.src_format as 'jpeg' | 'png',
}
})
} catch {
target.value = result // use whatever we have (possibly empty)
}
}
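// The overlay cache endpoint is assumed to return { overlays: { [hintKey]: base64 } };
// missing keys simply render as empty overlays with visibility state preserved.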
// Reload overlays when frame or stage changes
function refreshOverlays() {
loadOverlaysForFrame(jobA.value, currentSeq.value, overlaysA)
loadOverlaysForFrame(jobB.value, currentSeq.value, overlaysB)
}
watch([currentSeq, selectedStage], refreshOverlays)
// Sync overlay visibility/opacity from A controls → B overlays
watch(overlaysA, (aList) => {
for (const a of aList) {
const b = overlaysB.value.find(o => o.label === a.label)
if (b) {
b.visible = a.visible
b.opacity = a.opacity
}
}
}, { deep: true })
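// Design note: OverlayControls below binds only overlaysA, so side A is the
// single source of truth; B's entries are mutated in place and picked up by
// the deep watcher.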
// Resolve timeline from a saved job if store doesn't have it
async function ensureTimelineId(): Promise<string> {
if (pipeline.timelineId) return pipeline.timelineId
// Try to resolve from saved job selection
const savedJobId = saved.jobA || saved.jobB
if (savedJobId) {
try {
const resp = await fetch(`/api/detect/jobs/${savedJobId}`)
if (resp.ok) {
const data = await resp.json()
if (data.timeline_id) {
pipeline.setTimelineId(data.timeline_id)
return data.timeline_id
}
}
} catch { /* ignore */ }
}
return ''
}
// Load jobs — prefer current timeline, fall back to all jobs
async function loadJobs() {
const tid = await ensureTimelineId()
const url = tid
? `/api/detect/jobs?timeline_id=${tid}`
: '/api/detect/jobs'
try {
const resp = await fetch(url)
if (!resp.ok) return
const result = await resp.json()
// If fewer than two jobs exist on this timeline, fall back to all jobs so there is something to compare
if (tid && result.length < 2) {
const allResp = await fetch('/api/detect/jobs')
if (allResp.ok) {
jobs.value = await allResp.json()
return
}
}
jobs.value = result
} catch { /* ignore */ }
}
// Load a job's detail
async function loadJobDetail(jobId: string): Promise<JobDetail | null> {
try {
const resp = await fetch(`/api/detect/jobs/${jobId}`)
if (!resp.ok) return null
return await resp.json()
} catch {
return null
}
}
// Load frames from timeline cache via checkpoint endpoint
async function loadFrames(stage: string) {
const tid = pipeline.timelineId
if (!tid) return
try {
const resp = await fetch(`/api/detect/checkpoints/${tid}/${stage}`)
if (!resp.ok) return
const data = await resp.json()
frames.value = data.frames ?? []
frameIndex.value = 0
} catch { /* ignore */ }
}
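// Expected response shape (inferred from CheckpointFrame above):
// { frames: [{ seq: 0, timestamp: 0.0, jpeg_b64: '<base64 jpeg>' }] }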
async function selectJobA(job: Job) {
jobA.value = await loadJobDetail(job.id)
if (jobA.value && stages.value.length > 0 && !selectedStage.value) {
selectedStage.value = stages.value[0]
}
// Load frames if stage is already selected (e.g. restored from localStorage)
if (selectedStage.value && frames.value.length === 0) {
await loadFrames(selectedStage.value)
}
refreshOverlays()
saveSelection()
}
async function selectJobB(job: Job) {
jobB.value = await loadJobDetail(job.id)
refreshOverlays()
saveSelection()
}
function jobLabel(j: Job): string {
const id = j.id.slice(0, 8)
const date = j.created_at ? new Date(j.created_at).toLocaleTimeString(undefined, { hour: '2-digit', minute: '2-digit' }) : ''
const overrides = Object.keys(j.config_overrides || {}).length
const extra = overrides ? ` +${overrides} overrides` : ''
return `${id} ${j.run_type} ${j.status} ${date}${extra}`
}
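// Renders labels like "a1b2c3d4 replay completed 13:05 +2 overrides"
// (run_type and status values illustrative).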
function setFrame(index: number) {
if (index >= 0 && index < frames.value.length) {
frameIndex.value = index
}
}
// Load frames when stage selection changes
watch(selectedStage, (stage) => {
if (stage) {
loadFrames(stage)
saveSelection()
}
})
// Auto-select the two most recent jobs (list is newest-first)
watch(jobs, (list) => {
if (list.length >= 2) {
if (!jobA.value) selectJobA(list[0])
if (!jobB.value) selectJobB(list[1])
} else if (list.length === 1 && !jobA.value) {
selectJobA(list[0])
}
}, { immediate: true })
// Load on mount, and also when timelineId becomes available (async from SSE init)
onMounted(loadJobs)
watch(() => pipeline.timelineId, (tid) => {
if (tid && jobs.value.length === 0) loadJobs()
})
</script>
<template>
<div class="compare-view">
<!-- Header: job selectors + stage selector -->
<div class="compare-header">
<div class="job-selector">
<label>A</label>
<select @change="(e: Event) => { const id = (e.target as HTMLSelectElement).value; const j = jobs.find(j => j.id === id); if (j) selectJobA(j) }">
<option value="">select job...</option>
<option v-for="j in jobs" :key="j.id" :value="j.id" :selected="jobA?.id === j.id">
{{ jobLabel(j) }}
</option>
</select>
</div>
<div class="stage-selector">
<select v-model="selectedStage">
<option v-for="s in stages" :key="s" :value="s">{{ s.replace(/_/g, ' ') }}</option>
</select>
</div>
<div class="job-selector">
<label>B</label>
<select @change="(e: Event) => { const id = (e.target as HTMLSelectElement).value; const j = jobs.find(j => j.id === id); if (j) selectJobB(j) }">
<option value="">select job...</option>
<option v-for="j in jobs" :key="j.id" :value="j.id" :selected="jobB?.id === j.id">
{{ jobLabel(j) }}
</option>
</select>
</div>
<button class="close-btn" @click="pipeline.closeEditor()">Close</button>
</div>
<!-- Two frame panels side by side -->
<div class="compare-frames">
<SplitPane direction="horizontal" :initial-size="1" size-mode="ratio" :min="0.3" :max="3">
<template #first>
<Panel :title="jobA ? `A: ${jobA.id.slice(0, 8)}` : 'A'" status="idle">
<div class="compare-frame-content">
<FrameRenderer :image-src="currentImage" :boxes="boxesA" :overlays="overlaysA" />
<button
v-if="jobA && selectedStage && isEditable(selectedStage)"
class="edit-btn"
title="Edit this stage"
@click="openEditor(jobA.id, selectedStage)"
>
<svg width="14" height="14" viewBox="0 0 12 12" fill="none" stroke="currentColor" stroke-width="1.5">
<circle cx="5" cy="5" r="3.5"/><line x1="7.5" y1="7.5" x2="11" y2="11"/>
</svg>
</button>
</div>
</Panel>
</template>
<template #second>
<Panel :title="jobB ? `B: ${jobB.id.slice(0, 8)}` : 'B'" status="idle">
<div class="compare-frame-content">
<FrameRenderer :image-src="currentImage" :boxes="boxesB" :overlays="overlaysB" />
<button
v-if="jobB && selectedStage && isEditable(selectedStage)"
class="edit-btn"
title="Edit this stage"
@click="openEditor(jobB.id, selectedStage)"
>
<svg width="14" height="14" viewBox="0 0 12 12" fill="none" stroke="currentColor" stroke-width="1.5">
<circle cx="5" cy="5" r="3.5"/><line x1="7.5" y1="7.5" x2="11" y2="11"/>
</svg>
</button>
</div>
</Panel>
</template>
</SplitPane>
</div>
<!-- Shared frame strip -->
<FrameStrip
v-if="frames.length > 0"
:frames="frames"
:current-index="frameIndex"
:selection-start="0"
:selection-end="frames.length - 1"
@frame-click="setFrame"
/>
<!-- Overlay controls + stats -->
<div class="compare-bottom">
<OverlayControls :overlays="overlaysA" />
<div class="compare-stats" v-if="selectedStage">
<span class="stat-side">A: <strong>{{ boxesA.length }}</strong></span>
<span class="stat-side">B: <strong>{{ boxesB.length }}</strong></span>
</div>
</div>
</div>
</template>
<style scoped>
.compare-view {
display: flex;
flex-direction: column;
height: 100%;
min-height: 0;
overflow: hidden;
}
.compare-header {
display: flex;
align-items: center;
gap: var(--space-3);
padding: var(--space-2) var(--space-3);
background: var(--surface-2);
border-bottom: var(--panel-border);
flex-shrink: 0;
}
.job-selector {
display: flex;
align-items: center;
gap: var(--space-2);
}
.job-selector label {
font-weight: 700;
font-size: var(--font-size-sm);
color: var(--text-secondary);
min-width: 14px;
}
.job-selector select,
.stage-selector select {
background: var(--surface-1);
border: 1px solid var(--surface-3);
border-radius: 3px;
color: var(--text-secondary);
font-family: var(--font-mono);
font-size: var(--font-size-sm);
padding: 3px 6px;
}
.stage-selector {
margin: 0 auto;
}
.close-btn {
background: var(--surface-3);
border: 1px solid var(--surface-3);
border-radius: 4px;
padding: 3px 10px;
color: var(--text-secondary);
font-family: var(--font-mono);
font-size: var(--font-size-sm);
cursor: pointer;
margin-left: auto;
}
.close-btn:hover {
background: var(--status-error);
color: #000;
}
.compare-frames {
flex: 1;
min-height: 0;
overflow: hidden;
}
.compare-frame-content {
position: relative;
height: 100%;
min-height: 0;
}
.compare-bottom {
display: flex;
align-items: center;
gap: var(--space-4);
padding: var(--space-2) var(--space-3);
background: var(--surface-2);
border-top: var(--panel-border);
flex-shrink: 0;
height: 36px;
}
.compare-stats {
display: flex;
gap: var(--space-4);
font-size: var(--font-size-sm);
color: var(--text-dim);
margin-left: auto;
}
.edit-btn {
position: absolute;
top: 8px;
right: 8px;
background: rgba(0, 0, 0, 0.5);
border: 1px solid var(--surface-3);
border-radius: 4px;
width: 28px;
height: 28px;
cursor: pointer;
display: flex;
align-items: center;
justify-content: center;
color: var(--text-secondary);
transition: all 0.15s;
z-index: 1;
}
.edit-btn:hover {
background: rgba(0, 0, 0, 0.7);
color: var(--text-primary);
}
</style>

View File

@@ -210,6 +210,36 @@ async function runPipeline() {
}
}
async function runOnTimeline(timelineId: string) {
running.value = true
error.value = null
try {
const resp = await fetch('/api/detect/run', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
timeline_id: timelineId,
profile_name: selectedProfile.value,
checkpoint: checkpoint.value,
skip_vlm: skipVlm.value,
skip_cloud: skipCloud.value,
log_level: logLevel.value,
pause_after_stage: pauseAfterStage.value,
}),
})
if (!resp.ok) {
const detail = await resp.text()
throw new Error(`${resp.status}: ${detail}`)
}
const data = await resp.json()
pipeline.setTimelineId(timelineId)
emit('job-started', data.job_id, { pauseAfterStage: pauseAfterStage.value })
} catch (e: any) {
error.value = `Failed to start pipeline: ${e.message}`
running.value = false
}
}
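// Emits job-started with the new job id; pauseAfterStage is forwarded so the
// parent can arm the checkpoint pause UI (handler contract assumed from usage).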
function formatSize(bytes: number): string {
if (bytes < 1024) return `${bytes}B`
if (bytes < 1024 * 1024) return `${(bytes / 1024).toFixed(0)}KB`
@@ -320,7 +350,8 @@ onMounted(loadSources)
:class="['source-item', { selected: selectedTimeline === tl.id }]"
@click="selectTimeline(tl)"
>
<span class="source-id">{{ tl.id.slice(0, 8) }}</span>
<span class="timeline-name" v-if="tl.name">{{ tl.name }}</span>
<span class="source-meta">
<span class="source-type-badge">{{ tl.status.toUpperCase() }}</span>
<span v-if="tl.frame_count" class="source-count">{{ tl.frame_count }} frames</span>
@@ -344,6 +375,14 @@ onMounted(loadSources)
</div>
</div>
<div v-else-if="selectedTimeline" class="source-empty">No jobs yet</div>
<button
v-if="selectedTimeline"
class="run-again-btn"
@click="runOnTimeline(selectedTimeline)"
:disabled="running"
>
{{ running ? 'Starting...' : 'Run again on this timeline' }}
</button>
</div>
<div class="source-actions">
@@ -441,7 +480,8 @@ onMounted(loadSources)
font-weight: 600;
}
.source-id { font-family: var(--font-mono); color: var(--text-dim); font-size: 10px; }
.timeline-name { font-size: var(--font-size-sm); }
.source-meta {
display: flex;
@@ -535,6 +575,20 @@ onMounted(loadSources)
.run-btn:hover { opacity: 0.9; }
.run-btn:disabled { opacity: 0.5; cursor: not-allowed; }
.run-again-btn {
background: var(--surface-3);
color: var(--text-secondary);
border: 1px solid var(--surface-3);
border-radius: 4px;
padding: var(--space-1) var(--space-3);
font-family: var(--font-mono);
font-size: var(--font-size-sm);
cursor: pointer;
margin-top: var(--space-1);
}
.run-again-btn:hover { background: var(--status-live); color: #000; }
.run-again-btn:disabled { opacity: 0.5; cursor: not-allowed; }
.source-actions {
flex-shrink: 0;
display: flex;

View File

@@ -80,10 +80,19 @@ export const usePipelineStore = defineStore('pipeline', () => {
editorStage.value = stage
}
function openCompare() {
layoutMode.value = 'compare'
editorStage.value = null
}
function closeEditor() {
// Reload to reinitialize panels with current job context
const url = new URL(window.location.href)
url.hash = ''
if (jobId.value) {
url.searchParams.set('job', jobId.value)
}
window.location.href = url.toString()
}
function reset() {
@@ -105,6 +114,6 @@ export const usePipelineStore = defineStore('pipeline', () => {
checkpoints, error, layoutMode, editorStage, sourceHasSelection,
isRunning, isPaused, canReplay, isEditing,
setJob, setTimelineId, setStatus, updateNodes, setRunContext, setCheckpoints, setError,
openSourceSelector, openBBoxEditor, openStageEditor, openCompare, closeEditor, reset,
}
})