@@ -41,34 +41,24 @@ class ScenarioInfo(BaseModel):


class ReplayRequest(BaseModel):
    timeline_id: str
    job_id: str
    start_stage: str
    config_overrides: dict | None = None


class ReplayResponse(BaseModel):
    status: str
    timeline_id: str
    job_id: str
    replay_job_id: str
    start_stage: str
    detections: int = 0
    brands_found: int = 0


class RetryRequest(BaseModel):
    timeline_id: str
    config_overrides: dict | None = None
    start_stage: str = "escalate_vlm"
    schedule_seconds: float | None = None  # delay before execution (off-peak)


class RetryResponse(BaseModel):
    status: str
    task_id: str
    timeline_id: str


class ReplaySingleStageRequest(BaseModel):
    timeline_id: str
    job_id: str
    stage: str
    frame_refs: list[int] | None = None
    config_overrides: dict | None = None
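# Illustrative examples (not part of the diff): request bodies for POST /replay and
# POST /replay-stage matching ReplayRequest and ReplaySingleStageRequest above.
# All ids, the stage names, frame_refs values, and the override keys are hypothetical.
example_replay_request = {
    "timeline_id": "tl-0001",
    "job_id": "job-0001",
    "start_stage": "detect_edges",
    "config_overrides": {"edge_canny_low": 40},
}

example_replay_stage_request = {
    "timeline_id": "tl-0001",
    "job_id": "job-0001",
    "stage": "detect_edges",
    "frame_refs": [0, 12, 24],
    "config_overrides": None,
}
# A successful /replay call returns a ReplayResponse-shaped body (status="completed",
# the new replay_job_id, and the detections / brands_found counts), as built further down.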
@@ -103,16 +93,24 @@ class ReplaySingleStageResponse(BaseModel):
# --- Endpoints ---

@router.get("/checkpoints/{timeline_id}")
-def list_checkpoints(timeline_id: str) -> list[CheckpointInfo]:
-    """List available checkpoint stages for a job."""
-    from core.detect.checkpoint import list_checkpoints as _list
+def list_checkpoints_endpoint(timeline_id: str) -> list[CheckpointInfo]:
+    """List available checkpoint stages for a timeline."""
+    from core.detect.checkpoint.storage import get_checkpoints_for_timeline

    try:
-        stages = _list(timeline_id)
+        checkpoints = get_checkpoints_for_timeline(timeline_id)
    except Exception as e:
-        raise HTTPException(status_code=404, detail=f"No checkpoints for job {timeline_id}: {e}")
+        raise HTTPException(status_code=404, detail=f"No checkpoints for timeline {timeline_id}: {e}")

-    result = [CheckpointInfo(stage=s) for s in stages]
+    result = [
+        CheckpointInfo(
+            stage=c["stage_name"],
+            is_scenario=c.get("is_scenario", False),
+            scenario_label=c.get("scenario_label", ""),
+        )
+        for c in checkpoints
+        if c["stage_name"]
+    ]
    return result


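# Illustrative example (not part of the diff): the shape a client sees from
# GET /checkpoints/{timeline_id} after this change, one entry per CheckpointInfo.
# The stage names and scenario label below are hypothetical values.
example_checkpoints_response = [
    {"stage": "detect_edges", "is_scenario": False, "scenario_label": ""},
    {"stage": "escalate_vlm", "is_scenario": True, "scenario_label": "example-scenario"},
]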
@@ -211,11 +209,11 @@ def list_scenarios_endpoint():
@router.post("/replay", response_model=ReplayResponse)
def replay(req: ReplayRequest):
    """Replay pipeline from a specific stage with optional config overrides."""
-    from core.detect.checkpoint import replay_from
+    from core.detect.checkpoint.replay import replay_from

    try:
        result = replay_from(
            timeline_id=req.timeline_id,
            job_id=req.job_id,
            start_stage=req.start_stage,
            config_overrides=req.config_overrides,
        )
@@ -230,7 +228,8 @@ def replay(req: ReplayRequest):

    response = ReplayResponse(
        status="completed",
        timeline_id=req.timeline_id,
        job_id=req.job_id,
        replay_job_id=result.get("job_id", ""),
        start_stage=req.start_stage,
        detections=len(detections),
        brands_found=brands_found,
@@ -238,29 +237,6 @@ def replay(req: ReplayRequest):
    return response


@router.post("/retry", response_model=RetryResponse)
def retry(req: RetryRequest):
    """Queue an async retry of unresolved candidates with different config."""
    from core.detect.checkpoint.tasks import retry_candidates

    kwargs = {
        "timeline_id": req.timeline_id,
        "config_overrides": req.config_overrides,
        "start_stage": req.start_stage,
    }

    if req.schedule_seconds:
        task = retry_candidates.apply_async(kwargs=kwargs, countdown=req.schedule_seconds)
    else:
        task = retry_candidates.delay(**kwargs)

    response = RetryResponse(
        status="queued",
        task_id=task.id,
        timeline_id=req.timeline_id,
    )
    return response
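# Editorial note (not part of the diff): .delay(**kwargs) is Celery shorthand for
# .apply_async(kwargs=kwargs); passing countdown delays execution by that many
# seconds, which is what implements the off-peak scheduling above. For example:
#     retry_candidates.apply_async(kwargs=kwargs, countdown=3600)  # run in ~1 hour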


@router.post("/replay-stage", response_model=ReplaySingleStageResponse)
def replay_single_stage(req: ReplaySingleStageRequest):
@@ -269,7 +245,7 @@ def replay_single_stage(req: ReplaySingleStageRequest):

    try:
        result = _replay(
            timeline_id=req.timeline_id,
            job_id=req.job_id,
            stage=req.stage,
            frame_refs=req.frame_refs,
            config_overrides=req.config_overrides,
@@ -324,6 +300,151 @@ def _gpu_url() -> str:
    return url.rstrip("/")


# --- Overlay cache — save/load debug overlay images ---


class SaveOverlaysRequest(BaseModel):
    timeline_id: str
    job_id: str
    stage: str
    seq: int
    overlays: dict[str, str]  # {overlay_key: base64_png}


@router.post("/overlays")
def save_overlays_endpoint(req: SaveOverlaysRequest):
    """Save debug overlay images to blob storage cache."""
    from core.detect.checkpoint.frames import save_overlays

    save_overlays(req.timeline_id, req.job_id, req.stage, req.seq, req.overlays)
    return {"status": "saved", "count": len(req.overlays)}


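# Illustrative sketch (not part of the diff): one way a worker could build the
# base64 overlays mapping expected by POST /overlays. Only the "string key ->
# base64 PNG" shape comes from SaveOverlaysRequest above; the key name mirrors the
# "edge_overlay_b64" key used later in this file but is otherwise an assumption.
import base64
import io

import numpy as np
from PIL import Image


def _encode_overlay_png(img: np.ndarray) -> str:
    """Encode an RGB overlay array as a base64 PNG string."""
    buf = io.BytesIO()
    Image.fromarray(img).save(buf, format="PNG")
    return base64.b64encode(buf.getvalue()).decode()


# overlays = {"edge_overlay_b64": _encode_overlay_png(edge_overlay_image)}
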
@router.get("/overlays/{timeline_id}/{job_id}/{stage}/{seq}")
def load_overlays_endpoint(timeline_id: str, job_id: str, stage: str, seq: int):
    """Load cached debug overlay images."""
    from core.detect.checkpoint.frames import load_overlays

    overlays = load_overlays(timeline_id, job_id, stage, seq)
    return {"overlays": overlays or {}}


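# Illustrative sketch (not part of the diff): decoding one cached overlay returned
# by the loader above back into PNG bytes. load_overlays is the same helper the
# endpoint imports; the timeline/job ids, stage, and seq are hypothetical.
import base64

from core.detect.checkpoint.frames import load_overlays

overlays = load_overlays("tl-0001", "job-0001", "detect_edges", 0)
if overlays:
    png_bytes = base64.b64decode(next(iter(overlays.values())))
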
def _generate_debug_overlays(job_id: str, stage: str, frame) -> dict[str, str] | None:
    """Generate debug overlay images for a single frame."""
    import os

    inference_url = os.environ.get("INFERENCE_URL")

    if stage == "detect_edges":
        from core.detect.profile import get_profile, get_stage_config
        from core.detect.stages.models import RegionAnalysisConfig
        from core.db.connection import get_session
        from core.db.job import get_job
        from uuid import UUID

        with get_session() as session:
            job = get_job(session, UUID(job_id))
            if not job:
                return None

            profile = get_profile(job.profile_name)
            config = RegionAnalysisConfig(**get_stage_config(profile, "detect_edges"))

        if inference_url:
            from core.detect.inference import InferenceClient
            client = InferenceClient(base_url=inference_url, job_id=job_id)
            dr = client.detect_edges_debug(
                image=frame.image,
                edge_canny_low=config.edge_canny_low,
                edge_canny_high=config.edge_canny_high,
                edge_hough_threshold=config.edge_hough_threshold,
                edge_hough_min_length=config.edge_hough_min_length,
                edge_hough_max_gap=config.edge_hough_max_gap,
                edge_pair_max_distance=config.edge_pair_max_distance,
                edge_pair_min_distance=config.edge_pair_min_distance,
            )
            return {
                "edge_overlay_b64": dr.edge_overlay_b64,
                "lines_overlay_b64": dr.lines_overlay_b64,
            }
        else:
            from core.detect.stages.edge_detector import _load_cv_edges
            edges_mod = _load_cv_edges()
            dr = edges_mod.detect_edges_debug(
                frame.image,
                canny_low=config.edge_canny_low,
                canny_high=config.edge_canny_high,
                hough_threshold=config.edge_hough_threshold,
                hough_min_length=config.edge_hough_min_length,
                hough_max_gap=config.edge_hough_max_gap,
                pair_max_distance=config.edge_pair_max_distance,
                pair_min_distance=config.edge_pair_min_distance,
            )
            return {
                "edge_overlay_b64": dr["edge_overlay_b64"],
                "lines_overlay_b64": dr["lines_overlay_b64"],
            }

    elif stage == "field_segmentation":
        from core.detect.profile import get_profile, get_stage_config
        from core.detect.stages.models import FieldSegmentationConfig
        from core.db.connection import get_session
        from core.db.job import get_job
        from uuid import UUID

        with get_session() as session:
            job = get_job(session, UUID(job_id))
            if not job:
                return None

            profile = get_profile(job.profile_name)
            config = FieldSegmentationConfig(**get_stage_config(profile, "field_segmentation"))

        if inference_url:
            import httpx, json, base64, io
            from PIL import Image
            import numpy as np

            buf = io.BytesIO()
            Image.fromarray(frame.image).save(buf, format="JPEG", quality=85)
            img_b64 = base64.b64encode(buf.getvalue()).decode()

            resp = httpx.post(
                f"{inference_url.rstrip('/')}/segment_field/debug",
                json={
                    "image_b64": img_b64,
                    "hue_low": config.hue_low,
                    "hue_high": config.hue_high,
                    "sat_low": config.sat_low,
                    "sat_high": config.sat_high,
                    "val_low": config.val_low,
                    "val_high": config.val_high,
                    "morph_kernel": config.morph_kernel,
                    "min_area_ratio": config.min_area_ratio,
                },
                timeout=30.0,
            )
            if resp.status_code == 200:
                data = resp.json()
                return {"mask_overlay_b64": data.get("mask_b64", "")}

        return None

    return None


@router.get("/overlays/{timeline_id}/{job_id}/{stage}")
def list_overlay_frames_endpoint(timeline_id: str, job_id: str, stage: str):
    """List frame sequences that have cached overlays."""
    from core.detect.checkpoint.frames import list_overlay_frames

    seqs = list_overlay_frames(timeline_id, job_id, stage)
    return {"frames": seqs}


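# Illustrative example (not part of the diff): a response from the listing above
# might look like {"frames": [0, 12, 24]}; the sequence numbers are hypothetical.
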
# --- GPU proxy — thin passthrough to inference server for interactive editor ---


@router.post("/gpu/detect_edges")
async def gpu_detect_edges(request: Request):
    """Proxy to GPU inference server — browser can't reach it directly."""
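# Editorial sketch (not part of the diff): the handler body is truncated in this
# compare view. A thin passthrough of this kind typically forwards the JSON body to
# the inference server and returns the upstream JSON. The /detect_edges upstream
# path and the timeout below are assumptions; _gpu_url() is the helper shown earlier.
#
#     import httpx
#
#     body = await request.json()
#     async with httpx.AsyncClient(timeout=60.0) as client:
#         resp = await client.post(f"{_gpu_url()}/detect_edges", json=body)
#     return resp.json()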