This commit is contained in:
2026-03-30 07:22:14 -03:00
parent d0707333fd
commit 4220b0418e
182 changed files with 3668 additions and 5231 deletions

View File

@@ -58,32 +58,30 @@ def write_config(update: ConfigUpdate):
@router.get("/config/profiles")
def list_profiles():
def get_profiles():
"""List available detection profiles."""
from detect.profiles import _PROFILES
return [{"name": name} for name in _PROFILES]
from core.detect.profile import list_profiles as _list
return [{"name": name} for name in _list()]
@router.get("/config/profiles/{profile_name}/pipeline")
def get_pipeline_config(profile_name: str):
"""Return the pipeline composition for a profile."""
from detect.profiles import get_profile
from core.detect.profile import get_profile
from fastapi import HTTPException
from dataclasses import asdict
try:
profile = get_profile(profile_name)
except ValueError:
raise HTTPException(status_code=404, detail=f"Unknown profile: {profile_name}")
config = profile.pipeline_config()
return asdict(config)
return profile["pipeline"]
@router.get("/config/stages", response_model=list[StageConfigInfo])
def list_stage_configs():
"""Return the stage palette with config field metadata for the editor."""
from detect.stages import list_stages
from core.detect.stages import list_stages
result = []
for stage in list_stages():
@@ -95,7 +93,7 @@ def list_stage_configs():
@router.get("/config/stages/{stage_name}", response_model=StageConfigInfo)
def get_stage_config(stage_name: str):
"""Return config field metadata for a single stage."""
from detect.stages import get_stage
from core.detect.stages import get_stage
try:
stage = get_stage(stage_name)

View File

@@ -105,7 +105,7 @@ class ReplaySingleStageResponse(BaseModel):
@router.get("/checkpoints/{timeline_id}")
def list_checkpoints(timeline_id: str) -> list[CheckpointInfo]:
"""List available checkpoint stages for a job."""
from detect.checkpoint import list_checkpoints as _list
from core.detect.checkpoint import list_checkpoints as _list
try:
stages = _list(timeline_id)
@@ -139,10 +139,10 @@ class CheckpointData(BaseModel):
def get_checkpoint_data(timeline_id: str, stage: str):
"""Load checkpoint frames + metadata for the editor UI."""
from uuid import UUID
from core.db.tables import Timeline, Checkpoint
from core.db.models import Timeline, Checkpoint
from core.db.connection import get_session
from core.db.checkpoint import list_checkpoints
from detect.checkpoint.frames import load_frames_b64
from core.detect.checkpoint.frames import load_frames_b64
with get_session() as session:
timeline = session.get(Timeline, UUID(timeline_id))
@@ -184,7 +184,7 @@ def get_checkpoint_data(timeline_id: str, stage: str):
@router.get("/scenarios", response_model=list[ScenarioInfo])
def list_scenarios_endpoint():
"""List all available scenarios (bookmarked checkpoints)."""
from core.db.tables import Timeline
from core.db.models import Timeline
from core.db.connection import get_session
from core.db.checkpoint import list_scenarios
@@ -212,7 +212,7 @@ def list_scenarios_endpoint():
@router.post("/replay", response_model=ReplayResponse)
def replay(req: ReplayRequest):
"""Replay pipeline from a specific stage with optional config overrides."""
from detect.checkpoint import replay_from
from core.detect.checkpoint import replay_from
try:
result = replay_from(
@@ -242,7 +242,7 @@ def replay(req: ReplayRequest):
@router.post("/retry", response_model=RetryResponse)
def retry(req: RetryRequest):
"""Queue an async retry of unresolved candidates with different config."""
from detect.checkpoint.tasks import retry_candidates
from core.detect.checkpoint.tasks import retry_candidates
kwargs = {
"timeline_id": req.timeline_id,
@@ -266,7 +266,7 @@ def retry(req: RetryRequest):
@router.post("/replay-stage", response_model=ReplaySingleStageResponse)
def replay_single_stage(req: ReplaySingleStageRequest):
"""Replay a single stage on specific frames — fast path for interactive tuning."""
from detect.checkpoint.replay import replay_single_stage as _replay
from core.detect.checkpoint.replay import replay_single_stage as _replay
try:
result = _replay(
@@ -361,3 +361,41 @@ async def gpu_detect_edges_debug(request: Request):
media_type="application/json")
except Exception as e:
raise HTTPException(status_code=502, detail=f"GPU server unreachable: {e}")
@router.post("/gpu/segment_field")
async def gpu_segment_field(request: Request):
    """Proxy to GPU inference server — field segmentation.

    Forwards the raw request body unchanged and relays the upstream
    response (body + status code) back to the caller. Any transport
    failure is surfaced as a 502.
    """
    import httpx

    payload = await request.body()
    try:
        # One short-lived client per request; 30 s covers model inference time.
        async with httpx.AsyncClient(timeout=30.0) as client:
            upstream = await client.post(
                f"{_gpu_url()}/segment_field",
                content=payload,
                headers={"Content-Type": "application/json"},
            )
            return Response(
                content=upstream.content,
                status_code=upstream.status_code,
                media_type="application/json",
            )
    except Exception as e:
        raise HTTPException(status_code=502, detail=f"GPU server unreachable: {e}")
@router.post("/gpu/segment_field/debug")
async def gpu_segment_field_debug(request: Request):
    """Proxy to GPU inference server — field segmentation with debug overlay.

    Same pass-through behavior as the non-debug endpoint, but targets the
    upstream ``/segment_field/debug`` route. Transport failures become 502.
    """
    import httpx

    payload = await request.body()
    try:
        # One short-lived client per request; 30 s covers model inference time.
        async with httpx.AsyncClient(timeout=30.0) as client:
            upstream = await client.post(
                f"{_gpu_url()}/segment_field/debug",
                content=payload,
                headers={"Content-Type": "application/json"},
            )
            return Response(
                content=upstream.content,
                status_code=upstream.status_code,
                media_type="application/json",
            )
    except Exception as e:
        raise HTTPException(status_code=502, detail=f"GPU server unreachable: {e}")

View File

@@ -60,9 +60,9 @@ def _resolve_video_path(video_path: str) -> str:
@router.post("/run", response_model=RunResponse)
def run_pipeline(req: RunRequest):
"""Launch a detection pipeline run on a source chunk."""
from detect import emit
from detect.graph import get_pipeline
from detect.state import DetectState
from core.detect import emit
from core.detect.graph import get_pipeline
from core.detect.state import DetectState
local_path = _resolve_video_path(req.video_path)
job_id = str(uuid.uuid4())
@@ -79,7 +79,7 @@ def run_pipeline(req: RunRequest):
# Clear any stale events from a previous run with same job_id
from core.events import _get_redis
from detect.events import DETECT_EVENTS_PREFIX
from core.detect.events import DETECT_EVENTS_PREFIX
r = _get_redis()
r.delete(f"{DETECT_EVENTS_PREFIX}:{job_id}")
@@ -97,7 +97,7 @@ def run_pipeline(req: RunRequest):
source_asset_id=req.source_asset_id,
)
from detect.graph import (
from core.detect.graph import (
PipelineCancelled, set_cancel_check, clear_cancel_check,
init_pause, clear_pause,
)
@@ -117,7 +117,7 @@ def run_pipeline(req: RunRequest):
emit.job_complete(job_id, {"status": "cancelled"})
except Exception as e:
logger.exception("Pipeline run %s failed: %s", job_id, e)
from detect.graph import _node_states, NODES
from core.detect.graph import _node_states, NODES
if job_id in _node_states:
states = _node_states[job_id]
for node in reversed(NODES):
@@ -145,7 +145,7 @@ def run_pipeline(req: RunRequest):
@router.post("/stop/{job_id}")
def stop_pipeline(job_id: str):
"""Stop a running pipeline. Signals cancellation; the thread checks on next stage."""
from detect import emit
from core.detect import emit
if job_id not in _running_jobs:
raise HTTPException(status_code=404, detail=f"No running pipeline: {job_id}")
@@ -158,7 +158,7 @@ def stop_pipeline(job_id: str):
@router.post("/pause/{job_id}")
def pause(job_id: str):
"""Pause a running pipeline after the current stage completes."""
from detect.graph import pause_pipeline
from core.detect.graph import pause_pipeline
if job_id not in _running_jobs:
raise HTTPException(status_code=404, detail=f"No running pipeline: {job_id}")
@@ -170,7 +170,7 @@ def pause(job_id: str):
@router.post("/resume/{job_id}")
def resume(job_id: str):
"""Resume a paused pipeline."""
from detect.graph import resume_pipeline
from core.detect.graph import resume_pipeline
if job_id not in _running_jobs:
raise HTTPException(status_code=404, detail=f"No running pipeline: {job_id}")
@@ -182,7 +182,7 @@ def resume(job_id: str):
@router.post("/step/{job_id}")
def step(job_id: str):
"""Run one stage then pause again."""
from detect.graph import step_pipeline
from core.detect.graph import step_pipeline
if job_id not in _running_jobs:
raise HTTPException(status_code=404, detail=f"No running pipeline: {job_id}")
@@ -194,7 +194,7 @@ def step(job_id: str):
@router.post("/pause-after-stage/{job_id}")
def toggle_pause_after_stage(job_id: str, enabled: bool = True):
"""Toggle pause-after-each-stage mode."""
from detect.graph import set_pause_after_stage
from core.detect.graph import set_pause_after_stage
if job_id not in _running_jobs:
raise HTTPException(status_code=404, detail=f"No running pipeline: {job_id}")
@@ -206,7 +206,7 @@ def toggle_pause_after_stage(job_id: str, enabled: bool = True):
@router.get("/status/{job_id}")
def pipeline_status(job_id: str):
"""Get pipeline run status."""
from detect.graph import is_paused
from core.detect.graph import is_paused
running = job_id in _running_jobs
paused = is_paused(job_id)
@@ -224,11 +224,23 @@ def pipeline_status(job_id: str):
return {"status": status, "job_id": job_id}
@router.get("/timeline/{job_id}")
def get_timeline_for_job(job_id: str):
    """Resolve the timeline_id that belongs to a running or completed job.

    Raises a 404 when the runner bridge knows nothing about this job.
    """
    from core.detect.checkpoint.runner_bridge import get_timeline_id

    timeline = get_timeline_id(job_id)
    if timeline is None:
        raise HTTPException(status_code=404, detail=f"No timeline for job: {job_id}")
    return {"timeline_id": timeline, "job_id": job_id}
@router.post("/clear/{job_id}")
def clear_pipeline(job_id: str):
"""Clear events for a job from Redis."""
from core.events import _get_redis
from detect.events import DETECT_EVENTS_PREFIX
from core.detect.events import DETECT_EVENTS_PREFIX
r = _get_redis()
r.delete(f"{DETECT_EVENTS_PREFIX}:{job_id}")

View File

@@ -17,7 +17,7 @@ from fastapi import APIRouter
from starlette.responses import StreamingResponse
from core.events import poll_events
from detect.events import DETECT_EVENTS_PREFIX, TERMINAL_EVENTS
from core.detect.events import DETECT_EVENTS_PREFIX, TERMINAL_EVENTS
logger = logging.getLogger(__name__)