Commit: phase 4

Adds new file core/detect/state.py (43 lines).
"""
LangGraph state definition for the detection pipeline.

This TypedDict flows through all graph nodes. Each node reads what
it needs and writes its outputs. LangGraph manages the state transitions.
"""

from __future__ import annotations

from typing import TypedDict

from core.detect.models import BoundingBox, BrandDetection, DetectionReport, Frame, PipelineStats, TextCandidate

||||
class DetectState(TypedDict, total=False):
    """Shared state dict passed between all LangGraph pipeline nodes.

    ``total=False``: every key is optional, because nodes populate the
    state incrementally — early stages only see the input keys, and each
    stage adds its own outputs for downstream nodes to read.
    """

    # Input — supplied when the graph is invoked
    video_path: str
    job_id: str
    profile_name: str
    source_asset_id: str  # UUID of the source MediaAsset

    # Stage outputs — each pipeline stage writes its results here
    frames: list[Frame]
    filtered_frames: list[Frame]
    # NOTE(review): the three field_* dicts appear keyed by frame seq (int),
    # matching *_by_frame below — typed as bare `dict` because the ndarray
    # value type would require a numpy import; confirm before tightening.
    field_masks: dict  # {seq: np.ndarray} — pitch mask per frame
    field_boundaries: dict  # {seq: [(x,y), ...]} — pitch boundary per frame
    field_coverage: dict[int, float]  # {seq: float} — pitch coverage ratio per frame
    edge_regions_by_frame: dict[int, list[BoundingBox]]
    boxes_by_frame: dict[int, list[BoundingBox]]
    preprocessed_crops: dict  # "{frame_seq}_{box_idx}" → np.ndarray
    text_candidates: list[TextCandidate]
    unresolved_candidates: list[TextCandidate]
    detections: list[BrandDetection]
    report: DetectionReport  # final aggregated output of the pipeline

    # Session brands (accumulated during the run, persisted to DB)
    session_brands: dict[str, str]  # {normalized_name: canonical_name}

    # Running stats (updated by each stage)
    stats: PipelineStats

    # Config overrides for replay (merged into profile configs dict)
    config_overrides: dict
|
||||
Reference in New Issue
Block a user