Files
mediaproc/tests/detect/manual/test_escalation_e2e.py
2026-03-26 07:40:14 -03:00

281 lines
11 KiB
Python

#!/usr/bin/env python3
"""
Push a full pipeline simulation with escalation events.
Exercises all stages including VLM and cloud escalation, with progressive
stats showing cost accumulating. Tests all panels: pipeline graph, funnel,
timeline, cost stats, brand table, and log.
Usage:
python tests/detect/manual/test_escalation_e2e.py [--job JOB_ID] [--port PORT] [--delay SECS]
Opens: http://mpr.local.ar/detection/?job=<JOB_ID>
"""
import argparse
import json
import logging
import time
from datetime import datetime, timezone
import redis
# Console logging: fixed-width level, then "name message" space-separated
# (format previously ran the logger name and message together).
logging.basicConfig(level=logging.INFO, format="%(levelname)-7s %(name)s %(message)s")
logger = logging.getLogger(__name__)
# Pipeline stages in execution order; push_graph uses list position to mark
# earlier nodes "done" and later nodes "pending" relative to the active one.
NODES = ["extract_frames", "filter_scenes", "detect_objects", "preprocess",
"run_ocr", "match_brands", "escalate_vlm", "escalate_cloud", "compile_report"]
def ts():
    """Return the current UTC time as an ISO-8601 string."""
    now = datetime.now(timezone.utc)
    return now.isoformat()
# Shared run metadata that push() stamps onto every event it emits.
RUN_CONTEXT = {}


def set_run_context(run_id: str, parent_job_id: str, run_type: str = "initial"):
    """Record the current run's identity for inclusion in all pushed events."""
    RUN_CONTEXT["run_id"] = run_id
    RUN_CONTEXT["parent_job_id"] = parent_job_id
    RUN_CONTEXT["run_type"] = run_type
def push(r, key, event):
    """Stamp timestamp + run context onto *event* and RPUSH it as JSON.

    Mutates and returns the same dict so callers can inspect what was sent.
    """
    if "ts" not in event:
        event["ts"] = ts()
    event.update(RUN_CONTEXT)
    r.rpush(key, json.dumps(event))
    return event
def push_graph(r, key, active_node, status, delay):
    """Emit a graph_update event with *active_node* set to *status*.

    Nodes before the active one (in NODES order) are marked "done",
    nodes after it "pending". Sleeps *delay* seconds afterwards so the
    UI has time to render each transition.
    """
    # Hoist the index lookup: the original recomputed NODES.index(active_node)
    # for every node, an accidental O(n^2) scan.
    active_idx = NODES.index(active_node)
    nodes = []
    for idx, node in enumerate(NODES):
        if idx == active_idx:
            nodes.append({"id": node, "status": status})
        elif idx < active_idx:
            nodes.append({"id": node, "status": "done"})
        else:
            nodes.append({"id": node, "status": "pending"})
    push(r, key, {"event": "graph_update", "nodes": nodes})
    time.sleep(delay)
def push_stats(r, key, **fields):
    """Push a stats_update event; any counter not in *fields* defaults to 0."""
    zeroed = dict.fromkeys([
        "frames_extracted", "frames_after_scene_filter",
        "regions_detected", "regions_resolved_by_ocr",
        "regions_escalated_to_local_vlm", "regions_escalated_to_cloud_llm",
        "cloud_llm_calls", "processing_time_seconds", "estimated_cloud_cost_usd",
    ], 0)
    payload = {"event": "stats_update", **zeroed, **fields}
    push(r, key, payload)
# Monotonic counter used to place successive fake bboxes on a grid.
_bbox_idx = 0


def push_detection(r, key, brand, conf, source, timestamp, frame_ref, delay):
    """Push one brand-detection event with a synthetic bounding box.

    Boxes are laid out on a 4-column grid so consecutive fake detections
    don't overlap each other on screen.
    """
    global _bbox_idx
    row, col = divmod(_bbox_idx, 4)
    _bbox_idx += 1
    event = {
        "event": "detection",
        "brand": brand, "confidence": conf, "source": source,
        "timestamp": timestamp, "duration": 0.5,
        "content_type": "soccer_broadcast", "frame_ref": frame_ref,
        "bbox": {"x": 50 + col * 200, "y": 50 + row * 120, "w": 160, "h": 80},
    }
    push(r, key, event)
    logger.info(" [%s] %s %.2f t=%.1fs", source, brand, conf, timestamp)
    time.sleep(delay * 0.3)
def main():
    """Replay a scripted end-to-end escalation pipeline into Redis.

    Connects to Redis, clears the job's event stream, then pushes a fixed
    sequence of graph/stat/log/detection events — including local-VLM and
    cloud escalation stages — ending with a job_complete report. Pacing is
    controlled by --delay so the dashboard panels can be watched live.
    """
    parser = argparse.ArgumentParser()
    # Uses the module-level `time` import (a redundant local
    # `import time as _time` was removed here).
    default_job = f"escalation-{int(time.time()) % 100000}"
    parser.add_argument("--job", default=default_job)
    parser.add_argument("--port", type=int, default=6382)
    parser.add_argument("--delay", type=float, default=0.5)
    args = parser.parse_args()
    r = redis.Redis(port=args.port, decode_responses=True)
    key = f"detect_events:{args.job}"
    r.delete(key)  # start from a clean event stream for this job
    delay = args.delay
    run_id = f"{args.job[:8]}-r1"
    set_run_context(run_id=run_id, parent_job_id=args.job, run_type="initial")
    logger.info("Full escalation pipeline simulation → %s", key)
    logger.info("Run: %s (parent: %s)", run_id, args.job)
    logger.info("Open: http://mpr.local.ar/detection/?job=%s", args.job)
    input("\nPress Enter to start...")
    # --- Extract frames ---
    push_graph(r, key, "extract_frames", "running", delay)
    push(r, key, {"event": "log", "level": "INFO", "stage": "FrameExtractor",
                  "msg": "Extracting frames: match_clip.mp4 (90.0s, 1920x1080, fps=2)"})
    time.sleep(delay)
    push_stats(r, key, frames_extracted=180, processing_time_seconds=4.5)
    push_graph(r, key, "extract_frames", "done", delay)
    # --- Scene filter ---
    push_graph(r, key, "filter_scenes", "running", delay)
    push_stats(r, key, frames_extracted=180, frames_after_scene_filter=52, processing_time_seconds=6.8)
    push(r, key, {"event": "log", "level": "INFO", "stage": "SceneFilter",
                  "msg": "Kept 52 frames (71% reduction)"})
    push_graph(r, key, "filter_scenes", "done", delay)
    # --- YOLO detect ---
    push_graph(r, key, "detect_objects", "running", delay)
    push(r, key, {"event": "log", "level": "INFO", "stage": "YOLODetector",
                  "msg": "Running yolov8n on 52 frames"})
    time.sleep(delay)
    # Push a sample frame with YOLO boxes. PIL is imported lazily because
    # only this stage needs it.
    import base64, io
    from PIL import Image as PILImage, ImageDraw
    frame_img = PILImage.new("RGB", (960, 540), "#1a1a2e")
    draw = ImageDraw.Draw(frame_img)
    draw.rectangle([40, 440, 900, 520], outline="#444", width=2)
    draw.text((100, 460), "SPONSOR BOARD AREA", fill="#666")
    draw.rectangle([350, 150, 610, 380], outline="#333", width=1)
    draw.text((400, 200), "PLAYER", fill="#555")
    buf = io.BytesIO()
    frame_img.save(buf, "JPEG")
    frame_b64 = base64.b64encode(buf.getvalue()).decode()
    yolo_boxes = [
        {"x": 40, "y": 440, "w": 860, "h": 80, "confidence": 0.92,
         "label": "ad_board", "stage": "detect_objects", "source": "yolo"},
        {"x": 350, "y": 150, "w": 260, "h": 230, "confidence": 0.87,
         "label": "person", "stage": "detect_objects", "source": "yolo"},
        {"x": 700, "y": 30, "w": 200, "h": 60, "confidence": 0.78,
         "label": "scoreboard", "stage": "detect_objects", "source": "yolo"},
    ]
    push(r, key, {"event": "frame_update", "frame_ref": 25, "timestamp": 12.5,
                  "jpeg_b64": frame_b64, "boxes": yolo_boxes})
    time.sleep(delay)
    push_stats(r, key, frames_extracted=180, frames_after_scene_filter=52,
               regions_detected=41, processing_time_seconds=14.2)
    push_graph(r, key, "detect_objects", "done", delay)
    # --- OCR ---
    push_graph(r, key, "run_ocr", "running", delay)
    push(r, key, {"event": "log", "level": "INFO", "stage": "OCRStage",
                  "msg": "Running OCR on 41 regions (mode=remote)"})
    time.sleep(delay)
    push_stats(r, key, frames_extracted=180, frames_after_scene_filter=52,
               regions_detected=41, regions_resolved_by_ocr=30, processing_time_seconds=21.5)
    push_graph(r, key, "run_ocr", "done", delay)
    # --- Brand matching ---
    push_graph(r, key, "match_brands", "running", delay)
    push(r, key, {"event": "log", "level": "INFO", "stage": "BrandResolver",
                  "msg": "Matching 30 candidates against 12 brands (fuzzy_threshold=75)"})
    time.sleep(delay)
    # OCR detections: (brand, confidence, timestamp_s, frame_ref)
    ocr_brands = [
        ("Nike", 0.97, 2.0, 4), ("Nike", 0.95, 5.5, 11), ("Emirates", 0.92, 8.0, 16),
        ("Adidas", 0.89, 12.0, 24), ("Coca-Cola", 0.85, 18.0, 36),
        ("Nike", 0.94, 22.0, 44), ("Emirates", 0.88, 28.0, 56),
        ("Adidas", 0.91, 32.0, 64), ("Nike", 0.96, 38.0, 76),
        ("Emirates", 0.90, 42.0, 84), ("Coca-Cola", 0.87, 48.0, 96),
        ("Nike", 0.93, 52.0, 104), ("Adidas", 0.90, 58.0, 116),
    ]
    for brand, conf, ts_val, fref in ocr_brands:
        push_detection(r, key, brand, conf, "ocr", ts_val, fref, delay)
    push(r, key, {"event": "log", "level": "INFO", "stage": "BrandResolver",
                  "msg": "Exact: 10, Fuzzy: 3, Unresolved: 11 → VLM"})
    push_graph(r, key, "match_brands", "done", delay)
    # --- VLM escalation ---
    push_graph(r, key, "escalate_vlm", "running", delay)
    push(r, key, {"event": "log", "level": "INFO", "stage": "VLMLocal",
                  "msg": "Processing 11 unresolved crops with moondream2"})
    time.sleep(delay)
    vlm_brands = [
        ("Mastercard", 0.78, 15.0, 30), ("Santander", 0.74, 25.0, 50),
        ("Qatar Airways", 0.81, 35.0, 70), ("Heineken", 0.76, 45.0, 90),
        ("Lay's", 0.72, 55.0, 110),
    ]
    for brand, conf, ts_val, fref in vlm_brands:
        push_detection(r, key, brand, conf, "local_vlm", ts_val, fref, delay)
    push_stats(r, key, frames_extracted=180, frames_after_scene_filter=52,
               regions_detected=41, regions_resolved_by_ocr=30,
               regions_escalated_to_local_vlm=11, processing_time_seconds=38.7,
               estimated_cloud_cost_usd=0)
    push(r, key, {"event": "log", "level": "INFO", "stage": "VLMLocal",
                  "msg": "VLM resolved 5, unresolved 6 → cloud"})
    push_graph(r, key, "escalate_vlm", "done", delay)
    # --- Cloud escalation ---
    push_graph(r, key, "escalate_cloud", "running", delay)
    push(r, key, {"event": "log", "level": "INFO", "stage": "CloudLLM",
                  "msg": "Escalating 6 crops to groq (llama-3.2-90b-vision)"})
    time.sleep(delay)
    cloud_brands = [
        ("Pepsi", 0.68, 10.0, 20),
        ("Gazprom", 0.65, 40.0, 80),
    ]
    for brand, conf, ts_val, fref in cloud_brands:
        push_detection(r, key, brand, conf, "cloud_llm", ts_val, fref, delay)
    push_stats(r, key, frames_extracted=180, frames_after_scene_filter=52,
               regions_detected=41, regions_resolved_by_ocr=30,
               regions_escalated_to_local_vlm=11, regions_escalated_to_cloud_llm=6,
               cloud_llm_calls=6, processing_time_seconds=45.2,
               estimated_cloud_cost_usd=0.0)  # groq free tier
    push(r, key, {"event": "log", "level": "WARNING", "stage": "CloudLLM",
                  "msg": "4 crops unresolved after cloud — likely not brands"})
    push(r, key, {"event": "log", "level": "INFO", "stage": "CloudLLM",
                  "msg": "Cloud resolved 2/6 — cost $0.0000 (groq free tier)"})
    push_graph(r, key, "escalate_cloud", "done", delay)
    # --- Compile report ---
    push_graph(r, key, "compile_report", "running", delay)
    total_brands = len(set(b[0] for b in ocr_brands + vlm_brands + cloud_brands))
    total_dets = len(ocr_brands) + len(vlm_brands) + len(cloud_brands)
    push(r, key, {"event": "log", "level": "INFO", "stage": "Aggregator",
                  "msg": f"Report: {total_brands} brands, {total_dets} detections (merged from {total_dets} raw)"})
    push(r, key, {"event": "job_complete", "job_id": args.job, "report": {
        "video_source": "match_clip.mp4",
        "content_type": "soccer_broadcast",
        "duration_seconds": 90.0,
        "brands": {
            "Nike": {"total_appearances": 5, "avg_confidence": 0.95},
            "Emirates": {"total_appearances": 3, "avg_confidence": 0.90},
            "Adidas": {"total_appearances": 3, "avg_confidence": 0.90},
            "Coca-Cola": {"total_appearances": 2, "avg_confidence": 0.86},
            "Mastercard": {"total_appearances": 1, "avg_confidence": 0.78},
            "Santander": {"total_appearances": 1, "avg_confidence": 0.74},
            "Qatar Airways": {"total_appearances": 1, "avg_confidence": 0.81},
            "Heineken": {"total_appearances": 1, "avg_confidence": 0.76},
            "Lay's": {"total_appearances": 1, "avg_confidence": 0.72},
            "Pepsi": {"total_appearances": 1, "avg_confidence": 0.68},
            "Gazprom": {"total_appearances": 1, "avg_confidence": 0.65},
        },
    }})
    push_graph(r, key, "compile_report", "done", delay)
    logger.info("Done. %d brands, %d detections across ocr/vlm/cloud.", total_brands, total_dets)
    logger.info("Check: pipeline graph (all green), timeline (3 source colors),")
    logger.info("       cost panel (escalation ratio), brand table (source column).")


if __name__ == "__main__":
    main()