phase cv 0
This commit is contained in:
163
gpu/server.py
163
gpu/server.py
@@ -52,74 +52,25 @@ def _gpu_log(job_id: str, log_level: str, stage: str, level: str, msg: str):
|
||||
emit_log(job_id, stage, level, msg, log_level=log_level)
|
||||
|
||||
|
||||
# --- Request/Response models ---
|
||||
# --- Request/Response models (generated from core/schema/models/inference.py) ---
|
||||
|
||||
class DetectRequest(BaseModel):
    """Request body for object detection.

    NOTE(review): `image` is presumably a base64-encoded image (it is
    passed to `_decode_image` by the endpoints) — confirm against callers.
    """

    # Encoded input image payload.
    image: str
    # Optional detector model name; server default is used when None.
    model: str | None = None
    # Optional confidence threshold override; server default when None.
    confidence: float | None = None
    # Optional whitelist of class labels to keep; all classes when None.
    target_classes: list[str] | None = None
|
||||
|
||||
|
||||
class BBox(BaseModel):
    """A single detection: a box plus its score and class label.

    NOTE(review): x/y/w/h look like pixel-space top-left + size — confirm
    against the detector that produces them.
    """

    x: int
    y: int
    w: int
    h: int
    # Detector confidence score for this box.
    confidence: float
    # Predicted class label.
    label: str
|
||||
|
||||
|
||||
class DetectResponse(BaseModel):
    """Response for object detection: the list of detected boxes."""

    detections: list[BBox]
|
||||
|
||||
|
||||
class OCRRequest(BaseModel):
    """Request body for OCR."""

    # Encoded input image payload (same convention as DetectRequest.image).
    image: str
    # Optional OCR language codes; server-configured default when None.
    languages: list[str] | None = None
|
||||
|
||||
|
||||
class OCRTextResult(BaseModel):
    """One recognized text fragment from OCR."""

    # Recognized text content.
    text: str
    # OCR engine confidence for this fragment.
    confidence: float
    # Bounding box of the text; assumed [x, y, w, h] to match BBox —
    # TODO confirm against the OCR backend's output format.
    bbox: list[int]
|
||||
|
||||
|
||||
class OCRResponse(BaseModel):
    """Response for OCR: all recognized text fragments."""

    results: list[OCRTextResult]
|
||||
|
||||
|
||||
class PreprocessRequest(BaseModel):
    """Request body for image preprocessing.

    Each flag toggles one preprocessing step; note that `contrast`
    defaults to True while the others default to False.
    """

    # Encoded input image payload.
    image: str
    # Apply binarization (off by default).
    binarize: bool = False
    # Apply deskewing (off by default).
    deskew: bool = False
    # Apply contrast enhancement (on by default).
    contrast: bool = True
|
||||
|
||||
|
||||
class PreprocessResponse(BaseModel):
    """Response for image preprocessing."""

    image: str  # base64 JPEG of processed image
|
||||
|
||||
|
||||
class VLMRequest(BaseModel):
    """Request body for vision-language-model inference."""

    # Encoded input image payload.
    image: str
    # Text prompt to run against the image.
    prompt: str
    # Optional VLM model name; server default is used when None.
    model: str | None = None
|
||||
|
||||
|
||||
class VLMResponse(BaseModel):
    """Response for VLM inference: a brand identification with rationale.

    NOTE(review): field names suggest this endpoint is specialized for
    brand recognition — confirm against the VLM prompt/handler.
    """

    brand: str
    # Model confidence in the identification.
    confidence: float
    # Free-text explanation produced by the model.
    reasoning: str
|
||||
|
||||
|
||||
class ConfigUpdate(BaseModel):
    """Partial server-configuration update.

    Every field is optional; None means "leave the current setting
    unchanged".
    """

    # Compute device selector (e.g. which device to run on).
    device: str | None = None
    # YOLO detector model name.
    yolo_model: str | None = None
    # YOLO confidence threshold.
    yolo_confidence: float | None = None
    # VRAM budget in megabytes.
    vram_budget_mb: int | None = None
    # Scheduling/placement strategy identifier.
    strategy: str | None = None
    # OCR language codes.
    ocr_languages: list[str] | None = None
    # Minimum confidence for OCR results.
    ocr_min_confidence: float | None = None
|
||||
from models.inference_contract import (
|
||||
AnalyzeRegionsDebugResponse,
|
||||
AnalyzeRegionsRequest,
|
||||
AnalyzeRegionsResponse,
|
||||
BBox,
|
||||
ConfigUpdate,
|
||||
DetectRequest,
|
||||
DetectResponse,
|
||||
OCRRequest,
|
||||
OCRResponse,
|
||||
OCRTextResult,
|
||||
PreprocessRequest,
|
||||
PreprocessResponse,
|
||||
RegionBox,
|
||||
VLMRequest,
|
||||
VLMResponse,
|
||||
)
|
||||
|
||||
|
||||
# --- App ---
|
||||
@@ -281,6 +232,84 @@ def vlm(req: VLMRequest, request: Request):
|
||||
return VLMResponse(**result)
|
||||
|
||||
|
||||
@app.post("/detect_edges", response_model=AnalyzeRegionsResponse)
def detect_edges_endpoint(req: AnalyzeRegionsRequest, request: Request):
    """Run CV edge/region analysis on the posted image.

    Decodes the (base64) image, runs `detect_edges` with the tuning
    parameters carried on the request, logs timing, and returns the
    detected regions.

    Raises:
        HTTPException 400: the image payload could not be decoded.
        HTTPException 500: the edge analysis itself failed.
    """
    job_id, log_level = _job_ctx(request)

    try:
        image = _decode_image(req.image)
        h, w = image.shape[:2]
    except Exception as e:
        # Bad client payload, not a server fault. Chain the cause so the
        # original decode error survives in the traceback.
        raise HTTPException(status_code=400, detail=f"Bad image: {e}") from e

    try:
        t0 = time.monotonic()
        # Imported lazily so the CV stack only loads when this endpoint
        # is actually used.
        from models.cv.edges import detect_edges

        edge_regions = detect_edges(
            image,
            canny_low=req.edge_canny_low,
            canny_high=req.edge_canny_high,
            hough_threshold=req.edge_hough_threshold,
            hough_min_length=req.edge_hough_min_length,
            hough_max_gap=req.edge_hough_max_gap,
            pair_max_distance=req.edge_pair_max_distance,
            pair_min_distance=req.edge_pair_min_distance,
        )
        infer_ms = (time.monotonic() - t0) * 1000

        _gpu_log(job_id, log_level, "GPU:CV", "DEBUG",
                 f"Edge analysis {w}x{h}: {infer_ms:.0f}ms → {len(edge_regions)} regions")
    except Exception as e:
        # Chain the cause (was previously dropped), preserving the real
        # failure for server-side debugging.
        raise HTTPException(status_code=500, detail=f"Region analysis failed: {e}") from e

    boxes = [RegionBox(**r) for r in edge_regions]
    return AnalyzeRegionsResponse(regions=boxes)
|
||||
|
||||
|
||||
@app.post("/detect_edges/debug", response_model=AnalyzeRegionsDebugResponse)
def detect_edges_debug_endpoint(req: AnalyzeRegionsRequest, request: Request):
    """Debug variant of /detect_edges.

    Same analysis as `detect_edges_endpoint`, but additionally returns
    intermediate overlays (edges, Hough lines) and line/pair counts for
    tuning the edge-detection parameters.

    Raises:
        HTTPException 400: the image payload could not be decoded.
        HTTPException 500: the debug analysis itself failed.
    """
    job_id, log_level = _job_ctx(request)

    try:
        image = _decode_image(req.image)
        h, w = image.shape[:2]
    except Exception as e:
        # Bad client payload; chain the cause so the decode error is
        # preserved in the traceback.
        raise HTTPException(status_code=400, detail=f"Bad image: {e}") from e

    try:
        t0 = time.monotonic()
        # Lazy import keeps the CV stack out of server startup.
        from models.cv.edges import detect_edges_debug

        result = detect_edges_debug(
            image,
            canny_low=req.edge_canny_low,
            canny_high=req.edge_canny_high,
            hough_threshold=req.edge_hough_threshold,
            hough_min_length=req.edge_hough_min_length,
            hough_max_gap=req.edge_hough_max_gap,
            pair_max_distance=req.edge_pair_max_distance,
            pair_min_distance=req.edge_pair_min_distance,
        )
        infer_ms = (time.monotonic() - t0) * 1000

        _gpu_log(job_id, log_level, "GPU:CV", "DEBUG",
                 f"Edge debug {w}x{h}: {infer_ms:.0f}ms → {len(result['regions'])} regions, "
                 f"{result['horizontal_count']} horizontals, {result['pair_count']} pairs")
    except Exception as e:
        # Chain the cause (was previously dropped).
        raise HTTPException(status_code=500, detail=f"Region debug analysis failed: {e}") from e

    boxes = [RegionBox(**r) for r in result["regions"]]
    response = AnalyzeRegionsDebugResponse(
        regions=boxes,
        edge_overlay_b64=result["edge_overlay_b64"],
        lines_overlay_b64=result["lines_overlay_b64"],
        horizontal_count=result["horizontal_count"],
        pair_count=result["pair_count"],
    )
    return response
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import uvicorn
|
||||
|
||||
|
||||
Reference in New Issue
Block a user