[project]
name = "mpr-gpu"
version = "0.1.0"
description = "MPR remote inference server (GPU)"
requires-python = ">=3.11"
dependencies = [
    "fastapi>=0.109.0",
    "uvicorn[standard]>=0.27.0",
    "rapidfuzz>=3.0.0",
    "Pillow>=10.0.0",
    "redis>=5.0.0",
    "ultralytics>=8.0.0",
    "paddleocr>=3.0.0",
    "paddlepaddle-gpu==3.0.0",
    "transformers>=4.40.0,<5",
    "accelerate>=0.27.0",
    "torch",
    "torchvision",
    "opencv-python-headless>=4.8.0",
]

# Deployed as a server application, not built/installed as a distributable
# package, so uv's project build step is disabled.
[tool.uv]
package = false

# RTX 3080 / CUDA toolkit 12.8 — cu126 wheels are forward-compatible
# (no cu128 wheels yet on either index). Mixing PyPI torch with CUDA 12.8
# causes NCCL symbol errors, so the explicit index pins prevent uv from
# pulling torch transitively from PyPI via ultralytics.
[tool.uv.sources]
torch = { index = "pytorch-cu126" }
torchvision = { index = "pytorch-cu126" }
paddlepaddle-gpu = { index = "paddle-cu126" }

[[tool.uv.index]]
name = "pytorch-cu126"
url = "https://download.pytorch.org/whl/cu126"
explicit = true

[[tool.uv.index]]
name = "paddle-cu126"
url = "https://www.paddlepaddle.org.cn/packages/stable/cu126/"
explicit = true