- root pyproject.toml replaces requirements.txt and requirements-worker.txt (worker = root + ffmpeg-python, which root already had); test deps moved to [dependency-groups] dev
- core/gpu/pyproject.toml replaces core/gpu/requirements.txt; uses [tool.uv.sources] to pin torch/torchvision and paddlepaddle-gpu to their CUDA index URLs, replacing the manual reinstall dance from the old comments
- Dockerfiles use `uv sync --frozen` against uv.lock for reproducible builds; PATH includes /app/.venv/bin so the k8s manifests' bare `uvicorn`/`celery` commands resolve without wrapping in `uv run`
- core/gpu/run.sh local mode now does `uv sync` + `uv run python server.py`; errors out cleanly if uv is missing
43 lines · 1.1 KiB · TOML
[project]
name = "mpr-gpu"
version = "0.1.0"
description = "MPR remote inference server (GPU)"
requires-python = ">=3.11"
dependencies = [
    "fastapi>=0.109.0",
    "uvicorn[standard]>=0.27.0",
    "rapidfuzz>=3.0.0",
    "Pillow>=10.0.0",
    "redis>=5.0.0",
    "ultralytics>=8.0.0",
    "paddleocr>=3.0.0",
    "paddlepaddle-gpu==3.0.0",
    "transformers>=4.40.0,<5",
    "accelerate>=0.27.0",
    # torch/torchvision are intentionally unbounded here: the exact builds
    # come from the cu126 index pinned in [tool.uv.sources] below, and
    # uv.lock freezes the resolved versions.
    "torch",
    "torchvision",
    "opencv-python-headless>=4.8.0",
]

# RTX 3080 / CUDA toolkit 12.8 — cu126 wheels are forward-compatible
# (no cu128 wheels yet on either index). Mixing PyPI torch with CUDA 12.8
# causes NCCL symbol errors, so the explicit index pins prevent uv from
# pulling torch transitively from PyPI via ultralytics.
[tool.uv.sources]
torch = { index = "pytorch-cu126" }
torchvision = { index = "pytorch-cu126" }
paddlepaddle-gpu = { index = "paddle-cu126" }

# explicit = true keeps these indexes out of general resolution: only the
# packages mapped above in [tool.uv.sources] are fetched from them.
[[tool.uv.index]]
name = "pytorch-cu126"
url = "https://download.pytorch.org/whl/cu126"
explicit = true

[[tool.uv.index]]
name = "paddle-cu126"
url = "https://www.paddlepaddle.org.cn/packages/stable/cu126/"
explicit = true

# Not an installable package — this project is an application/deployment
# target, so uv manages only the environment, not a build of "mpr-gpu".
[tool.uv]
package = false