- root pyproject.toml replaces requirements.txt and requirements-worker.txt (worker = root + ffmpeg-python, which root already had); test deps moved to [dependency-groups] dev
- core/gpu/pyproject.toml replaces core/gpu/requirements.txt; uses [tool.uv.sources] to pin torch/torchvision and paddlepaddle-gpu to their CUDA index URLs, replacing the manual reinstall dance from old comments
- Dockerfiles use uv sync --frozen against uv.lock for reproducible builds; PATH includes /app/.venv/bin so k8s manifests' bare uvicorn/celery commands resolve without wrapping in uv run
- core/gpu/run.sh local mode now does uv sync + uv run python server.py; errors out cleanly if uv is missing
60 lines
1.5 KiB
Bash
Executable File
#!/usr/bin/env bash
# Run the inference server
#
# Usage:
#   ./run.sh             # Local: uv sync + run server.py (auto-installs/activates .venv)
#   ./run.sh docker      # Docker (CPU)
#   ./run.sh docker-gpu  # Docker with GPU
#   ./run.sh stop        # Stop Docker container

# Strict mode: exit on error (-e), on unset variables (-u), and on a failure
# anywhere in a pipeline (pipefail) — not just the last stage.
set -euo pipefail

# Always operate from this script's directory so .env / Dockerfile resolve
# regardless of the caller's cwd.
cd "$(dirname "${BASH_SOURCE[0]}")"
# Bootstrap .env from the checked-in template on first run.
if [[ ! -f .env && -f .env.template ]]; then
  cp .env.template .env
  echo "Created .env from template — edit as needed"
fi

# Export every variable defined in .env to this shell and its children
# (set -a marks subsequent assignments for export).
if [[ -f .env ]]; then
  set -a
  # shellcheck disable=SC1091 -- .env is generated locally, not in the repo
  source .env
  set +a
fi
# Dispatch on the requested mode (defaults to local when no arg is given).
mode="${1:-local}"
case "$mode" in
  local)
    # uv handles venv creation, dependency sync, and running in one tool.
    if ! command -v uv >/dev/null 2>&1; then
      echo "uv not found. Install: curl -LsSf https://astral.sh/uv/install.sh | sh" >&2
      exit 1
    fi
    uv sync
    uv run python server.py
    ;;
  docker|docker-gpu)
    # One arm for both container modes: only the --gpus flag differs.
    docker build -t mpr-inference .
    # Collect optional flags in an array so they expand safely as separate
    # words — avoids the unquoted $ENV_FLAG word-splitting hack (SC2086).
    run_flags=(--rm -p "${PORT:-8000}:8000")
    [[ "$mode" == docker-gpu ]] && run_flags+=(--gpus all)
    [[ -f .env ]] && run_flags+=(--env-file .env)
    docker run "${run_flags[@]}" --name mpr-inference mpr-inference
    ;;
  stop)
    # Best-effort stop; ignore "no such container" so stop is idempotent.
    docker stop mpr-inference 2>/dev/null || true
    ;;
  *)
    echo "Usage: ./run.sh [local|docker|docker-gpu|stop]" >&2
    exit 1
    ;;
esac