#!/bin/bash
# Run the inference server.
#
# Usage:
#   ./run.sh              # Local (pip install -r requirements.txt first)
#   ./run.sh docker       # Docker (CPU)
#   ./run.sh docker-gpu   # Docker with GPU
#   ./run.sh stop         # Stop Docker container
#
# Optional env: PORT (host port to bind, default 8000), read from .env if present.

set -euo pipefail

# Always operate from the script's own directory, regardless of caller's cwd.
cd "$(dirname "${BASH_SOURCE[0]}")"

# Bootstrap .env from the template on first run so defaults are available.
if [[ ! -f .env && -f .env.template ]]; then
  cp .env.template .env
  echo "Created .env from template — edit as needed"
fi

if [[ -f .env ]]; then
  # set -a exports every variable assigned while sourcing, so child
  # processes (python / docker) inherit the configuration.
  set -a
  # shellcheck disable=SC1091 — .env is user-provided, not checked in
  source .env
  set +a
fi

#######################################
# Build the image and run the container.
# Arguments: extra flags for `docker run` (e.g. --gpus all); may be empty.
# Globals:   PORT (read, optional) — host port, defaults to 8000.
#######################################
run_docker() {
  docker build -t mpr-inference .
  # Build argv as an array — never word-split an unquoted flag string.
  local -a run_args=(--rm -p "${PORT:-8000}:8000" "$@")
  [[ -f .env ]] && run_args+=(--env-file .env)
  docker run "${run_args[@]}" --name mpr-inference mpr-inference
}

case "${1:-local}" in
  local)
    python server.py
    ;;
  docker)
    run_docker
    ;;
  docker-gpu)
    run_docker --gpus all
    ;;
  stop)
    # Best-effort: ignore "no such container" when nothing is running.
    docker stop mpr-inference 2>/dev/null || true
    ;;
  *)
    echo "Usage: ./run.sh [local|docker|docker-gpu|stop]" >&2
    exit 1
    ;;
esac
|