# File: mediaproc/ctrl/.env.template
# MPR Environment Configuration
# Copy to .env and adjust values as needed
# Database
POSTGRES_DB=mpr
POSTGRES_USER=mpr_user
POSTGRES_PASSWORD=mpr_pass
POSTGRES_HOST=postgres
POSTGRES_PORT=5432
# Keep DATABASE_URL in sync with the POSTGRES_* values above
DATABASE_URL=postgresql://mpr_user:mpr_pass@postgres:5432/mpr
# Redis
REDIS_HOST=redis
REDIS_PORT=6379
# Keep REDIS_URL in sync with REDIS_HOST/REDIS_PORT above
REDIS_URL=redis://redis:6379/0
# Django
DEBUG=1
DJANGO_SETTINGS_MODULE=admin.mpr.settings
SECRET_KEY=change-this-in-production
# Worker
MPR_EXECUTOR=local
# gRPC
GRPC_HOST=grpc
GRPC_PORT=50051
GRPC_MAX_WORKERS=10
# S3 Storage (MinIO locally, real S3 on AWS)
# In k8s/docker: http://minio:9000
# On dev machine (port-forward): http://localhost:9000
# On AWS: omit S3_ENDPOINT_URL entirely
S3_ENDPOINT_URL=http://localhost:9000
S3_BUCKET_IN=mpr-media-in
S3_BUCKET_OUT=mpr-media-out
AWS_REGION=us-east-1
AWS_ACCESS_KEY_ID=minioadmin
AWS_SECRET_ACCESS_KEY=minioadmin
# Inference
INFERENCE_URL=http://mcrndeb:8000
# Cloud LLM (detection pipeline escalation)
# Set CLOUD_LLM_PROVIDER to: groq, gemini, claude, openai
CLOUD_LLM_PROVIDER=groq
# Groq (default, free tier)
GROQ_API_KEY=
GROQ_MODEL=meta-llama/llama-4-scout-17b-16e-instruct
# Gemini
#GEMINI_API_KEY=
#GEMINI_MODEL=gemini-2.0-flash
# Claude (uses anthropic SDK)
#ANTHROPIC_API_KEY=
#CLAUDE_MODEL=claude-sonnet-4-20250514
# OpenAI-compatible
#OPENAI_API_KEY=
#OPENAI_MODEL=gpt-4o-mini
#OPENAI_BASE_URL=https://api.openai.com/v1
# Vite
VITE_ALLOWED_HOSTS=your-domain.local