# MPR Environment Configuration
# Copy to .env and adjust values as needed.
# NOTE: all values below are local-development defaults (MinIO creds, dummy
# passwords); replace every credential before deploying anywhere shared.

# Database
POSTGRES_DB=mpr
POSTGRES_USER=mpr_user
POSTGRES_PASSWORD=mpr_pass
POSTGRES_HOST=postgres
POSTGRES_PORT=5432
# Must match the POSTGRES_* values above — plain dotenv does not interpolate
# variables, so update this URL whenever user/password/host/port change.
DATABASE_URL=postgresql://mpr_user:mpr_pass@postgres:5432/mpr

# Redis
REDIS_HOST=redis
REDIS_PORT=6379
# Must match REDIS_HOST/REDIS_PORT above (same no-interpolation caveat).
REDIS_URL=redis://redis:6379/0

# Django
# 1 enables debug mode; set to 0 in production.
DEBUG=1
DJANGO_SETTINGS_MODULE=admin.mpr.settings
SECRET_KEY=change-this-in-production

# Worker
MPR_EXECUTOR=local

# gRPC
GRPC_HOST=grpc
GRPC_PORT=50051
GRPC_MAX_WORKERS=10

# S3 Storage (MinIO locally, real S3 on AWS)
S3_ENDPOINT_URL=http://minio:9000
S3_BUCKET_IN=mpr-media-in
S3_BUCKET_OUT=mpr-media-out
AWS_REGION=us-east-1
AWS_ACCESS_KEY_ID=minioadmin
AWS_SECRET_ACCESS_KEY=minioadmin

# Inference
INFERENCE_URL=http://mcrndeb:8000

# Cloud LLM (detection pipeline escalation)
# Set CLOUD_LLM_PROVIDER to one of: groq, gemini, claude, openai.
# Only the variables for the selected provider need to be uncommented/filled.
CLOUD_LLM_PROVIDER=groq

# Groq (default, free tier)
GROQ_API_KEY=
GROQ_MODEL=llama-3.2-90b-vision-preview

# Gemini
#GEMINI_API_KEY=
#GEMINI_MODEL=gemini-2.0-flash

# Claude (uses anthropic SDK)
#ANTHROPIC_API_KEY=
#CLAUDE_MODEL=claude-sonnet-4-20250514

# OpenAI-compatible
#OPENAI_API_KEY=
#OPENAI_MODEL=gpt-4o-mini
#OPENAI_BASE_URL=https://api.openai.com/v1

# Vite
VITE_ALLOWED_HOSTS=your-domain.local