Shoehorning GraphQL, Step Functions and Lambdas; AWS deployment scripts

This commit is contained in:
2026-02-06 18:25:42 -03:00
parent 013587d108
commit e642908abb
35 changed files with 2354 additions and 930 deletions

View File

@@ -27,9 +27,13 @@ GRPC_HOST=grpc
GRPC_PORT=50051
GRPC_MAX_WORKERS=10
# Media
MEDIA_IN=/app/media/in
MEDIA_OUT=/app/media/out
# S3 Storage (MinIO locally, real S3 on AWS)
S3_ENDPOINT_URL=http://minio:9000
S3_BUCKET_IN=mpr-media-in
S3_BUCKET_OUT=mpr-media-out
AWS_REGION=us-east-1
AWS_ACCESS_KEY_ID=minioadmin
AWS_SECRET_ACCESS_KEY=minioadmin
# Vite
VITE_ALLOWED_HOSTS=your-domain.local

View File

@@ -1,18 +1,17 @@
#!/bin/bash
# MPR Deploy Script
#
# Usage: ./ctrl/deploy.sh <command> [options]
#
# Commands:
#   rsync [--restart] [--dry-run]   Sync to remote server via rsync
#   aws                             Deploy AWS infrastructure (Lambda, Step Functions, S3)

# Strict mode: exit on error, error on unset variables, fail pipelines
# if any stage fails.
set -euo pipefail

# Resolve the script's own directory and the project root so the script
# works no matter where it is invoked from.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"

# Optional local configuration (SERVER, REMOTE_PATH, AWS_* overrides).
# Missing file is fine — commands validate what they need themselves.
source "$SCRIPT_DIR/.env" 2>/dev/null || true
@@ -21,56 +20,268 @@ GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
# ─── Rsync Deploy ─────────────────────────────────────────────────────────────

# deploy_rsync [--restart] [--dry-run]
#
# Syncs the project tree to $SERVER:$REMOTE_PATH over rsync (honoring the
# project .gitignore for excludes), seeds ctrl/.env on the remote from the
# template if absent, and optionally rebuilds/restarts the docker compose
# stack there. Requires SERVER and REMOTE_PATH from ctrl/.env.
deploy_rsync() {
    if [ -z "${SERVER:-}" ] || [ -z "${REMOTE_PATH:-}" ]; then
        echo -e "${RED}Error: SERVER and REMOTE_PATH must be set in ctrl/.env${NC}"
        echo "Example:"
        echo " SERVER=user@host"
        echo " REMOTE_PATH=~/mpr"
        exit 1
    fi

    # Option defaults — set BEFORE parsing so the flags are not clobbered.
    RESTART=false
    DRY_RUN=""

    while [ $# -gt 0 ]; do
        case "$1" in
            --restart) RESTART=true; shift ;;
            --dry-run) DRY_RUN="--dry-run"; shift ;;
            *) echo "Unknown option: $1"; exit 1 ;;
        esac
    done

    echo -e "${GREEN}=== Deploying MPR to $SERVER:$REMOTE_PATH ===${NC}"

    # Sync files using .gitignore for excludes. $DRY_RUN is intentionally
    # unquoted: empty means "no extra flag", non-empty is a single flag.
    echo -e "${YELLOW}Syncing files...${NC}"
    rsync -avz --delete $DRY_RUN \
        --filter=':- .gitignore' \
        --exclude='.git' \
        --exclude='media/*' \
        --exclude='ctrl/.env' \
        "$PROJECT_ROOT/" "$SERVER:$REMOTE_PATH/"

    if [ -n "$DRY_RUN" ]; then
        echo -e "${YELLOW}Dry run - no changes made${NC}"
        exit 0
    fi

    # Copy env template if .env doesn't exist on remote
    ssh "$SERVER" "[ -f $REMOTE_PATH/ctrl/.env ] || cp $REMOTE_PATH/ctrl/.env.template $REMOTE_PATH/ctrl/.env"

    if [ "$RESTART" = true ]; then
        echo -e "${YELLOW}Restarting services...${NC}"
        ssh "$SERVER" "cd $REMOTE_PATH/ctrl && docker compose down && docker compose up -d --build"
    fi

    echo -e "${GREEN}Done!${NC}"
}
# ─── AWS Deploy ────────────────────────────────────────────────────────────────

# deploy_aws
#
# Provisions the AWS side of MPR: S3 buckets, IAM roles, an ECR repository,
# the transcode Lambda (container image) and the Step Functions state machine.
# Idempotent: resources are created only if missing; the Lambda image and the
# state-machine definition are (re)pushed on every run.
# Requires: aws CLI with credentials, docker, ctrl/lambda/Dockerfile and
# ctrl/state-machine.json relative to the project root.
deploy_aws() {
    REGION="${AWS_REGION:-us-east-1}"
    ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
    PROJECT="mpr"

    # S3
    BUCKET_IN="${S3_BUCKET_IN:-mpr-media-in}"
    BUCKET_OUT="${S3_BUCKET_OUT:-mpr-media-out}"

    # ECR
    ECR_REPO="${PROJECT}-transcode"
    ECR_URI="${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/${ECR_REPO}"

    # Lambda
    LAMBDA_NAME="${PROJECT}-transcode"
    LAMBDA_TIMEOUT=900   # seconds (Lambda maximum)
    LAMBDA_MEMORY=2048   # MB

    # Step Functions
    SFN_NAME="${PROJECT}-transcode"

    # IAM
    LAMBDA_ROLE_NAME="${PROJECT}-lambda-role"
    SFN_ROLE_NAME="${PROJECT}-sfn-role"

    # Callback
    CALLBACK_URL="${CALLBACK_URL:-https://mpr.mcrn.ar/api}"
    CALLBACK_API_KEY="${CALLBACK_API_KEY:-changeme}"

    echo -e "${GREEN}=== Deploying MPR to AWS ($REGION, account $ACCOUNT_ID) ===${NC}"

    # ─── S3 Buckets ───────────────────────────────────────────────────────
    echo -e "${YELLOW}Creating S3 buckets...${NC}"
    for bucket in "$BUCKET_IN" "$BUCKET_OUT"; do
        if ! aws s3api head-bucket --bucket "$bucket" 2>/dev/null; then
            # us-east-1 must NOT receive a LocationConstraint — the S3 API
            # rejects it with InvalidLocationConstraint for that region.
            if [ "$REGION" = "us-east-1" ]; then
                aws s3api create-bucket \
                    --bucket "$bucket" \
                    --region "$REGION"
            else
                aws s3api create-bucket \
                    --bucket "$bucket" \
                    --region "$REGION" \
                    --create-bucket-configuration LocationConstraint="$REGION"
            fi
            echo " Created $bucket"
        else
            echo " $bucket already exists"
        fi
    done

    # ─── IAM Roles ────────────────────────────────────────────────────────
    echo -e "${YELLOW}Creating IAM roles...${NC}"

    # Lambda execution role: basic logging plus read/write on both buckets.
    if ! aws iam get-role --role-name "$LAMBDA_ROLE_NAME" >/dev/null 2>&1; then
        aws iam create-role \
            --role-name "$LAMBDA_ROLE_NAME" \
            --assume-role-policy-document '{
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Principal": {"Service": "lambda.amazonaws.com"},
                    "Action": "sts:AssumeRole"
                }]
            }'
        aws iam attach-role-policy \
            --role-name "$LAMBDA_ROLE_NAME" \
            --policy-arn arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
        aws iam put-role-policy \
            --role-name "$LAMBDA_ROLE_NAME" \
            --policy-name "${PROJECT}-s3-access" \
            --policy-document '{
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Action": ["s3:GetObject", "s3:PutObject"],
                    "Resource": [
                        "arn:aws:s3:::'"$BUCKET_IN"'/*",
                        "arn:aws:s3:::'"$BUCKET_OUT"'/*"
                    ]
                }]
            }'
        echo " Created $LAMBDA_ROLE_NAME"
        # IAM is eventually consistent; a fresh role may not be usable by
        # lambda create-function immediately.
        echo " Waiting for role to propagate..."
        sleep 10
    else
        echo " $LAMBDA_ROLE_NAME already exists"
    fi
    LAMBDA_ROLE_ARN=$(aws iam get-role --role-name "$LAMBDA_ROLE_NAME" --query Role.Arn --output text)

    # Step Functions role: may only invoke the transcode Lambda.
    if ! aws iam get-role --role-name "$SFN_ROLE_NAME" >/dev/null 2>&1; then
        aws iam create-role \
            --role-name "$SFN_ROLE_NAME" \
            --assume-role-policy-document '{
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Principal": {"Service": "states.amazonaws.com"},
                    "Action": "sts:AssumeRole"
                }]
            }'
        aws iam put-role-policy \
            --role-name "$SFN_ROLE_NAME" \
            --policy-name "${PROJECT}-sfn-invoke-lambda" \
            --policy-document '{
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Action": "lambda:InvokeFunction",
                    "Resource": "arn:aws:lambda:'"$REGION"':'"$ACCOUNT_ID"':function:'"$LAMBDA_NAME"'"
                }]
            }'
        echo " Created $SFN_ROLE_NAME"
        sleep 10
    else
        echo " $SFN_ROLE_NAME already exists"
    fi
    SFN_ROLE_ARN=$(aws iam get-role --role-name "$SFN_ROLE_NAME" --query Role.Arn --output text)

    # ─── ECR Repository ──────────────────────────────────────────────────
    echo -e "${YELLOW}Setting up ECR...${NC}"
    if ! aws ecr describe-repositories --repository-names "$ECR_REPO" --region "$REGION" >/dev/null 2>&1; then
        aws ecr create-repository --repository-name "$ECR_REPO" --region "$REGION"
        echo " Created ECR repo $ECR_REPO"
    else
        echo " ECR repo $ECR_REPO already exists"
    fi

    # ─── Build & Push Lambda Image ───────────────────────────────────────
    echo -e "${YELLOW}Building Lambda container image...${NC}"
    docker build -f ctrl/lambda/Dockerfile -t "${ECR_REPO}:latest" .

    echo -e "${YELLOW}Pushing to ECR...${NC}"
    aws ecr get-login-password --region "$REGION" | \
        docker login --username AWS --password-stdin "${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com"
    docker tag "${ECR_REPO}:latest" "${ECR_URI}:latest"
    docker push "${ECR_URI}:latest"

    # ─── Lambda Function ─────────────────────────────────────────────────
    echo -e "${YELLOW}Deploying Lambda function...${NC}"
    LAMBDA_ARN="arn:aws:lambda:${REGION}:${ACCOUNT_ID}:function:${LAMBDA_NAME}"
    if aws lambda get-function --function-name "$LAMBDA_NAME" --region "$REGION" >/dev/null 2>&1; then
        aws lambda update-function-code \
            --function-name "$LAMBDA_NAME" \
            --image-uri "${ECR_URI}:latest" \
            --region "$REGION"
        echo " Updated $LAMBDA_NAME"
    else
        # AWS_REGION is a reserved Lambda key and must NOT be passed in
        # --environment (the call is rejected); the runtime injects it.
        aws lambda create-function \
            --function-name "$LAMBDA_NAME" \
            --package-type Image \
            --code ImageUri="${ECR_URI}:latest" \
            --role "$LAMBDA_ROLE_ARN" \
            --timeout "$LAMBDA_TIMEOUT" \
            --memory-size "$LAMBDA_MEMORY" \
            --environment "Variables={S3_BUCKET_IN=${BUCKET_IN},S3_BUCKET_OUT=${BUCKET_OUT}}" \
            --region "$REGION"
        echo " Created $LAMBDA_NAME"
    fi

    # ─── Step Functions ───────────────────────────────────────────────────
    echo -e "${YELLOW}Deploying Step Functions state machine...${NC}"
    # Substitute the concrete Lambda ARN into the definition template.
    SFN_DEFINITION=$(sed "s|\${TranscodeLambdaArn}|${LAMBDA_ARN}|g" ctrl/state-machine.json)
    SFN_ARN="arn:aws:states:${REGION}:${ACCOUNT_ID}:stateMachine:${SFN_NAME}"
    if aws stepfunctions describe-state-machine --state-machine-arn "$SFN_ARN" --region "$REGION" >/dev/null 2>&1; then
        aws stepfunctions update-state-machine \
            --state-machine-arn "$SFN_ARN" \
            --definition "$SFN_DEFINITION" \
            --region "$REGION"
        echo " Updated $SFN_NAME"
    else
        aws stepfunctions create-state-machine \
            --name "$SFN_NAME" \
            --definition "$SFN_DEFINITION" \
            --role-arn "$SFN_ROLE_ARN" \
            --region "$REGION"
        echo " Created $SFN_NAME"
    fi

    # ─── Summary ──────────────────────────────────────────────────────────
    echo ""
    echo -e "${GREEN}Deployment complete!${NC}"
    echo ""
    echo "Add these to your .env:"
    echo " MPR_EXECUTOR=lambda"
    echo " STEP_FUNCTION_ARN=${SFN_ARN}"
    echo " LAMBDA_FUNCTION_ARN=${LAMBDA_ARN}"
    echo " S3_BUCKET_IN=${BUCKET_IN}"
    echo " S3_BUCKET_OUT=${BUCKET_OUT}"
    echo " CALLBACK_URL=${CALLBACK_URL}"
    echo " CALLBACK_API_KEY=${CALLBACK_API_KEY}"
}
# ─── Main ──────────────────────────────────────────────────────────────────────

# Dispatch on the first argument; remaining args go to the subcommand.
COMMAND="${1:-}"
shift || true   # tolerate zero args under set -e

if [ "$COMMAND" = "rsync" ]; then
    deploy_rsync "$@"
elif [ "$COMMAND" = "aws" ]; then
    deploy_aws "$@"
else
    # Unknown or missing command: print usage and fail.
    echo "Usage: ./ctrl/deploy.sh <command> [options]"
    echo ""
    echo "Commands:"
    echo " rsync [--restart] [--dry-run] Sync to remote server"
    echo " aws Deploy AWS infrastructure"
    exit 1
fi

View File

@@ -5,8 +5,12 @@ x-common-env: &common-env
DEBUG: 1
GRPC_HOST: grpc
GRPC_PORT: 50051
MEDIA_IN: ${MEDIA_IN:-/app/media/in}
MEDIA_OUT: ${MEDIA_OUT:-/app/media/out}
S3_ENDPOINT_URL: http://minio:9000
S3_BUCKET_IN: mpr-media-in
S3_BUCKET_OUT: mpr-media-out
AWS_ACCESS_KEY_ID: minioadmin
AWS_SECRET_ACCESS_KEY: minioadmin
AWS_REGION: us-east-1
x-healthcheck-defaults: &healthcheck-defaults
interval: 5s
@@ -42,17 +46,46 @@ services:
<<: *healthcheck-defaults
test: ["CMD", "redis-cli", "ping"]
minio:
image: minio/minio
command: ["server", "/data", "--console-address", ":9001"]
ports:
- "9000:9000"
- "9001:9001"
environment:
MINIO_ROOT_USER: minioadmin
MINIO_ROOT_PASSWORD: minioadmin
volumes:
- minio-data:/data
healthcheck:
<<: *healthcheck-defaults
test: ["CMD", "mc", "ready", "local"]
minio-init:
image: minio/mc
depends_on:
minio:
condition: service_healthy
entrypoint: ["/bin/sh", "-c"]
command:
- |
mc alias set local http://minio:9000 minioadmin minioadmin
mc mb --ignore-existing local/mpr-media-in
mc mb --ignore-existing local/mpr-media-out
mc anonymous set download local/mpr-media-in
mc anonymous set download local/mpr-media-out
nginx:
image: nginx:alpine
ports:
- "80:80"
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf:ro
- ../media:/app/media:ro
depends_on:
- django
- fastapi
- timeline
- minio
# =============================================================================
# Application Services
@@ -72,7 +105,6 @@ services:
<<: *common-env
volumes:
- ..:/app
- ../media:/app/media
depends_on:
postgres:
condition: service_healthy
@@ -90,7 +122,6 @@ services:
<<: *common-env
volumes:
- ..:/app
- ../media:/app/media
depends_on:
postgres:
condition: service_healthy
@@ -110,7 +141,6 @@ services:
GRPC_MAX_WORKERS: 10
volumes:
- ..:/app
- ../media:/app/media
depends_on:
postgres:
condition: service_healthy
@@ -127,7 +157,6 @@ services:
MPR_EXECUTOR: local
volumes:
- ..:/app
- ../media:/app/media
depends_on:
postgres:
condition: service_healthy
@@ -150,6 +179,7 @@ services:
volumes:
postgres-data:
redis-data:
minio-data:
networks:
default:

View File

@@ -29,6 +29,13 @@ python -m modelgen from-schema \
--targets typescript \
--include dataclasses,enums,api
# Graphene types for GraphQL: domain models + enums + API types
python -m modelgen from-schema \
--schema schema/models \
--output api/schemas/graphql_types.py \
--targets graphene \
--include dataclasses,enums,api
# Protobuf for gRPC: gRPC messages + service
python -m modelgen from-schema \
--schema schema/models \

21
ctrl/lambda/Dockerfile Normal file
View File

@@ -0,0 +1,21 @@
# Container image for the mpr-transcode Lambda function: AWS Lambda
# Python 3.11 base plus a static ffmpeg/ffprobe build and the task/core
# Python code.
FROM public.ecr.aws/lambda/python:3.11
# Install ffmpeg static binary
# NOTE(review): this downloads an unpinned, unverified build from
# johnvansickle.com — consider pinning a release and verifying its
# checksum for reproducible builds.
RUN yum install -y tar xz && \
curl -L https://johnvansickle.com/ffmpeg/releases/ffmpeg-release-amd64-static.tar.xz -o /tmp/ffmpeg.tar.xz && \
tar -xf /tmp/ffmpeg.tar.xz -C /tmp && \
cp /tmp/ffmpeg-*-amd64-static/ffmpeg /usr/local/bin/ffmpeg && \
cp /tmp/ffmpeg-*-amd64-static/ffprobe /usr/local/bin/ffprobe && \
rm -rf /tmp/ffmpeg* && \
yum clean all
# Install Python dependencies
COPY ctrl/lambda/requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code (only the handler and its core package — the
# build context is the project root, per deploy.sh's docker build call)
COPY task/lambda_handler.py ${LAMBDA_TASK_ROOT}/task/lambda_handler.py
COPY task/__init__.py ${LAMBDA_TASK_ROOT}/task/__init__.py
COPY core/ ${LAMBDA_TASK_ROOT}/core/
# Lambda entry point: module path to the handler function
CMD ["task.lambda_handler.handler"]

View File

@@ -0,0 +1,2 @@
ffmpeg-python>=0.2.0
requests>=2.31.0

View File

@@ -21,6 +21,10 @@ http {
server timeline:5173;
}
upstream minio {
server minio:9000;
}
server {
listen 80;
server_name mpr.local.ar;
@@ -67,16 +71,15 @@ http {
proxy_set_header Host $host;
}
# Media files - input (source)
location /media/in {
alias /app/media/in;
autoindex on;
# Media files - proxied from MinIO (local) or S3 (AWS)
location /media/in/ {
proxy_pass http://minio/mpr-media-in/;
proxy_set_header Host $http_host;
}
# Media files - output (transcoded)
location /media/out {
alias /app/media/out;
autoindex on;
location /media/out/ {
proxy_pass http://minio/mpr-media-out/;
proxy_set_header Host $http_host;
}
# Default to Timeline UI

39
ctrl/state-machine.json Normal file
View File

@@ -0,0 +1,39 @@
{
"Comment": "MPR Transcode Job - orchestrates Lambda-based media transcoding",
"StartAt": "Transcode",
"States": {
"Transcode": {
"Type": "Task",
"Resource": "${TranscodeLambdaArn}",
"TimeoutSeconds": 900,
"Retry": [
{
"ErrorEquals": ["States.TaskFailed", "Lambda.ServiceException"],
"IntervalSeconds": 10,
"MaxAttempts": 2,
"BackoffRate": 2.0
}
],
"Catch": [
{
"ErrorEquals": ["States.ALL"],
"Next": "HandleError",
"ResultPath": "$.error"
}
],
"Next": "Done"
},
"HandleError": {
"Type": "Pass",
"Parameters": {
"status": "failed",
"job_id.$": "$.job_id",
"error.$": "$.error.Cause"
},
"Next": "Done"
},
"Done": {
"Type": "Succeed"
}
}
}