From 2ffabb672e472f74cb0add273084909ddee24a01 Mon Sep 17 00:00:00 2001 From: buenosairesam Date: Mon, 11 May 2026 20:13:11 -0300 Subject: [PATCH] update docs --- .gitignore | 5 + Makefile | 44 + docker-compose.yml | 21 + docs/graphs/cold_warm_timeline.dot | 55 + docs/graphs/cold_warm_timeline.svg | 158 ++ docs/graphs/lifecycle.dot | 58 + docs/graphs/lifecycle.svg | 159 ++ docs/graphs/system_overview.dot | 70 + docs/graphs/system_overview.svg | 193 ++ docs/index.html | 1612 +++++++++++++++++ docs/lambdas-md/lambda-01-overview.md | 30 + docs/lambdas-md/lambda-02-mental-model.md | 48 + docs/lambdas-md/lambda-03-limits.md | 52 + docs/lambdas-md/lambda-04-cold-starts.md | 44 + docs/lambdas-md/lambda-05-concurrency.md | 35 + docs/lambdas-md/lambda-06-triggers.md | 33 + docs/lambdas-md/lambda-07-iam.md | 58 + docs/lambdas-md/lambda-08-packaging.md | 54 + docs/lambdas-md/lambda-09-vpc-networking.md | 46 + docs/lambdas-md/lambda-10-observability.md | 93 + docs/lambdas-md/lambda-11-async-errors.md | 73 + docs/lambdas-md/lambda-12-step-functions.md | 65 + docs/lambdas-md/lambda-13-cost.md | 42 + docs/lambdas-md/lambda-14-local-dev.md | 74 + docs/lambdas-md/lambda-15-cicd.md | 75 + docs/lambdas-md/lambda-16-pitfalls.md | 57 + docs/lambdas-md/lambda-17-adjacent.md | 40 + docs/lambdas-md/lambda-18-labs.md | 80 + docs/lambdas-md/lambda-19-repository.md | 258 +++ docs/lambdas-md/lambda-README.md | 40 + docs/lambdas-md/lambda-cold_warm_timeline.svg | 158 ++ docs/lambdas-md/lambda-function.py | 79 + docs/lambdas-md/lambda-lifecycle.svg | 159 ++ docs/lambdas-md/lambda-system_overview.svg | 193 ++ docs/lambdas-md/lambda_study_script.md | 1334 ++++++++++++++ docs/viewer.html | 101 ++ invoke.py | 15 + lambda_function.py | 79 + requirements.txt | 3 + seed.py | 76 + 40 files changed, 5869 insertions(+) create mode 100644 .gitignore create mode 100644 Makefile create mode 100644 docker-compose.yml create mode 100644 docs/graphs/cold_warm_timeline.dot create mode 100644 docs/graphs/cold_warm_timeline.svg create mode 100644 docs/graphs/lifecycle.dot create mode 100644 docs/graphs/lifecycle.svg create mode 100644 docs/graphs/system_overview.dot create mode 100644 docs/graphs/system_overview.svg create mode 100644 docs/index.html create mode 100644 docs/lambdas-md/lambda-01-overview.md create mode 100644 docs/lambdas-md/lambda-02-mental-model.md create mode 100644 docs/lambdas-md/lambda-03-limits.md create mode 100644 docs/lambdas-md/lambda-04-cold-starts.md create mode 100644 docs/lambdas-md/lambda-05-concurrency.md create mode 100644 docs/lambdas-md/lambda-06-triggers.md create mode 100644 docs/lambdas-md/lambda-07-iam.md create mode 100644 docs/lambdas-md/lambda-08-packaging.md create mode 100644 docs/lambdas-md/lambda-09-vpc-networking.md create mode 100644 docs/lambdas-md/lambda-10-observability.md create mode 100644 docs/lambdas-md/lambda-11-async-errors.md create mode 100644 docs/lambdas-md/lambda-12-step-functions.md create mode 100644 docs/lambdas-md/lambda-13-cost.md create mode 100644 docs/lambdas-md/lambda-14-local-dev.md create mode 100644 docs/lambdas-md/lambda-15-cicd.md create mode 100644 docs/lambdas-md/lambda-16-pitfalls.md create mode 100644 docs/lambdas-md/lambda-17-adjacent.md create mode 100644 docs/lambdas-md/lambda-18-labs.md create mode 100644 docs/lambdas-md/lambda-19-repository.md create mode 100644 docs/lambdas-md/lambda-README.md create mode 100644 docs/lambdas-md/lambda-cold_warm_timeline.svg create mode 100644 docs/lambdas-md/lambda-function.py create mode 100644 
docs/lambdas-md/lambda-lifecycle.svg create mode 100644 docs/lambdas-md/lambda-system_overview.svg create mode 100644 docs/lambdas-md/lambda_study_script.md create mode 100644 docs/viewer.html create mode 100644 invoke.py create mode 100644 lambda_function.py create mode 100644 requirements.txt create mode 100644 seed.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..a24adde --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +.venv/ +__pycache__/ +*.pyc +.env +def diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..e61dc82 --- /dev/null +++ b/Makefile @@ -0,0 +1,44 @@ +.PHONY: up down seed invoke install logs console clean graphs docs + +PY ?= .venv/bin/python +PIP ?= .venv/bin/pip +DOT_SRC := $(wildcard docs/graphs/*.dot) +SVG_OUT := $(DOT_SRC:.dot=.svg) + +install: + python3 -m venv .venv + $(PIP) install -U pip + $(PIP) install -r requirements.txt + +up: + docker compose up -d + @echo "MinIO API: http://localhost:9000" + @echo "MinIO console: http://localhost:9001 (minioadmin / minioadmin)" + +down: + docker compose down + +clean: + docker compose down -v + +logs: + docker compose logs -f minio + +seed: + @if [ -z "$$SOURCE_DIR" ]; then echo "set SOURCE_DIR="; exit 2; fi + $(PY) seed.py "$$SOURCE_DIR" + +invoke: + $(PY) invoke.py + +console: + xdg-open http://localhost:9001 >/dev/null 2>&1 || true + +docs/graphs/%.svg: docs/graphs/%.dot + dot -Tsvg $< -o $@ + +graphs: $(SVG_OUT) + @echo "rendered $(words $(SVG_OUT)) svg(s) from $(words $(DOT_SRC)) dot file(s)" + +docs: $(SVG_OUT) + xdg-open docs/index.html >/dev/null 2>&1 || echo "open docs/index.html in your browser" diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..4dcd8a4 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,21 @@ +services: + minio: + image: minio/minio:latest + container_name: ethics-minio + ports: + - "9000:9000" + - "9001:9001" + environment: + MINIO_ROOT_USER: minioadmin + MINIO_ROOT_PASSWORD: minioadmin + volumes: + - minio-data:/data + command: server /data --console-address ":9001" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] + interval: 5s + timeout: 3s + retries: 10 + +volumes: + minio-data: diff --git a/docs/graphs/cold_warm_timeline.dot b/docs/graphs/cold_warm_timeline.dot new file mode 100644 index 0000000..61fc414 --- /dev/null +++ b/docs/graphs/cold_warm_timeline.dot @@ -0,0 +1,55 @@ +digraph cold_warm_timeline { + rankdir=LR + bgcolor="#0a0e17" + fontname="Helvetica" + node [fontname="Helvetica" fontsize=11 style=filled color="#1e2a4a" fontcolor="#e8eaf0" shape=box] + edge [fontname="Helvetica" fontsize=9 fontcolor="#8892a8" color="#4a5568"] + + label="Cold vs warm — what gets billed, what gets measured" + labelloc=t + fontsize=16 + fontcolor="#0066ff" + + subgraph cluster_cold { + label="INVOCATION 1 — cold (Init Duration shows in CloudWatch)" + style=dashed + color="#ff3d00" + fontcolor="#ff3d00" + + c_dl [label="Download code\n~50–200 ms\n(NOT billed)" fillcolor="#121829" fontcolor="#8892a8"] + c_init [label="Init phase\n~200–800 ms typical\n(boto3/aioboto3 imports,\nclient build)\n(billed at full mem)" fillcolor="#1a1a3a" fontcolor="#ffc107"] + c_handler [label="Handler\n~5–500 ms\n(billed)" fillcolor="#0d1a33"] + c_freeze [label="freeze" fillcolor="#121829" fontcolor="#8892a8"] + } + + subgraph cluster_warm1 { + label="INVOCATION 2 — warm (no Init Duration logged)" + style=dashed + color="#00c853" + fontcolor="#00c853" + + w_thaw [label="thaw\nmicroseconds\n(NOT billed)" 
fillcolor="#121829" fontcolor="#8892a8"] + w_handler [label="Handler\n~5–500 ms\n(billed)" fillcolor="#0d1a33" fontcolor="#00c853"] + w_freeze [label="freeze" fillcolor="#121829" fontcolor="#8892a8"] + } + + subgraph cluster_warm2 { + label="INVOCATION 3 — warm" + style=dashed + color="#00c853" + fontcolor="#00c853" + + w2_thaw [label="thaw" fillcolor="#121829" fontcolor="#8892a8"] + w2_handler [label="Handler\n(billed)" fillcolor="#0d1a33" fontcolor="#00c853"] + } + + notes [label="Init Duration is ONLY in cold-start logs.\nDuration is the handler portion only.\nBilled Duration rounds Duration up to 1 ms.\nWith Provisioned Concurrency, init runs ahead of time —\nyou pay for it in PC pricing, not per invocation." fillcolor="#0a0e17" color="#1e2a4a" shape=note fontcolor="#b4bccf" fontsize=10] + + c_dl -> c_init -> c_handler -> c_freeze + c_freeze -> w_thaw [label="next event\n(< idle window)" color="#00c853"] + w_thaw -> w_handler -> w_freeze + w_freeze -> w2_thaw [color="#00c853"] + w2_thaw -> w2_handler + + {rank=sink; notes} +} diff --git a/docs/graphs/cold_warm_timeline.svg b/docs/graphs/cold_warm_timeline.svg new file mode 100644 index 0000000..be40362 --- /dev/null +++ b/docs/graphs/cold_warm_timeline.svg @@ -0,0 +1,158 @@ + + + + + + +cold_warm_timeline + +Cold vs warm — what gets billed, what gets measured + +cluster_cold + +INVOCATION 1 — cold (Init Duration shows in CloudWatch) + + +cluster_warm1 + +INVOCATION 2 — warm (no Init Duration logged) + + +cluster_warm2 + +INVOCATION 3 — warm + + + +c_dl + +Download code +~50–200 ms +(NOT billed) + + + +c_init + +Init phase +~200–800 ms typical +(boto3/aioboto3 imports, +client build) +(billed at full mem) + + + +c_dl->c_init + + + + + +c_handler + +Handler +~5–500 ms +(billed) + + + +c_init->c_handler + + + + + +c_freeze + +freeze + + + +c_handler->c_freeze + + + + + +w_thaw + +thaw +microseconds +(NOT billed) + + + +c_freeze->w_thaw + + +next event +(< idle window) + + + +w_handler + +Handler +~5–500 ms +(billed) + + + +w_thaw->w_handler + + + + + +w_freeze + +freeze + + + +w_handler->w_freeze + + + + + +w2_thaw + +thaw + + + +w_freeze->w2_thaw + + + + + +w2_handler + +Handler +(billed) + + + +w2_thaw->w2_handler + + + + + +notes + + + +Init Duration is ONLY in cold-start logs. +Duration is the handler portion only. +Billed Duration rounds Duration up to 1 ms. +With Provisioned Concurrency, init runs ahead of time — +you pay for it in PC pricing, not per invocation. + + + diff --git a/docs/graphs/lifecycle.dot b/docs/graphs/lifecycle.dot new file mode 100644 index 0000000..2d8abe8 --- /dev/null +++ b/docs/graphs/lifecycle.dot @@ -0,0 +1,58 @@ +digraph lifecycle { + rankdir=TB + bgcolor="#0a0e17" + fontname="Helvetica" + node [fontname="Helvetica" fontsize=11 style=filled color="#1e2a4a" fontcolor="#e8eaf0"] + edge [fontname="Helvetica" fontsize=9 fontcolor="#8892a8" color="#4a5568"] + + label="Lambda execution environment lifecycle" + labelloc=t + fontsize=16 + fontcolor="#0066ff" + + subgraph cluster_cold { + label="Cold start (first invocation on a fresh execution environment)" + style=dashed + color="#ff3d00" + fontcolor="#ff3d00" + + download [label="1. Download code\nzip / container layers" fillcolor="#243056" shape=box] + bootstrap [label="2. Start runtime\nbootstrap (python3.x)" fillcolor="#243056" shape=box] + init [label="3. 
Init phase\nrun module-level code\nimport boto3 / aioboto3\nbuild clients\n(billed; capped at 10 s)" fillcolor="#1a1a3a" shape=box fontcolor="#ffc107"] + } + + subgraph cluster_invoke { + label="Invocation" + style=dashed + color="#1e2a4a" + fontcolor="#8892a8" + + handler [label="handler(event, context)\nyour code runs\n(billed)" fillcolor="#0d1a33" shape=box] + respond [label="return / raise" fillcolor="#121829" shape=box] + } + + subgraph cluster_warm { + label="Warm reuse (subsequent invocations on the same environment)" + style=dashed + color="#00c853" + fontcolor="#00c853" + + thaw [label="thaw\n(microseconds)" fillcolor="#1a3a1a" shape=box] + reuse [label="globals retained:\nclients, /tmp,\nin-memory caches" fillcolor="#1a3a1a" shape=note fontcolor="#00c853"] + } + + freeze [label="freeze\nprocess paused\n(after handler returns)" fillcolor="#121829" shape=box] + shutdown [label="shutdown\nidle ~5–15 min →\nenv torn down\n/tmp gone" fillcolor="#121829" shape=box fontcolor="#ff3d00"] + + download -> bootstrap + bootstrap -> init + init -> handler [label="event arrives" color="#0066ff"] + handler -> respond + respond -> freeze + + freeze -> thaw [label="next event" color="#00c853"] + thaw -> handler [label="reuse env" color="#00c853"] + reuse -> handler [style=dotted color="#00c853"] + + freeze -> shutdown [label="idle too long" style=dashed color="#ff3d00"] +} diff --git a/docs/graphs/lifecycle.svg b/docs/graphs/lifecycle.svg new file mode 100644 index 0000000..2dc1f21 --- /dev/null +++ b/docs/graphs/lifecycle.svg @@ -0,0 +1,159 @@ + + + + + + +lifecycle + +Lambda execution environment lifecycle + +cluster_cold + +Cold start (first invocation on a fresh execution environment) + + +cluster_invoke + +Invocation + + +cluster_warm + +Warm reuse (subsequent invocations on the same environment) + + + +download + +1. Download code +zip / container layers + + + +bootstrap + +2. Start runtime +bootstrap (python3.x) + + + +download->bootstrap + + + + + +init + +3. 
Init phase +run module-level code +import boto3 / aioboto3 +build clients +(billed; capped at 10 s) + + + +bootstrap->init + + + + + +handler + +handler(event, context) +your code runs +(billed) + + + +init->handler + + +event arrives + + + +respond + +return / raise + + + +handler->respond + + + + + +freeze + +freeze +process paused +(after handler returns) + + + +respond->freeze + + + + + +thaw + +thaw +(microseconds) + + + +thaw->handler + + +reuse env + + + +reuse + + + +globals retained: +clients, /tmp, +in-memory caches + + + +reuse->handler + + + + + +freeze->thaw + + +next event + + + +shutdown + +shutdown +idle ~5–15 min → +env torn down +/tmp gone + + + +freeze->shutdown + + +idle too long + + + diff --git a/docs/graphs/system_overview.dot b/docs/graphs/system_overview.dot new file mode 100644 index 0000000..ffcb564 --- /dev/null +++ b/docs/graphs/system_overview.dot @@ -0,0 +1,70 @@ +digraph system_overview { + rankdir=LR + bgcolor="#0a0e17" + fontname="Helvetica" + node [fontname="Helvetica" fontsize=11 style=filled color="#1e2a4a" fontcolor="#e8eaf0"] + edge [fontname="Helvetica" fontsize=9 fontcolor="#8892a8" color="#4a5568"] + + label="Sample app — Lambda + MinIO sandbox" + labelloc=t + fontsize=16 + fontcolor="#0066ff" + + subgraph cluster_caller { + label="Caller" + style=dashed + color="#1e2a4a" + fontcolor="#8892a8" + + invoke [label="invoke.py\n(local) /\nAPI Gateway,\nS3 event,\nStep Functions\n(real AWS)" fillcolor="#243056" shape=box] + } + + subgraph cluster_lambda { + label="Lambda execution environment" + style=dashed + color="#0066ff" + fontcolor="#0066ff" + + handler [label="handler(event, context)\nlambda_function.py" fillcolor="#1a1a3a" shape=box] + + subgraph cluster_async { + label="asyncio.Queue producer / consumer" + style=dotted + color="#0066ff" + fontcolor="#8892a8" + + producer [label="producer\nlist_objects_v2 (paginator)\nfilter *.pdf" fillcolor="#0d1a33" shape=box] + queue [label="asyncio.Queue\nmaxsize=2000\n(backpressure)" fillcolor="#121829" shape=cylinder] + consumer [label="consumer\ngenerate_presigned_url\nappend JSONL" fillcolor="#0d1a33" shape=box] + } + + tmp [label="/tmp/.jsonl\nstreamed manifest\n(ephemeral, 512 MB default)" fillcolor="#121829" shape=cylinder fontcolor="#ffc107"] + } + + subgraph cluster_storage { + label="Object storage" + style=dashed + color="#1e2a4a" + fontcolor="#8892a8" + + minio [label="MinIO (local)\nor real S3" fillcolor="#1a3a1a" shape=cylinder fontcolor="#00c853"] + bucket [label="my-company-reports-bucket\n2026/04/*.pdf\nmanifests/.jsonl" fillcolor="#121829" shape=folder] + } + + response [label="response\n{count, manifest_key,\nmanifest_url}\n(< 1 KB; sidesteps 6 MB cap)" fillcolor="#243056" shape=note fontcolor="#00c853"] + + invoke -> handler [label="event"] + handler -> producer [label="spawn task"] + handler -> consumer [label="spawn task"] + + producer -> minio [label="LIST"] + minio -> producer [label="page (1000 keys)" style=dashed] + producer -> queue [label="key" color="#0066ff"] + queue -> consumer [label="key"] + consumer -> minio [label="presign\n(local HMAC)" style=dotted] + consumer -> tmp [label="JSONL line"] + + tmp -> minio [label="put_object\nmanifests/.jsonl"] + handler -> response [label="return"] + minio -> bucket [style=invis] +} diff --git a/docs/graphs/system_overview.svg b/docs/graphs/system_overview.svg new file mode 100644 index 0000000..a27ec81 --- /dev/null +++ b/docs/graphs/system_overview.svg @@ -0,0 +1,193 @@ + + + + + + +system_overview + +Sample app — Lambda + MinIO sandbox + 
+cluster_caller + +Caller + + +cluster_lambda + +Lambda execution environment + + +cluster_async + +asyncio.Queue producer / consumer + + +cluster_storage + +Object storage + + + +invoke + +invoke.py +(local) / +API Gateway, +S3 event, +Step Functions +(real AWS) + + + +handler + +handler(event, context) +lambda_function.py + + + +invoke->handler + + +event + + + +producer + +producer +list_objects_v2 (paginator) +filter *.pdf + + + +handler->producer + + +spawn task + + + +consumer + +consumer +generate_presigned_url +append JSONL + + + +handler->consumer + + +spawn task + + + +response + + + +response +{count, manifest_key, +manifest_url} +(< 1 KB; sidesteps 6 MB cap) + + + +handler->response + + +return + + + +queue + + +asyncio.Queue +maxsize=2000 +(backpressure) + + + +producer->queue + + +key + + + +minio + + +MinIO (local) +or real S3 + + + +producer->minio + + +LIST + + + +queue->consumer + + +key + + + +tmp + + +/tmp/<uuid>.jsonl +streamed manifest +(ephemeral, 512 MB default) + + + +consumer->tmp + + +JSONL line + + + +consumer->minio + + +presign +(local HMAC) + + + +tmp->minio + + +put_object +manifests/<uuid>.jsonl + + + +minio->producer + + +page (1000 keys) + + + +bucket + +my-company-reports-bucket +2026/04/*.pdf +manifests/<uuid>.jsonl + + + + diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 0000000..dd76247 --- /dev/null +++ b/docs/index.html @@ -0,0 +1,1612 @@ + + + + + +AWS Lambda — Study notes & sandbox + + + + +
+

AWS LAMBDA

+ Study notes & sandbox — built from the interview exercise + +
+ +
+ + + + +
+ + + + +
+

OVERVIEW

+

A study site built on top of a working Lambda + MinIO sandbox. Read the page, run the code, break things on purpose.

+ +
+

What this is

+

The repo at the root of this site (ethics/) holds a Python AWS Lambda function — lambda_function.py — that lists PDFs in an S3 bucket under a prefix, paginates, generates 15-minute presigned URLs, and writes a JSONL manifest. It runs locally against MinIO via docker compose, with the same handler signature as a real Lambda. This site explains the surrounding mental model in the order you'd want to study it before walking into a Lambda-heavy interview or production rotation.

+ +

How it's organised

+

The sidebar groups topics into four reading orders. Foundations is the picture in your head. Operating covers the day-to-day knobs. Production covers what changes when real users and real money are involved. Reference holds the must-know checklist (Pitfalls), brief orientations on adjacent tools (Glue, Prometheus/Grafana), the hands-on labs (Labs), and the repo tree (Repository).

+ +

How to use it

+
  1. Read top-to-bottom — the order in the sidebar is the recommended study path.
  2. Run the sandbox. make install && make up && SOURCE_DIR=<dir> make seed && make invoke. The handler executes locally against MinIO; you can break it without burning AWS credit.
  3. Do the labs. Each one mutates the existing app: deploy to real AWS, add an S3 trigger, switch to arm64, enable Provisioned Concurrency, fan out across prefixes with Step Functions, and so on.
  4. Skim Pitfalls the night before any interview or design review.
+
+ +

System overview

+

Caller → handler → MinIO/S3 → manifest write-back. The async producer/consumer overlaps S3 LIST calls with presigning + JSONL writes, so the manifest streams to /tmp rather than buffering in memory.

+
+ System overview +
+
+ Real / live + Ephemeral / caveat + Lambda boundary + Pitfall +
+
+ + + + +
+

MENTAL MODEL

+

Lambda is a Linux process whose lifecycle is managed for you. Most of the surprise comes from forgetting that it's still a process.

+ +
+

What Lambda actually is

+

Each invocation runs inside an execution environment: a Firecracker microVM running the Lambda runtime (e.g. python3.13), with your code unpacked into /var/task and an ephemeral /tmp. AWS owns the VM; you own everything inside the process. The microVM is created on demand, kept warm for a while, then torn down when idle traffic stops feeding it. You don't pick a server, but there is a server, and it has memory, a clock, and a filesystem.

+ +

The two phases

+

Every cold start splits cleanly into two:

+
  • Init phase — your module-level code runs once: imports, client construction, anything outside the handler function. Capped at 10 s. Billed at full configured memory. The os.environ reads at the top of lambda_function.py happen here.
  • Handler phase — handler(event, context) runs once per invocation. Billed per-millisecond at configured memory. Subsequent invocations on the same environment skip the init phase and go straight here.
+

This split is the single most useful thing to internalise. Heavy work at module level → pay it once per cold start. Heavy work inside the handler → pay it every invocation.

+ +

Globals persist across warm invocations

+

Anything assigned at module scope survives between handler calls on the same environment. That includes the boto3 client (good — connection reuse, TCP keep-alive, no re-handshake) and any in-memory cache you build (good — but be careful, see Pitfalls). It also includes mutations you didn't mean to keep, like a list you appended to without thinking. The same warm container can serve thousands of invocations in a row, then disappear.

+ +
# module level — runs once per cold start, reused across warm invocations
+import asyncio
+import os
+
+BUCKET   = os.environ["BUCKET_NAME"]
+ENDPOINT = os.environ.get("S3_ENDPOINT_URL")
+
+# handler level — runs every invocation
+def handler(event, context):
+    return asyncio.run(_run())   # _run() is the async body defined in lambda_function.py
+ +

/tmp is real but local

+

Each environment has its own /tmp (default 512 MB, configurable to 10 GB). It persists across warm invocations on that environment, so you can stash artefacts you'd rather not rebuild — but it is not shared between concurrent executions, and it's gone when the environment dies. lambda_function.py writes /tmp/<uuid>.jsonl per invocation and uploads it to S3 at the end; the file then becomes garbage, and the next invocation starts fresh.
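A minimal sketch of that /tmp-then-upload pattern (illustrative only; the real logic lives in lambda_function.py, and here the rows are assumed to arrive on the event):

import json
import uuid

import boto3

s3 = boto3.client("s3")
BUCKET = "my-company-reports-bucket"   # the sandbox bucket from the system overview

def handler(event, context):
    path = f"/tmp/{uuid.uuid4()}.jsonl"      # private to this execution environment
    with open(path, "w") as f:
        for item in event.get("items", []):  # assumption: rows carried on the event
            f.write(json.dumps(item) + "\n")
    key = f"manifests/{path.rsplit('/', 1)[-1]}"
    s3.upload_file(path, BUCKET, key)        # write-back; the /tmp copy is now garbage
    return {"manifest_key": key}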

+ +

Concurrency is horizontal

+

If two events arrive while one is being processed, AWS spins up a second execution environment. Each environment processes one invocation at a time, single-threaded relative to your handler. The "concurrency" you see in CloudWatch is the count of environments running in parallel. There is no thread pool to tune. There is no shared memory between environments. If you need shared state, externalise it (DynamoDB, Redis, S3).

+ +

The reuse window

+

Idle environments stick around for roughly 5–15 minutes (AWS doesn't promise a number) before being recycled. That's why a function that sees one request a minute almost never cold-starts, and a function that sees one a day always does. Cold Starts covers what that costs and how to mitigate it.

+
+ +

Lifecycle

+

Init is paid once, handler is paid every time. Freeze/thaw is free. Shutdown happens when nobody's looking.

+
+ Lambda execution environment lifecycle +
+
+ + + + +
+

LIMITS — CHEATSHEET

+

Every number worth memorising. The "why it matters" column is the part interviews actually probe.

+ +
+

Per-function compute & storage

+ + + + + + + + +
Limit | Default | Max | Why it matters
Memory | 128 MB | 10 240 MB | CPU scales linearly with memory. More memory ≠ just more headroom — at 1 769 MB you get a full vCPU; at higher tiers, multiple. Often cheaper to bump memory because duration drops faster than cost rises.
Timeout | 3 s | 900 s (15 min) | 3 s default is too short for almost anything that talks to S3. Set explicitly; don't accept the default. API Gateway caps at 29 s no matter what your function says (see below).
Ephemeral storage (/tmp) | 512 MB | 10 240 MB | Persists across warm invocations on the same env, vanishes with the environment. Not shared between concurrent envs. Billed for the amount configured above 512 MB.
Init phase | n/a | 10 s hard cap | Module-level code (imports, client construction). Heavy ML model loads, custom JIT warmups — measure them or you'll trip this.
+ +

Payloads & responses

+ + + + + + + + + + +
Limit | Value | Why it matters
Sync invocation request | 6 MB | Hard cap on the event body for RequestResponse invocations.
Sync invocation response | 6 MB | Exceeding it fails the invocation — your handler "succeeds" but the caller gets a payload-too-large error (413). lambda_function.py sidesteps this by returning a manifest URL instead of inlining all presigned URLs.
Async invocation event | 256 KB | For Event invocations and most event-source-mapped triggers (S3, EventBridge, SNS).
Response streaming | 20 MB (soft) / unlimited (with bandwidth cap) | Function URLs and Lambda response streaming mode break the 6 MB cap by flushing chunks. Not all clients/SDKs support it.
Environment variables | 4 KB total | Per function, all keys+values combined. Big config → Parameter Store / Secrets Manager.
Event size (SQS, SNS, EventBridge) | 256 KB each | Producer-side limit. Larger payloads → store in S3, send a pointer.
+ +

Packaging

+ + + + + + + + +
Limit | Value | Why it matters
Zip upload (direct) | 50 MB | Above this you must upload via S3 first.
Zip unzipped (function + layers) | 250 MB | Total of /var/task + all layers extracted. aioboto3+deps is ~50 MB; you have headroom but not infinite.
Container image | 10 GB | Per image. Preferred when you'd otherwise blow the 250 MB zip ceiling — e.g. ML deps with native binaries.
Layers | 5 per function | Ordering matters: later layers overwrite earlier. Layers count toward the 250 MB unzipped cap.
+ +

Concurrency & scaling

+ + + + + + + + +
Limit | Default | Notes
Account concurrent executions | 1 000 / region | Soft quota — request increase via Service Quotas. The single most common throttling cause in production.
Burst concurrency | 500–3 000 (region-dependent) | How many fresh environments AWS will spin up immediately at a traffic spike. Beyond this, scale-up is +500 envs / min.
Reserved concurrency | 0 to account quota | Carves a slice of the account pool for a function. Setting it to 0 effectively disables the function.
Provisioned concurrency | 0 by default | Pre-warmed envs. Eliminates cold starts at the cost of paying for idle capacity. Bills as PC-seconds + invocation cost.
+ +

Time & rate limits at the edges

+ + + + + + + + +
Surface | Limit | Why it matters
API Gateway integration timeout | 29 s | Caps your effective Lambda timeout when fronted by API GW, regardless of what the Lambda timeout says. Function URLs allow up to 15 min.
Async invocation event age | 6 h | If retries don't succeed in this window, the event is dropped (or sent to DLQ / on-failure destination).
Async retry attempts | 2 (default) | Total of 3 attempts (initial + 2). Configurable down to 0.
SQS visibility timeout requirement | ≥ 6× function timeout | AWS recommendation. Otherwise messages reappear while still being processed.
+ +
+ Memorisation hack. Three numbers cover most interview questions: 15 minutes (timeout), 10 GB (memory and /tmp ceiling), 6 MB (sync payload). Everything else is a footnote until you hit a specific design. +
+
+
+ + + + +
+

COLD STARTS

+

Init Duration vs warm path. Mitigations: Provisioned Concurrency, arm64, lazy imports, smaller packages, SnapStart.

+
+ Cold vs warm timeline +
+ +
+

What triggers a cold start

+

A cold start happens whenever Lambda must create a new execution environment: the very first request after a deployment, when traffic spikes beyond the number of warm environments, and after an environment has been idle long enough to be recycled (typically 5–15 minutes, unspecified by AWS). Deployments always cold-start the incoming version — you can't avoid the first one, only reduce how long it takes.

+ +

The cold path

+

AWS provisions a Firecracker microVM, downloads and unpacks your code (or pulls the container image), starts the language runtime, then runs your module-level code. Only after all of that does your handler function get called. The timeline is roughly:

+
  1. Environment provisioning — microVM boot, network attachment, filesystem mount. Not billed; AWS absorbs this.
  2. Init phase — your module-level code: imports, client construction, config reads. Billed at full configured memory. Capped at 10 s.
  3. Handler phase — handler(event, context) runs. Billed per-ms.
+

CloudWatch shows this split: the REPORT line includes Init Duration only on cold invocations. Warm invocations have no Init Duration line.

+ +

Typical numbers

+ + + + + + + + + + +
Runtime | Typical cold start (p50) | Typical cold start (p99)
Python 3.13 (zip, minimal deps) | ~150 ms | ~400 ms
Python 3.13 (zip, aioboto3 + aiofiles) | ~300 ms | ~700 ms
Node.js 22 | ~100 ms | ~300 ms
Java 21 (without SnapStart) | ~1–2 s | ~3–5 s
Java 21 (SnapStart enabled) | ~200 ms | ~600 ms
Container image (any runtime) | +100–300 ms | first pull can be 1–3 s
+ +

Mitigations

+

Provisioned Concurrency (PC) — pre-warms N environments so they're always in the "warm" state. Eliminates cold starts for the provisioned slots. You pay for those slots 24/7 even when idle. Use for latency-sensitive, predictable-traffic paths. Schedule PC changes via Application Auto Scaling for cost efficiency.
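Enabling PC is a single call against a published version or alias, never $LATEST. A boto3 sketch, with a hypothetical function name and alias:

import boto3

lam = boto3.client("lambda")
# PC attaches to a version or alias, not to $LATEST
lam.put_provisioned_concurrency_config(
    FunctionName="pdf-scanner",            # hypothetical function name
    Qualifier="prod",                      # alias (or a version number)
    ProvisionedConcurrentExecutions=10,
)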

+

arm64 — Graviton2 executes the init phase ~10% faster than x86_64 for CPU-bound init work. Combined with the ~20% price reduction, arm64 is the default choice unless native wheels block you.

+

Smaller packages — Lambda downloads and unpacks your zip on every cold start. Trimming unused transitive dependencies (map the tree with pipdeptree, or use pip install --no-deps to pull in only what you explicitly list) and stripping test/doc files shaves real time. Every MB of extracted code costs a few ms.

+

Lazy imports — move rarely-used or slow imports inside the handler (or into a lazy-init guard). The most common win is heavy ML libraries only needed for inference: import them on first call, cache the result in a module-level variable.
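A common shape for that guard, as a sketch (heavy_ml_lib stands in for whatever slow dependency you only need on some paths):

_model = None   # module-level cache, survives warm invocations

def handler(event, context):
    global _model
    if _model is None:
        import heavy_ml_lib                    # hypothetical slow import; paid on first call only
        _model = heavy_ml_lib.load("model.bin")
    return _model.predict(event["payload"])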

+

SnapStart — takes a snapshot of the initialised runtime state after your init phase, then restores from that snapshot on cold starts. Collapses 1–5 s JVM startup to ~200 ms. Launched for Java; AWS has since extended it to Python (3.12+) and .NET. Not available for Node.

+ +
+ When cold starts don't matter: batch jobs, async event pipelines, scheduled tasks — nobody is waiting on the p99. Only optimise cold starts when a human is waiting synchronously for the response. +
+
+
+ + + + +
+

CONCURRENCY

+

Account quota, reserved, provisioned. The "100 RPS × 200 ms" math.

+ +
+

The fundamental model

+

Lambda concurrency = the number of execution environments processing requests at the same instant. Each environment handles exactly one invocation at a time. There is no thread pool, no event loop shared across invocations — if two requests arrive simultaneously, AWS spins up two separate environments.

+

The key formula: concurrency ≈ RPS × average duration (in seconds). At 100 requests/s with a 200 ms average handler duration, you need 100 × 0.2 = 20 concurrent environments. At 500 ms average, you need 50. At 2 s average, 200 — and so on. Latency optimisation directly reduces your concurrency footprint.

+ +

Account concurrency pool

+

Every AWS account has a regional concurrency quota — default 1 000 concurrent executions per region, shared across all functions. When the pool is full, new invocations get throttled (sync → HTTP 429 TooManyRequestsException; async → queued and retried). Raising the limit requires a Service Quotas increase request; AWS typically grants up to 10 000 with a business justification.

+

This is the single most common production surprise: one function spikes and starves all others in the same region. Reserved concurrency is the fix.

+ +

Types of concurrency

+ + + + + + + +
Type | What it does | Cost | Use for
Unreserved | Draws from the shared regional pool on demand | Invocation + duration only | Most functions
Reserved | Carves a slice of the regional pool exclusively for this function; acts as both a floor and a ceiling | No extra charge | Protecting critical paths from noisy neighbours; throttling cost runaway
Provisioned | Pre-warms N environments; they stay initialised 24/7 | PC-hours + invocation | Latency-sensitive functions where cold starts are unacceptable
+ +

Reserved concurrency edge cases

+
  • Setting reserved concurrency to 0 disables the function entirely — useful as a circuit breaker (one call; see the sketch after this list).
  • Reserved concurrency counts against the account pool even when idle. If you set 500 reserved on a function, only 500 remain for all other functions (at default 1 000).
  • Reserved concurrency does not pre-warm. You still cold-start; you just can't scale past the cap.
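Both the cap and the circuit breaker are one API call each. A boto3 sketch, function name hypothetical:

import boto3

lam = boto3.client("lambda")

# cap the function at 50 concurrent executions (floor and ceiling)
lam.put_function_concurrency(FunctionName="pdf-scanner",
                             ReservedConcurrentExecutions=50)

# circuit breaker: 0 reserved concurrency disables all invocations
lam.put_function_concurrency(FunctionName="pdf-scanner",
                             ReservedConcurrentExecutions=0)

# remove the cap, returning the function to the unreserved pool
lam.delete_function_concurrency(FunctionName="pdf-scanner")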
+ +

Burst scaling

+

When traffic spikes from zero, Lambda can spin up environments quickly — but not infinitely fast. The burst limit (region-dependent, typically 500–3 000 immediate) is how many environments AWS will create right now. Beyond that, it adds 500 new environments per minute. A spike from 0 to 5 000 concurrent requests takes several minutes to fully absorb. Provisioned Concurrency or pre-warming via a ping mechanism is the fix for sudden large spikes.

+ +
+ Interview answer template: "Concurrency = RPS × duration. Default pool is 1 000/region. Reserved carves a slice and prevents both starvation and runaway. Provisioned pre-warms to eliminate cold starts, but you pay for idle capacity." +
+
+
+ + + + +
+

TRIGGERS

+

Fan-in catalogue: API GW, Function URL, S3, SQS, SNS, EventBridge, DynamoDB streams, Kinesis, ALB, schedule, Step Functions.

+ +
+

Three invocation models

+

Every trigger falls into one of three models, and the model determines retry behaviour, error handling, and whether the caller can see the response.

+ + + + + + + + +
Model | Caller behaviour | Retries on error | Max event size
Synchronous | Blocks for response; gets result or error directly | None — caller decides | 6 MB request + response
Asynchronous | Gets 202 immediately; Lambda queues + retries internally | 2 retries (3 total) over up to 6 h | 256 KB event
Poll-based (ESM) | Lambda polls the source on your behalf; batches records | Keeps retrying until success or record expires/goes to DLQ | Depends on source
+ +

Trigger catalogue

+ + + + + + + + + + + + + + + +
Trigger | Model | Key notes
API Gateway (REST / HTTP) | Sync | 29 s integration timeout regardless of Lambda timeout. HTTP API is cheaper and lower-latency than REST API. Transforms request/response.
Function URL | Sync | Direct HTTPS endpoint on the function; no API Gateway layer. Supports up to 15 min timeout and response streaming. Simpler, cheaper, fewer features.
ALB (Application Load Balancer) | Sync | Like API GW but routes at L7; useful when Lambda is one target among EC2/ECS targets. 29 s timeout.
S3 event notification | Async | Fires on object create/delete/etc. At-least-once delivery. A large PUT creates exactly one event per object, but notifications can duplicate. Common pattern: S3 → SNS → SQS → Lambda for fan-out + replay.
SNS | Async | Fan-out: one message → multiple subscribers. At-least-once. Dead-letter queue on the subscription, not the topic.
EventBridge (CloudWatch Events) | Async | Event bus with content-based routing rules. Also the managed scheduler (cron/rate expressions, timezone-aware since 2022). At-least-once.
SQS | Poll-based (ESM) | Lambda polls and batches (up to 10 000 msgs). Standard: at-least-once, unordered. FIFO: ordered per message group, exactly-once with dedup. Visibility timeout must be ≥ 6× function timeout. Partial batch failure via batchItemFailures.
Kinesis Data Streams | Poll-based (ESM) | One concurrent invocation per shard (more with a parallelisation factor). Records expire (24 h–1 yr); Lambda retries until success or expiry. Use bisect-on-error and batchItemFailures to avoid one bad record blocking an entire shard.
DynamoDB Streams | Poll-based (ESM) | Captures item-level changes. Ordered per partition key. 24 h retention. Same retry behaviour as Kinesis. Use for CDC (change-data-capture) patterns.
Step Functions | Sync (Task state) | Step Functions calls the function synchronously and waits for the result. Retries and timeouts are defined in the state machine, not Lambda. See the Step Functions section.
Cognito / SES / IoT etc. | Sync or Async | Service-specific; check the docs for each. Cognito triggers (pre-signup, pre-token) are sync and block the auth flow.
+ +

Choosing between SQS and SNS+SQS

+

Use plain SQS → Lambda when you have one consumer and want to buffer, batch, and retry. Use SNS → SQS → Lambda when you need fan-out (multiple independent consumers each get a copy) or when the producer is an AWS service that speaks SNS natively (S3 event notifications, for example). The SNS layer decouples producers from the queue topology.

+
+
+ + + + +
+

IAM & PERMISSIONS

+

Execution role vs resource policy. The two policies most people confuse.

+ +
+

Two independent permission layers

+

Lambda has two separate permission surfaces that must each be correct independently. Confusing them is the most common "it works locally but not in AWS" failure.

+ + + + + + + +
Layer | Question it answers | Who creates it
Execution role | What can this Lambda function do once running? (call S3, write to DynamoDB, publish to SNS…) | You — attached at function creation
Resource policy | Who is allowed to invoke this Lambda function? (API Gateway, another account, EventBridge…) | AWS adds it automatically for most triggers; you add it for cross-account or manual grants
+ +

Execution role

+

The execution role is an IAM role that Lambda assumes when running your function. Every Lambda must have one. The role's attached policies determine what AWS API calls the function can make. At minimum, every function needs:

+
# minimum: write its own logs
+logs:CreateLogGroup
+logs:CreateLogStream
+logs:PutLogEvents
+

Common additions for a function that reads/writes S3:

+
s3:GetObject
+s3:PutObject
+s3:ListBucket        # needed for paginator; often forgotten
+kms:Decrypt          # if the bucket uses a CMK, this is also required
+

The AWSLambdaBasicExecutionRole managed policy covers logs only — it is intentionally minimal. AWSLambdaVPCAccessExecutionRole adds the ENI permissions needed when the function is in a VPC.

+ +

Resource policy

+

The resource policy is attached to the Lambda function itself (not an IAM identity). When you add an S3 event notification or API Gateway integration in the console, AWS automatically adds a resource policy entry allowing that service to invoke the function. For cross-account invocations you add this manually via aws lambda add-permission.

+
# grant another account permission to invoke
+# --principal here is the other AWS account's ID
+aws lambda add-permission \
+  --function-name my-function \
+  --principal 123456789012 \
+  --action lambda:InvokeFunction \
+  --statement-id cross-account-invoke
+ +

Common mistakes

+
  • Missing s3:ListBucket on the bucket resource. ListObjectsV2 requires this on the bucket ARN (not the object ARN). Forgetting it causes AccessDenied on the paginator even when GetObject works fine.
  • Wrong resource ARN scope. s3:GetObject must be on arn:aws:s3:::bucket-name/*; s3:ListBucket must be on arn:aws:s3:::bucket-name. Swapping them is a frequent typo; see the policy sketch after this list.
  • CMK not in execution role. KMS-encrypted bucket objects require both s3:GetObject and kms:Decrypt. The KMS key policy must also allow the role. Two separate policy documents, two separate denial points.
  • No resource policy for new trigger. If you wire up EventBridge manually (not via the console), the trigger silently fails because there's no resource policy entry granting EventBridge lambda:InvokeFunction.
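A least-privilege statement pair reflecting the scoping rules above, sketched as a Python dict (the bucket name is the sandbox bucket from the system overview):

# correct ARN scoping: ListBucket on the bucket, GetObject/PutObject on the objects
POLICY = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": ["s3:ListBucket"],
            "Resource": "arn:aws:s3:::my-company-reports-bucket",
        },
        {
            "Effect": "Allow",
            "Action": ["s3:GetObject", "s3:PutObject"],
            "Resource": "arn:aws:s3:::my-company-reports-bucket/*",
        },
    ],
}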
+ +

Diagnosing permission errors

+

CloudTrail is the ground truth. Filter by errorCode: "AccessDenied" and userIdentity.arn matching the execution role ARN. The event tells you exactly which action on which resource was denied. CloudWatch will show the error in the Lambda log if you let the exception propagate, but CloudTrail shows it even when the call is made from a library that swallows the error.

+
+
+ + + + +
+

PACKAGING

+

Zip vs layers vs container images. arm64 vs x86_64. Native wheels.

+ +
+

Three deployment formats

+ + + + + + + + +
Format | Size limit | Best for | Caveats
Zip (direct) | 50 MB upload / 250 MB unzipped | Most Python/Node functions with pure-Python or pre-built wheels | Must match Lambda's architecture; no custom runtime
Zip via S3 | 250 MB unzipped | Same as above but when the zip exceeds 50 MB | S3 bucket must be in the same region
Layers | 250 MB total (function + all layers) | Shared dependencies across functions (e.g. a company-wide logging layer) | Max 5 layers per function; later layers overwrite earlier ones
Container image | 10 GB | ML models, native binary deps, custom runtimes | Slower first cold start (image pull); larger attack surface
+ +

Layers in practice

+

A layer is a zip file that Lambda extracts into /opt before running your function. Your code in /var/task can import from /opt/python (for Python) without any path manipulation. Use cases:

+
  • Shared internal libraries deployed independently of business logic
  • Large dependencies that change rarely (numpy, pandas) — cache them in a layer so deployments of the business logic are fast
  • AWS-provided layers: Lambda Insights extension, X-Ray SDK
+

Layers count toward the 250 MB unzipped limit. If you have 5 layers at 40 MB each and your function zip is 50 MB, you're at 250 MB — no room left.

+ +

Container images

+

Container images must be based on AWS-provided base images (public.ecr.aws/lambda/python:3.13) or implement the Lambda Runtime Interface. They must be stored in ECR (Elastic Container Registry) in the same region. The Lambda service caches images on the underlying host after the first pull, so subsequent cold starts on the same host are fast — but the very first invocation after a new image is deployed can be slow for large images.

+

Container images bypass the 250 MB unzipped limit, which is why they're the standard choice for Python ML workloads that bundle PyTorch or TensorFlow.

+ +

arm64 vs x86_64

+

Graviton2-based arm64 is ~20% cheaper per GB-second than x86_64 and typically faster at compute-heavy work. The decision tree:

+
  1. Check all your dependencies for arm64 wheels: pip download --platform manylinux2014_aarch64 --only-binary :all: -r requirements.txt. If any fail, you either build from source (needs a Dockerfile) or stay on x86.
  2. For pure-Python deps and most modern packages, arm64 works out of the box.
  3. Native extensions (cryptography, numpy, psycopg2) have had arm64 wheels on PyPI since ~2022. Check the exact version you need.
+ +

Building for Lambda (the common foot-gun)

+

Lambda runs on Amazon Linux 2023. pip install on macOS produces wheels compiled for macOS, which will segfault or import-error on Lambda. The correct approach:

+
# build inside the Lambda runtime image
+docker run --rm \
+  -v "$PWD":/var/task \
+  public.ecr.aws/lambda/python:3.13 \
+  pip install -r requirements.txt -t python/
+
+zip -r layer.zip python/
+

This is also where architecture matters: use the :3.13-arm64 tag when building for arm64.

+ +
+ This project uses a zip deployment. aioboto3 and aiofiles are pure-Python and have no native extensions, so they build cleanly on any architecture. The Makefile's install target creates a local .venv for development; a real CI pipeline would build the deployment zip inside the Lambda image. +
+
+
+ + + + +
+

VPC & NETWORKING

+

When to put Lambda in a VPC (rarely). ENI cold start cost. NAT money pit.

+ +
+

Default: no VPC

+

By default, Lambda runs in an AWS-managed network with internet access. It can reach S3, DynamoDB, SQS, and other AWS services via their public endpoints. Do not put Lambda in a VPC unless you have a specific reason. Most applications don't need it.

+ +

When you actually need VPC

+
  • Connecting to RDS or Aurora (which live in a private subnet)
  • ElastiCache (Redis/Memcached) — VPC-only by design
  • Private REST APIs or internal services on private subnets
  • Compliance requirements mandating network isolation
+

S3, DynamoDB, SQS, SNS, and most AWS managed services do not require VPC placement — they're public services with public endpoints.

+ +

ENI attachment and cold start

+

When Lambda is VPC-attached, each execution environment gets an Elastic Network Interface (ENI) in your VPC. Pre-2019, ENIs were allocated per cold start, adding 10–30 s to init. AWS fixed this in 2019 with hyperplane ENIs shared across environments — today the VPC cold start penalty is ~100–500 ms on the first cold start of a new deployment, then negligible. It's no longer the dealbreaker it used to be, but it's not zero.

+ +

Subnet and AZ placement

+

Specify at least two subnets in different AZs for availability. Lambda will distribute environments across AZs. If a subnet runs out of available ENI slots (IP exhaustion), Lambda scaling fails — size subnets with this in mind. /24 (254 IPs) is often too small for high-concurrency functions.

+ +

The NAT money pit

+

VPC Lambda can't reach the internet by default. If your function needs to call an external API or reach an AWS service without a VPC endpoint, you need a NAT gateway in a public subnet. NAT gateways cost:

+
  • $0.045/hour (~$32/month) just to exist, per AZ
  • $0.045/GB of data processed
+

A function that sends 100 GB/month through NAT costs $4.50 in data alone, on top of the always-on hourly charge. Two AZs for HA = ~$64/month base cost before a single byte of traffic. This is frequently the largest unexpected cost in VPC Lambda setups.

+ +

VPC endpoints: the free alternative

+

For AWS services, VPC endpoints bypass NAT and the public internet entirely. Two types:

+
  • Gateway endpoints — S3 and DynamoDB only. Free. Route table entries. No data charge.
  • Interface endpoints (PrivateLink) — any AWS service. $0.01/AZ/hr + $0.01/GB. Expensive for high throughput but often cheaper than NAT for AWS-service-heavy workloads.
+

For a VPC Lambda that only talks to S3 and DynamoDB: create gateway endpoints for both → no NAT needed → near-zero networking cost.
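A boto3 sketch of creating the S3 gateway endpoint (the VPC and route table IDs are hypothetical placeholders):

import boto3

ec2 = boto3.client("ec2")
# gateway endpoint for S3: route-table based, no hourly or per-GB charge
ec2.create_vpc_endpoint(
    VpcEndpointType="Gateway",
    VpcId="vpc-0123456789abcdef0",                 # hypothetical IDs
    ServiceName="com.amazonaws.us-east-1.s3",
    RouteTableIds=["rtb-0123456789abcdef0"],
)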

+ +

Security groups

+

VPC Lambda gets a security group. Outbound rules control where it can connect. The security group of RDS/ElastiCache must allow inbound from the Lambda security group. A common pattern is to create a dedicated Lambda SG and reference it in the database SG's inbound rules — this avoids IP-range rules that break when Lambda ENIs change.

+
+
+ + + + +
+

OBSERVABILITY

+

CloudWatch logs, structured JSON, X-Ray, Lambda Insights, EMF. Brief Prometheus/Grafana orientation.

+ +
+

CloudWatch Logs — what you get for free

+

Every Lambda function automatically writes to a CloudWatch Log Group named /aws/lambda/<function-name>. Each execution environment gets its own Log Stream. Lambda writes two special lines automatically:

+
START RequestId: abc-123 Version: $LATEST
+END RequestId: abc-123
+REPORT RequestId: abc-123  Duration: 312.45 ms  Billed Duration: 313 ms
+        Memory Size: 256 MB  Max Memory Used: 89 MB
+        Init Duration: 423.12 ms   # only on cold starts
+

The REPORT line is your free performance telemetry. Init Duration appears only on cold invocations. Max Memory Used helps right-size memory configuration.

+

Retention: Default is "Never Expire." Set it explicitly — 7, 14, or 30 days covers most needs. Every MB of retained logs costs money.
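Setting retention is one call per log group. A boto3 sketch with a hypothetical function name; note retentionInDays only accepts fixed values (1, 3, 5, 7, 14, 30, …):

import boto3

logs = boto3.client("logs")
logs.put_retention_policy(
    logGroupName="/aws/lambda/pdf-scanner",   # hypothetical function name
    retentionInDays=14,
)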

+ +

Structured logging

+

Emit JSON instead of plain strings. CloudWatch Logs Insights can filter and aggregate JSON fields efficiently; plain strings require regex and are slow. Example:

+
import json, logging
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+
+def handler(event, context):
+    logger.info(json.dumps({
+        "event": "pdf_scan_start",
+        "bucket": BUCKET,
+        "prefix": PREFIX,
+        "request_id": context.aws_request_id,
+    }))
+

With this, Logs Insights can run: filter event = "pdf_scan_start" | stats count() by bin(5m) in seconds.

+ +

X-Ray tracing

+

X-Ray gives you request traces across services — how long the Lambda itself ran vs how long S3 calls took. Three things must all be true:

+
  1. Tracing enabled on the function — console toggle or TracingConfig: Active in SAM/CDK
  2. X-Ray SDK instrumented in your code — from aws_xray_sdk.core import patch_all; patch_all() wraps boto3 calls automatically
  3. IAM permission — execution role needs xray:PutTraceSegments and xray:PutTelemetryRecords
+

Without all three, traces are either absent or incomplete. People flip one and conclude X-Ray is broken.
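The in-code piece is small. A sketch, assuming the aws_xray_sdk package is bundled in the deployment and tracing plus IAM are already configured:

from aws_xray_sdk.core import patch_all, xray_recorder

patch_all()   # patches botocore-based clients so AWS calls appear as subsegments

def handler(event, context):
    with xray_recorder.in_subsegment("scan_pdfs"):   # optional custom subsegment
        return do_scan(event)                        # do_scan is a hypothetical worker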

+ +

Lambda Insights

+

Lambda Insights is a CloudWatch feature (not a separate service) that surfaces system-level metrics: CPU usage, memory utilisation, network I/O, disk I/O — things the REPORT line doesn't include. To enable it:

+
  • Add the Lambda Insights extension layer (arn:aws:lambda:<region>:580247275435:layer:LambdaInsightsExtension:38)
  • Add cloudwatch:PutMetricData to the execution role
+

It's useful when you suspect memory or CPU contention but the REPORT line's "Max Memory Used" isn't granular enough.

+ +

EMF — Embedded Metrics Format

+

EMF lets you emit custom CloudWatch metrics by writing structured JSON to stdout. No PutMetricData API call needed — the Lambda runtime parses the log line and publishes the metric asynchronously. This is far more efficient than calling CloudWatch from inside the handler (which adds latency + cost per invocation).

+
import json, time
+
+def emit_metric(name, value, unit="Count", **dims):
+    # EMF: the runtime parses this JSON log line and publishes the metric asynchronously
+    print(json.dumps({
+        "_aws": {
+            "Timestamp": int(time.time() * 1000),
+            "CloudWatchMetrics": [{
+                "Namespace": "MyApp",
+                "Dimensions": [list(dims.keys())],
+                "Metrics": [{"Name": name, "Unit": unit}]
+            }]
+        },
+        name: value,
+        **dims,
+    }))
+
+# usage: unit is the keyword argument; any other kwarg becomes a dimension
+emit_metric("PDFsProcessed", count, unit="Count", Function="pdf-scanner")
+ +

Prometheus & Grafana (brief)

+

Prometheus uses a pull model — it scrapes HTTP endpoints. Lambda functions are ephemeral and have no persistent HTTP endpoint, so Prometheus can't scrape them directly. Approaches:

+
  • EMF → CloudWatch → Grafana CloudWatch plugin — easiest; Grafana queries CW as a data source
  • Amazon Managed Prometheus (AMP) + remote_write — Lambda pushes metrics to AMP via the Prometheus remote write API; Grafana (or Amazon Managed Grafana) reads from AMP
  • Statsd / push gateway — Lambda pushes to a persistent push gateway; Prometheus scrapes the gateway. More infra to manage.
+

For Lambda-centric dashboards, the CloudWatch → Grafana path is usually the simplest to operate.

+
+
+ + + + +
+

ASYNC & ERRORS

+

Sync vs async invoke. Retries, DLQ, destinations, idempotency, partial-batch failures.

+ +
+

Sync vs async invocation

+ + + + + + + + + + +
 | Synchronous (RequestResponse) | Asynchronous (Event)
Caller blocks? | Yes — waits for result | No — gets 202 immediately
Response visible to caller? | Yes | No
Retries on error | None (caller's responsibility) | 2 retries = 3 total attempts
Retry backoff | n/a | ~1 min then ~2 min
Event age limit | n/a | 6 hours
Max event size | 6 MB | 256 KB
+ +

Async retry flow

+

When Lambda invokes asynchronously and the function throws an unhandled exception (or is throttled), Lambda retries automatically — twice, with exponential backoff starting at ~1 minute. If all three attempts fail, or if the event ages past 6 hours, Lambda sends the event to the configured failure destination or DLQ. If neither is configured, the event is silently dropped.
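For reference, an async invocation is just InvocationType="Event" on the standard Invoke call. A boto3 sketch with a hypothetical function name and payload:

import json
import boto3

lam = boto3.client("lambda")
resp = lam.invoke(
    FunctionName="pdf-scanner",                           # hypothetical function name
    InvocationType="Event",                               # async: returns 202 immediately
    Payload=json.dumps({"prefix": "2026/04/"}).encode(),  # hypothetical payload
)
assert resp["StatusCode"] == 202   # no function result comes back to the caller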

+ +

DLQ vs Destinations

+

These are two different mechanisms that overlap in purpose but have different capabilities:

+ + + + + + + + +
 | Dead-Letter Queue (DLQ) | Event Destinations
Introduced | 2016 (legacy) | 2019 (preferred)
Triggers on | Failure only | Success or failure (separate configs)
Payload | The original event only | Original event + result/error + metadata
Targets | SQS or SNS | SQS, SNS, Lambda, EventBridge
+

Use Destinations for new code. DLQ remains useful when the downstream consumer must be SQS and you don't need success notifications.
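Destinations and the async retry knobs are configured together on the function. A boto3 sketch; the function name and ARNs are hypothetical:

import boto3

lam = boto3.client("lambda")
lam.put_function_event_invoke_config(
    FunctionName="pdf-scanner",            # hypothetical
    MaximumRetryAttempts=1,                # down from the default 2
    MaximumEventAgeInSeconds=3600,         # drop events older than 1 h
    DestinationConfig={
        "OnFailure": {"Destination": "arn:aws:sqs:us-east-1:123456789012:pdf-dlq"},
        "OnSuccess": {"Destination": "arn:aws:sns:us-east-1:123456789012:pdf-ok"},
    },
)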

+ +

Idempotency

+

Because async invocations retry and most event sources are at-least-once, your handler will occasionally execute more than once for the same logical event. Design handlers to be idempotent — the same input produces the same outcome regardless of how many times it runs.

+

Standard pattern: use a unique key from the event (S3 ETag + key, SQS MessageId, EventBridge detail.id) as a deduplication key. On first execution, write the key + result to DynamoDB with a TTL. On retry, check DynamoDB first — if already processed, return the cached result without re-running the work.

+
# runnable sketch: assumes an "idempotency" DynamoDB table with TTL on the ttl attribute
+import time
+import boto3
+
+table = boto3.resource("dynamodb").Table("idempotency")
+
+def handler(event, context):
+    dedup_key = event["Records"][0]["messageId"]
+    existing = table.get_item(Key={"id": dedup_key})
+    if "Item" in existing:
+        return existing["Item"]["result"]   # already processed: return cached result
+    result = do_the_work(event)             # your business logic
+    table.put_item(Item={"id": dedup_key, "result": result,
+                         "ttl": int(time.time()) + 86400})
+    return result
+

AWS PowerTools for Lambda (Python) has a built-in @idempotent decorator that implements this pattern with DynamoDB.

+ +

Partial batch failures (SQS / Kinesis / DynamoDB Streams)

+

When Lambda processes a batch of records and one record fails, the default behaviour differs by source:

+
  • SQS (default): if the handler raises an exception, the entire batch is retried. One bad message blocks all others and can cause infinite retry loops.
  • With ReportBatchItemFailures enabled: return a batchItemFailures list containing only the failed message IDs. Lambda re-queues only those; successful messages are deleted.
+
def handler(event, context):
+    failures = []
+    for record in event["Records"]:
+        try:
+            process(record)
+        except Exception:
+            failures.append({"itemIdentifier": record["messageId"]})
+    return {"batchItemFailures": failures}
+

Enable ReportBatchItemFailures in the ESM configuration and always implement partial-batch failure reporting for SQS and Kinesis handlers. A single poison-pill record can otherwise block an entire shard or queue indefinitely.

+ +
+ The idempotency–partial-batch intersection: with partial failures, successful records in the batch are deleted from SQS, but if your function crashes before returning the failure list, the entire batch including the successes gets retried. Idempotency guards must still cover every record, not just the ones in batchItemFailures. +
+
+
+ + + + +
+

STEP FUNCTIONS

+

When Lambda alone isn't enough. Standard vs Express. Map state for fan-out. Comparison with Airflow.

+ +
+

When Lambda alone isn't enough

+

A single Lambda function works well for one discrete task. Problems start when you need to chain multiple tasks, retry selectively, wait on human approval, or fan out across thousands of items. Doing this with Lambda alone means writing orchestration logic inside your functions — tracking state, implementing retry delays, deciding what "done" means. Step Functions externalises that orchestration into a state machine where every state transition is durable, auditable, and resumable.

+

Reach for Step Functions when you need: sequential steps with state passing, conditional branching, parallel fan-out with join, wait states longer than 15 minutes, or retry-with-exponential-backoff built in.

+ +

Standard vs Express workflows

+ + + + + + + + + +
 | Standard | Express
Max duration | 1 year | 5 minutes
Execution semantics | Exactly-once per state | At-least-once
Execution history | Full audit trail in AWS console | CloudWatch Logs only
Pricing | $0.025 per 1 000 state transitions | $0.00001 per state transition + duration
Use for | Long-running business workflows, human approvals, compliance audit trails | High-volume, short-duration event processing (IoT, streaming)
+

For most application orchestration, Standard is the right choice — the exactly-once semantic matters when steps have side effects (charging a card, sending an email). Express is for high-throughput pipelines where at-least-once is acceptable and cost per transition is a concern.

+ +

Map state for fan-out

+

The Map state runs the same workflow branch for every item in an array, in parallel. This is the core fan-out primitive. For this project's use case, a Step Functions version could fan out across S3 prefixes — run one Lambda per prefix, collect results in a fan-in step:

+
{
+  "Type": "Map",
+  "ItemsPath": "$.prefixes",
+  "MaxConcurrency": 10,       // cap parallelism
+  "Iterator": {
+    "StartAt": "ScanPrefix",
+    "States": {
+      "ScanPrefix": {
+        "Type": "Task",
+        "Resource": "arn:aws:lambda:...:function:pdf-scanner",
+        "End": true
+      }
+    }
+  }
+}
+

MaxConcurrency: 0 means unlimited — bounded only by the Lambda concurrency pool. Set an explicit cap to avoid saturating the account concurrency quota.

+ +

Other useful states

+
  • Wait — pause for a duration or until a timestamp. The only way to implement delays longer than 15 minutes without polling.
  • Choice — conditional branching on input values. Replaces if/else logic that would otherwise live inside a Lambda.
  • Parallel — run multiple independent branches simultaneously and join their results.
  • Task (SDK integrations) — Step Functions can call DynamoDB, SQS, ECS, Glue, etc. directly without a Lambda wrapper, reducing cost and latency for simple operations.
+ +

Step Functions vs Airflow

|  | Step Functions | Apache Airflow (MWAA) |
|--|----------------|------------------------|
| DAG definition | JSON/YAML state machine (ASL) | Python code (DAG files) |
| Scheduling | Event-driven / on-demand; cron via EventBridge | Built-in rich scheduler (cron, data-interval-aware) |
| Backfill | Manual / custom | First-class, built-in |
| Operators | AWS services + Lambda (AWS ecosystem only) | 600+ providers: Spark, BigQuery, dbt, Kubernetes… |
| Infrastructure | Serverless — zero infra | Managed Airflow (MWAA) starts at ~$400/month |
| Debugging | Console execution graph; CloudWatch for logs | Airflow UI with task logs, Gantt charts, retries |

Step Functions is the right choice when your workflow is AWS-native and event-driven and you want zero infrastructure. Airflow is the right choice when you need complex scheduling, data-interval backfill, or cross-cloud operators, or when your data-engineering team already lives in Python DAGs.


COST


Pricing model, memory/cost trade-off, x86 vs arm64, free tier, common surprises.


The pricing formula


Lambda billing has two components, each with a permanent free tier:

| Component | x86_64 | arm64 | Free tier (permanent) |
|-----------|--------|-------|-----------------------|
| Requests | $0.20 / 1M | $0.20 / 1M | 1M / month |
| Duration | $0.0000166667 / GB-s | $0.0000133334 / GB-s | 400 000 GB-s / month |

GB-seconds = memory configured (GB) × duration (seconds). A 512 MB function running for 300 ms = 0.5 × 0.3 = 0.15 GB-s. At 1 million invocations, that's 150 000 GB-s — well inside the free tier.


Duration is billed in 1 ms increments. The old 100 ms minimum is gone (removed in 2020).
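The arithmetic is worth scripting once. A back-of-envelope estimator using the x86 prices above (free tier deliberately ignored):

PRICE_PER_GB_S = 0.0000166667        # x86_64 duration price
PRICE_PER_REQ  = 0.20 / 1_000_000    # request price

def monthly_cost(invocations, avg_ms, memory_mb):
    gb_s = (memory_mb / 1024) * (avg_ms / 1000) * invocations
    return gb_s * PRICE_PER_GB_S + invocations * PRICE_PER_REQ

# the worked example above: 1M invocations × 300 ms × 512 MB = 150 000 GB-s ≈ $2.70
print(f"${monthly_cost(1_000_000, 300, 512):.2f}")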


Memory vs cost: more can be cheaper


CPU scales linearly with memory. A function configured at 1 769 MB gets a full vCPU; below that it's a fraction. Doubling memory often more than halves duration for CPU-bound work, which means the total GB-s cost stays the same or decreases — while latency drops.


AWS Lambda Power Tuning is a Step Functions state machine that automatically benchmarks your function at multiple memory sizes and produces a cost/performance curve. Run it before guessing at the right memory setting. The optimal point is almost never the default 128 MB.


arm64 saves ~20%


arm64 duration pricing is 20% cheaper than x86. Same request price. If your function is compute-bound (not I/O-bound sleeping on S3 calls), arm64 also runs faster, compounding the saving. For I/O-bound functions (like lambda_function.py, which spends most of its time waiting on S3), the duration difference is smaller but the 20% price reduction still applies.


Provisioned Concurrency billing


PC is billed separately: $0.0000097222 per GB-s of provisioned time (x86) — even when idle. If you have 10 × 512 MB environments provisioned for 24 hours: 10 × 0.5 GB × 86 400 s = 432 000 GB-s/day = ~$4.20/day = ~$126/month just for the warm slots, before counting actual invocation cost on top. PC is for latency, not cost — it always increases your bill.


Hidden costs (the real bill)

  • NAT Gateway — $0.045/hr per AZ (~$32/month) + $0.045/GB data. Often the largest line item for VPC Lambda.
  • API Gateway — REST API: $3.50/1M calls. HTTP API: $1/1M. Can dwarf Lambda cost at high RPS.
  • CloudWatch Logs — $0.50/GB ingestion + $0.03/GB storage/month. Verbose Lambda logs accumulate fast; set retention.
  • Lambda Insights — additional CW Logs + custom metrics charges.
  • X-Ray — $5/million traces (after the free 100K/month).
  • Data transfer — traffic leaving a region or going through a NAT has per-GB charges.
  • S3 API calls — LIST and GET requests are billed per 1 000. A function that does 10 000 LIST calls per invocation at 1M invocations = 10B API calls = real money.
For this project's function: at 1 000 invocations/day with 500 ms average duration and 256 MB memory, cost is ~$0.002/day — essentially free. Lambda's economics only require attention above ~100K invocations/day with non-trivial memory or duration.

LOCAL DEV


SAM CLI, Lambda RIE, LocalStack, MinIO — when to reach for which.


The local dev problem


Lambda has no local runtime by default. Your only loop without tooling is: zip, upload, invoke, read CloudWatch logs, repeat — minutes per cycle. The tools below collapse that to seconds, with different trade-offs between fidelity, setup cost, and scope.


SAM CLI


What it is: AWS's official local Lambda emulator. Wraps Docker to run your function inside a container that matches the Lambda runtime environment exactly. Also emulates API Gateway.


Commands:

sam local invoke -e event.json         # invoke once
sam local start-api                    # spin up a local HTTP API gateway
sam local invoke --debug-port 5858     # attach a debugger

Fidelity: high — same Amazon Linux image, same runtime, same filesystem layout. Catches architecture issues (x86 wheel on arm64) that a plain venv misses.


Downsides: requires Docker, slow to start (pulls image on first run), no MinIO/SQS/DynamoDB emulation built in. You wire those up separately.


Lambda Runtime Interface Emulator (RIE)


A lightweight binary embedded in all AWS-provided Lambda base images. When you run the image locally, RIE exposes a local HTTP endpoint that accepts invocations in the Lambda API format. You don't need SAM CLI — just Docker:

docker build -t my-fn .
docker run -p 9000:8080 my-fn
curl -XPOST http://localhost:9000/2015-03-31/functions/function/invocations \
  -d '{"key": "value"}'

Use RIE when you're building container-image Lambdas and want to test them without SAM overhead.


LocalStack


A full AWS mock that emulates Lambda, S3, SQS, DynamoDB, API Gateway, and dozens more services in a single container. Community edition is free; Pro ($35/month) adds more services and persistent state.


When to use: integration tests that span multiple AWS services (e.g. an EventBridge rule that triggers a Lambda that writes to DynamoDB). Without LocalStack you'd need a real AWS account for these tests.


When to avoid: if you only need one service (just S3 → use MinIO; just Lambda → use SAM/RIE). LocalStack's Lambda emulation has occasional edge-case differences from the real runtime.

docker run --rm -p 4566:4566 localstack/localstack
AWS_DEFAULT_REGION=us-east-1 \
  AWS_ACCESS_KEY_ID=test \
  AWS_SECRET_ACCESS_KEY=test \
  aws --endpoint-url=http://localhost:4566 s3 ls

MinIO (this project)


MinIO is an S3-compatible object store that runs locally in Docker. It implements the S3 API precisely enough that boto3/aioboto3 needs only an endpoint_url override to work against it. It is not a Lambda emulator — it replaces S3 only.
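The override in practice, using the docker-compose defaults from this repo (sync boto3 shown for brevity; aioboto3 takes the same endpoint_url):

import boto3

s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:9000",    # MinIO API port
    aws_access_key_id="minioadmin",
    aws_secret_access_key="minioadmin",
)
print([b["Name"] for b in s3.list_buckets()["Buckets"]])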

make up                        # starts MinIO on :9000 (API) and :9001 (console)
SOURCE_DIR=~/pdfs make seed    # uploads PDFs to MinIO
make invoke                    # runs lambda_function.py against MinIO via invoke.py

This is the lightest possible local setup: no Docker-in-Docker, no SAM overhead, minimal latency. The function handler runs in your local Python process against a real S3-compatible store. Differences from real Lambda (no execution environment lifecycle, no /tmp isolation between runs) are acceptable for the development loop but not for environment-fidelity tests.


Decision matrix

| Need | Reach for |
|------|-----------|
| Fast iteration on handler logic | MinIO + python invoke.py (this project's setup) |
| Emulate Lambda runtime + API Gateway locally | SAM CLI |
| Test a container-image Lambda | Lambda RIE via Docker |
| Integration test across multiple AWS services | LocalStack |
| Full-fidelity staging before prod | Real AWS account, separate environment |

CI/CD


Aliases, versions, traffic shifting, blue/green. Plain CLI → SAM → CDK → Terraform.


Versions and aliases


Versions are immutable snapshots of a function's code and configuration. When you publish a version (aws lambda publish-version), AWS creates an immutable ARN like arn:…:function:my-fn:7. $LATEST is the only mutable version — always reflects the most recent code upload.


Aliases are named pointers to a version. prod might point to version 7; staging might point to version 8. Event source mappings, API Gateway integrations, and Step Functions tasks should target aliases, not version ARNs — this decouples deployment (publishing a new version) from promotion (updating the alias).
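The deploy/promote split in boto3 terms, as a sketch (my-fn and prod are the same placeholders used below):

import boto3

lam = boto3.client("lambda")

# deploy: snapshot $LATEST into an immutable version
version = lam.publish_version(FunctionName="my-fn")["Version"]

# promote: repoint the alias (create_alias on first use, update_alias thereafter)
lam.update_alias(FunctionName="my-fn", Name="prod", FunctionVersion=version)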


Traffic shifting (blue/green)


An alias can split traffic across two versions with weighted routing:

aws lambda update-alias \
  --function-name my-fn \
  --name prod \
  --function-version 8 \
  --routing-config 'AdditionalVersionWeights={"7"=0.9}'
# result: 10% of prod traffic goes to v8, 90% still to v7

Start at 10% canary, watch error rates in CloudWatch, shift to 50%, then 100%. Rollback is instant: point the alias back to the stable version. No instance drain, no connection draining — Lambda is stateless, cutover is atomic.
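Rollback is the same update-alias call with the weights cleared. A sketch, assuming v7 is the known-good version:

import boto3

boto3.client("lambda").update_alias(
    FunctionName="my-fn",
    Name="prod",
    FunctionVersion="7",                              # known-good version
    RoutingConfig={"AdditionalVersionWeights": {}},   # drop the canary split
)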


CodeDeploy integration


SAM and CDK can wire up CodeDeploy for automatic traffic shifting with automatic rollback on CloudWatch alarms. You declare the deployment preference in the template:

# SAM template.yaml
DeploymentPreference:
  Type: Canary10Percent5Minutes    # 10% for 5 min, then 100%
  Alarms:
    - !Ref ErrorRateAlarm          # rolls back if the alarm triggers

CodeDeploy manages the alias weight changes and calls the rollback if the alarm fires — fully automated blue/green without manual traffic management.


Deployment tooling progression

| Tool | Good for | Caveats |
|------|----------|---------|
| AWS CLI / SDK | One-off deployments, scripting, deep control | Verbose; no state management; drift-prone at scale |
| SAM (CloudFormation extension) | Lambda-first projects; built-in local testing; CodeDeploy integration | CloudFormation speed; YAML verbosity; AWS-only |
| CDK | Complex infra in TypeScript/Python; reusable constructs; type safety | Still compiles to CloudFormation; learning curve; bootstrapping required |
| Terraform (AWS provider) | Multi-cloud orgs; large existing Terraform estate; strong community modules | No built-in Lambda local testing; plan/apply cycle slower than sam deploy |
| Serverless Framework | Multi-cloud serverless; plugin ecosystem | V3 → V4 became paid for teams; community plugins vary in quality |

CI pipeline skeleton

# GitHub Actions example
jobs:
  deploy:
    steps:
      - uses: actions/checkout@v4
      - name: Build zip
        run: |
          docker run --rm -v $PWD:/var/task --entrypoint pip \
            public.ecr.aws/lambda/python:3.13 \
            install -r requirements.txt -t package/
          cd package && zip -r ../function.zip . && cd ..
          zip function.zip lambda_function.py
      - name: Deploy
        run: |
          aws lambda update-function-code \
            --function-name my-fn --zip-file fileb://function.zip
          aws lambda wait function-updated --function-name my-fn
          VERSION=$(aws lambda publish-version \
            --function-name my-fn --query Version --output text)
          aws lambda update-alias --function-name my-fn \
            --name prod --function-version "$VERSION"

The wait function-updated call is important — update-function-code is asynchronous and publish-version must wait for it to complete.


PITFALLS — THE MUST-KNOWS


The list to skim before the next interview or design review. Each item has bitten someone in production.


Execution model

  1. Module-level state leaks across invocations. A list you append to in the handler grows forever on warm calls. A counter you increment is wrong by the second request. If it's mutable and lives at module scope, treat it as either a deliberate cache or a bug.
  2. Handler globals are shared by every invocation on that env, but not across envs. "I cached the result" works locally; in production half your traffic gets the cached value, the other half doesn't, depending on which warm container they hit. Externalise (Redis, DynamoDB) or accept the variance.
  3. /tmp is per-environment, not per-invocation. If you write /tmp/output.json with a fixed name, the next warm invocation finds yesterday's file. Always use a per-invocation suffix (UUID, request ID).
  4. Init phase has a hard 10 s cap. If you import TensorFlow, hydrate a 500 MB model, or make a network call at module scope, you can blow this budget on cold start. Defer expensive work until the first handler call (lazy init), or load large artefacts from the deployment package or a layer on disk rather than over the network.
  5. asyncio.run in a sync handler creates a fresh event loop per invocation. Acceptable, but it means async clients can't be shared across invocations the way sync boto3 clients can. Profile before assuming async is faster.

Payload & size limits

  1. The 6 MB sync response cap is silent. Returning a JSON list of 50 000 items "works" in the function but the API GW caller gets a 413. The fix in lambda_function.py — return a presigned URL to a manifest file rather than the full list — is the standard pattern.
  2. API Gateway caps integration time at 29 s. It doesn't matter that your Lambda timeout is 15 minutes. For longer work, return a job ID and poll, or use Function URLs (15 min) with response streaming.
  3. Environment variables max out at 4 KB total. Big secrets (RSA keys, JSON config blobs) blow this. Put them in Parameter Store / Secrets Manager and read them on init.

Concurrency & throttling

  1. Default account concurrency is 1 000 per region. Most teams hit this before they realise it exists. It sets a hard ceiling on RPS — at 100 ms latency, that's 10 000 RPS account-wide; at 1 s, 1 000 RPS.
  2. Reserved concurrency = 0 disables the function. Looks weird; it's useful as a circuit breaker.
  3. Provisioned concurrency double-bills. You pay for the warm slots and for invocations against them. Worth it for latency-sensitive paths; wasteful for batch.
  4. The burst limit is regional and finite. A traffic spike from 0 to 5 000 RPS will throttle until AWS scales up at +500 envs/min. Provisioned concurrency or pre-warming is the fix.

Triggers, retries, idempotency

  1. Async invocation retries twice by default — three attempts total. If your handler isn't idempotent, you can charge a card three times.
  2. S3, SNS, EventBridge invoke async — at-least-once. Plan for duplicates. SQS standard is also at-least-once. SQS FIFO and Kinesis are exactly-once-ish per shard, but with their own quirks.
  3. SQS visibility timeout must be ≥ 6× the function timeout. Otherwise the message comes back while you're still processing it, and you do the work twice (or more).
  4. Partial batch failures need explicit signalling. Returning batchItemFailures for SQS/Kinesis tells AWS which records to retry; otherwise the entire batch retries or none does.
  5. API Gateway error responses are JSON-shaped unless you say otherwise. Throw an unhandled exception and the client sees {"errorMessage": "...", "errorType": "..."} with status 502. Map errors yourself.

Networking, IAM, observability

  1. Putting Lambda in a VPC adds an ENI cold-start penalty (much improved in 2019, but still real for the first invocation). Only do it if you genuinely need private-subnet resources. Outbound internet from VPC Lambda needs NAT, which costs money 24/7.
  2. S3 access from a VPC Lambda needs a VPC gateway endpoint or NAT. Without one, your S3 calls hang and time out — it looks like a code bug, but isn't.
  3. CloudWatch log groups default to "Never expire" retention. Verbose Lambdas can rack up real cost in CW Logs alone — set retention (7/14/30 days) on every log group you create.
  4. The Lambda execution role is implicit on every action. Forgetting s3:GetObject or kms:Decrypt on the bucket's CMK is the most common "but it works locally" failure. CloudTrail tells you what was denied.
  5. Resource policy vs execution role are different layers. The resource policy says "who can invoke this Lambda"; the execution role says "what this Lambda can do". Both must allow.
  6. X-Ray needs an SDK call, tracing enabled on the function, and IAM permission. Three switches. People flip one and conclude X-Ray is broken.

Deployment, dependencies, runtimes

  1. The boto3 in the Python runtime lags pip. If you need a recent API (e.g. new S3 features), bundle current boto3 in your zip. The runtime version is "good enough" for stable APIs, "sometimes wrong" for fresh ones.
  2. Native wheels must match Lambda's runtime architecture. pip install on a Mac and zip-uploading cryptography is a classic foot-gun. Build in a Docker image matching public.ecr.aws/lambda/python:3.13.
  3. arm64 saves ~20% at the same memory, but some wheels are still x86-only. Audit your deps before flipping the architecture.
  4. Layers are merge-ordered; later layers overwrite earlier ones. A "base" layer for shared dependencies works; conflicting layers silently shadow each other.
  5. Container-image deploys are cached on the Lambda host. The first cold start can be slow (image pull); subsequent ones are normal. Keep images small even though the limit is 10 GB.

Time, scheduling, secrets

  1. EventBridge schedules (cron/rate) are always UTC. "9 AM" in your local time means something different in production. Use EventBridge Scheduler (2022) for time-zone-aware schedules.
  2. Async invocations have a 6-hour maximum event age. If retries fail past that, the event is silently dropped unless you've set a DLQ or on-failure destination.
  3. Secrets in env vars are visible to anyone with lambda:GetFunctionConfiguration. Encrypted at rest, plaintext in the console. Use Secrets Manager / Parameter Store for actual secrets.
Skim test: if you can re-state the cold-start split (Init / Handler), the 6 MB / 256 KB / 4 KB / 250 MB / 10 GB constants, and the difference between resource policy and execution role from memory, you'll handle most "tell me about Lambda" interview questions.

ADJACENT


Brief orientation on AWS Glue and Prometheus/Grafana — the secondary gaps from the interview.


AWS Glue


Glue is a managed Spark-based ETL service. Lambda and Glue solve different problems:

|  | Lambda | Glue |
|--|--------|------|
| Runtime model | Serverless; up to 15 min; one handler at a time per env | Managed Spark cluster; hours-long jobs; distributed compute |
| Data scale | Up to a few GB comfortably | TB to PB natively |
| Language | Python, Node, Java, Go, custom runtime | PySpark, Scala; Glue Studio for no-code |
| Startup time | Milliseconds (warm) | 1–2 minutes to provision a Spark cluster |
| Cost model | Per request + per ms | Per DPU-hour (1 DPU = $0.44/hr); 10-minute minimum billing |
| Use for | Light transforms, event reactions, API backends | Large-scale joins, aggregations, schema inference on a data lake |

Key Glue concepts to know: DynamicFrame (Glue's DataFrame variant with schema flexibility), Glue Catalog (centralised metadata store for table schemas — also used by Athena), Job Bookmarks (Glue tracks processed S3 partitions to avoid reprocessing on incremental runs).


The decision is usually straightforward: if the data fits in Lambda's memory and the job finishes in under 15 minutes, use Lambda. If you're joining multiple large S3 datasets or transforming daily partition files, use Glue.


Prometheus


Prometheus is a pull-based time-series metrics system. It scrapes HTTP /metrics endpoints on a schedule. The fundamental tension with Lambda: Lambda functions are ephemeral — there's no persistent HTTP endpoint to scrape, and the function may be at zero concurrency between invocations.


Options for Lambda → Prometheus:

  • EMF → CloudWatch → Grafana CloudWatch plugin — no Prometheus involved. Grafana reads directly from CloudWatch. Easiest for AWS-native stacks.
  • Remote write to Amazon Managed Prometheus (AMP) — the function pushes metrics to AMP via the Prometheus remote_write API at the end of each invocation. Grafana or Amazon Managed Grafana reads from AMP. Requires the prometheus_client library and SigV4 signing on the remote_write request.
  • Push gateway — a persistent intermediary that Lambda pushes to; Prometheus scrapes the gateway. More infrastructure to manage, and stale-metric risk if the gateway isn't flushed between invocations.

Grafana


Grafana is a dashboarding layer — it doesn't store data, it queries data sources. Relevant data sources for Lambda observability:

  • CloudWatch — built-in Grafana plugin; queries CW Metrics and CW Logs Insights. Zero extra infrastructure. The standard choice for Lambda metrics (invocations, errors, duration, throttles, concurrent executions).
  • Amazon Managed Prometheus — query via PromQL if you've pushed custom metrics.
  • Amazon Managed Grafana (AMG) — Grafana-as-a-service; integrates with AWS IAM; auto-discovers CW namespaces. Avoids self-hosting Grafana.

For a Lambda-only stack with no existing Prometheus investment, the practical answer is: use EMF for custom metrics, use CloudWatch for the built-in Lambda metrics, and connect Grafana to CloudWatch. It requires no extra infrastructure and gives you dashboards in an hour.
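For reference, "use EMF" means printing one JSON line per metric from the handler, with no API call and no SDK; the namespace and metric name here are illustrative:

import json, time

def emit_count(count):
    # CloudWatch Logs parses this structured line into a real metric
    print(json.dumps({
        "_aws": {
            "Timestamp": int(time.time() * 1000),
            "CloudWatchMetrics": [{
                "Namespace": "PdfScanner",            # illustrative namespace
                "Dimensions": [["FunctionName"]],
                "Metrics": [{"Name": "PdfsProcessed", "Unit": "Count"}],
            }],
        },
        "FunctionName": "pdf-scanner",
        "PdfsProcessed": count,
    }))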


LABS


Hands-on walkthroughs that modify the existing app. Each mutates what you already have — no throw-away exercises.


Lab 0 — Local sandbox (start here)


Goal: run the full stack locally against MinIO with real PDFs.

  1. make install — creates .venv and installs deps
  2. make up — starts MinIO on :9000 (API) and :9001 (console)
  3. SOURCE_DIR=~/path/to/pdfs make seed — uploads PDFs to the MinIO bucket
  4. make invoke — runs invoke.py, which calls handler() with a minimal event
  5. Open http://localhost:9001 (minioadmin/minioadmin) and find the generated manifest under the manifests/ prefix

What you can break: set PREFIX to a non-existent prefix and observe the handler returns count=0. Set QUEUE_MAX=1 and observe the backpressure on the producer. Remove S3_ENDPOINT_URL and watch it fail to connect.


Lab 1 — Deploy to real AWS


Goal: package and deploy the function to AWS Lambda, invoke it against a real S3 bucket.

  1. Create an S3 bucket and upload sample PDFs to the 2026/04/ prefix
  2. Create an IAM execution role with s3:GetObject, s3:PutObject, s3:ListBucket, and logs:*
  3. Build the deployment zip inside the Lambda image (the image's default entrypoint is the runtime client, so override it):
     docker run --rm -v $PWD:/var/task --entrypoint pip public.ecr.aws/lambda/python:3.13 install -r requirements.txt -t package/
  4. Create the function: aws lambda create-function --handler lambda_function.handler …
  5. Invoke: aws lambda invoke --function-name pdf-scanner --payload '{}' out.json
  6. Verify the manifest appeared in S3 and the presigned URL works

What you can break: invoke without s3:ListBucket on the bucket (not the object ARN) — observe AccessDenied. Watch CloudTrail to see the denied call.


Lab 2 — Add an S3 trigger


Goal: make the function fire automatically when a PDF is uploaded.

  1. Add a resource policy entry granting S3 lambda:InvokeFunction
  2. Configure an S3 event notification on the bucket for s3:ObjectCreated:* filtered to *.pdf
  3. Upload a PDF and check CloudWatch Logs for the invocation
  4. Notice the event structure differs from the manual invoke — update the handler to extract the key from event["Records"][0]["s3"]["object"]["key"]

What you can break: upload a non-PDF to the same prefix and verify the filter prevents invocation. Remove the resource policy and verify the trigger silently stops firing (no error to the uploader — this is the async invocation model).


Lab 3 — Switch to arm64


Goal: migrate to Graviton2 and verify the ~20% cost reduction.

  1. Rebuild the zip using the arm64 Lambda image: public.ecr.aws/lambda/python:3.13-arm64
  2. Update the code and architecture together (architecture is set on update-function-code, not update-function-configuration): aws lambda update-function-code --function-name pdf-scanner --zip-file fileb://function.zip --architectures arm64
  3. Invoke and compare REPORT duration and billed duration in CloudWatch

What you can break: try deploying the x86 zip against the arm64 architecture — the function will import-error on any C-extension wheels.


Lab 4 — Enable Provisioned Concurrency


Goal: eliminate cold starts on the production alias.

  1. Publish version 1: aws lambda publish-version --function-name pdf-scanner
  2. Create an alias prod pointing to version 1
  3. Enable PC: aws lambda put-provisioned-concurrency-config --function-name pdf-scanner --qualifier prod --provisioned-concurrent-executions 2
  4. Invoke via the alias ARN and confirm Init Duration is absent from REPORT lines
  5. Check your AWS bill after an hour — note the PC charges

Lab 5 — Add X-Ray tracing


Goal: see a trace with S3 subsegments in the X-Ray console.

  1. Add aws-xray-sdk to requirements.txt and rebuild the zip
  2. Add to lambda_function.py: from aws_xray_sdk.core import patch_all; patch_all()
  3. Enable active tracing on the function and add X-Ray permissions to the execution role
  4. Invoke and open X-Ray → Traces in the console — verify the S3 API calls (list_objects_v2, put_object) appear as subsegments; note that generate_presigned_url is a local computation and makes no traced call

Lab 6 — Fan out with Step Functions


Goal: process multiple S3 prefixes in parallel using a Map state.

  1. Update the handler to accept a prefix key in the event instead of reading it from the env var (see the sketch after this list)
  2. Create a Step Functions state machine with a Map state that iterates over a list of prefixes and invokes the Lambda for each
  3. Start an execution with input: {"prefixes": ["2026/01/", "2026/02/", "2026/03/"]}
  4. Observe parallel Lambda invocations in the execution graph and CloudWatch
  5. Add error handling: configure the Map state to catch Lambda errors and continue rather than fail the whole execution
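A sketch of step 1: the event wins, and the env var stays as a fallback so the local make invoke flow keeps working (assumes _run gains a prefix parameter):

def handler(event, context):
    prefix = event.get("prefix", PREFIX)     # event payload overrides the env var
    result = asyncio.run(_run(prefix))
    return {"statusCode": 200, "body": json.dumps(result)}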

REPOSITORY


Tree of eth/ — the sandbox plus this study site.

eth/
├── lambda_function.py — handler: async PDF scan → presigned URLs → JSONL manifest
├── invoke.py — local runner: calls handler() with a minimal event, prints result
├── seed.py — uploads PDFs from a local directory to MinIO
├── requirements.txt — aioboto3, aiofiles (+ transitive: aiobotocore, botocore…)
├── docker-compose.yml — runs MinIO on :9000 (S3 API) and :9001 (web console)
├── Makefile — install / up / down / seed / invoke / graphs / docs
├── def/
│   └── task.md — original interview exercise specification
└── docs/
    ├── index.html — this study site (single-page, no build step)
    ├── viewer.html — pan/zoom SVG viewer (opened by graph links)
    └── graphs/
        ├── system_overview.dot / .svg — caller → handler → MinIO/S3 → manifest
        ├── lifecycle.dot / .svg — init / handler / freeze / thaw / shutdown
        └── cold_warm_timeline.dot / .svg — cold vs warm invocation timeline

What the function does, end to end


The function lists every PDF inside an S3 prefix. For each one, it generates a presigned download URL that expires in 15 minutes. It writes those (key, URL) pairs into a JSONL file in /tmp as it goes. When the listing is done, it uploads the JSONL to S3 as a manifest, generates one more presigned URL pointing to the manifest itself, deletes the local file, and returns the manifest URL plus the count.


The use case: you want to ship a batch of files to someone who isn't on your AWS account. Send them one URL. They open it, get back a list of links, every link works for 15 minutes, then everything dies.


Imports and module-scope config

import asyncio, json, os, uuid
import aioboto3
import aiofiles

BUCKET    = os.environ.get("BUCKET_NAME", "my-company-reports-bucket")
PREFIX    = os.environ.get("PREFIX", "2026/04/")
EXPIRY    = int(os.environ.get("URL_EXPIRY_SECONDS", "900"))
ENDPOINT  = os.environ.get("S3_ENDPOINT_URL") or None
QUEUE_MAX = int(os.environ.get("QUEUE_MAX", "2000"))
_DONE = object()

Five environment reads at module scope — init phase. They run once per cold start and every warm invocation reuses them for free. ENDPOINT is the MinIO trick: on real Lambda the var is unset, value is None, aioboto3 talks to real S3. Locally, set it to http://localhost:9000 and the same code talks to MinIO with no other changes. _DONE is a sentinel: an object() instance whose identity is unique and can't collide with any real S3 key — comparing with is (not ==) is unambiguous.


The handler — minimal on purpose

def handler(event, context):
    result = asyncio.run(_run())
    return {"statusCode": 200, "body": json.dumps(result)}

Sync because Lambda's contract is sync. asyncio.run opens a fresh event loop per invocation — which means async clients can't be shared across invocations the way sync boto3 clients can, and which is why the S3 client lives inside _run. The API-Gateway response shape is a habit: harmless for direct invoke, required if you later front this with API Gateway.


Why async at all? Lambda bills per millisecond of wall-clock time. Anything you can overlap, you save money on. S3 LIST calls overlap with presigning and file writes. That overlap directly reduces duration and cost.


_run() — the actual work

async def _run():
    session = aioboto3.Session()
    async with session.client("s3", endpoint_url=ENDPOINT) as s3:
        queue = asyncio.Queue(maxsize=QUEUE_MAX)
        manifest_path = f"/tmp/{uuid.uuid4()}.jsonl"

Session created inside _run (not module scope) because aioboto3 async clients are tied to the event loop — and each invocation gets a fresh loop. The queue bound gives backpressure: when full, await queue.put(...) blocks until the consumer takes something off. Without the bound, a million-file bucket would OOM before the first URL is presigned. UUID in the manifest path prevents collision between back-to-back warm invocations sharing the same /tmp.


The producer

        async def producer():
            paginator = s3.get_paginator("list_objects_v2")
            async for page in paginator.paginate(Bucket=BUCKET, Prefix=PREFIX):
                for obj in page.get("Contents", []) or []:
                    key = obj["Key"]
                    if key.lower().endswith(".pdf"):
                        await queue.put(key)
            await queue.put(_DONE)

Defined as a closure inside _run — captures s3 and queue without arguments; signals it's a private implementation detail. The paginator transparently fetches subsequent pages (S3 returns ≤1000 per page). await queue.put(key) blocks when the queue is full — that's the backpressure. After all pages, it puts _DONE to signal the consumer to stop (asyncio.Queue has no close method; the sentinel is the standard pattern).


The consumer

        async def consumer():
            count = 0
            async with aiofiles.open(manifest_path, "w") as f:
                while True:
                    item = await queue.get()
                    if item is _DONE:
                        break
                    url = await s3.generate_presigned_url(
                        "get_object",
                        Params={"Bucket": BUCKET, "Key": item},
                        ExpiresIn=EXPIRY,
                    )
                    await f.write(json.dumps({"key": item, "url": url}) + "\n")
                    count += 1
            return count

Same closure pattern. generate_presigned_url is a local computation — no network call. It uses your credentials, bucket, key, and expiry to produce a signed URL deterministically. Fast. JSONL (one JSON object per line) instead of a JSON array because it streams: write one line at a time without buffering the whole array, read one line at a time. Stays usable even at gigabyte scale.
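Reading it back is symmetric: one line at a time, never the whole file. A standard-library sketch; the URL is a placeholder for what the handler returns:

import json, urllib.request

manifest_url = "https://example.com/manifest.jsonl"   # placeholder: the presigned manifest URL

with urllib.request.urlopen(manifest_url) as resp:
    for line in resp:                                 # HTTPResponse iterates line by line
        entry = json.loads(line)
        print(entry["key"], entry["url"])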


Running them together

        prod_task = asyncio.create_task(producer())
        count = await consumer()
        await prod_task

create_task schedules the producer on the event loop and returns immediately — producer runs in the background. await consumer() runs in the foreground until it sees the sentinel. await prod_task makes the guarantee explicit and propagates any producer exceptions. The overlap: while S3 prepares the next LIST page (network), the consumer presigns and writes the previous page. Sequential would stack list latency + presign latency. Async pays only the larger of the two.


Upload, presign, clean up

        manifest_key = f"manifests/{uuid.uuid4()}.jsonl"
        async with aiofiles.open(manifest_path, "rb") as f:
            body = await f.read()
        await s3.put_object(Bucket=BUCKET, Key=manifest_key, Body=body,
                            ContentType="application/x-ndjson")
        manifest_url = await s3.generate_presigned_url(
            "get_object",
            Params={"Bucket": BUCKET, "Key": manifest_key},
            ExpiresIn=EXPIRY,
        )
        os.unlink(manifest_path)
        return {"count": count, "manifest_key": manifest_key, "manifest_url": manifest_url}

put_object over upload_file: for files in the KB–MB range, a single PUT avoids aioboto3's async multipart machinery entirely. Content type application/x-ndjson is the registered MIME type for newline-delimited JSON. os.unlink is required — /tmp persists across warm invocations; a thousand runs without cleanup would fill it and crash the next.


Why this design?

  • Presigned URLs, not raw data. The recipient needs no AWS account. URLs expire automatically. No egress from Lambda.
  • Manifest in S3, not inline. The 6 MB sync response cap is silent — the function succeeds, the caller gets a 413 with no warning. A manifest in S3 has no upper bound.
  • Bounded queue. Backpressure prevents the producer from outrunning the consumer and exhausting memory, regardless of bucket size.
  • Sentinel _DONE = object(). asyncio.Queue has no close. An object() instance can't collide with any S3 key; is comparison is unambiguous.
  • Nested functions as closures. They capture s3, queue, manifest_path from the enclosing scope without arguments. Scope is explicit — nobody outside _run can call them.
  • UUID in /tmp. /tmp persists across warm invocations. A fixed filename means a race between back-to-back runs on the same environment.

Cold start vs warm — CloudWatch REPORT line

# Cold start
REPORT RequestId: ...  Duration: 312.45 ms  Billed Duration: 313 ms
       Memory Size: 256 MB  Max Memory Used: 89 MB
       Init Duration: 423.12 ms

# Warm (next invocation within ~30 s)
REPORT RequestId: ...  Duration: 287.91 ms  Billed Duration: 288 ms
       Memory Size: 256 MB  Max Memory Used: 91 MB

Init Duration ~400 ms covers importing aioboto3 → aiobotocore → botocore (a heavy chain). Warm runs skip it entirely — no Init Duration line — and shave a few more milliseconds off Duration through connection reuse. For a function that runs once a day, every invocation is cold. For one that runs every few seconds, init is irrelevant.


What happens if it times out


Default timeout is 3 s — too short. Set it explicitly to 30–60 s for a small prefix, up to 900 s (15 min) for large ones. On timeout, Lambda kills the process. The /tmp file may not have been deleted; the manifest may not have been uploaded. Re-running produces a fresh manifest with new UUIDs — no dedup, so two manifests for the same job can coexist in S3. If "exactly one manifest per job" is required, add a DynamoDB dedup table keyed on request ID.
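A sketch of that dedup guard. The table name is hypothetical; the conditional write means the first claimer wins and every retry sees False:

import boto3
from botocore.exceptions import ClientError

ddb = boto3.client("dynamodb")

def claim_job(request_id: str) -> bool:
    try:
        ddb.put_item(
            TableName="manifest-jobs",                        # hypothetical table
            Item={"request_id": {"S": request_id}},
            ConditionExpression="attribute_not_exists(request_id)",
        )
        return True      # first attempt for this request ID
    except ClientError as e:
        if e.response["Error"]["Code"] == "ConditionalCheckFailedException":
            return False  # a manifest for this job already exists
        raise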


How would you scale this


Fan out by prefix. Wrap in a Step Functions Map state. Pass a list of prefixes; each iteration runs one Lambda for one prefix. MaxConcurrency controls parallelism without saturating the account concurrency quota.


Go event-driven. Subscribe to S3 ObjectCreated filtered to *.pdf. The function fires once per upload, handles one file at a time — no producer/consumer needed. Simpler, but semantically different: "process new files as they arrive" vs "scan the existing bucket."


What I'd change before production

  1. Move BUCKET and PREFIX to the event payload. Currently set at deploy time (one function per prefix). Event-driven config lets one function serve many prefixes.
  2. Structured logging. JSON to stdout with request_id, bucket, prefix, count. Logs Insights can aggregate without regex.
  3. An EMF metric for count. Free CloudWatch metric, no additional API call. Dashboard "PDFs processed per invocation" over time.
  4. Producer error handling. If paginator.paginate raises, the producer task fails but the consumer blocks on queue.get() forever — the function times out. Wrap the producer body in try/finally that always puts _DONE so the consumer exits cleanly (see the sketch after this list).
  5. An explicit timeout on queue.get(). asyncio.wait_for(queue.get(), timeout=X) prevents the consumer hanging indefinitely if the producer dies without putting the sentinel (also sketched below).
  6. Consider sync boto3. aioboto3 adds ~200 ms to the cold start. If cold start matters and file counts are small, sync boto3 with threading is simpler and starts faster. Async pays off only when file counts are large enough that the overlap is significant.
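A sketch of items 4 and 5 together, under the same closure assumptions as _run above:

        async def producer():
            try:
                paginator = s3.get_paginator("list_objects_v2")
                async for page in paginator.paginate(Bucket=BUCKET, Prefix=PREFIX):
                    for obj in page.get("Contents", []) or []:
                        if obj["Key"].lower().endswith(".pdf"):
                            await queue.put(obj["Key"])
            finally:
                await queue.put(_DONE)    # the consumer always gets the sentinel, even on error

        # consumer side: don't block forever if the producer dies before the sentinel
        item = await asyncio.wait_for(queue.get(), timeout=30)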

Makefile targets

| Target | What it does |
|--------|--------------|
| make install | Creates .venv, installs requirements.txt |
| make up | Starts MinIO via docker compose up -d |
| make down | Stops MinIO (keeps volumes) |
| make clean | Stops MinIO and deletes volumes (wipes bucket data) |
| SOURCE_DIR=path make seed | Uploads all files from path to MinIO |
| make invoke | Runs invoke.py (calls handler() directly) |
| make graphs | Renders docs/graphs/*.dot to .svg via Graphviz dot |
| make docs | Renders graphs, then opens docs/index.html |
+ + + + + diff --git a/docs/lambdas-md/lambda-01-overview.md b/docs/lambdas-md/lambda-01-overview.md new file mode 100644 index 0000000..ab1d795 --- /dev/null +++ b/docs/lambdas-md/lambda-01-overview.md @@ -0,0 +1,30 @@ +# Overview + +> A study site built on top of a working Lambda + MinIO sandbox. Read the page, run the code, break things on purpose. + +## What this is + +The repo at the root of this site (`ethics/`) holds a Python AWS Lambda function — `lambda_function.py` — that lists PDFs in an S3 bucket under a prefix, paginates, generates 15-minute presigned URLs, and writes a JSONL manifest. It runs locally against MinIO via `docker compose`, with the same handler signature as a real Lambda. This site explains the surrounding mental model in the order you'd want to study it before walking into a Lambda-heavy interview or production rotation. + +## How it's organised + +The sidebar groups topics into four reading orders. **Foundations** is the picture in your head. **Operating** covers the day-to-day knobs. **Production** covers what changes when real users and real money are involved. **Reference** holds the must-know checklist ([Pitfalls](lambda-16-pitfalls.md)), brief orientations on adjacent tools ([Glue, Prometheus/Grafana](lambda-17-adjacent.md)), the hands-on labs ([Labs](lambda-18-labs.md)), and the repo tree ([Repository](lambda-19-repository.md)). + +## How to use it + +1. **Read top-to-bottom** — the order in the sidebar is the recommended study path. +2. **Run the sandbox.** `make install && make up && SOURCE_DIR= make seed && make invoke`. The handler executes locally against MinIO; you can break it without burning AWS credit. +3. **Do the labs.** Each one mutates the existing app: deploy to real AWS, add an S3 trigger, switch to arm64, enable Provisioned Concurrency, fan out across prefixes with Step Functions, and so on. +4. **Skim Pitfalls** the night before any interview or design review. + +## System overview + +> Caller → handler → MinIO/S3 → manifest write-back. The async producer/consumer overlaps S3 LIST calls with presigning + JSONL writes, so the manifest streams to `/tmp` rather than buffering in memory. + +![System overview](lambda-system_overview.svg) + +**Legend:** +- 🟢 Real / live +- 🟡 Ephemeral / caveat +- 🔵 Lambda boundary +- 🔴 Pitfall diff --git a/docs/lambdas-md/lambda-02-mental-model.md b/docs/lambdas-md/lambda-02-mental-model.md new file mode 100644 index 0000000..e4c098f --- /dev/null +++ b/docs/lambdas-md/lambda-02-mental-model.md @@ -0,0 +1,48 @@ +# Mental Model + +> Lambda is a Linux process whose lifecycle is managed for you. Most of the surprise comes from forgetting that it's still a process. + +## What Lambda actually is + +Each invocation runs inside an **execution environment**: a Firecracker microVM running the Lambda runtime (e.g. `python3.13`), with your code unpacked into `/var/task` and an ephemeral `/tmp`. AWS owns the VM; you own everything inside the process. The microVM is created on demand, kept warm for a while, then torn down when idle traffic stops feeding it. You don't pick a server, but there *is* a server, and it has memory, a clock, and a filesystem. + +## The two phases + +Every cold start splits cleanly into two: + +- **Init phase** — your module-level code runs once: imports, client construction, anything outside the handler function. Capped at 10 s. Billed at full configured memory. The `os.environ` reads at the top of `lambda_function.py` happen here. 
+- **Handler phase** — `handler(event, context)` runs once per invocation. Billed per-millisecond at configured memory. Subsequent invocations on the same environment skip the init phase and go straight here. + +This split is the single most useful thing to internalise. Heavy work at module level → pay it once per cold start. Heavy work inside the handler → pay it every invocation. + +## Globals persist across warm invocations + +Anything assigned at module scope survives between handler calls on the same environment. That includes the boto3 client (good — connection reuse, TCP keep-alive, no re-handshake) and any in-memory cache you build (good — but be careful, see Pitfalls). It also includes mutations you didn't mean to keep, like a list you appended to without thinking. The same warm container can serve thousands of invocations in a row, then disappear. + +```python +# module level — runs once per cold start, reused across warm invocations +BUCKET = os.environ["BUCKET_NAME"] +ENDPOINT = os.environ.get("S3_ENDPOINT_URL") + +# handler level — runs every invocation +def handler(event, context): + return asyncio.run(_run()) +``` + +## /tmp is real but local + +Each environment has its own `/tmp` (default 512 MB, configurable to 10 GB). It persists across warm invocations on that environment, so you can stash artefacts you'd rather not rebuild — but it is **not** shared between concurrent executions, and it's gone when the environment dies. `lambda_function.py` writes `/tmp/.jsonl` per invocation and uploads it to S3 at the end; the file then becomes garbage, and the next invocation starts fresh. + +## Concurrency is horizontal + +If two events arrive while one is being processed, AWS spins up a second execution environment. Each environment processes one invocation at a time, single-threaded relative to your handler. The "concurrency" you see in CloudWatch is the count of environments running in parallel. There is no thread pool to tune. There is no shared memory between environments. If you need shared state, externalise it (DynamoDB, Redis, S3). + +## The reuse window + +Idle environments stick around for roughly 5–15 minutes (AWS doesn't promise a number) before being recycled. That's why a function that sees one request a minute almost never cold-starts, and a function that sees one a day always does. [Cold Starts](lambda-04-cold-starts.md) covers what that costs and how to mitigate it. + +## Lifecycle + +> Init is paid once, handler is paid every time. Freeze/thaw is free. Shutdown happens when nobody's looking. + +![Lambda execution environment lifecycle](lambda-lifecycle.svg) diff --git a/docs/lambdas-md/lambda-03-limits.md b/docs/lambdas-md/lambda-03-limits.md new file mode 100644 index 0000000..cef36aa --- /dev/null +++ b/docs/lambdas-md/lambda-03-limits.md @@ -0,0 +1,52 @@ +# Limits — Cheatsheet + +> Every number worth memorising. The "why it matters" column is the part interviews actually probe. + +## Per-function compute & storage + +| Limit | Default | Max | Why it matters | +|-------|---------|-----|----------------| +| Memory | 128 MB | 10 240 MB | CPU scales linearly with memory. More memory ≠ just more headroom — at >1769 MB you get a full vCPU; at higher tiers, multiple. Often *cheaper* to bump memory because duration drops faster than cost rises. | +| Timeout | 3 s | 900 s (15 min) | 3 s default is too short for almost anything that talks to S3. Set explicitly; don't accept the default. API Gateway caps at 29 s no matter what your function says (see below). 
| +| Ephemeral storage (/tmp) | 512 MB | 10 240 MB | Persists across warm invocations on the same env, vanishes on cold start. Not shared between concurrent envs. Pay per-invocation for >512 MB. | +| Init phase | 10 s hard cap | 10 s hard cap | Module-level code (imports, client construction). Heavy ML model loads, custom JIT warmups — measure them or you'll trip this. | + +## Payloads & responses + +| Limit | Value | Why it matters | +|-------|-------|----------------| +| Sync invocation request | 6 MB | Hard cap on the event body for `RequestResponse` invocations. | +| Sync invocation response | 6 MB | Truncated silently above this — your handler "succeeds" but the caller gets a 413. `lambda_function.py` sidesteps this by returning a manifest URL instead of inlining all presigned URLs. | +| Async invocation event | 256 KB | For `Event` invocations and most event-source-mapped triggers (S3, EventBridge, SNS). | +| Response streaming | 20 MB (soft) / unlimited (with bandwidth cap) | Function URLs and Lambda Streaming response mode break the 6 MB cap by flushing chunks. Not all clients/SDKs support it. | +| Environment variables | 4 KB total | Per function, all keys+values combined. Big config → Parameter Store / Secrets Manager. | +| Event size (SQS, SNS, EventBridge) | 256 KB each | Producer-side limit. Larger payloads → store in S3, send a pointer. | + +## Packaging + +| Limit | Value | Why it matters | +|-------|-------|----------------| +| Zip upload (direct) | 50 MB | Above this you must upload via S3 first. | +| Zip unzipped (function + layers) | 250 MB | Total of `/var/task` + all layers extracted. `aioboto3`+deps is ~50 MB; you have headroom but not infinite. | +| Container image | 10 GB | Per image. Preferred when you'd otherwise blow the 250 MB zip ceiling — e.g. ML deps with native binaries. | +| Layers | 5 per function | Ordering matters: later layers overwrite earlier. Layers count toward the 250 MB unzipped cap. | + +## Concurrency & scaling + +| Limit | Default | Notes | +|-------|---------|-------| +| Account concurrent executions | 1 000 / region | Soft quota — request increase via Service Quotas. The single most common throttling cause in production. | +| Burst concurrency | 500–3 000 (region-dependent) | How many fresh environments AWS will spin up immediately at traffic spike. Beyond this, scale-up is +500 envs / min. | +| Reserved concurrency | 0 to account quota | Carves a slice of the account pool for a function. Setting it to 0 effectively disables the function. | +| Provisioned concurrency | 0 by default | Pre-warmed envs. Eliminates cold starts at the cost of paying for idle capacity. Bills as PC-seconds + invocation cost. | + +## Time & rate limits at the edges + +| Surface | Limit | Why it matters | +|---------|-------|----------------| +| API Gateway integration timeout | 29 s | Caps your effective Lambda timeout when fronted by API GW, regardless of what the Lambda timeout says. Function URLs allow up to 15 min. | +| Async invocation event age | 6 h | If retries don't succeed in this window, the event is dropped (or sent to DLQ / on-failure destination). | +| Async retry attempts | 2 (default) | Total of 3 attempts (initial + 2). Configurable down to 0. | +| SQS visibility timeout requirement | ≥ 6× function timeout | AWS recommendation. Otherwise messages reappear while still being processed. | + +> **Memorisation hack.** Three numbers cover most interview questions: **15 minutes** (timeout), **10 GB** (memory and /tmp ceiling), **6 MB** (sync payload). 
Everything else is a footnote until you hit a specific design. diff --git a/docs/lambdas-md/lambda-04-cold-starts.md b/docs/lambdas-md/lambda-04-cold-starts.md new file mode 100644 index 0000000..345b4e8 --- /dev/null +++ b/docs/lambdas-md/lambda-04-cold-starts.md @@ -0,0 +1,44 @@ +# Cold Starts + +> Init Duration vs warm path. Mitigations: Provisioned Concurrency, arm64, lazy imports, smaller packages, SnapStart. + +![Cold vs warm timeline](lambda-cold_warm_timeline.svg) + +## What triggers a cold start + +A cold start happens whenever Lambda must create a new execution environment: the very first request after a deployment, when traffic spikes beyond the number of warm environments, and after an environment has been idle long enough to be recycled (typically 5–15 minutes, unspecified by AWS). Deployments always cold-start the incoming version — you can't avoid the first one, only reduce how long it takes. + +## The cold path + +AWS provisions a Firecracker microVM, downloads and unpacks your code (or pulls the container image), starts the language runtime, then runs your module-level code. Only after all of that does your handler function get called. The timeline is roughly: + +1. **Environment provisioning** — microVM boot, network attachment, filesystem mount. Not billed; AWS absorbs this. +2. **Init phase** — your module-level code: imports, client construction, config reads. Billed at full configured memory. Capped at 10 s. +3. **Handler phase** — `handler(event, context)` runs. Billed per-ms. + +CloudWatch shows this split: the `REPORT` line includes `Init Duration` only on cold invocations. Warm invocations have no `Init Duration` line. + +## Typical numbers + +| Runtime | Typical cold start (p50) | Typical cold start (p99) | +|---------|--------------------------|--------------------------| +| Python 3.13 (zip, minimal deps) | ~150 ms | ~400 ms | +| Python 3.13 (zip, aioboto3 + aiofiles) | ~300 ms | ~700 ms | +| Node.js 22 | ~100 ms | ~300 ms | +| Java 21 (without SnapStart) | ~1–2 s | ~3–5 s | +| Java 21 (SnapStart enabled) | ~200 ms | ~600 ms | +| Container image (any runtime) | +100–300 ms | first pull can be 1–3 s | + +## Mitigations + +**Provisioned Concurrency (PC)** — pre-warms N environments so they're always in the "warm" state. Eliminates cold starts for the provisioned slots. You pay for those slots 24/7 even when idle. Use for latency-sensitive, predictable-traffic paths. Schedule PC changes via Application Auto Scaling for cost efficiency. + +**arm64** — Graviton2 executes the init phase ~10% faster than x86_64 for CPU-bound init work. Combined with the ~20% price reduction, arm64 is the default choice unless native wheels block you. + +**Smaller packages** — Lambda downloads and unpacks your zip on every cold start. Trimming unused transitive dependencies (use `pip install --no-deps` audit or `pipdeptree`) and stripping test/doc files shaves real time. Every MB of extracted code costs a few ms. + +**Lazy imports** — move rarely-used or slow imports inside the handler (or into a lazy-init guard). The most common win is heavy ML libraries only needed for inference: import them on first call, cache the result in a module-level variable. + +**SnapStart (Java only)** — takes a snapshot of the initialised JVM state after your init phase, then restores from that snapshot on cold starts. Collapses 1–5 s JVM startup to ~200 ms. Not available for Python or Node. 
+ +> **When cold starts don't matter:** batch jobs, async event pipelines, scheduled tasks — nobody is waiting on the p99. Only optimise cold starts when a human is waiting synchronously for the response. diff --git a/docs/lambdas-md/lambda-05-concurrency.md b/docs/lambdas-md/lambda-05-concurrency.md new file mode 100644 index 0000000..5f0fe9d --- /dev/null +++ b/docs/lambdas-md/lambda-05-concurrency.md @@ -0,0 +1,35 @@ +# Concurrency + +> Account quota, reserved, provisioned. The "100 RPS × 200 ms" math. + +## The fundamental model + +Lambda concurrency = the number of execution environments processing requests at the same instant. Each environment handles exactly one invocation at a time. There is no thread pool, no event loop shared across invocations — if two requests arrive simultaneously, AWS spins up two separate environments. + +The key formula: **concurrency ≈ RPS × average duration (in seconds)**. At 100 requests/s with a 200 ms average handler duration, you need 100 × 0.2 = **20 concurrent environments**. At 500 ms average, you need 50. At 2 s average, 200 — and so on. Latency optimisation directly reduces your concurrency footprint. + +## Account concurrency pool + +Every AWS account has a regional concurrency quota — default **1 000 concurrent executions** per region, shared across all functions. When the pool is full, new invocations get throttled (sync → HTTP 429 TooManyRequestsException; async → queued and retried). Raising the limit requires a Service Quotas increase request; AWS typically grants up to 10 000 with a business justification. + +This is the single most common production surprise: one function spikes and starves all others in the same region. Reserved concurrency is the fix. + +## Types of concurrency + +| Type | What it does | Cost | Use for | +|------|--------------|------|---------| +| **Unreserved** | Draws from the shared regional pool on demand | Invocation + duration only | Most functions | +| **Reserved** | Carves a slice of the regional pool exclusively for this function; acts as both a floor and a ceiling | No extra charge | Protecting critical paths from noisy neighbours; throttling cost runaway | +| **Provisioned** | Pre-warms N environments; they stay initialised 24/7 | PC-hours + invocation | Latency-sensitive functions where cold starts are unacceptable | + +## Reserved concurrency edge cases + +- Setting reserved concurrency to **0** disables the function entirely — useful as a circuit breaker. +- Reserved concurrency counts against the account pool even when idle. If you set 500 reserved on a function, only 500 remain for all other functions (at default 1 000). +- Reserved concurrency does **not** pre-warm. You still cold-start; you just can't scale past the cap. + +## Burst scaling + +When traffic spikes from zero, Lambda can spin up environments quickly — but not infinitely fast. The burst limit (region-dependent, typically 500–3 000 immediate) is how many environments AWS will create right now. Beyond that, it adds **500 new environments per minute**. A spike from 0 to 5 000 concurrent requests takes several minutes to fully absorb. Provisioned Concurrency or pre-warming via a ping mechanism is the fix for sudden large spikes. + +> **Interview answer template:** "Concurrency = RPS × duration. Default pool is 1 000/region. Reserved carves a slice and prevents both starvation and runaway. Provisioned pre-warms to eliminate cold starts, but you pay for idle capacity." 
diff --git a/docs/lambdas-md/lambda-06-triggers.md b/docs/lambdas-md/lambda-06-triggers.md new file mode 100644 index 0000000..ebe0f90 --- /dev/null +++ b/docs/lambdas-md/lambda-06-triggers.md @@ -0,0 +1,33 @@ +# Triggers + +> Fan-in catalogue: API GW, Function URL, S3, SQS, SNS, EventBridge, DynamoDB streams, Kinesis, ALB, schedule, Step Functions. + +## Three invocation models + +Every trigger falls into one of three models, and the model determines retry behaviour, error handling, and whether the caller can see the response. + +| Model | Caller behaviour | Retries on error | Max event size | +|-------|------------------|------------------|----------------| +| **Synchronous** | Blocks for response; gets result or error directly | None — caller decides | 6 MB request + response | +| **Asynchronous** | Gets 202 immediately; Lambda queues + retries internally | 2 retries (3 total) over up to 6 h | 256 KB event | +| **Poll-based (ESM)** | Lambda polls the source on your behalf; batches records | Keeps retrying until success or record expires/goes to DLQ | Depends on source | + +## Trigger catalogue + +| Trigger | Model | Key notes | +|---------|-------|-----------| +| **API Gateway (REST / HTTP)** | Sync | 29 s integration timeout regardless of Lambda timeout. HTTP API is cheaper and lower-latency than REST API. Transforms request/response. | +| **Function URL** | Sync | Direct HTTPS endpoint on the function; no API Gateway layer. Supports up to 15 min timeout and response streaming. Simpler, cheaper, fewer features. | +| **ALB (Application Load Balancer)** | Sync | Like API GW but routes at L7; useful when Lambda is one target among EC2/ECS targets. 29 s timeout. | +| **S3 event notification** | Async | Fires on object create/delete/etc. At-least-once delivery. Large PUT creates exactly one event per object but notifications can duplicate. Common pattern: S3 → SNS → SQS → Lambda for fan-out + replay. | +| **SNS** | Async | Fan-out: one message → multiple subscribers. At-least-once. Dead-letter queue on the subscription, not the topic. | +| **EventBridge (CloudWatch Events)** | Async | Event bus with content-based routing rules. Also the managed scheduler (cron/rate expressions, timezone-aware since 2022). At-least-once. | +| **SQS** | Poll-based (ESM) | Lambda polls and batches (up to 10 000 msg). Standard: at-least-once, unordered. FIFO: ordered per message group, exactly-once with dedup. Visibility timeout must be ≥ 6× function timeout. Partial batch failure via `batchItemFailures`. | +| **Kinesis Data Streams** | Poll-based (ESM) | One Lambda shard per stream shard. Records expire (24 h–1 yr); Lambda retries until success or expiry. Use bisect-on-error and `batchItemFailures` to avoid one bad record blocking an entire shard. | +| **DynamoDB Streams** | Poll-based (ESM) | Captures item-level changes. Ordered per partition key. 24 h retention. Same retry behaviour as Kinesis. Use for CDC (change-data-capture) patterns. | +| **Step Functions** | Sync (Task state) | Step Functions calls the function synchronously and waits for the result. Retries and timeouts are defined in the state machine, not Lambda. See the [Step Functions](lambda-12-step-functions.md) section. | +| **Cognito / SES / IoT etc.** | Sync or Async | Service-specific; check the docs for each. Cognito triggers (pre-signup, pre-token) are sync and block the auth flow. | + +## Choosing between SQS and SNS+SQS + +Use plain **SQS → Lambda** when you have one consumer and want to buffer, batch, and retry. 
Use **SNS → SQS → Lambda** when you need fan-out (multiple independent consumers each get a copy) or when the producer is an AWS service that speaks SNS natively (S3 event notifications, for example). The SNS layer decouples producers from the queue topology. diff --git a/docs/lambdas-md/lambda-07-iam.md b/docs/lambdas-md/lambda-07-iam.md new file mode 100644 index 0000000..ebcbb8a --- /dev/null +++ b/docs/lambdas-md/lambda-07-iam.md @@ -0,0 +1,58 @@
# IAM & Permissions

> Execution role vs resource policy. The two policies most people confuse.

## Two independent permission layers

Lambda has two separate permission surfaces that must each be correct independently. Confusing them is the most common "it works locally but not in AWS" failure.

| Layer | Question it answers | Who creates it |
|-------|---------------------|----------------|
| **Execution role** | What can *this Lambda function do* once running? (call S3, write to DynamoDB, publish to SNS…) | You — attached at function creation |
| **Resource policy** | Who is *allowed to invoke* this Lambda function? (API Gateway, another account, EventBridge…) | AWS adds it automatically for most triggers; you add it for cross-account or manual grants |

## Execution role

The execution role is an IAM role that Lambda assumes when running your function. Every Lambda must have one. The role's attached policies determine what AWS API calls the function can make. At minimum, every function needs:

```
# minimum: write its own logs
logs:CreateLogGroup
logs:CreateLogStream
logs:PutLogEvents
```

Common additions for a function that reads/writes S3:

```
s3:GetObject
s3:PutObject
s3:ListBucket    # needed for paginator; often forgotten
kms:Decrypt      # if the bucket uses a CMK, this is also required
```

The `AWSLambdaBasicExecutionRole` managed policy covers logs only — it is intentionally minimal. `AWSLambdaVPCAccessExecutionRole` adds the ENI permissions needed when the function is in a VPC.

## Resource policy

The resource policy is attached to the Lambda function itself (not an IAM identity). When you add an S3 event notification or API Gateway integration in the console, AWS automatically adds a resource policy entry allowing that service to invoke the function. For cross-account invocations you add this manually via `aws lambda add-permission`.

```bash
# grant another AWS account (here 123456789012) permission to invoke;
# an inline comment after a backslash continuation would break the command
aws lambda add-permission \
  --function-name my-function \
  --principal 123456789012 \
  --action lambda:InvokeFunction \
  --statement-id cross-account-invoke
```

## Common mistakes

- **Missing `s3:ListBucket` on the bucket resource.** `ListObjectsV2` requires this on the *bucket ARN* (not the object ARN). Forgetting it causes AccessDenied on the paginator even when GetObject works fine.
- **Wrong resource ARN scope.** `s3:GetObject` must be on `arn:aws:s3:::bucket-name/*`; `s3:ListBucket` must be on `arn:aws:s3:::bucket-name`. Swapping them is a frequent typo.
- **CMK not in execution role.** KMS-encrypted bucket objects require both `s3:GetObject` and `kms:Decrypt`. The KMS key policy must also allow the role. Two separate policy documents, two separate denial points.
- **No resource policy for new trigger.** If you wire up EventBridge manually (not via the console), the trigger silently fails because there's no resource policy entry granting EventBridge `lambda:InvokeFunction`.

## Diagnosing permission errors

CloudTrail is the ground truth.
Filter by `errorCode: "AccessDenied"` and `userIdentity.arn` matching the execution role ARN. The event tells you exactly which action on which resource was denied. CloudWatch will show the error in the Lambda log if you let the exception propagate, but CloudTrail shows it even when the call is made from a library that swallows the error. diff --git a/docs/lambdas-md/lambda-08-packaging.md b/docs/lambdas-md/lambda-08-packaging.md new file mode 100644 index 0000000..b95da6d --- /dev/null +++ b/docs/lambdas-md/lambda-08-packaging.md @@ -0,0 +1,54 @@ +# Packaging + +> Zip vs layers vs container images. arm64 vs x86_64. Native wheels. + +## Three deployment formats + +| Format | Size limit | Best for | Caveats | +|--------|-----------|----------|---------| +| **Zip (direct)** | 50 MB upload / 250 MB unzipped | Most Python/Node functions with pure-Python or pre-built wheels | Must match Lambda's architecture; no custom runtime | +| **Zip via S3** | 250 MB unzipped | Same as above but when zip exceeds 50 MB | S3 bucket must be in the same region | +| **Layers** | 250 MB total (function + all layers) | Shared dependencies across functions (e.g. a company-wide logging layer) | Max 5 layers per function; later layers overwrite earlier ones | +| **Container image** | 10 GB | ML models, native binary deps, custom runtimes | Slower first cold start (image pull); larger attack surface | + +## Layers in practice + +A layer is a zip file that Lambda extracts into `/opt` before running your function. Your code in `/var/task` can import from `/opt/python` (for Python) without any path manipulation. Use cases: + +- Shared internal libraries deployed independently of business logic +- Large dependencies that change rarely (numpy, pandas) — cache them in a layer so deployments of the business logic are fast +- AWS-provided layers: Lambda Insights extension, X-Ray SDK + +Layers count toward the 250 MB unzipped limit. If you have 5 layers at 40 MB each and your function zip is 50 MB, you're at 250 MB — no room left. + +## Container images + +Container images must be based on AWS-provided base images (`public.ecr.aws/lambda/python:3.13`) or implement the Lambda Runtime Interface. They must be stored in ECR (Elastic Container Registry) in the same region. The Lambda service caches images on the underlying host after the first pull, so subsequent cold starts on the same host are fast — but the very first invocation after a new image is deployed can be slow for large images. + +Container images bypass the 250 MB unzipped limit, which is why they're the standard choice for Python ML workloads that bundle PyTorch or TensorFlow. + +## arm64 vs x86_64 + +Graviton2-based arm64 is ~20% cheaper per GB-second than x86_64 and typically faster at compute-heavy work. The decision tree: + +1. Check all your dependencies for arm64 wheels: `pip download --platform manylinux2014_aarch64 --only-binary :all: -r requirements.txt`. If any fail, you either build from source (needs Dockerfile) or stay on x86. +2. For pure-Python deps and most modern packages, arm64 works out of the box. +3. Native extensions (cryptography, numpy, psycopg2) have arm64 wheels on PyPI since ~2022. Check the exact version you need. + +## Building for Lambda (the common foot-gun) + +Lambda runs on Amazon Linux 2023. `pip install` on macOS produces wheels compiled for macOS, which will segfault or import-error on Lambda. 
The correct approach:

```bash
# build inside the Lambda runtime image
# (--entrypoint "" bypasses the image's Lambda bootstrap so pip runs directly)
docker run --rm \
  -v "$PWD":/var/task \
  --entrypoint "" \
  public.ecr.aws/lambda/python:3.13 \
  pip install -r requirements.txt -t python/

zip -r layer.zip python/
```

This is also where architecture matters: use the `:3.13-arm64` tag when building for arm64.

> **This project** uses a zip deployment. `aioboto3` and `aiofiles` are pure-Python and have no native extensions, so they build cleanly on any architecture. The Makefile's `install` target creates a local `.venv` for development; a real CI pipeline would build the deployment zip inside the Lambda image. diff --git a/docs/lambdas-md/lambda-09-vpc-networking.md b/docs/lambdas-md/lambda-09-vpc-networking.md new file mode 100644 index 0000000..2ae2b4c --- /dev/null +++ b/docs/lambdas-md/lambda-09-vpc-networking.md @@ -0,0 +1,46 @@
# VPC & Networking

> When to put Lambda in a VPC (rarely). ENI cold start cost. NAT money pit.

## Default: no VPC

By default, Lambda runs in an AWS-managed network with internet access. It can reach S3, DynamoDB, SQS, and other AWS services via their public endpoints. **Do not put Lambda in a VPC unless you have a specific reason.** Most applications don't need it.

## When you actually need VPC

- Connecting to RDS or Aurora (which live in a private subnet)
- ElastiCache (Redis/Memcached) — VPC-only by design
- Private REST APIs or internal services on private subnets
- Compliance requirements mandating network isolation

S3, DynamoDB, SQS, SNS, and most AWS managed services do **not** require VPC placement — they're public services with public endpoints.

## ENI attachment and cold start

When Lambda is VPC-attached, each execution environment gets an Elastic Network Interface (ENI) in your VPC. Pre-2019, ENIs were allocated per cold start, adding 10–30 s to init. AWS fixed this in 2019 with hyperplane ENIs shared across environments — today the VPC cold start penalty is ~100–500 ms on the first cold start of a new deployment, then negligible. It's no longer the dealbreaker it used to be, but it's not zero.

## Subnet and AZ placement

Specify at least two subnets in different AZs for availability. Lambda will distribute environments across AZs. If a subnet runs out of available ENI slots (IP exhaustion), Lambda scaling fails — size subnets with this in mind. A /24 (254 usable IPs) is often too small for high-concurrency functions.

## The NAT money pit

VPC Lambda can't reach the internet by default. If your function needs to call an external API or reach an AWS service without a VPC endpoint, you need a NAT gateway in a public subnet. NAT gateways cost:

- **$0.045/hour** (~$32/month) just to exist, per AZ
- **$0.045/GB** of data processed

A function that sends 100 GB/month through NAT costs $4.50 in data alone, on top of the always-on hourly charge. Two AZs for HA = ~$64/month base cost before a single byte of traffic. This is frequently the largest unexpected cost in VPC Lambda setups.

## VPC endpoints: the free alternative

For AWS services, VPC endpoints bypass NAT and the public internet entirely. Two types:

- **Gateway endpoints** — S3 and DynamoDB only. Free. Route table entries. No data charge (creation sketch below).
- **Interface endpoints (PrivateLink)** — any AWS service. $0.01/AZ/hr + $0.01/GB. Expensive for high throughput but often cheaper than NAT for AWS-service-heavy workloads.
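A sketch of creating the S3 gateway endpoint with boto3 — the region, VPC ID, and route-table ID are placeholders; `create_vpc_endpoint` is the real EC2 API call:

```python
# sketch: free S3 gateway endpoint (IDs and region are placeholders)
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")

ec2.create_vpc_endpoint(
    VpcEndpointType="Gateway",                 # gateway type: S3 / DynamoDB only
    VpcId="vpc-0123456789abcdef0",             # placeholder
    ServiceName="com.amazonaws.us-east-1.s3",
    RouteTableIds=["rtb-0123456789abcdef0"],   # route tables of the Lambda subnets
)
```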
+ +For a VPC Lambda that only talks to S3 and DynamoDB: create gateway endpoints for both → no NAT needed → near-zero networking cost. + +## Security groups + +VPC Lambda gets a security group. Outbound rules control where it can connect. The security group of RDS/ElastiCache must allow inbound from the Lambda security group. A common pattern is to create a dedicated Lambda SG and reference it in the database SG's inbound rules — this avoids IP-range rules that break when Lambda ENIs change. diff --git a/docs/lambdas-md/lambda-10-observability.md b/docs/lambdas-md/lambda-10-observability.md new file mode 100644 index 0000000..809def8 --- /dev/null +++ b/docs/lambdas-md/lambda-10-observability.md @@ -0,0 +1,93 @@ +# Observability + +> CloudWatch logs, structured JSON, X-Ray, Lambda Insights, EMF. Brief Prometheus/Grafana orientation. + +## CloudWatch Logs — what you get for free + +Every Lambda function automatically writes to a CloudWatch Log Group named `/aws/lambda/`. Each execution environment gets its own Log Stream. Lambda writes two special lines automatically: + +``` +START RequestId: abc-123 Version: $LATEST +END RequestId: abc-123 +REPORT RequestId: abc-123 Duration: 312.45 ms Billed Duration: 313 ms + Memory Size: 256 MB Max Memory Used: 89 MB + Init Duration: 423.12 ms # only on cold starts +``` + +The REPORT line is your free performance telemetry. `Init Duration` appears only on cold invocations. `Max Memory Used` helps right-size memory configuration. + +**Retention:** Default is "Never Expire." Set it explicitly — 7, 14, or 30 days covers most needs. Every MB of retained logs costs money. + +## Structured logging + +Emit JSON instead of plain strings. CloudWatch Logs Insights can filter and aggregate JSON fields efficiently; plain strings require regex and are slow. Example: + +```python +import json, logging +logger = logging.getLogger() +logger.setLevel(logging.INFO) + +def handler(event, context): + logger.info(json.dumps({ + "event": "pdf_scan_start", + "bucket": BUCKET, + "prefix": PREFIX, + "request_id": context.aws_request_id, + })) +``` + +With this, Logs Insights can run: `filter event = "pdf_scan_start" | stats count() by bin(5m)` in seconds. + +## X-Ray tracing + +X-Ray gives you request traces across services — how long the Lambda itself ran vs how long S3 calls took. Three things must all be true: + +1. **Tracing enabled on the function** — console toggle or `TracingConfig: Active` in SAM/CDK +2. **X-Ray SDK instrumented in your code** — `from aws_xray_sdk.core import patch_all; patch_all()` wraps boto3 calls automatically +3. **IAM permission** — execution role needs `xray:PutTraceSegments` and `xray:PutTelemetryRecords` + +Without all three, traces are either absent or incomplete. People flip one and conclude X-Ray is broken. + +## Lambda Insights + +Lambda Insights is a CloudWatch feature (not a separate service) that surfaces system-level metrics: CPU usage, memory utilisation, network I/O, disk I/O — things the REPORT line doesn't include. To enable it: + +- Add the Lambda Insights extension layer (`arn:aws:lambda::580247275435:layer:LambdaInsightsExtension:38`) +- Add `cloudwatch:PutMetricData` to the execution role + +It's useful when you suspect memory or CPU contention but the REPORT line's "Max Memory Used" isn't granular enough. + +## EMF — Embedded Metrics Format + +EMF lets you emit custom CloudWatch metrics by writing structured JSON to stdout. 
No `PutMetricData` API call is needed — CloudWatch extracts the metric from the log line asynchronously. This is far more efficient than calling CloudWatch from inside the handler (which adds latency + cost per invocation).

```python
import json, time

def emit_metric(name, value, unit="Count", **dims):
    print(json.dumps({
        "_aws": {
            "Timestamp": int(time.time() * 1000),
            "CloudWatchMetrics": [{
                "Namespace": "MyApp",
                "Dimensions": [list(dims.keys())],
                "Metrics": [{"Name": name, "Unit": unit}]
            }]
        },
        name: value,
        **dims,
    }))

# usage — unit defaults to "Count"; keyword args become dimensions
emit_metric("PDFsProcessed", count, Function="pdf-scanner")
```

## Prometheus & Grafana (brief)

Prometheus uses a **pull model** — it scrapes HTTP endpoints. Lambda functions are ephemeral and have no persistent HTTP endpoint, so Prometheus can't scrape them directly. Approaches:

- **EMF → CloudWatch → Grafana CloudWatch plugin** — easiest; Grafana queries CW as a data source
- **Amazon Managed Prometheus (AMP) + remote_write** — Lambda pushes metrics to AMP via the Prometheus remote write API; Grafana (or Amazon Managed Grafana) reads from AMP. Requires the `prometheus_client` library and SIGV4 signing on the remote_write request.
- **StatsD / push gateway** — Lambda pushes to a persistent push gateway; Prometheus scrapes the gateway. More infra to manage, stale metric risk if the push gateway isn't flushed between invocations.

For Lambda-centric dashboards, the CloudWatch → Grafana path is usually the simplest to operate. diff --git a/docs/lambdas-md/lambda-11-async-errors.md b/docs/lambdas-md/lambda-11-async-errors.md new file mode 100644 index 0000000..3a974b2 --- /dev/null +++ b/docs/lambdas-md/lambda-11-async-errors.md @@ -0,0 +1,73 @@
# Async & Errors

> Sync vs async invoke. Retries, DLQ, destinations, idempotency, partial-batch failures.

## Sync vs async invocation

| | Synchronous (RequestResponse) | Asynchronous (Event) |
|--|-------------------------------|----------------------|
| **Caller blocks?** | Yes — waits for result | No — gets 202 immediately |
| **Response visible to caller?** | Yes | No |
| **Retries on error** | None (caller's responsibility) | 2 retries = 3 total attempts |
| **Retry backoff** | — | ~1 min then ~2 min |
| **Event age limit** | — | 6 hours |
| **Max event size** | 6 MB | 256 KB |

## Async retry flow

When Lambda invokes asynchronously and the function throws an unhandled exception (or is throttled), Lambda retries automatically — twice, with exponential backoff starting at ~1 minute. If all three attempts fail, or if the event ages past 6 hours, Lambda sends the event to the configured failure destination or DLQ. If neither is configured, the event is silently dropped.

## DLQ vs Destinations

These are two different mechanisms that overlap in purpose but have different capabilities:

| | Dead-Letter Queue (DLQ) | Event Destinations |
|--|-------------------------|---------------------|
| **Introduced** | 2016 (legacy) | 2019 (preferred) |
| **Triggers on** | Failure only | Success or failure (separate configs) |
| **Payload** | The original event only | Original event + result/error + metadata |
| **Targets** | SQS or SNS | SQS, SNS, Lambda, EventBridge |

Use Destinations for new code. DLQ remains useful when the downstream consumer must be SQS and you don't need success notifications.
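A sketch of wiring an on-failure destination with boto3 — the function name and queue ARN are placeholders; `put_function_event_invoke_config` is the real call, and it is also where the async retry count and maximum event age are tuned:

```python
# sketch: on-failure destination + tightened async retry policy (ARNs are placeholders)
import boto3

lam = boto3.client("lambda")

lam.put_function_event_invoke_config(
    FunctionName="pdf-scanner",
    MaximumRetryAttempts=2,          # 0-2 async retries; default 2
    MaximumEventAgeInSeconds=3600,   # give up after 1 h instead of the 6 h default
    DestinationConfig={
        "OnFailure": {
            "Destination": "arn:aws:sqs:us-east-1:111122223333:pdf-failures"
        }
    },
)
```

The execution role also needs `sqs:SendMessage` on the destination queue — a destination the role can't write to leaves the failed events with nowhere to go.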
+ +## Idempotency + +Because async invocations retry and most event sources are at-least-once, your handler will occasionally execute more than once for the same logical event. Design handlers to be idempotent — the same input produces the same outcome regardless of how many times it runs. + +Standard pattern: use a unique key from the event (S3 ETag + key, SQS MessageId, EventBridge detail.id) as a deduplication key. On first execution, write the key + result to DynamoDB with a TTL. On retry, check DynamoDB first — if already processed, return the cached result without re-running the work. + +```python +# pseudo-code +dedup_key = event["Records"][0]["messageId"] +existing = table.get_item(Key={"id": dedup_key}) +if existing.get("Item"): + return existing["Item"]["result"] + +result = do_the_work(event) +table.put_item(Item={"id": dedup_key, "result": result, "ttl": now + 86400}) +return result +``` + +AWS PowerTools for Lambda (Python) has a built-in `@idempotent` decorator that implements this pattern with DynamoDB. + +## Partial batch failures (SQS / Kinesis / DynamoDB Streams) + +When Lambda processes a batch of records and one record fails, the default behaviour differs by source: + +- **SQS (default)**: if the handler raises an exception, the entire batch is retried. One bad message blocks all others and can cause infinite retry loops. +- **With `ReportBatchItemFailures` enabled**: return a `batchItemFailures` list containing only the failed message IDs. Lambda re-queues only those; successful messages are deleted. + +```python +def handler(event, context): + failures = [] + for record in event["Records"]: + try: + process(record) + except Exception: + failures.append({"itemIdentifier": record["messageId"]}) + return {"batchItemFailures": failures} +``` + +Enable `ReportBatchItemFailures` in the ESM configuration and always implement partial-batch failure reporting for SQS and Kinesis handlers. A single poison-pill record can otherwise block an entire shard or queue indefinitely. + +> ⚠️ **The idempotency–partial-batch intersection:** with partial failures, successful records in the batch are deleted from SQS, but if your function crashes before returning the failure list, the entire batch including the successes gets retried. Idempotency guards must still cover every record, not just the ones in `batchItemFailures`. diff --git a/docs/lambdas-md/lambda-12-step-functions.md b/docs/lambdas-md/lambda-12-step-functions.md new file mode 100644 index 0000000..9162821 --- /dev/null +++ b/docs/lambdas-md/lambda-12-step-functions.md @@ -0,0 +1,65 @@ +# Step Functions + +> When Lambda alone isn't enough. Standard vs Express. Map state for fan-out. Comparison with Airflow. + +## When Lambda alone isn't enough + +A single Lambda function works well for one discrete task. Problems start when you need to chain multiple tasks, retry selectively, wait on human approval, or fan out across thousands of items. Doing this with Lambda alone means writing orchestration logic inside your functions — tracking state, implementing retry delays, deciding what "done" means. Step Functions externalises that orchestration into a state machine where every state transition is durable, auditable, and resumable. + +Reach for Step Functions when you need: sequential steps with state passing, conditional branching, parallel fan-out with join, wait states longer than 15 minutes, or retry-with-exponential-backoff built in. 
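Executions are started with a single API call. A minimal sketch, assuming a hypothetical state machine named `pdf-pipeline` that fans out over prefixes like the Map example further down; `start_execution` is the real boto3 call:

```python
# sketch: start a state machine execution (name/ARN is hypothetical)
import json
import boto3

sfn = boto3.client("stepfunctions")

resp = sfn.start_execution(
    stateMachineArn="arn:aws:states:us-east-1:111122223333:stateMachine:pdf-pipeline",
    input=json.dumps({"prefixes": ["2026/01/", "2026/02/", "2026/03/"]}),
)
print(resp["executionArn"])  # inspect with describe_execution or the console graph
```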
## Standard vs Express workflows

| | Standard | Express |
|--|----------|---------|
| **Max duration** | 1 year | 5 minutes |
| **Execution semantics** | Exactly-once | At-least-once |
| **Execution history** | Full audit trail in AWS console | CloudWatch Logs only |
| **Pricing** | $0.025 per 1 000 state transitions | $1.00 per 1M requests + GB-s duration |
| **Use for** | Long-running business workflows, human approvals, compliance audit trails | High-volume, short-duration event processing (IoT, streaming) |

For most application orchestration, Standard is the right choice — the exactly-once semantic matters when steps have side effects (charging a card, sending an email). Express is for high-throughput pipelines where at-least-once is acceptable and per-execution cost is a concern.

## Map state for fan-out

The Map state runs the same workflow branch for every item in an array, in parallel. This is the core fan-out primitive. For this project's use case, a Step Functions version could fan out across S3 prefixes — run one Lambda per prefix, collect results in a fan-in step:

```json
{
  "Type": "Map",
  "ItemsPath": "$.prefixes",
  "MaxConcurrency": 10,
  "Iterator": {
    "StartAt": "ScanPrefix",
    "States": {
      "ScanPrefix": {
        "Type": "Task",
        "Resource": "arn:aws:lambda:...:function:pdf-scanner",
        "End": true
      }
    }
  }
}
```

`MaxConcurrency: 0` means unlimited — bounded only by the Lambda concurrency pool. Set an explicit cap to avoid saturating the account concurrency quota.

## Other useful states

- **Wait** — pause for a duration or until a timestamp. The only way to implement delays longer than 15 minutes without polling.
- **Choice** — conditional branching on input values. Replaces `if/else` logic that would otherwise live inside a Lambda.
- **Parallel** — run multiple independent branches simultaneously and join their results.
- **Task (SDK integrations)** — Step Functions can call DynamoDB, SQS, ECS, Glue, etc. directly without a Lambda wrapper, reducing cost and latency for simple operations.

## Step Functions vs Airflow

| | Step Functions | Apache Airflow (MWAA) |
|--|----------------|------------------------|
| **DAG definition** | JSON/YAML state machine (ASL) | Python code (DAG files) |
| **Scheduling** | Event-driven / on-demand; cron via EventBridge | Built-in rich scheduler (cron, data-interval-aware) |
| **Backfill** | Manual / custom | First-class, built-in |
| **Operators** | AWS services + Lambda (AWS ecosystem only) | Hundreds of operators via community providers: Spark, BigQuery, dbt, Kubernetes… |
| **Infrastructure** | Serverless — zero infra | Managed Airflow (MWAA) starts at ~$400/month |
| **Debugging** | Console execution graph; CloudWatch for logs | Airflow UI with task logs, Gantt charts, retries |

Step Functions is the right choice when your workflow is AWS-native, event-driven, and you want zero infrastructure. Airflow is the right choice when you need complex scheduling, data-interval backfill, cross-cloud operators, or a data-engineering team that already knows Python DAGs. diff --git a/docs/lambdas-md/lambda-13-cost.md b/docs/lambdas-md/lambda-13-cost.md new file mode 100644 index 0000000..81cf5bc --- /dev/null +++ b/docs/lambdas-md/lambda-13-cost.md @@ -0,0 +1,42 @@
# Cost

> Pricing model, memory/cost trade-off, x86 vs arm64, free tier, common surprises.
## The pricing formula

Lambda billing has two components, each with a permanent free tier:

| Component | x86_64 | arm64 | Free tier (permanent) |
|-----------|--------|-------|------------------------|
| **Requests** | $0.20 / 1M | $0.20 / 1M | 1M / month |
| **Duration** | $0.0000166667 / GB-s | $0.0000133334 / GB-s | 400 000 GB-s / month |

GB-seconds = memory configured (GB) × duration (seconds). A 512 MB function running for 300 ms = 0.5 × 0.3 = 0.15 GB-s. At 1 million invocations, that's 150 000 GB-s — well inside the free tier.

Duration is billed in **1 ms increments**. The old 100 ms minimum is gone (removed in 2020).

## Memory vs cost: more can be cheaper

CPU scales linearly with memory. A function configured at 1 769 MB gets a full vCPU; below that it's a fraction. Doubling memory often more than halves duration for CPU-bound work, which means the total GB-s cost stays the same or decreases — while latency drops.

**AWS Lambda Power Tuning** is a Step Functions state machine that automatically benchmarks your function at multiple memory sizes and produces a cost/performance curve. Run it before guessing at the right memory setting. The optimal point is almost never the default 128 MB.

## arm64 saves ~20%

arm64 duration pricing is 20% cheaper than x86. Same request price. If your function is compute-bound (not I/O-bound sleeping on S3 calls), arm64 also runs faster, compounding the saving. For I/O-bound functions (like `lambda_function.py`, which spends most of its time waiting on S3), the duration difference is smaller but the 20% price reduction still applies.

## Provisioned Concurrency billing

PC is billed separately: $0.0000041667 per GB-s of provisioned time (x86) — even when idle. If you have 10 × 512 MB environments provisioned for 24 hours: 10 × 0.5 GB × 86 400 s = 432 000 GB-s/day = ~$1.80/day = ~$54/month just for the warm slots, before counting actual invocation cost on top (invocations against PC bill duration at a reduced rate of $0.0000097222 / GB-s). PC is for latency, not cost — it always increases your bill.

## Hidden costs (the real bill)

- **NAT Gateway** — $0.045/hr per AZ (~$32/month) + $0.045/GB data. Often the largest line item for VPC Lambda.
- **API Gateway** — REST API: $3.50/1M calls. HTTP API: $1/1M. Can dwarf Lambda cost at high RPS.
- **CloudWatch Logs** — $0.50/GB ingestion + $0.03/GB storage/month. Verbose Lambda logs accumulate fast; set retention.
- **Lambda Insights** — additional CW Logs + custom metrics charges.
- **X-Ray** — $5/million traces (after free 100K/month).
- **Data transfer** — traffic leaving a region or going through a NAT has per-GB charges.
- **S3 API calls** — LIST and GET requests are billed per 1 000. A function that does 10 000 LIST calls/invocation at 1M invocations = 10B API calls = real money.

> ✅ **For this project's function:** at 1 000 invocations/day with 500 ms average duration and 256 MB memory, cost is ~$0.002/day — essentially free. Lambda's economics only require attention above ~100K invocations/day with non-trivial memory or duration. diff --git a/docs/lambdas-md/lambda-14-local-dev.md b/docs/lambdas-md/lambda-14-local-dev.md new file mode 100644 index 0000000..822d8cd --- /dev/null +++ b/docs/lambdas-md/lambda-14-local-dev.md @@ -0,0 +1,74 @@
# Local Dev

> SAM CLI, Lambda RIE, LocalStack, MinIO — when to reach for which.

## The local dev problem

Lambda has no local runtime by default. Your only loop without tooling is: zip, upload, invoke, read CloudWatch logs, repeat — minutes per cycle.
The tools below collapse that to seconds, with different trade-offs between fidelity, setup cost, and scope. + +## SAM CLI + +**What it is:** AWS's official local Lambda emulator. Wraps Docker to run your function inside a container that matches the Lambda runtime environment exactly. Also emulates API Gateway. + +**Commands:** + +```bash +sam local invoke -e event.json # invoke once +sam local start-api # spin up local HTTP API gateway +sam local invoke --debug-port 5858 # attach debugger +``` + +**Fidelity:** high — same Amazon Linux image, same runtime, same filesystem layout. Catches architecture issues (x86 wheel on arm64) that a plain venv misses. + +**Downsides:** requires Docker, slow to start (pulls image on first run), no MinIO/SQS/DynamoDB emulation built in. You wire those up separately. + +## Lambda Runtime Interface Emulator (RIE) + +A lightweight binary embedded in all AWS-provided Lambda base images. When you run the image locally, RIE exposes a local HTTP endpoint that accepts invocations in the Lambda API format. You don't need SAM CLI — just Docker: + +```bash +docker build -t my-fn . +docker run -p 9000:8080 my-fn +curl -XPOST http://localhost:9000/2015-03-31/functions/function/invocations \ + -d '{"key": "value"}' +``` + +Use RIE when you're building container-image Lambdas and want to test them without SAM overhead. + +## LocalStack + +A full AWS mock that emulates Lambda, S3, SQS, DynamoDB, API Gateway, and dozens more services in a single container. Community edition is free; Pro ($35/month) adds more services and persistent state. + +**When to use:** integration tests that span multiple AWS services (e.g. an EventBridge rule that triggers a Lambda that writes to DynamoDB). Without LocalStack you'd need a real AWS account for these tests. + +**When to avoid:** if you only need one service (just S3 → use MinIO; just Lambda → use SAM/RIE). LocalStack's Lambda emulation has occasional edge-case differences from the real runtime. + +```bash +docker run --rm -p 4566:4566 localstack/localstack +AWS_DEFAULT_REGION=us-east-1 \ + AWS_ACCESS_KEY_ID=test \ + AWS_SECRET_ACCESS_KEY=test \ + aws --endpoint-url=http://localhost:4566 s3 ls +``` + +## MinIO (this project) + +MinIO is an S3-compatible object store that runs locally in Docker. It implements the S3 API precisely enough that `boto3`/`aioboto3` needs only an `endpoint_url` override to work against it. It is **not** a Lambda emulator — it replaces S3 only. + +```bash +make up # starts MinIO on :9000 (API) and :9001 (console) +SOURCE_DIR=~/pdfs make seed # uploads PDFs to MinIO +make invoke # runs lambda_function.py against MinIO via invoke.py +``` + +This is the lightest possible local setup: no Docker-in-Docker, no SAM overhead, minimal latency. The function handler runs in your local Python process against a real S3-compatible store. Differences from real Lambda (no execution environment lifecycle, no /tmp isolation between runs) are acceptable for the development loop but not for environment-fidelity tests. 
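For reference, the whole MinIO trick is one client argument. A sketch, with credentials matching the docker-compose defaults above (local development only — never hard-code credentials for real AWS):

```python
# sketch: a boto3 client pointed at the MinIO container from `make up`
# (credentials are the docker-compose defaults; local development only)
import boto3

s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:9000",
    aws_access_key_id="minioadmin",
    aws_secret_access_key="minioadmin",
)

print([b["Name"] for b in s3.list_buckets()["Buckets"]])
```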
+ +## Decision matrix + +| Need | Reach for | +|------|-----------| +| Fast iteration on handler logic | MinIO + `python invoke.py` (this project's setup) | +| Emulate Lambda runtime + API Gateway locally | SAM CLI | +| Test a container-image Lambda | Lambda RIE via Docker | +| Integration test across multiple AWS services | LocalStack | +| Full-fidelity staging before prod | Real AWS account, separate environment | diff --git a/docs/lambdas-md/lambda-15-cicd.md b/docs/lambdas-md/lambda-15-cicd.md new file mode 100644 index 0000000..f3e6b41 --- /dev/null +++ b/docs/lambdas-md/lambda-15-cicd.md @@ -0,0 +1,75 @@ +# CI/CD + +> Aliases, versions, traffic shifting, blue/green. Plain CLI → SAM → CDK → Terraform. + +## Versions and aliases + +**Versions** are immutable snapshots of a function's code and configuration. When you publish a version (`aws lambda publish-version`), AWS creates an immutable ARN like `arn:…:function:my-fn:7`. `$LATEST` is the only mutable version — always reflects the most recent code upload. + +**Aliases** are named pointers to a version. `prod` might point to version 7; `staging` might point to version 8. Event source mappings, API Gateway integrations, and Step Functions tasks should target aliases, not version ARNs — this decouples deployment (publishing a new version) from promotion (updating the alias). + +## Traffic shifting (blue/green) + +An alias can split traffic across two versions with weighted routing: + +```bash +aws lambda update-alias \ + --function-name my-fn \ + --name prod \ + --function-version 8 \ + --routing-config AdditionalVersionWeights={"7"=0.9} +# result: 10% of prod traffic goes to v8, 90% still to v7 +``` + +Start at 10% canary, watch error rates in CloudWatch, shift to 50%, then 100%. Rollback is instant: point the alias back to the stable version. No instance drain, no connection draining — Lambda is stateless, cutover is atomic. + +## CodeDeploy integration + +SAM and CDK can wire up CodeDeploy for automatic traffic shifting with automatic rollback on CloudWatch alarms. You declare the deployment preference in the template: + +```yaml +# SAM template.yaml +DeploymentPreference: + Type: Canary10Percent5Minutes # 10% for 5 min, then 100% + Alarms: + - !Ref ErrorRateAlarm # rolls back if alarm triggers +``` + +CodeDeploy manages the alias weight changes and calls the rollback if the alarm fires — fully automated blue/green without manual traffic management. 
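The same canary loop without CodeDeploy is a dozen lines of boto3 — a sketch assuming versions `7` (stable) and `8` (new) and an alarm named `ErrorRateAlarm`, all illustrative; `update_alias` and `describe_alarms` are the real calls, the bake time and step sizes are arbitrary:

```python
# sketch: manual weighted canary with rollback (names/versions are illustrative)
import time
import boto3

lam = boto3.client("lambda")
cw = boto3.client("cloudwatch")

def alarm_firing(name: str) -> bool:
    alarms = cw.describe_alarms(AlarmNames=[name])["MetricAlarms"]
    return any(a["StateValue"] == "ALARM" for a in alarms)

# each step: fraction of traffic still routed to the OLD version 7
for old_weight in ({"7": 0.9}, {"7": 0.5}, {}):   # {} = 100% to version 8
    lam.update_alias(
        FunctionName="my-fn",
        Name="prod",
        FunctionVersion="8",
        RoutingConfig={"AdditionalVersionWeights": old_weight},
    )
    time.sleep(300)  # bake time before widening the canary
    if alarm_firing("ErrorRateAlarm"):
        # rollback is atomic: point the alias back at the stable version
        lam.update_alias(FunctionName="my-fn", Name="prod",
                         FunctionVersion="7", RoutingConfig={})
        break
```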
## Deployment tooling progression

| Tool | Good for | Caveats |
|------|----------|---------|
| **AWS CLI / SDK** | One-off deployments, scripting, deep control | Verbose; no state management; drift-prone at scale |
| **SAM (CloudFormation extension)** | Lambda-first projects; built-in local testing; CodeDeploy integration | CloudFormation speed; YAML verbosity; AWS-only |
| **CDK** | Complex infra in TypeScript/Python; reusable constructs; type safety | Still compiles to CloudFormation; learning curve; bootstrapping required |
| **Terraform (AWS provider)** | Multi-cloud orgs; large existing Terraform estate; strong community modules | No built-in Lambda local testing; plan/apply cycle slower than SAM deploy |
| **Serverless Framework** | Multi-cloud serverless; plugin ecosystem | V3 → V4 became paid for teams; community plugins vary in quality |

## CI pipeline skeleton

```yaml
# GitHub Actions example
jobs:
  deploy:
    steps:
      - uses: actions/checkout@v4
      - name: Build zip
        run: |
          docker run --rm -v $PWD:/var/task \
            --entrypoint "" \
            public.ecr.aws/lambda/python:3.13 \
            pip install -r requirements.txt -t package/
          cd package && zip -r ../function.zip . && cd ..
          zip function.zip lambda_function.py
      - name: Deploy
        run: |
          aws lambda update-function-code \
            --function-name my-fn --zip-file fileb://function.zip
          aws lambda wait function-updated --function-name my-fn
          VERSION=$(aws lambda publish-version \
            --function-name my-fn --query Version --output text)
          aws lambda update-alias --function-name my-fn \
            --name prod --function-version "$VERSION"
```

The `wait function-updated` call is important — `update-function-code` is asynchronous and `publish-version` must wait for it to complete. diff --git a/docs/lambdas-md/lambda-16-pitfalls.md b/docs/lambdas-md/lambda-16-pitfalls.md new file mode 100644 index 0000000..3078630 --- /dev/null +++ b/docs/lambdas-md/lambda-16-pitfalls.md @@ -0,0 +1,57 @@
# Pitfalls — The Must-Knows

> The list to skim before the next interview or design review. Each item has bitten someone in production.

## Execution model

1. **Module-level state leaks across invocations.** A list you append to in the handler grows forever on warm calls. A counter you increment is wrong by the second request. If it's mutable and lives at module scope, treat it as either a deliberate cache or a bug.
2. **Handler globals are shared by every invocation on that env, but not across envs.** "I cached the result" works locally; in production half your traffic gets the cached value, the other half doesn't, depending on which warm container they hit. Externalise (Redis, DynamoDB) or accept the variance.
3. **/tmp is per-environment, not per-invocation.** If you write `/tmp/output.json` with a fixed name, the next warm invocation finds yesterday's file. Always use a per-invocation suffix (UUID, request ID).
4. **Init phase has a hard 10 s cap.** If you import TensorFlow, hydrate a 500 MB model, or do a network call at module scope, you can blow this budget on cold start. Defer expensive work until the first handler call (lazy init), or pay for Provisioned Concurrency so init happens before traffic arrives.
5. **Async `asyncio.run` in a sync handler creates a fresh event loop per invocation.** Acceptable, but means async clients can't be shared across invocations the way sync boto3 clients can. Profile before assuming async is faster.

## Payload & size limits

6. **6 MB sync response cap is silent.** Returning a JSON list of 50 000 items "works" in the function but the API GW caller gets 413.
The fix in `lambda_function.py` — return a presigned URL to a manifest file rather than the full list — is the standard pattern. +7. **API Gateway caps integration time at 29 s.** Doesn't matter if your Lambda timeout is 15 minutes. For longer work, return a job ID and poll, or use Function URLs (15 min) with response streaming. +8. **Environment variables max 4 KB total.** Big secrets (RSA keys, JSON config blobs) blow this. Parameter Store / Secrets Manager and read on init. + +## Concurrency & throttling + +9. **Default account concurrency is 1 000 per region.** Most teams hit this before they realise. Sets a hard ceiling on RPS — at 100 ms latency, that's 10 000 RPS account-wide; at 1 s, 1 000 RPS. +10. **Reserved concurrency = 0 disables the function.** Looks weird, used as a circuit breaker. +11. **Provisioned concurrency double-bills.** You pay for the warm slots *and* for invocations against them. Worth it for latency-sensitive paths; wasteful for batch. +12. **Burst limit is regional and finite.** A traffic spike from 0 to 5 000 RPS will throttle until AWS scales up at +500 envs/min. Provisioned concurrency or pre-warming is the fix. + +## Triggers, retries, idempotency + +13. **Async invocation retries 2 times by default.** Total 3 attempts. If your handler isn't idempotent, you can charge a card three times. +14. **S3, SNS, EventBridge invoke async — at-least-once.** Plan for duplicates. SQS standard is also at-least-once. SQS FIFO and Kinesis are exactly-once-ish per shard but with their own quirks. +15. **SQS visibility timeout must be ≥ 6× function timeout.** Otherwise the message comes back while you're still processing it, and you do the work twice (or more). +16. **Partial batch failures need explicit signalling.** Returning `batchItemFailures` for SQS/Kinesis tells AWS which records to retry; otherwise the entire batch retries or none does. +17. **API Gateway error responses are JSON-shaped if you don't say otherwise.** Throw an unhandled exception and the client sees `{"errorMessage": "...", "errorType": "..."}` with status 502. Map errors yourself. + +## Networking, IAM, observability + +18. **Putting Lambda in a VPC adds an ENI cold-start penalty** (improved a lot in 2019, but still real for first invocation). Only do it if you genuinely need private-subnet resources. Outbound internet from VPC Lambda needs NAT, which costs money 24/7. +19. **S3 access from a VPC Lambda needs a VPC gateway endpoint or NAT.** Without one, your S3 calls hang and time out — looks like a code bug, isn't. +20. **CloudWatch log groups default to "Never expire" retention.** Verbose Lambdas can rack up real cost in CW Logs alone — set retention (7/14/30 days) on every log group you create. +21. **Lambda execution role is implicit on every action.** Forgetting `s3:GetObject` or `kms:Decrypt` on the bucket's CMK is the most common "but it works locally" failure. CloudTrail tells you what was denied. +22. **Resource policy vs execution role are different layers.** Resource policy says "who can *invoke* this Lambda"; execution role says "what this Lambda can *do*". Both must allow. +23. **X-Ray needs an SDK call *and* tracing enabled on the function *and* IAM permission.** Three switches. People flip one and conclude X-Ray is broken. + +## Deployment, dependencies, runtimes + +24. **The boto3 in the Python runtime lags pip.** If you need a recent API (e.g. new S3 features), bundle current boto3 in your zip. The runtime version is "good enough" for stable APIs, "sometimes wrong" for fresh ones. 
+25. **Native wheels must match Lambda's runtime architecture.** `pip install` on a Mac and zip-uploading `cryptography` is a classic foot-gun. Build in a Docker image matching `public.ecr.aws/lambda/python:3.13`. +26. **arm64 saves ~20 % at the same memory** but *some* wheels are still x86-only. Audit your deps before flipping the architecture. +27. **Layers are merge-ordered; later layers overwrite earlier.** A "base" layer for your shared dependencies works; conflicting layers silently shadow each other. +28. **Container-image deploys are cached on the Lambda host.** First cold start can be slow (image pull); subsequent are normal. Keep images small even though the limit is 10 GB. + +## Time, scheduling, secrets + +29. **EventBridge schedule (cron/rate) is always UTC.** "9 AM" in your local time means something different in production. Use the new EventBridge Scheduler (2022) for time-zone-aware schedules. +30. **Async invocations have a 6-hour event age.** If retries fail past that, the event is silently dropped unless you've set a DLQ or on-failure destination. +31. **Secrets in env vars are visible to anyone with `lambda:GetFunctionConfiguration`.** Encrypted at rest, plaintext in the console. Use Secrets Manager / Parameter Store for actual secrets. + +> ⚠️ **Skim test:** if you can re-state the cold-start split (Init / Handler), the 6 MB / 256 KB / 4 KB / 250 MB / 10 GB constants, and the difference between resource policy and execution role from memory, you'll handle most "tell me about Lambda" interview questions. diff --git a/docs/lambdas-md/lambda-17-adjacent.md b/docs/lambdas-md/lambda-17-adjacent.md new file mode 100644 index 0000000..3df8dab --- /dev/null +++ b/docs/lambdas-md/lambda-17-adjacent.md @@ -0,0 +1,40 @@ +# Adjacent + +> Brief orientation on AWS Glue and Prometheus/Grafana — the secondary gaps from the interview. + +## AWS Glue + +Glue is a managed Spark-based ETL service. Lambda and Glue solve different problems: + +| | Lambda | Glue | +|--|--------|------| +| **Runtime model** | Serverless; up to 15 min; one handler at a time per env | Managed Spark cluster; hours-long jobs; distributed compute | +| **Data scale** | Up to a few GB comfortably | TB to PB natively | +| **Language** | Python, Node, Java, Go, custom runtime | PySpark, Scala; Glue Studio for no-code | +| **Startup time** | Milliseconds (warm) | 1–2 minutes to provision Spark cluster | +| **Cost model** | Per request + per ms | Per DPU-hour (1 DPU = $0.44/hr); 10-minute minimum billing | +| **Use for** | Light transforms, event reactions, API backends | Large-scale joins, aggregations, schema inference on data lake | + +Key Glue concepts to know: **DynamicFrame** (Glue's DataFrame variant with schema flexibility), **Glue Catalog** (centralised metadata store for table schemas — also used by Athena), **Job Bookmarks** (Glue tracks processed S3 partitions to avoid reprocessing on incremental runs). + +The decision is usually straightforward: if the data fits in Lambda's memory and the job finishes in under 15 minutes, use Lambda. If you're joining multiple large S3 datasets or transforming daily partition files, use Glue. + +## Prometheus + +Prometheus is a pull-based time-series metrics system. It scrapes HTTP `/metrics` endpoints on a schedule. The fundamental tension with Lambda: Lambda functions are ephemeral — there's no persistent HTTP endpoint to scrape, and the function may be at zero concurrency between invocations. 
Options for Lambda → Prometheus:

- **EMF → CloudWatch → Grafana CloudWatch plugin** — no Prometheus involved. Grafana reads directly from CloudWatch. Easiest for AWS-native stacks.
- **Remote write to Amazon Managed Prometheus (AMP)** — the function pushes metrics to AMP via the Prometheus remote_write API at the end of each invocation. Grafana or Amazon Managed Grafana reads from AMP. Requires the `prometheus_client` library and SIGV4 signing on the remote_write request.
- **Push gateway** — a persistent intermediate that Lambda pushes to; Prometheus scrapes the gateway. More infrastructure to manage, stale metric risk if the push gateway isn't flushed between invocations.

## Grafana

Grafana is a dashboarding layer — it doesn't store data, it queries data sources. Relevant data sources for Lambda observability:

- **CloudWatch** — built-in Grafana plugin; queries CW Metrics and CW Logs Insights. Zero extra infrastructure. The standard choice for Lambda metrics (invocations, errors, duration, throttles, concurrent executions).
- **Amazon Managed Prometheus** — query via PromQL if you've pushed custom metrics.
- **Amazon Managed Grafana (AMG)** — Grafana-as-a-service; integrates with AWS IAM; auto-discovers CW namespaces. Avoids self-hosting Grafana.

For a Lambda-only stack with no existing Prometheus investment, the practical answer is: use EMF for custom metrics, use CloudWatch for the built-in Lambda metrics, and connect Grafana to CloudWatch. It requires no extra infrastructure and gives you dashboards in an hour. diff --git a/docs/lambdas-md/lambda-18-labs.md b/docs/lambdas-md/lambda-18-labs.md new file mode 100644 index 0000000..07e253b --- /dev/null +++ b/docs/lambdas-md/lambda-18-labs.md @@ -0,0 +1,80 @@
# Labs

> Hands-on walkthroughs that modify the existing app. Each mutates what you already have — no throw-away exercises.

## Lab 0 — Local sandbox (start here)

**Goal:** run the full stack locally against MinIO with real PDFs.

1. `make install` — creates `.venv` and installs deps
2. `make up` — starts MinIO on :9000 (API) and :9001 (console)
3. `SOURCE_DIR=~/path/to/pdfs make seed` — uploads PDFs to MinIO bucket
4. `make invoke` — runs `invoke.py` which calls `handler()` with a minimal event
5. Open `http://localhost:9001` (minioadmin/minioadmin) and find the generated manifest in the `manifests/` prefix

**What you can break:** set `PREFIX` to a non-existent prefix and observe the handler returns count=0. Set `QUEUE_MAX=1` and observe the backpressure on the producer. Remove `S3_ENDPOINT_URL` and watch it fail to connect.

## Lab 1 — Deploy to real AWS

**Goal:** package and deploy the function to AWS Lambda, invoke it against a real S3 bucket.

1. Create an S3 bucket and upload sample PDFs to `2026/04/` prefix
2. Create an IAM execution role with `s3:GetObject`, `s3:PutObject`, `s3:ListBucket`, and `logs:*`
3. Build the deployment zip inside the Lambda image:
   `docker run --rm -v $PWD:/var/task --entrypoint "" public.ecr.aws/lambda/python:3.13 pip install -r requirements.txt -t package/`
4. Create the function: `aws lambda create-function --handler lambda_function.handler …`
5. Invoke: `aws lambda invoke --function-name pdf-scanner --payload '{}' out.json`
6. Verify the manifest appeared in S3 and the presigned URL works

**What you can break:** invoke without `s3:ListBucket` on the bucket (not the object ARN) — observe AccessDenied. Watch CloudTrail to see the denied call.
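Step 5 can also be done from Python instead of the CLI — a sketch assuming the lab's function name; checking `FunctionError` matters because the Invoke API still returns HTTP 200 when the handler raised:

```python
# sketch: Lab 1 step 5 via boto3 (function name from the lab)
import json
import boto3

lam = boto3.client("lambda")

resp = lam.invoke(
    FunctionName="pdf-scanner",
    InvocationType="RequestResponse",  # synchronous
    Payload=b"{}",
)
body = json.loads(resp["Payload"].read())
if resp.get("FunctionError"):
    # present only when the handler raised an unhandled exception
    raise RuntimeError(body)
print(body["body"])  # JSON string with count, manifest_key, manifest_url
```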
## Lab 2 — Add an S3 trigger

**Goal:** make the function fire automatically when a PDF is uploaded.

1. Add a resource policy entry granting S3 `lambda:InvokeFunction`
2. Configure an S3 event notification on the bucket for `s3:ObjectCreated:*` filtered to `*.pdf`
3. Upload a PDF and check CloudWatch Logs for the invocation
4. Notice the event structure differs from the manual invoke — update the handler to extract the key from `event["Records"][0]["s3"]["object"]["key"]`

**What you can break:** upload a non-PDF to the same prefix and verify the filter prevents invocation. Remove the resource policy and verify the trigger silently stops firing (no error to the uploader — this is the async invocation model).

## Lab 3 — Switch to arm64

**Goal:** migrate to Graviton2 and verify 20% cost reduction.

1. Rebuild the zip using the arm64 Lambda image: `public.ecr.aws/lambda/python:3.13-arm64`
2. Switch architecture and code in one call — architecture is a property of the code package, so it's set via `update-function-code`, not `update-function-configuration`: `aws lambda update-function-code --function-name pdf-scanner --architectures arm64 --zip-file fileb://function.zip`
3. Invoke and compare REPORT duration and billed duration in CloudWatch

**What you can break:** try deploying the x86 zip against the arm64 architecture — the function will import-error on any C-extension wheels.

## Lab 4 — Enable Provisioned Concurrency

**Goal:** eliminate cold starts on the production alias.

1. Publish version 1: `aws lambda publish-version --function-name pdf-scanner`
2. Create alias `prod` pointing to version 1
3. Enable PC: `aws lambda put-provisioned-concurrency-config --function-name pdf-scanner --qualifier prod --provisioned-concurrent-executions 2`
4. Invoke via the alias ARN and confirm `Init Duration` is absent from REPORT lines
5. Check your AWS bill after 1 hour — note the PC charges

## Lab 5 — Add X-Ray tracing

**Goal:** see a trace with S3 subsegments in the X-Ray console.

1. Add `aws-xray-sdk` to `requirements.txt` and rebuild the zip
2. Add to `lambda_function.py`: `from aws_xray_sdk.core import patch_all; patch_all()`
3. Enable active tracing on the function and add X-Ray permissions to the execution role
4. Invoke and open X-Ray → Traces in the console — verify S3 `list_objects_v2` and `generate_presigned_url` appear as subsegments

## Lab 6 — Fan out with Step Functions

**Goal:** process multiple S3 prefixes in parallel using a Map state.

1. Update the handler to accept a `prefix` key in the event (instead of reading from env var)
2. Create a Step Functions state machine with a Map state that iterates over a list of prefixes and invokes the Lambda for each
3. Start an execution with input: `{"prefixes": ["2026/01/", "2026/02/", "2026/03/"]}`
4. Observe parallel Lambda invocations in the execution graph and CloudWatch
5. Add error handling: configure the Map state to catch Lambda errors and continue rather than fail the whole execution diff --git a/docs/lambdas-md/lambda-19-repository.md b/docs/lambdas-md/lambda-19-repository.md new file mode 100644 index 0000000..1fc59ef --- /dev/null +++ b/docs/lambdas-md/lambda-19-repository.md @@ -0,0 +1,258 @@
# Repository

> Tree of `eth/` — the sandbox plus this study site.
+ +``` +eth/ +├── lambda_function.py — handler: async PDF scan → presigned URLs → JSONL manifest +├── invoke.py — local runner: calls handler() with a minimal event, prints result +├── seed.py — uploads PDFs from a local directory to MinIO +├── requirements.txt — aioboto3, aiofiles (+ transitive: aiobotocore, botocore…) +├── docker-compose.yml — runs MinIO on :9000 (S3 API) and :9001 (web console) +├── Makefile — install / up / down / seed / invoke / graphs / docs +├── def/ +│ └── task.md — original interview exercise specification +└── docs/ + ├── index.html — this study site (single-page, no build step) + ├── viewer.html — pan/zoom SVG viewer (opened by graph links) + └── graphs/ + ├── system_overview.dot / .svg — caller → handler → MinIO/S3 → manifest + ├── lifecycle.dot / .svg — init / handler / freeze / thaw / shutdown + └── cold_warm_timeline.dot / .svg — cold vs warm invocation timeline +``` + +## Walking through lambda_function.py + +### What the function does, end to end + +In one paragraph: the function lists every PDF inside an S3 prefix. For each one, it generates a presigned download URL that expires in 15 minutes. It writes those (key, URL) pairs into a JSONL file in `/tmp` as it goes. When the listing is done, it uploads the JSONL to S3 as a manifest, generates one more presigned URL pointing to the manifest itself, deletes the local file, and returns the manifest URL plus the count. + +The use case: you want to ship a batch of files to someone who isn't on your AWS account. Send them one URL. They open it, get back a list of links, every link works for 15 minutes, then everything dies. + +### Imports and module-scope config + +```python +import asyncio +import json +import os +import uuid + +import aioboto3 +import aiofiles +``` + +`aioboto3` is the async version of boto3 — async S3 calls, so we can overlap I/O. `aiofiles` is async filesystem access — same reason. + +```python +BUCKET = os.environ.get("BUCKET_NAME", "my-company-reports-bucket") +PREFIX = os.environ.get("PREFIX", "2026/04/") +EXPIRY = int(os.environ.get("URL_EXPIRY_SECONDS", "900")) +ENDPOINT = os.environ.get("S3_ENDPOINT_URL") or None +QUEUE_MAX = int(os.environ.get("QUEUE_MAX", "2000")) + +_DONE = object() +``` + +Five environment reads at module scope — init phase. They run once per cold start, get cached as Python module attributes, and every warm invocation reuses them for free. + +`ENDPOINT` is the trick that lets this run against MinIO locally. When you run on real Lambda, you don't set the env var, the value is `None`, and aioboto3 talks to real S3. When you run locally, you set it to `http://localhost:9000` and the same code talks to MinIO. The function doesn't know the difference. + +`_DONE` is a sentinel — a unique singleton put on the queue to signal "no more items coming." The reason it's an `object()` and not a string: a string could theoretically collide with a real S3 key. An `object()` instance has a unique identity; comparing with `is` — not `==` — is unambiguous. + +### The handler — minimal on purpose + +```python +def handler(event, context): + result = asyncio.run(_run()) + return {"statusCode": 200, "body": json.dumps(result)} +``` + +The handler is sync because Lambda's contract is sync. AWS calls `handler(event, context)` and waits for it to return. Inside, `asyncio.run` opens a fresh event loop, runs the async coroutine, gets back a result. 
The API-Gateway-style response shape (`statusCode` + `body`) is a habit — useful when the function gets fronted by API Gateway later; a pure Lambda invoke doesn't need it but it doesn't hurt. + +`asyncio.run` creates a fresh event loop per invocation. This means async clients can't be shared across invocations the way sync boto3 clients can. The cost is small — tens of microseconds — but it's the reason the S3 client is created inside `_run`, not at module scope. + +Why async at all? Lambda bills per millisecond of wall-clock time. Anything you can overlap, you save money on. The function does a lot of S3 calls — listing pages, generating presigned URLs, writing files. While S3 is preparing the next page of results, the consumer is already presigning and writing the previous page. That overlap directly reduces duration and cost. + +### `_run()` — the actual work + +```python +async def _run(): + session = aioboto3.Session() + async with session.client("s3", endpoint_url=ENDPOINT) as s3: + queue: asyncio.Queue = asyncio.Queue(maxsize=QUEUE_MAX) + manifest_path = f"/tmp/{uuid.uuid4()}.jsonl" +``` + +The session is created inside `_run`, not at module scope, because aioboto3 async clients are tied to the event loop — and each invocation gets a fresh event loop via `asyncio.run`. Sync boto3 clients you'd put at module scope; async ones you create per invocation. + +The queue has a maximum size of 2000 by default. Without the bound, if the producer is faster than the consumer, the queue grows in memory. Lambda has at most 10 GB of memory, usually 256–512 MB. Scanning a bucket with a million PDFs and loading them all before presigning even one would OOM. The bounded queue gives backpressure: when full, `await queue.put(...)` blocks until the consumer takes something off. Memory stays flat. + +The manifest path uses a UUID so that back-to-back warm invocations on the same environment don't collide on `/tmp`. (`/tmp` persists across warm invocations; a fixed filename would be a race condition.) + +### The producer + +```python + async def producer(): + paginator = s3.get_paginator("list_objects_v2") + async for page in paginator.paginate(Bucket=BUCKET, Prefix=PREFIX): + for obj in page.get("Contents", []) or []: + key = obj["Key"] + if key.lower().endswith(".pdf"): + await queue.put(key) + await queue.put(_DONE) +``` + +Defined inside `_run` as a closure — captures `s3` and `queue` from the enclosing scope without arguments. Also signals it's a private implementation detail. + +S3 returns at most 1000 objects per page. The paginator hides the pagination — `async for page in paginator.paginate(...)` transparently fetches the next page when needed. For each object, filter by `.pdf` (case-insensitive) and put the key on the queue. + +When the paginator is exhausted, put `_DONE` on the queue. That tells the consumer to stop. `asyncio.Queue` has no close method — the sentinel is the standard pattern. + +`await queue.put(key)` blocks if the queue is full. That's the backpressure: producer pauses until consumer takes something off. + +### The consumer + +```python + async def consumer(): + count = 0 + async with aiofiles.open(manifest_path, "w") as f: + while True: + item = await queue.get() + if item is _DONE: + break + url = await s3.generate_presigned_url( + "get_object", + Params={"Bucket": BUCKET, "Key": item}, + ExpiresIn=EXPIRY, + ) + await f.write(json.dumps({"key": item, "url": url}) + "\n") + count += 1 + return count +``` + +Same closure pattern. Opens the manifest file async. 
Loops forever, pulling from the queue. On sentinel, breaks. Otherwise generates a presigned URL and writes a JSONL line.
+
+`generate_presigned_url` is a **local computation**, not a network call. It uses your credentials, bucket, key, expiry, and region to produce a signed URL deterministically. Fast — no HTTP request.
+
+Why JSONL instead of a JSON array? Because JSONL streams. You write one line at a time without buffering the whole array in memory. The reader can process one line at a time. If the manifest grows to gigabytes, JSONL stays usable.
+
+### Running them together
+
+```python
+        prod_task = asyncio.create_task(producer())
+        count = await consumer()
+        await prod_task
+```
+
+`create_task` schedules the producer on the event loop and returns immediately — the producer runs in the background. `await consumer()` runs the consumer in the foreground until it sees the sentinel and returns the count. `await prod_task` ensures the producer has fully completed and propagates any exceptions.
+
+This is the overlap: while S3 is preparing the next LIST page (network round trip), the consumer is presigning and writing the previous page. Sequential would be: list everything, then presign everything. With overlap you pay only the larger of the two latencies. For thousands of files, this cuts wall-clock time and cost noticeably.
+
+### Uploading the manifest
+
+```python
+        manifest_key = f"manifests/{uuid.uuid4()}.jsonl"
+        async with aiofiles.open(manifest_path, "rb") as f:
+            body = await f.read()
+        await s3.put_object(
+            Bucket=BUCKET,
+            Key=manifest_key,
+            Body=body,
+            ContentType="application/x-ndjson",
+        )
+```
+
+Read the `/tmp` file as bytes and upload with `put_object`. Content type `application/x-ndjson` is the de facto MIME type for newline-delimited JSON — not IANA-registered, but what NDJSON tooling expects. `put_object` rather than `upload_file` because the file sits in the hundreds-of-KB to few-MB range, small enough that aioboto3's async multipart machinery buys nothing.
+
+### Generating the manifest URL and cleaning up
+
+```python
+        manifest_url = await s3.generate_presigned_url(
+            "get_object",
+            Params={"Bucket": BUCKET, "Key": manifest_key},
+            ExpiresIn=EXPIRY,
+        )
+
+        os.unlink(manifest_path)
+
+        return {
+            "count": count,
+            "manifest_key": manifest_key,
+            "manifest_url": manifest_url,
+        }
+```
+
+Presign the manifest itself. Delete the `/tmp` file — `/tmp` persists across warm invocations; without cleanup, a thousand invocations on the same environment would fill it. Return the count, the S3 key, and the URL. The handler wraps that in `{"statusCode": 200, "body": ...}` and returns.
+
+### Why this design?
+
+**Why presigned URLs, not return the data directly?** The response is small (one URL), the recipient doesn't need an AWS account to use it, and it expires automatically. The URL is signed with your credentials and works for anyone who has it, for 15 minutes.
+
+**Why upload the manifest to S3 and return a URL to it, instead of returning the manifest contents inline?** The 6 MB sync response cap. Ten thousand presigned URLs in JSONL is 3–5 MB. Twenty thousand blows the cap — and it fails late: the function itself succeeds, then the caller gets a 413 instead of the data. The manifest-in-S3 pattern has no upper bound.
+
+**Why async?** Overlap S3 LIST calls with presigning and file writes. Even though presigning is local, the LIST round trips and the final upload benefit from non-blocking I/O.
+
+**Why producer and consumer instead of one loop?** The producer is bursty (it dumps up to 1000 keys per page). The consumer is steady.
Decoupling with a queue means the producer races ahead while the consumer drains, instead of LIST → presign → LIST → presign serially.
+
+**Why a bounded queue?** Backpressure. Without the bound, the producer can outrun the consumer and exhaust memory. With the bound, `await queue.put(...)` blocks when full. Memory stays flat regardless of bucket size.
+
+**Why a sentinel and not closing the queue?** `asyncio.Queue` has no close method. The sentinel is the standard "done" signal.
+
+**Why nested functions?** Closures over `s3`, `queue`, `manifest_path`. No arguments to pass. Private implementation details of `_run`.
+
+**Why UUID in the `/tmp` filename?** `/tmp` persists across warm invocations. A fixed filename collides between back-to-back runs. UUID guarantees uniqueness.
+
+**Why `_DONE = object()` instead of a string sentinel?** An `object()` instance has a unique identity that can't possibly collide with any real S3 key. `is` comparison (identity, not equality) is unambiguous.
+
+**Why `os.unlink` at the end?** `/tmp` is per-environment, at most 10 GB, and persists. A thousand warm invocations without cleanup would fill it and crash subsequent runs.
+
+### Cold start vs warm — what you'd see in CloudWatch
+
+First invocation (cold):
+```
+REPORT RequestId: ... Duration: 312.45 ms Billed Duration: 313 ms
+    Memory Size: 256 MB Max Memory Used: 89 MB
+    Init Duration: 423.12 ms
+```
+
+`Init Duration` ~400 ms covers importing aioboto3 and aiofiles (aioboto3 pulls in aiobotocore which pulls in botocore — heavy). `Duration` ~300 ms is the actual scan: list, presign, write, upload.
+
+Second invocation within 30 seconds (warm):
+```
+REPORT RequestId: ... Duration: 287.91 ms Billed Duration: 288 ms
+    Memory Size: 256 MB Max Memory Used: 91 MB
+```
+
+No `Init Duration` line. The invocation jumped straight to the handler, skipping the ~423 ms of init — about 450 ms saved end to end, counting the slightly faster handler. For a function that runs once a day, every invocation is cold and init matters. For one that runs every few seconds, almost everything is warm.
+
+### What happens if it times out
+
+The default function timeout is 3 s — almost certainly not enough. Set it explicitly to 30–60 s for a small prefix, up to 900 s (15 min) for a large one. If it times out, Lambda kills the process. The `/tmp` file may not have been deleted. The manifest may or may not have been uploaded. Re-running produces a fresh manifest with new UUIDs — the previous partial manifest stays in S3 until a lifecycle rule or manual cleanup removes it.
+
+### How would you scale this
+
+**Fan out by prefix.** Wrap it in a Step Functions Map state. Pass a list of prefixes; each map iteration runs one Lambda for one prefix. `MaxConcurrency` controls parallelism without saturating the account concurrency quota.
+
+**Go event-driven.** Subscribe to S3 `ObjectCreated` events filtered to `*.pdf`. The function fires once per upload and handles one file at a time. No producer/consumer needed — nothing to enumerate. Simpler, but different semantics: "process new files as they arrive" vs "scan the existing bucket."
+
+### What I'd change before production
+
+1. **Move `BUCKET` and `PREFIX` to the event payload** — currently set at deploy time, which means one function per prefix. Event-driven config lets one function serve many prefixes.
+2. **Structured logging** — JSON to stdout with `request_id`, `bucket`, `prefix`, `count`. Logs Insights can aggregate without regex.
+3. **EMF metric for `count`** — free CloudWatch metric, no additional API call. Dashboard "PDFs processed per invocation" over time.
+4. **Producer error handling** — if `paginator.paginate` raises, the producer task fails but the consumer keeps blocking on `queue.get()` forever, and the function times out. Wrap the producer body in `try/finally` that always puts `_DONE` on the queue so the consumer exits cleanly (see the sketch after this list).
+5. **Explicit timeout on `queue.get()`** — `await asyncio.wait_for(queue.get(), timeout=X)` prevents the consumer hanging indefinitely if the producer dies without putting the sentinel.
+6. **Consider sync boto3** — `aioboto3` adds ~200 ms to the cold start. If cold start matters and file counts are small, sync boto3 with threading is simpler and starts faster. Async pays off when file counts are large enough that overlap is significant.
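+Here's a minimal sketch of fixes 4 and 5 — the `try/finally` producer plus a bounded wait on the consumer side. It reuses the names from `_run`; the 30-second timeout is an arbitrary illustration:
+
+```python
+        async def producer():
+            try:
+                paginator = s3.get_paginator("list_objects_v2")
+                async for page in paginator.paginate(Bucket=BUCKET, Prefix=PREFIX):
+                    for obj in page.get("Contents", []) or []:
+                        key = obj["Key"]
+                        if key.lower().endswith(".pdf"):
+                            await queue.put(key)
+            finally:
+                # runs even when paginate() raises — the consumer is always released
+                await queue.put(_DONE)
+
+        # consumer side: don't trust the sentinel alone, bound the wait too
+        item = await asyncio.wait_for(queue.get(), timeout=30)
+```
+
+With the `finally` in place the consumer exits on producer error instead of idling until the function timeout; the `wait_for` is a belt-and-braces guard for the case where the producer task dies before the `finally` can run.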
+## Makefile targets
+
+| Target | What it does |
+|--------|--------------|
+| `make install` | Creates `.venv`, installs `requirements.txt` |
+| `make up` | Starts MinIO via `docker compose up -d` |
+| `make down` | Stops MinIO (keeps volumes) |
+| `make clean` | Stops MinIO and deletes volumes (wipes bucket data) |
+| `SOURCE_DIR=path make seed` | Uploads all files from `path` to MinIO |
+| `make invoke` | Runs `invoke.py` (calls `handler()` directly) |
+| `make graphs` | Renders all `docs/graphs/*.dot` → `.svg` via Graphviz `dot` |
+| `make docs` | Renders graphs, then opens `docs/index.html` |
diff --git a/docs/lambdas-md/lambda-README.md b/docs/lambdas-md/lambda-README.md
new file mode 100644
index 0000000..8f8834a
--- /dev/null
+++ b/docs/lambdas-md/lambda-README.md
@@ -0,0 +1,40 @@
+# AWS Lambda — Study notes & sandbox
+
+Study site built on top of a working Lambda + MinIO sandbox. Read the page, run the code, break things on purpose.
+
+## Foundations
+
+- [01 — Overview](lambda-01-overview.md)
+- [02 — Mental Model](lambda-02-mental-model.md)
+- [03 — Limits](lambda-03-limits.md)
+
+## Operating
+
+- [04 — Cold Starts](lambda-04-cold-starts.md)
+- [05 — Concurrency](lambda-05-concurrency.md)
+- [06 — Triggers](lambda-06-triggers.md)
+- [07 — IAM & Permissions](lambda-07-iam.md)
+- [08 — Packaging](lambda-08-packaging.md)
+- [09 — VPC & Networking](lambda-09-vpc-networking.md)
+
+## Production
+
+- [10 — Observability](lambda-10-observability.md)
+- [11 — Async & Errors](lambda-11-async-errors.md)
+- [12 — Step Functions](lambda-12-step-functions.md)
+- [13 — Cost](lambda-13-cost.md)
+- [14 — Local Dev](lambda-14-local-dev.md)
+- [15 — CI/CD](lambda-15-cicd.md)
+
+## Reference
+
+- [16 — Pitfalls](lambda-16-pitfalls.md)
+- [17 — Adjacent](lambda-17-adjacent.md)
+- [18 — Labs](lambda-18-labs.md)
+- [19 — Repository](lambda-19-repository.md)
+
+## Graphs
+
+- [System overview](lambda-system_overview.svg)
+- [Lifecycle](lambda-lifecycle.svg)
+- [Cold vs warm timeline](lambda-cold_warm_timeline.svg)
diff --git a/docs/lambdas-md/lambda-cold_warm_timeline.svg b/docs/lambdas-md/lambda-cold_warm_timeline.svg
new file mode 100644
index 0000000..be40362
--- /dev/null
+++ b/docs/lambdas-md/lambda-cold_warm_timeline.svg
@@ -0,0 +1,158 @@
[SVG markup stripped by extraction. Rendered Graphviz graph, title: "Cold vs warm — what gets billed, what gets measured". Invocation 1, cold: download code (~50–200 ms, not billed) → init phase (~200–800 ms typical; boto3/aioboto3 imports, client build; billed at full memory) → handler (~5–500 ms, billed) → freeze. Invocations 2 and 3, warm: thaw (microseconds, not billed) → handler (billed) → freeze. Notes: Init Duration appears only in cold-start logs; Duration is the handler portion only; Billed Duration rounds Duration up to 1 ms; with Provisioned Concurrency, init runs ahead of time and is paid in PC pricing, not per invocation. Same graph as docs/graphs/cold_warm_timeline.svg.]
diff --git a/docs/lambdas-md/lambda-function.py b/docs/lambdas-md/lambda-function.py
new file mode 100644
index 0000000..3d56525
--- /dev/null
+++ b/docs/lambdas-md/lambda-function.py
@@ -0,0 +1,79 @@
+import asyncio
+import json
+import os
+import uuid
+
+import aioboto3
+import aiofiles
+
+BUCKET = os.environ.get("BUCKET_NAME", "my-company-reports-bucket")
+PREFIX = os.environ.get("PREFIX", "2026/04/")
+EXPIRY = int(os.environ.get("URL_EXPIRY_SECONDS", "900"))
+ENDPOINT = os.environ.get("S3_ENDPOINT_URL") or None
+QUEUE_MAX = int(os.environ.get("QUEUE_MAX", "2000"))
+
+_DONE = object()
+
+
+async def _run():
+    session = aioboto3.Session()
+    async with session.client("s3", endpoint_url=ENDPOINT) as s3:
+        queue: asyncio.Queue = asyncio.Queue(maxsize=QUEUE_MAX)
+        manifest_path = f"/tmp/{uuid.uuid4()}.jsonl"
+
+        async def producer():
+            paginator = s3.get_paginator("list_objects_v2")
+            async for page in paginator.paginate(Bucket=BUCKET, Prefix=PREFIX):
+                for obj in page.get("Contents", []) or []:
+                    key = obj["Key"]
+                    if key.lower().endswith(".pdf"):
+                        await queue.put(key)
+            await queue.put(_DONE)
+
+        async def consumer():
+            count = 0
+            async with aiofiles.open(manifest_path, "w") as f:
+                while True:
+                    item = await queue.get()
+                    if item is _DONE:
+                        break
+                    url = await s3.generate_presigned_url(
+                        "get_object",
+                        Params={"Bucket": BUCKET, "Key": item},
+                        ExpiresIn=EXPIRY,
+                    )
+                    await f.write(json.dumps({"key": item, "url": url}) + "\n")
+                    count += 1
+            return count
+
+        prod_task = asyncio.create_task(producer())
+        count = await consumer()
+        await prod_task
+
+        manifest_key = f"manifests/{uuid.uuid4()}.jsonl"
+        async with aiofiles.open(manifest_path, "rb") as f:
+            body = await f.read()
+        await s3.put_object(
+            Bucket=BUCKET,
+            Key=manifest_key,
+            Body=body,
+            ContentType="application/x-ndjson",
+        )
+        manifest_url = await s3.generate_presigned_url(
+            "get_object",
+            Params={"Bucket": BUCKET, "Key": manifest_key},
+            ExpiresIn=EXPIRY,
+        )
+
+        os.unlink(manifest_path)
+
+        return {
+            "count": count,
+            "manifest_key": manifest_key,
+            "manifest_url": manifest_url,
+        }
+
+
+def handler(event, context):
+    result = asyncio.run(_run())
+    return {"statusCode": 200, "body": json.dumps(result)}
diff --git a/docs/lambdas-md/lambda-lifecycle.svg b/docs/lambdas-md/lambda-lifecycle.svg
new file mode 100644
index 0000000..2dc1f21
--- /dev/null
+++ b/docs/lambdas-md/lambda-lifecycle.svg
@@ -0,0 +1,159 @@
[SVG markup stripped by extraction. Rendered Graphviz graph, title: "Lambda execution environment lifecycle". Cold start: 1. download code (zip / container layers) → 2. start runtime bootstrap (python3.x) → 3. init phase (run module-level code, import boto3/aioboto3, build clients; billed, capped at 10 s). Invocation: handler(event, context) runs your code (billed) → return/raise → freeze. Warm reuse: freeze → thaw (microseconds) → handler, with globals retained (clients, /tmp, in-memory caches); idle ~5–15 min → shutdown, environment torn down, /tmp gone. Same graph as docs/graphs/lifecycle.svg.]
diff --git a/docs/lambdas-md/lambda-system_overview.svg b/docs/lambdas-md/lambda-system_overview.svg
new file mode 100644
index 0000000..a27ec81
--- /dev/null
+++ b/docs/lambdas-md/lambda-system_overview.svg
@@ -0,0 +1,193 @@
[SVG markup stripped by extraction. Rendered Graphviz graph, title: "Sample app — Lambda + MinIO sandbox". Caller (invoke.py locally; API Gateway, S3 event, or Step Functions on real AWS) sends an event to handler(event, context) in lambda_function.py, which spawns producer (list_objects_v2 paginator, filter *.pdf) and consumer (generate_presigned_url, append JSONL) tasks joined by an asyncio.Queue (maxsize=2000, backpressure). The consumer streams to /tmp/<uuid>.jsonl (ephemeral, 512 MB default), which is uploaded via put_object to MinIO (local) or real S3 as manifests/<uuid>.jsonl in my-company-reports-bucket. Response: {count, manifest_key, manifest_url} — under 1 KB, sidesteps the 6 MB cap. Same graph as docs/graphs/system_overview.svg.]
diff --git a/docs/lambdas-md/lambda_study_script.md b/docs/lambdas-md/lambda_study_script.md
new file mode 100644
index 0000000..a1b6274
--- /dev/null
+++ b/docs/lambdas-md/lambda_study_script.md
@@ -0,0 +1,1334 @@
+# STUDY SCRIPT — AWS Lambda, complete coverage
+## For the interview on Tuesday
+
+> Same tone as the actor's script. Spoken voice, talking to one person, your
+> rhythm. The difference: this includes everything from the lambda-* notes,
+> in the order the files appear. Read it through once. Then re-read the
+> sections where you stumble. The last section walks through
+> `lambda_function.py` line by line and answers every follow-up question
+> you couldn't field last time.
+>
+> English draft.
+
+---
+
+## 1. Overview
+
+### What we have
+
+Let me start with what we have, because the rest of this only makes sense once you've held the thing in your hands.
+
+There's a folder. Inside it, a Python file called `lambda_function.py` — that's the function. There's a `docker-compose.yml` that brings up MinIO on your laptop. MinIO is an S3-compatible object store. There's a Makefile that wraps the whole loop into about five commands.
There's an `invoke.py` that calls the function locally with a minimal event, the same way Lambda would call it in AWS. There's a `seed.py` that uploads PDFs from a local directory to MinIO so we have something to scan. + +What does the function do? It lists every PDF inside an S3 prefix. It generates a fifteen-minute presigned download link for each one. It writes them all out as JSONL into `/tmp`. It uploads that JSONL back to S3 as a manifest. And it returns one presigned URL pointing to the manifest. The recipient clicks the URL, gets back a list of links, all expire in fifteen minutes. + +It runs locally against MinIO. The same handler, same signature, same code, would run on real AWS Lambda the day you deploy it. The only thing that changes is the `S3_ENDPOINT_URL` environment variable. In MinIO it points to `http://localhost:9000`. In AWS, you don't set it, and boto3 talks to real S3. + +### How to use this script + +Read it in your voice. The reason that matters is — when you read someone else explaining a concept, you nod along. When you read the same concept as if you were the one explaining it, you immediately notice the gaps. The places where you'd stumble in front of an interviewer. Those are the places to study harder. + +Each section corresponds to one of the lambda notes files. They're numbered the same way. So when something feels thin, when you read it and think "I can't actually answer a follow-up on this," you know exactly which file to open. + +The last section walks through `lambda_function.py` line by line. That's the section you couldn't field in the last interview. We're going to fix it. + +--- + +## 2. Mental model + +### Lambda is a Linux process whose lifecycle is managed for you + +That's the one sentence to memorize. Most of the surprise about Lambda comes from forgetting that it's still a process. + +Each invocation runs inside an execution environment. That environment is a Firecracker microVM. Firecracker is open source, written by AWS in Rust, designed to boot a stripped-down Linux kernel in about a hundred and twenty-five milliseconds. Inside the microVM there's the Lambda runtime — for us, that's Python 3.13 — and your code is unpacked into a directory called `/var/task`. There's `/tmp` for scratch. There's an environment that holds your variables and credentials. + +AWS owns the VM. You own everything inside the process. The microVM is created on demand, kept warm for a while, then torn down when traffic stops. You don't pick a server, but there *is* a server, and it has memory, a clock, and a filesystem. Everything that surprises people about Lambda comes from forgetting that fact. + +### The two phases — the most useful split in all of Lambda + +Every cold start has two phases. + +The init phase is your module-level code. The imports at the top of the file. The construction of any client at module scope. The reading of environment variables. Anything that lives outside `def handler`. This phase has a hard cap of ten seconds. It's billed at the full configured memory, even if you don't use it all. And it runs *once*. + +The handler phase is `handler(event, context)` itself. It runs every invocation. Billed per millisecond at the configured memory. + +Subsequent invocations on the same warm environment skip the init phase entirely. They go straight to the handler. + +Heavy work at module level — pay it once per cold start, free for every warm request after. Heavy work inside the handler — pay it every invocation. 
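+A minimal sketch of the distinction — a hypothetical function, sync boto3 for clarity:
+
+```python
+import boto3
+
+# module scope — runs once per cold start; the client and its warm
+# connection pool are reused by every invocation on this environment
+s3 = boto3.client("s3")
+
+def handler(event, context):
+    # handler scope — runs every invocation; building the client here
+    # would redo config resolution and TLS handshakes each time
+    return {"buckets": [b["Name"] for b in s3.list_buckets()["Buckets"]]}
+```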
This single distinction is what most "Lambda is slow" complaints actually are. + +In our function, look at the top. Five environment variable reads at module scope. Read once per cold start. Reused for every warm request. + +```python +BUCKET = os.environ.get("BUCKET_NAME", "my-company-reports-bucket") +PREFIX = os.environ.get("PREFIX", "2026/04/") +EXPIRY = int(os.environ.get("URL_EXPIRY_SECONDS", "900")) +ENDPOINT = os.environ.get("S3_ENDPOINT_URL") or None +QUEUE_MAX = int(os.environ.get("QUEUE_MAX", "2000")) +``` + +If you moved any of those into `_run()` or into the handler, you'd be doing the lookup on every single invocation forever. Free for the rest of the function's life, vs paid every request. That's the difference. + +### Globals across warm invocations + +Anything assigned at module scope survives between handler calls on the same environment. The intended use is good: a boto3 client at module scope means TCP keep-alive, no re-handshake on every request, the SDK reuses the connection pool. + +The unintended use is the foot-gun. A list at module scope, append to it inside the handler, it grows forever. The same warm container can serve thousands of invocations in a row. A counter at module scope, increment in the handler — wrong number on every request, and inconsistent across environments because each warm container has its own copy. + +The rule: anything at module scope is shared across invocations on the same env, never across environments. If you need state shared across environments, externalize it. DynamoDB, Redis, S3. + +### /tmp is real but local + +Each environment has its own `/tmp`. Default 512 MB, configurable to 10 GB. It persists across warm invocations on that environment, so you can stash artifacts you'd rather not rebuild. But it is *not* shared between concurrent executions, and it's gone when the environment dies. + +This is exactly why our function does this: + +```python +manifest_path = f"/tmp/{uuid.uuid4()}.jsonl" +``` + +Two parallel invocations on different environments are fine, they each have their own /tmp. But two warm invocations back to back on the same environment, if we used a fixed filename, would collide. UUID per invocation, no collision possible. We also `os.unlink(manifest_path)` at the end of `_run()` to make sure /tmp doesn't fill across warm invocations. + +### Concurrency is horizontal + +If two events arrive while one is being processed, AWS spins up a second execution environment. Each environment processes one invocation at a time, single-threaded relative to your handler. There's no thread pool to tune. There's no shared memory between environments. The "concurrency" you see in CloudWatch is the count of environments running in parallel. + +### The reuse window + +Idle environments stick around for roughly five to fifteen minutes before being recycled. AWS won't promise a number. That's why a function that sees one request a minute almost never cold-starts, and a function that sees one request a day always does. + +--- + +## 3. Limits + +### The numbers worth memorizing + +There are about ten numbers an interviewer might ask you. Three of them you should never have to look up. + +Fifteen minutes — maximum function timeout. Default is three seconds, which is too short for almost anything that talks to S3, so set it explicitly. + +Ten gigabytes — maximum memory. Also the maximum size of `/tmp`. It's the same number for both, which suggests it's a microVM provisioning ceiling, not a Lambda product decision. 
+ +Six megabytes — maximum sync request and response. Six in, six out. Above that, the response is silently truncated and the caller gets a 413. We design around this in our function by returning a manifest URL instead of inlining all the presigned URLs. + +### Compute and storage, in detail + +Memory is configurable from 128 MB up to 10 240 MB. CPU scales linearly with memory. At 1769 MB you get a full vCPU. At higher tiers, multiple. So memory isn't just headroom — it's CPU. Often it's *cheaper* to bump memory because duration drops faster than cost rises. If your function is CPU-bound, doubling the memory might more than halve the duration. + +The init phase has a hard ten-second cap. You can blow through this with a heavy ML model load, custom JIT warmups, anything that does serious work at module level. + +`/tmp` defaults to 512 MB. Above that, you pay per invocation for the extra. It persists across warm invocations on the same env, vanishes on cold start. + +### Payloads and responses + +Sync invocation request: 6 MB. Sync invocation response: 6 MB. Async invocation event: 256 KB — that's for `Event` invocations and most event-source-mapped triggers like S3, EventBridge, SNS. Larger payloads, you store in S3 and send a pointer. SQS, SNS, EventBridge messages also cap at 256 KB each. + +Response streaming. There's a way around the 6 MB response limit if you use Function URLs or Lambda's response streaming mode. You flush chunks. The cap goes up to 20 MB soft, with a bandwidth ceiling. Not all clients support it. + +Environment variables: 4 KB total. Per function, all keys plus values combined. If you have a big config that won't fit, you go to Parameter Store or Secrets Manager. + +### Packaging limits + +Zip upload directly: 50 MB. Above that, you upload via S3 first. + +Zip unzipped — function plus all layers extracted: 250 MB. So `aioboto3` plus its dependencies is around 50 MB. We have headroom but not infinite. + +Container image: 10 GB per image. This is what you reach for when you'd otherwise blow the 250 MB zip ceiling. ML dependencies with native binaries, that kind of thing. + +Layers: max five per function. Order matters. Later layers overwrite earlier ones. They count toward the 250 MB unzipped cap. + +### Concurrency and scaling + +Account concurrent executions: default 1000 per region. It's a soft quota, you can request an increase via Service Quotas. The single most common throttling cause in production. + +Burst concurrency: 500 to 3000 immediate, depending on the region. That's how many fresh environments AWS will spin up right now at a traffic spike. Beyond the burst, scaling adds 500 environments per minute. So a spike from zero to 5000 concurrent requests takes several minutes to fully absorb. + +Reserved concurrency: from zero up to your account quota. It carves a slice of the account pool for a function. Setting it to zero effectively disables the function, which is sometimes useful as a circuit breaker. + +Provisioned concurrency: zero by default. Pre-warmed environments. Eliminates cold starts for the warmed slots, costs you for the idle capacity. + +### Time and rate limits at the edges + +API Gateway integration timeout: 29 seconds. Hard cap. Doesn't matter what your Lambda timeout says. If you need longer with API Gateway in front, you return a job ID and have the client poll. Function URLs allow up to 15 minutes. + +Async invocation event age: 6 hours. 
If retries don't succeed in that window, the event gets dropped — or sent to a DLQ or on-failure destination if you configured one. + +Async retry attempts: default is 2. So three attempts total, including the original. Configurable down to zero. + +SQS visibility timeout requirement: at least six times the function timeout. AWS recommendation. Otherwise messages reappear while still being processed and you do the work twice. + +The memorization hack: three numbers cover most interview questions. Fifteen minutes, ten gigabytes, six megabytes. Everything else is a footnote until you hit a specific design. + +--- + +## 4. Cold starts + +### What triggers one + +A cold start happens whenever Lambda must create a new execution environment. The first request after a deployment. A traffic spike beyond the number of warm environments. An idle environment that AWS has recycled — somewhere between five and fifteen minutes of inactivity, no promise. + +Deployments always cold-start the incoming version. You can't avoid the first one, only reduce how long it takes. + +### The cold path + +What actually happens during a cold start. AWS provisions a Firecracker microVM. Boots it, attaches the network, mounts the filesystem. Downloads and unpacks your code, or pulls the container image. Starts the language runtime. Runs your module-level code. Only then is your handler called. + +The timeline: + +1 — environment provisioning. MicroVM boot, network, filesystem. Not billed. AWS absorbs this. + +2 — init phase. Your module-level code: imports, client construction, config reads. Billed at full configured memory. Capped at 10 seconds. + +3 — handler phase. `handler(event, context)`. Billed per millisecond. + +CloudWatch shows this split. The REPORT line includes an "Init Duration" only on cold invocations. Warm invocations have no Init Duration line at all. + +### Real numbers + +Python 3.13 with minimal deps: about 150 ms median, 400 ms p99. + +Python 3.13 with our `aioboto3` and `aiofiles`: about 300 ms median, 700 ms p99. + +Node.js 22: 100 ms median, 300 ms p99. + +Java 21 without SnapStart: one to two seconds median, three to five at p99. + +Java 21 with SnapStart: about 200 ms median, 600 ms p99. + +Container image, any runtime: add 100 to 300 ms. The first pull after a deploy can be 1 to 3 seconds. + +### Mitigations + +Each one is interesting because it's its own *product*. + +Provisioned Concurrency — pre-warms N environments so they're always in the warm state. Eliminates cold starts for those slots. You pay for them 24/7 even when idle. Use for latency-sensitive, predictable-traffic paths. Schedule the changes via Application Auto Scaling for cost efficiency. + +ARM64. Graviton2 executes the init phase about 10% faster than x86 for CPU-bound init work. Combined with the 20% price reduction, ARM64 is the default choice unless a native wheel blocks you. + +Smaller packages. Lambda downloads and unpacks your zip on every cold start. Trimming unused transitive dependencies, stripping test and doc files, shaves real time. Every megabyte of extracted code costs a few milliseconds. + +Lazy imports. Move rarely-used or slow imports inside the handler, or behind a lazy-init guard. The most common win is heavy ML libraries you only need for inference. Import them on first call, cache the result in a module-level variable. + +SnapStart. Java only. Takes a snapshot of the initialized JVM state after init, restores from snapshot on cold starts. Collapses 1 to 5 seconds of JVM startup to about 200 ms. 
Not available for Python or Node. + +When cold starts don't matter: batch jobs, async event pipelines, scheduled tasks. Nobody is waiting on the p99. Only optimize cold starts when a human is waiting synchronously for the response. + +--- + +## 5. Concurrency + +### The fundamental model + +Lambda concurrency equals the number of execution environments processing requests at the same instant. Each environment handles exactly one invocation at a time. There is no thread pool, no event loop shared across invocations. If two requests arrive simultaneously, AWS spins up two separate environments. + +### The equation + +This is the cleanest equation in cloud computing. Memorize it. + +Concurrency, roughly, equals requests per second times average duration in seconds. + +A hundred RPS with a 200 ms average duration: 100 times 0.2 equals 20 concurrent environments. + +A hundred RPS with a 500 ms average: 50 environments. With a 2-second average: 200. Same traffic, ten times the footprint, because the function is slower. + +Latency optimization directly reduces your concurrency footprint. They're the same problem. + +### The account pool + +Every AWS account has a regional concurrency quota. Default 1000 concurrent executions per region, shared across all functions in that region. + +When the pool is full, new invocations get throttled. Synchronous calls get HTTP 429 TooManyRequestsException. Async calls get queued and retried. Raising the limit is a Service Quotas request — AWS typically grants up to 10 000 with a business justification. + +This is the single most common production surprise. One function spikes and starves all others in the same region. Reserved concurrency is the fix. + +### Three types of concurrency + +Unreserved. Draws from the shared regional pool on demand. Cost: invocation plus duration only. Use for most functions. + +Reserved. Carves a slice of the regional pool exclusively for one function. It acts as both a floor and a ceiling. No extra charge. Use for protecting critical paths from noisy neighbors, or for capping cost runaway. + +Provisioned. Pre-warms N environments. They stay initialized 24/7. Costs Provisioned Concurrency hours plus invocation cost on top. Use for latency-sensitive functions where cold starts are unacceptable. + +### Reserved concurrency edge cases + +Setting reserved concurrency to zero disables the function entirely. Useful as a circuit breaker. + +Reserved concurrency counts against the account pool even when idle. If you set 500 reserved on one function, only 500 remain for all other functions at the default 1000. + +Reserved concurrency does *not* pre-warm. You still cold-start. You just can't scale past the cap. + +### Burst scaling + +When traffic spikes from zero, Lambda can spin up environments quickly — but not infinitely fast. The burst limit is region-dependent, typically 500 to 3000 immediate. Beyond that, it adds 500 new environments per minute. A spike from 0 to 5000 concurrent requests takes several minutes to fully absorb. Provisioned Concurrency or pre-warming via a ping mechanism is the fix for sudden large spikes. + +The interview-answer template: "Concurrency equals RPS times duration. Default pool is 1000 per region. Reserved carves a slice and prevents both starvation and runaway. Provisioned pre-warms to eliminate cold starts but you pay for idle capacity." + +--- + +## 6. Triggers + +### Three invocation models + +Every trigger falls into one of three models. 
The model determines retry behavior, error handling, and whether the caller can see the response.
+
+Synchronous. The caller blocks for the response. Gets the result or the error directly. No retries — that's the caller's responsibility. Max event size: 6 MB request and response.
+
+Asynchronous. The caller gets a 202 immediately. Lambda queues and retries internally. Two retries, three attempts total, over up to 6 hours. Max event size: 256 KB.
+
+Poll-based, also called event source mapping or ESM. Lambda polls the source on your behalf and batches records. Keeps retrying until success or until the record expires. Event size depends on the source.
+
+### The trigger catalog
+
+API Gateway, REST or HTTP. Synchronous. 29-second integration timeout regardless of the Lambda timeout. HTTP API is cheaper and lower-latency than REST API. Either flavor transforms the request and response on the way through.
+
+Function URL. Synchronous. A direct HTTPS endpoint on the function. No API Gateway layer. Supports up to 15 minutes timeout and response streaming. Simpler, cheaper, fewer features.
+
+Application Load Balancer. Synchronous. Like API Gateway but routes at L7. Useful when Lambda is one target among EC2 or ECS targets. 29-second timeout.
+
+S3 event notification. Asynchronous. Fires on object create, delete, etc. At-least-once delivery. A large PUT creates exactly one event per object, but notifications can duplicate. Common pattern: S3 to SNS to SQS to Lambda for fan-out plus replay.
+
+SNS. Asynchronous. Fan-out: one message to multiple subscribers. At-least-once. The dead-letter queue lives on the subscription, not the topic.
+
+EventBridge. Asynchronous. An event bus with content-based routing rules. Also the managed scheduler — cron and rate expressions, timezone-aware since 2022. At-least-once.
+
+SQS. Poll-based. Lambda polls and batches up to 10 000 messages. Standard queues are at-least-once and unordered. FIFO queues are ordered per message group, exactly-once with dedup. The visibility timeout has to be at least 6 times the function timeout. Partial batch failure via `batchItemFailures`.
+
+Kinesis Data Streams. Poll-based. One concurrent Lambda invocation per stream shard, more only if you raise the parallelization factor. Records expire — 24 hours to a year. Lambda retries until success or expiry. Use bisect-on-error and `batchItemFailures` to avoid one bad record blocking an entire shard.
+
+DynamoDB Streams. Poll-based. Captures item-level changes. Ordered per partition key. 24-hour retention. Same retry behavior as Kinesis. Use it for change-data-capture patterns.
+
+Step Functions. Synchronous, when the state machine has a Task state pointing at the function. Step Functions calls it synchronously and waits for the result. Retries and timeouts are defined in the state machine, not in Lambda.
+
+Cognito, SES, IoT, others. Service-specific. Cognito triggers like pre-signup or pre-token are sync and block the auth flow.
+
+### SQS vs SNS plus SQS
+
+Use plain SQS to Lambda when you have one consumer and want to buffer, batch, and retry. Use SNS to SQS to Lambda when you need fan-out — multiple independent consumers each get a copy — or when the producer is an AWS service that speaks SNS natively. The SNS layer decouples producers from the queue topology.
+
+---
+
+## 7. IAM and permissions
+
+### Two independent permission layers
+
+Lambda has two separate permission surfaces. Each must be correct independently. Confusing them is the most common "it works locally but not in AWS" failure.
+
+Execution role: what can this Lambda function *do* once it's running?
Call S3, write to DynamoDB, publish to SNS. You attach this at function creation. + +Resource policy: who is *allowed to invoke* this Lambda function? API Gateway, another AWS account, EventBridge. AWS adds this automatically for most triggers when you wire them up through the console. You add it manually for cross-account grants. + +### Execution role + +The execution role is an IAM role that Lambda assumes when running your function. Every Lambda must have one. The attached policies determine what AWS API calls the function can make. The minimum, for any function, is permission to write its own logs: + +``` +logs:CreateLogGroup +logs:CreateLogStream +logs:PutLogEvents +``` + +For our function, which reads and writes S3, you need at minimum: + +``` +s3:GetObject +s3:PutObject +s3:ListBucket # needed for the paginator; often forgotten +kms:Decrypt # if the bucket uses a CMK +``` + +The `AWSLambdaBasicExecutionRole` managed policy covers logs only. It is intentionally minimal. `AWSLambdaVPCAccessExecutionRole` adds the ENI permissions needed when the function is in a VPC. + +### Resource policy + +The resource policy is attached to the Lambda function itself, not to an IAM identity. When you add an S3 event notification or an API Gateway integration through the console, AWS automatically adds a resource policy entry allowing that service to invoke the function. For cross-account invocations, you add this manually: + +```bash +aws lambda add-permission \ + --function-name my-function \ + --principal 123456789012 \ + --action lambda:InvokeFunction \ + --statement-id cross-account-invoke +``` + +### The four common mistakes + +1: missing `s3:ListBucket` on the bucket resource. `ListObjectsV2`, which our paginator uses, requires `ListBucket` on the bucket ARN — not on the object ARN. Forgetting this causes AccessDenied on the paginator even when GetObject works fine on individual files. This is the most common one. + +2: wrong resource ARN scope. `s3:GetObject` belongs on `arn:aws:s3:::bucket-name/*`. `s3:ListBucket` belongs on `arn:aws:s3:::bucket-name`. Without the wildcard. Swapping the two is a frequent typo. + +3: KMS not in the execution role. If the bucket's objects are encrypted with a customer-managed key, you need both `s3:GetObject` and `kms:Decrypt`. The KMS key policy must also allow the role. Two separate policy documents, two separate denial points. + +4: no resource policy for a manually wired trigger. If you wire EventBridge through the CLI or the SDK and skip the console, the trigger silently fails because there's no resource policy entry granting EventBridge `lambda:InvokeFunction`. + +### Diagnosing permission errors + +CloudTrail is the ground truth. Filter by `errorCode: "AccessDenied"` and `userIdentity.arn` matching the execution role ARN. The event tells you exactly which action on which resource was denied. CloudWatch will show the error in the Lambda log if you let the exception propagate, but CloudTrail shows it even when the calling library swallows the error. + +--- + +## 8. Packaging + +### Three deployment formats + +Zip, direct upload. Up to 50 MB upload, 250 MB unzipped. Best for most Python and Node functions with pure-Python or pre-built wheels. Must match Lambda's architecture. No custom runtime. + +Zip via S3. Same 250 MB unzipped limit, but the zip itself can be larger because you're uploading to S3 first. The S3 bucket has to be in the same region as the function. + +Layers. 250 MB total — that's the function plus all layers combined. 
Best for shared dependencies across functions, like a company-wide logging layer. Maximum five layers per function. Later layers overwrite earlier ones in the merge. + +Container image. Up to 10 GB per image. Best for ML models, native binary deps, custom runtimes. Slower first cold start because of the image pull. Larger attack surface. + +### Layers in practice + +A layer is a zip file that Lambda extracts into `/opt` before running your function. Your code in `/var/task` can import from `/opt/python` for Python without any path manipulation. + +Use cases. Shared internal libraries deployed independently of business logic. Large dependencies that change rarely — numpy, pandas — cached in a layer so deployments of the business logic stay fast. AWS-provided layers like the Lambda Insights extension or the X-Ray SDK. + +Layers count toward the 250 MB unzipped limit. If you have 5 layers at 40 MB each, plus a 50 MB function zip, you're at 250 MB. No room left. + +### Container images + +Container images must be based on AWS-provided base images, like `public.ecr.aws/lambda/python:3.13`, or implement the Lambda Runtime Interface. They must be stored in ECR — Elastic Container Registry — in the same region as the function. + +The Lambda service caches images on the underlying host after the first pull, so subsequent cold starts on the same host are fast. The very first invocation after a new image is deployed can be slow for large images. + +Container images bypass the 250 MB unzipped limit. That's why they're the standard for Python ML workloads that bundle PyTorch or TensorFlow. + +### ARM64 vs x86_64 + +Graviton2-based ARM64 is about 20% cheaper per GB-second than x86_64. Typically faster at compute-heavy work, too. + +The decision tree. Check all your dependencies for ARM64 wheels. Run `pip download` with `--platform manylinux2014_aarch64` and `--only-binary :all:`. If any fail, you either build from source (which needs a Dockerfile) or stay on x86. For pure-Python deps and most modern packages, ARM64 works out of the box. Native extensions like cryptography, numpy, psycopg2 — they've had ARM64 wheels on PyPI since around 2022. Check the exact version you need. + +### The common foot-gun + +Lambda runs on Amazon Linux 2023. `pip install` on macOS produces wheels compiled for macOS, which will segfault or import-error on Lambda. + +The fix is to build inside the Lambda runtime image: + +```bash +docker run --rm \ + -v "$PWD":/var/task \ + public.ecr.aws/lambda/python:3.13 \ + pip install -r requirements.txt -t python/ + +zip -r layer.zip python/ +``` + +Architecture matters here too. Use the `:3.13-arm64` tag when building for ARM64. + +For our project specifically: `aioboto3` and `aiofiles` are pure-Python and have no native extensions, so they build cleanly on any architecture. The Makefile creates a local `.venv` for development. A real CI pipeline would build the deployment zip inside the Lambda image. + +--- + +## 9. VPC and networking + +### Default: no VPC + +By default, Lambda runs in an AWS-managed network with internet access. It can reach S3, DynamoDB, SQS, and other AWS services via their public endpoints. + +Do not put Lambda in a VPC unless you have a specific reason. Most applications don't need it. This is a strong default. The mistakes that come from VPC placement are expensive in dollars and in latency. + +### When you actually need VPC + +Connecting to RDS or Aurora, which live in private subnets. ElastiCache — Redis or Memcached — which is VPC-only by design. 
Private REST APIs or internal services on private subnets. Compliance requirements that mandate network isolation.
+
+S3, DynamoDB, SQS, SNS, and most AWS managed services do *not* require VPC placement. They're public services with public endpoints.
+
+### ENI attachment and cold start
+
+When Lambda is VPC-attached, each execution environment gets an Elastic Network Interface — an ENI — in your VPC. Pre-2019, ENIs were allocated per cold start, adding 10 to 30 seconds to init. AWS fixed this in 2019 with Hyperplane ENIs that are shared across environments. Today the VPC cold start penalty is about 100 to 500 ms on the first cold start of a new deployment, then negligible. It's no longer the dealbreaker it used to be, but it's not zero.
+
+### Subnet and AZ placement
+
+Specify at least two subnets in different AZs for availability. Lambda will distribute environments across AZs. If a subnet runs out of available ENI slots — IP exhaustion — Lambda scaling fails. Size subnets accordingly. A /24 with 254 IPs is often too small for a high-concurrency function.
+
+### The NAT money pit
+
+VPC Lambda can't reach the internet by default. If your function needs to call an external API, or reach an AWS service for which there's no VPC endpoint, you need a NAT gateway in a public subnet.
+
+NAT gateways cost $0.045 per hour — about $32 a month — just to exist. Per AZ. Plus $0.045 per gigabyte of data processed.
+
+A function that pushes 100 GB a month through NAT costs $4.50 in data alone, on top of the always-on hourly charge. Two AZs for HA: about $64 a month in base cost before a single byte of traffic.
+
+This is frequently the largest unexpected cost in VPC Lambda setups.
+
+### VPC endpoints — the free alternative
+
+For AWS services, VPC endpoints bypass NAT and the public internet entirely.
+
+Two types. Gateway endpoints — S3 and DynamoDB only. Free. They're route table entries. No data charge. Interface endpoints, also called PrivateLink — any AWS service. $0.01 per AZ per hour, plus $0.01 per gigabyte. Expensive at high throughput, but often cheaper than NAT for AWS-service-heavy workloads.
+
+For a VPC Lambda that only talks to S3 and DynamoDB: create gateway endpoints for both. No NAT needed. Near-zero networking cost.
+
+### Security groups
+
+VPC Lambda gets a security group. Outbound rules control where it can connect. The security group of the RDS or ElastiCache instance must allow inbound from the Lambda's security group.
+
+A common pattern: create a dedicated Lambda SG and reference it in the database's SG inbound rules. That avoids IP-range rules that break when Lambda ENIs change.
+
+---
+
+## 10. Observability
+
+### CloudWatch Logs — what you get for free
+
+Every Lambda function automatically writes to a CloudWatch Log Group named `/aws/lambda/<function-name>`. Each execution environment gets its own Log Stream.
+
+Lambda brackets every invocation with its own log lines, automatically:
+
+```
+START RequestId: abc-123 Version: $LATEST
+END RequestId: abc-123
+REPORT RequestId: abc-123 Duration: 312.45 ms Billed Duration: 313 ms
+    Memory Size: 256 MB Max Memory Used: 89 MB
+    Init Duration: 423.12 ms   # only on cold starts
+```
+
+The REPORT line is your free performance telemetry. Init Duration appears only on cold invocations. Max Memory Used helps you right-size the memory configuration.
+
+Retention defaults to "Never Expire." Set it explicitly — 7, 14, or 30 days covers most needs. Every megabyte of retained logs costs money.
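+For example, with boto3 — `my-fn` is a placeholder function name and 14 days an arbitrary choice:
+
+```python
+import boto3
+
+logs = boto3.client("logs")
+# cap retention on the function's log group — 14 days here, pick your own
+logs.put_retention_policy(
+    logGroupName="/aws/lambda/my-fn",
+    retentionInDays=14,
+)
+```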
+### Structured logging
+
+Emit JSON instead of plain strings. CloudWatch Logs Insights can filter and aggregate JSON fields efficiently. Plain strings require regex and are slow.
+
+```python
+import json
+import logging
+
+logger = logging.getLogger()
+logger.setLevel(logging.INFO)
+
+def handler(event, context):
+    logger.info(json.dumps({
+        "event": "pdf_scan_start",
+        "bucket": BUCKET,
+        "prefix": PREFIX,
+        "request_id": context.aws_request_id,
+    }))
+```
+
+With this, Logs Insights can run something like `filter event = "pdf_scan_start" | stats count() by bin(5m)` in seconds.
+
+### X-Ray tracing
+
+X-Ray gives you request traces across services. How long the Lambda itself ran, versus how long the S3 calls took.
+
+Three things all have to be true for X-Ray to work.
+
+1: tracing enabled on the function. Console toggle, or `TracingConfig: Active` in SAM or CDK.
+
+2: the X-Ray SDK instrumented in your code. `from aws_xray_sdk.core import patch_all; patch_all()` wraps boto3 calls automatically.
+
+3: IAM permission. The execution role needs `xray:PutTraceSegments` and `xray:PutTelemetryRecords`.
+
+Without all three, traces are either absent or incomplete. People flip one and conclude X-Ray is broken.
+
+### Lambda Insights
+
+Lambda Insights is a CloudWatch feature, not a separate service. It surfaces system-level metrics: CPU usage, memory utilization, network I/O, disk I/O — things the REPORT line doesn't include.
+
+To enable it: add the Lambda Insights extension layer (the ARN follows the pattern `arn:aws:lambda:<region>:580247275435:layer:LambdaInsightsExtension:38`), and add `cloudwatch:PutMetricData` to the execution role.
+
+Useful when you suspect memory or CPU contention but the REPORT line's "Max Memory Used" isn't granular enough.
+
+### EMF — Embedded Metrics Format
+
+EMF lets you emit custom CloudWatch metrics by writing structured JSON to stdout. No `PutMetricData` API call needed. CloudWatch extracts the metric from the log line asynchronously.
+
+This is far more efficient than calling CloudWatch from inside the handler. A `PutMetricData` call adds latency and cost per invocation. EMF is essentially free.
+
+```python
+import json
+import time
+
+print(json.dumps({
+    "_aws": {
+        "Timestamp": int(time.time() * 1000),
+        "CloudWatchMetrics": [{
+            "Namespace": "MyApp",
+            "Dimensions": [["Function"]],
+            "Metrics": [{"Name": "PDFsProcessed", "Unit": "Count"}]
+        }]
+    },
+    "PDFsProcessed": count,  # the count returned by _run()
+    "Function": "pdf-scanner",
+}))
+```
+
+### Prometheus and Grafana, briefly
+
+Prometheus uses a pull model. It scrapes HTTP endpoints. Lambda functions are ephemeral and have no persistent HTTP endpoint, so Prometheus can't scrape them directly. Three approaches:
+
+EMF to CloudWatch to Grafana. Easiest. Grafana queries CloudWatch as a data source. Zero extra infrastructure.
+
+Amazon Managed Prometheus with `remote_write`. Lambda pushes metrics to AMP via the Prometheus remote-write API. Grafana, or Amazon Managed Grafana, reads from AMP. Requires the `prometheus_client` library and SigV4 signing on the request.
+
+A push gateway. Lambda pushes to a persistent Pushgateway; Prometheus scrapes the gateway. More infrastructure to manage, plus a stale-metric risk if the gateway isn't flushed between invocations.
+
+For Lambda-centric dashboards, the CloudWatch-to-Grafana path is usually the simplest to operate.
+
+---
+
+## 11. Async and errors
+
+### Sync vs async invocation
+
+Synchronous, called RequestResponse. The caller blocks. Waits for the result. The response is visible to the caller. No retries — that's the caller's responsibility. Max event size: 6 MB.
+
+Asynchronous, called Event. The caller gets a 202 immediately. The response is not visible to the caller. Lambda retries automatically — twice, three attempts total. Backoff: about 1 minute, then about 2 minutes. Event age limit: 6 hours. Max event size: 256 KB.
+
+### The async retry flow
+
+When Lambda invokes asynchronously and the function throws an unhandled exception, or gets throttled, Lambda retries. Twice. Exponential backoff starting at about a minute.
+
+If all three attempts fail, or if the event ages past 6 hours, Lambda sends the event to the configured failure destination or DLQ. If neither is configured, the event is silently dropped.
+
+### DLQ vs Destinations
+
+These are two different mechanisms that overlap in purpose but have different capabilities.
+
+Dead-Letter Queue, introduced in 2016. Triggers on failure only. The payload is the original event only. Targets: SQS or SNS.
+
+Event Destinations, introduced in 2019. Triggers on either success or failure, with separate configurations for each. The payload includes the original event plus the result or error plus metadata. Targets: SQS, SNS, Lambda, EventBridge.
+
+Use Destinations for new code. DLQ is still useful when the downstream consumer must be SQS and you don't need success notifications.
+
+### Idempotency
+
+Because async invocations retry, and most event sources are at-least-once, your handler will occasionally execute more than once for the same logical event. Design handlers to be idempotent: the same input produces the same outcome regardless of how many times it runs.
+
+The standard pattern is to use a unique key from the event — S3 ETag plus key, SQS MessageId, EventBridge `detail.id` — as a deduplication key. On first execution, write the key plus the result to DynamoDB with a TTL. On retry, check DynamoDB first. If already processed, return the cached result without re-running.
+
+```python
+import time
+
+import boto3
+
+table = boto3.resource("dynamodb").Table("dedup")  # TTL enabled on "ttl"
+
+def handler(event, context):
+    dedup_key = event["Records"][0]["messageId"]
+    existing = table.get_item(Key={"id": dedup_key})
+    if existing.get("Item"):
+        return existing["Item"]["result"]
+
+    result = do_the_work(event)
+    table.put_item(Item={
+        "id": dedup_key,
+        "result": result,
+        "ttl": int(time.time()) + 86400,  # expire the dedup record after a day
+    })
+    return result
+```
+
+Powertools for AWS Lambda (Python) has a built-in `@idempotent` decorator that implements this pattern with DynamoDB.
+
+For our function: it's idempotent in spirit, because re-running it produces a fresh manifest with new presigned URLs. The previous manifests stay in S3 unless we clean them up. If the requirement were "exactly one manifest per logical job," we'd add a dedup table.
+
+### Partial batch failures
+
+When Lambda processes a batch of records and one record fails, the default behavior differs by source.
+
+SQS by default: if the handler raises an exception, the entire batch is retried. One bad message blocks all others and can cause infinite retry loops.
+
+With `ReportBatchItemFailures` enabled, you return a `batchItemFailures` list containing only the failed message IDs. Lambda re-queues only those. Successful messages are deleted.
+
+```python
+def handler(event, context):
+    failures = []
+    for record in event["Records"]:
+        try:
+            process(record)
+        except Exception:
+            failures.append({"itemIdentifier": record["messageId"]})
+    return {"batchItemFailures": failures}
+```
+
+Enable `ReportBatchItemFailures` in the ESM configuration. Always implement partial-batch failure reporting for SQS and Kinesis handlers. A single poison-pill record can otherwise block an entire shard or queue indefinitely.
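+Turning it on is a property of the event source mapping, not the function. A sketch with boto3 — the queue ARN and function name are placeholders:
+
+```python
+import boto3
+
+lambda_client = boto3.client("lambda")
+lambda_client.create_event_source_mapping(
+    EventSourceArn="arn:aws:sqs:us-east-1:123456789012:jobs",
+    FunctionName="pdf-scanner",
+    BatchSize=10,
+    # without this, a returned batchItemFailures list is ignored
+    FunctionResponseTypes=["ReportBatchItemFailures"],
+)
+```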
+ +The intersection that bites: with partial failures, successful records in the batch are deleted from SQS. But if your function crashes before returning the failure list, the entire batch including the successes gets retried. Idempotency guards must cover every record, not just the ones in `batchItemFailures`. + +--- + +## 12. Step Functions + +### When Lambda alone isn't enough + +A single Lambda function works well for one discrete task. Problems start when you need to chain multiple tasks, retry selectively, wait on human approval, or fan out across thousands of items. + +Doing this with Lambda alone means writing orchestration logic *inside* your functions. Tracking state, implementing retry delays, deciding what "done" means. Step Functions externalizes that orchestration into a state machine where every state transition is durable, auditable, and resumable. + +Reach for Step Functions when you need: sequential steps with state passing, conditional branching, parallel fan-out with join, wait states longer than 15 minutes, retry-with-exponential-backoff built in. + +### Standard vs Express + +Standard. Max duration: 1 year. Execution semantics: exactly-once per state. Full execution history in the AWS console. Pricing: $0.025 per 1000 state transitions. Use for: long-running business workflows, human approvals, compliance audit trails. + +Express. Max duration: 5 minutes. Execution semantics: at-least-once. CloudWatch Logs only — no per-execution audit trail. Pricing: $0.00001 per state transition plus duration. Use for: high-volume short-duration event processing — IoT, streaming. + +For most application orchestration, Standard is the right choice. The exactly-once semantic matters when steps have side effects — charging a card, sending an email. Express is for high-throughput pipelines where at-least-once is acceptable and cost per transition matters. + +### The Map state — fan-out + +The Map state runs the same workflow branch for every item in an array, in parallel. This is the core fan-out primitive. + +For our project, a Step Functions version could fan out across S3 prefixes — run one Lambda per prefix, collect results in a fan-in step: + +```json +{ + "Type": "Map", + "ItemsPath": "$.prefixes", + "MaxConcurrency": 10, + "Iterator": { + "StartAt": "ScanPrefix", + "States": { + "ScanPrefix": { + "Type": "Task", + "Resource": "arn:aws:lambda:...:function:pdf-scanner", + "End": true + } + } + } +} +``` + +`MaxConcurrency: 0` means unlimited — bounded only by the Lambda concurrency pool. Set an explicit cap to avoid saturating the account quota. + +### Other useful states + +Wait — pause for a duration or until a timestamp. The only way to implement delays longer than 15 minutes without polling. + +Choice — conditional branching on input values. Replaces `if/else` logic that would otherwise live inside a Lambda. + +Parallel — run multiple independent branches simultaneously and join their results. + +Task with SDK integrations — Step Functions can call DynamoDB, SQS, ECS, Glue, etc. directly without a Lambda wrapper. Reduces cost and latency for simple operations. + +### Step Functions vs Airflow + +DAG definition. Step Functions: JSON or YAML state machine, called Amazon States Language. Airflow: Python code, DAG files. + +Scheduling. Step Functions: event-driven, on-demand, cron via EventBridge. Airflow: built-in rich scheduler, cron, data-interval-aware. + +Backfill. Step Functions: manual or custom. Airflow: first-class, built-in. + +Operators. 
+
+Infrastructure. Step Functions: serverless, zero infra. Airflow: managed Airflow (MWAA) starts at about $400 a month.
+
+Debugging. Step Functions: console execution graph, CloudWatch for logs. Airflow: rich UI with task logs, Gantt charts, retry visualization.
+
+Step Functions is the right choice when your workflow is AWS-native, event-driven, and you want zero infrastructure. Airflow is the right choice when you need complex scheduling, data-interval backfill, cross-cloud operators, or a data-engineering team that already knows Python DAGs.
+
+---
+
+## 13. Cost
+
+### The pricing formula
+
+Two components, both with permanent free tiers.
+
+Requests. $0.20 per million on x86, same on ARM. Free tier: 1 million per month, forever.
+
+Duration. $0.0000166667 per GB-second on x86. $0.0000133334 per GB-second on ARM — about 20% cheaper. Free tier: 400 000 GB-seconds per month, forever.
+
+GB-seconds equals memory configured in gigabytes times duration in seconds. A 512 MB function running for 300 ms is 0.5 times 0.3, which is 0.15 GB-seconds. At a million invocations, that's 150 000 GB-seconds. Well inside the free tier.
+
+Duration is billed in 1 ms increments. The old 100 ms minimum has been gone since December 2020.
+
+### Memory vs cost — more can be cheaper
+
+CPU scales linearly with memory. A function configured at 1769 MB gets a full vCPU. Below that, it's a fraction. Doubling memory often more than halves duration for CPU-bound work. The total GB-seconds cost stays the same or decreases. Latency drops.
+
+AWS Lambda Power Tuning is a Step Functions state machine that automatically benchmarks your function at multiple memory sizes and produces a cost/performance curve. Run it before guessing at the right memory setting. The optimal point is almost never the default 128 MB.
+
+### ARM64 saves about 20%
+
+ARM64 duration pricing is 20% cheaper than x86. Same request price.
+
+If your function is compute-bound — not I/O-bound sleeping on S3 calls — ARM64 also tends to run faster, compounding the saving.
+
+For I/O-bound functions like ours, which spends most of its time waiting on S3, the duration difference is smaller. The 20% price reduction still applies.
+
+### Provisioned Concurrency billing
+
+Provisioned Concurrency is billed separately. $0.0000041667 per GB-second of provisioned time on x86, even when idle. (Invocations against those slots are billed on top, at a reduced duration rate of $0.0000097222 per GB-second.)
+
+Math: 10 environments at 512 MB provisioned for 24 hours. 10 times 0.5 GB times 86 400 seconds is 432 000 GB-seconds per day. About $1.80 a day. About $54 a month. Just for the warm slots. Before you count any actual invocation cost on top.
+
+Provisioned Concurrency is for latency, not cost. It always increases your bill.
+
+### The hidden costs — the real bill
+
+NAT Gateway. $0.045 per hour per AZ (about $32 a month) plus $0.045 per gigabyte. Often the largest line item for VPC Lambda.
+
+API Gateway. REST API: $3.50 per million calls. HTTP API: $1 per million. Can dwarf Lambda cost at high RPS.
+
+CloudWatch Logs. $0.50 per gigabyte for ingestion, $0.03 per gigabyte per month for storage. Verbose Lambda logs accumulate fast. Set retention.
+
+Lambda Insights. Additional CloudWatch Logs plus custom metrics charges.
+
+X-Ray. $5 per million traces, after the free 100 000 per month.
+
+Data transfer. Traffic leaving a region or going through a NAT has per-gigabyte charges.
+
+S3 API calls. LIST and GET requests are billed per 1000.
A function that does 10 000 LIST calls per invocation, at a million invocations, is 10 billion API calls. Real money. + +For our function, at 1000 invocations a day with 500 ms average duration and 256 MB memory: about $0.002 per day. Essentially free. Lambda's economics only require attention above about 100 000 invocations a day with non-trivial memory or duration. + +--- + +## 14. Local dev + +### The local dev problem + +Lambda has no local runtime by default. Without tooling, your only loop is: zip, upload, invoke, read CloudWatch logs, repeat. Minutes per cycle. + +The tools below collapse that to seconds. Different trade-offs between fidelity, setup cost, and scope. + +### SAM CLI + +What it is: AWS's official local Lambda emulator. Wraps Docker to run your function inside a container that matches the Lambda runtime exactly. Also emulates API Gateway. + +```bash +sam local invoke -e event.json +sam local start-api +sam local invoke --debug-port 5858 +``` + +Fidelity is high. Same Amazon Linux image, same runtime, same filesystem layout. Catches architecture issues, like an x86 wheel running on ARM64, that a plain venv would miss. + +Downsides. Requires Docker. Slow to start because it pulls the image on first run. No MinIO or SQS or DynamoDB emulation built in. You wire those up separately. + +### Lambda Runtime Interface Emulator — RIE + +A lightweight binary embedded in all AWS-provided Lambda base images. When you run the image locally, RIE exposes a local HTTP endpoint that accepts invocations in the Lambda API format. You don't need SAM CLI — just Docker: + +```bash +docker build -t my-fn . +docker run -p 9000:8080 my-fn +curl -XPOST http://localhost:9000/2015-03-31/functions/function/invocations \ + -d '{"key": "value"}' +``` + +Use RIE when you're building container-image Lambdas and want to test them without the SAM overhead. + +### LocalStack + +A full AWS mock that emulates Lambda, S3, SQS, DynamoDB, API Gateway, and dozens more services in a single container. Community edition is free. Pro is $35 a month for more services and persistent state. + +Use it when you need integration tests across multiple AWS services. An EventBridge rule that triggers a Lambda that writes to DynamoDB, all on your laptop. Without LocalStack you'd need a real AWS account for these tests. + +Avoid it when you only need one service — just S3, use MinIO; just Lambda, use SAM or RIE. LocalStack's Lambda emulation has occasional edge-case differences from the real runtime. + +```bash +docker run --rm -p 4566:4566 localstack/localstack +AWS_DEFAULT_REGION=us-east-1 \ + AWS_ACCESS_KEY_ID=test \ + AWS_SECRET_ACCESS_KEY=test \ + aws --endpoint-url=http://localhost:4566 s3 ls +``` + +### MinIO — what we use + +MinIO is an S3-compatible object store that runs locally in Docker. It implements the S3 API precisely enough that boto3 or aioboto3 needs only an `endpoint_url` override to work against it. + +It is *not* a Lambda emulator. It replaces S3 only. + +```bash +make up # MinIO on :9000 (API) and :9001 (console) +SOURCE_DIR=~/pdfs make seed +make invoke +``` + +This is the lightest possible local setup. No Docker-in-Docker. No SAM overhead. Minimal latency. The function handler runs in your local Python process against a real S3-compatible store. + +Differences from real Lambda — no execution environment lifecycle, no /tmp isolation between runs — are acceptable for the development loop, but not for environment-fidelity tests. For those you'd reach for SAM. 
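+
+The whole trick is the endpoint override plus dummy credentials, which is exactly what the repo's `invoke.py` does: set the environment before importing the handler, then call it in-process.
+
+```python
+import json
+import os
+
+# Point the AWS SDK at local MinIO (credentials match docker-compose.yml).
+os.environ.setdefault("S3_ENDPOINT_URL", "http://localhost:9000")
+os.environ.setdefault("AWS_ACCESS_KEY_ID", "minioadmin")
+os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "minioadmin")
+os.environ.setdefault("AWS_REGION", "us-east-1")
+
+from lambda_function import handler  # noqa: E402  (import after env setup)
+
+print(json.dumps(handler({}, None), indent=2))
+```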
+
+### The decision matrix
+
+Fast iteration on handler logic: MinIO plus invoke.py. Our setup.
+
+Emulate Lambda runtime plus API Gateway locally: SAM CLI.
+
+Test a container-image Lambda: Lambda RIE via Docker.
+
+Integration test across multiple AWS services: LocalStack.
+
+Full-fidelity staging before prod: real AWS account, separate environment.
+
+---
+
+## 15. CI/CD
+
+### Versions and aliases
+
+Versions are immutable snapshots of a function's code and configuration. When you publish a version with `aws lambda publish-version`, AWS creates an immutable ARN like `arn:...:function:my-fn:7`. `$LATEST` is the only mutable version — it always reflects the most recent code upload.
+
+Aliases are named pointers to a version. `prod` might point to version 7. `staging` might point to version 8.
+
+Event source mappings, API Gateway integrations, Step Functions tasks — they should target aliases, not version ARNs. This decouples deployment (publishing a new version) from promotion (updating the alias).
+
+### Traffic shifting — blue/green
+
+An alias can split traffic across two versions with weighted routing.
+
+```bash
+aws lambda update-alias \
+  --function-name my-fn \
+  --name prod \
+  --function-version 8 \
+  --routing-config 'AdditionalVersionWeights={"7"=0.9}'
+# 10% of prod traffic to v8, 90% still to v7
+```
+
+Start at 10% canary. Watch error rates in CloudWatch. Shift to 50%. Then 100%.
+
+Rollback is instant — point the alias back to the stable version. No instance drain, no connection draining. Lambda is stateless. Cutover is atomic.
+
+### CodeDeploy integration
+
+SAM and CDK can wire up CodeDeploy for automatic traffic shifting with automatic rollback on CloudWatch alarms. You declare the deployment preference in the template:
+
+```yaml
+DeploymentPreference:
+  Type: Canary10Percent5Minutes
+  Alarms:
+    - !Ref ErrorRateAlarm
+```
+
+CodeDeploy manages the alias weight changes and triggers the rollback if the alarm fires. Fully automated blue/green without manual traffic management.
+
+### Deployment tooling — the progression
+
+AWS CLI or SDK. Good for one-off deployments, scripting, deep control. Verbose. No state management. Drift-prone at scale.
+
+SAM, the CloudFormation extension. Good for Lambda-first projects. Built-in local testing. CodeDeploy integration. CloudFormation speed and YAML verbosity. AWS-only.
+
+CDK. Good for complex infra in TypeScript or Python. Reusable constructs. Type safety. Still compiles to CloudFormation. Learning curve. Bootstrapping required.
+
+Terraform with the AWS provider. Good for multi-cloud orgs, large existing Terraform estates, strong community modules. No built-in Lambda local testing. Plan-and-apply cycle slower than SAM deploy.
+
+Serverless Framework. Multi-cloud serverless, plugin ecosystem. V4 moved to paid licensing for larger teams. Community plugin quality varies.
+
+### A CI pipeline skeleton
+
+```yaml
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Build zip
+        run: |
+          # Override the image entrypoint (the runtime bootstrap) to run pip.
+          docker run --rm -v "$PWD":/var/task -w /var/task \
+            --entrypoint pip \
+            public.ecr.aws/lambda/python:3.13 \
+            install -r requirements.txt -t package/
+          cd package && zip -r ../function.zip . && cd ..
+          zip function.zip lambda_function.py
+      - name: Deploy
+        run: |
+          aws lambda update-function-code \
+            --function-name my-fn --zip-file fileb://function.zip
+          aws lambda wait function-updated --function-name my-fn
+          VERSION=$(aws lambda publish-version \
+            --function-name my-fn --query Version --output text)
+          aws lambda update-alias --function-name my-fn \
+            --name prod --function-version "$VERSION"
+```
+
+The `wait function-updated` call is important. `update-function-code` is asynchronous; `publish-version` has to wait for it to complete. Capturing the version number from `publish-version` is what lets the final step point the alias at the build it just published.
+
+---
+
+## 16. Pitfalls — the must-knows
+
+### Execution model
+
+1. Module-level state leaks across invocations. A list you append to in the handler grows forever on warm calls. A counter you increment is wrong by the second request. If it's mutable and lives at module scope, treat it as either a deliberate cache or a bug.
+
+2. Handler globals are shared by every invocation on that env, but not across envs. "I cached the result" works locally. In production, half your traffic gets the cached value, the other half doesn't, depending on which warm container they hit. Externalize, or accept the variance.
+
+3. /tmp is per-environment, not per-invocation. If you write `/tmp/output.json` with a fixed name, the next warm invocation finds yesterday's file. Always use a per-invocation suffix — UUID, request ID. Like our function does.
+
+4. Init phase has a hard 10-second cap. Importing TensorFlow, hydrating a 500 MB model, doing a network call at module scope — you can blow this budget on cold start. Defer expensive work until the first handler call (lazy init), or pay for provisioned concurrency so init runs ahead of traffic.
+
+5. `asyncio.run` in a sync handler creates a fresh event loop per invocation. Acceptable, but it means async clients can't be shared across invocations the way sync boto3 clients can. Profile before assuming async is faster. (More on this in the project walkthrough.)
+
+### Payload and size limits
+
+6. The 6 MB sync response cap is silent. Returning a JSON list of 50 000 items "works" in the function but the API Gateway caller gets a 413. The fix in our function — returning a presigned URL to a manifest file rather than the full list — is the standard pattern.
+
+7. API Gateway caps integration time at 29 seconds. Doesn't matter if your Lambda timeout is 15 minutes. For longer work, return a job ID and poll, or use Function URLs (15 min) with response streaming.
+
+8. Environment variables max 4 KB total. Big secrets — RSA keys, JSON config blobs — blow this. Parameter Store or Secrets Manager, read on init.
+
+### Concurrency and throttling
+
+9. Default account concurrency is 1000 per region. Most teams hit this before they realize. It sets a hard ceiling on RPS — at 100 ms latency, that's 10 000 RPS account-wide. At 1 second, 1000 RPS.
+
+10. Reserved concurrency at zero disables the function. Looks weird. Used as a circuit breaker.
+
+11. Provisioned concurrency double-bills. You pay for the warm slots *and* for invocations against them. Worth it for latency-sensitive paths. Wasteful for batch.
+
+12. Scale-up speed is finite. A traffic spike from 0 to 5000 RPS can still throttle while Lambda catches up: since late 2023, each function scales by up to 1000 additional concurrent executions every 10 seconds (the old model was +500 environments per minute account-wide). Provisioned concurrency or pre-warming is the fix.
+
+### Triggers, retries, idempotency
+
+13. Async invocation retries 2 times by default. Total 3 attempts. If your handler isn't idempotent, you can charge a card three times.
+
+14. S3, SNS, EventBridge invoke async — at-least-once. Plan for duplicates. SQS standard is also at-least-once.
+SQS FIFO deduplicates within a five-minute window; Kinesis is at-least-once, in order per shard. Each has its own quirks.
+
+15. SQS visibility timeout must be at least 6 times the function timeout. Otherwise the message comes back while you're still processing it, and you do the work twice or more.
+
+16. Partial batch failures need explicit signaling. Returning `batchItemFailures` for SQS or Kinesis tells AWS which records to retry. Otherwise the entire batch retries or none does.
+
+17. API Gateway error responses are JSON-shaped if you don't say otherwise. Throw an unhandled exception, the client sees a JSON body with `errorMessage` and `errorType`, status 502. Map errors yourself.
+
+### Networking, IAM, observability
+
+18. Putting Lambda in a VPC adds an ENI cold-start penalty — improved a lot in 2019, but still real for the first invocation. Only do it if you genuinely need private-subnet resources. Outbound internet from VPC Lambda needs NAT, which costs money 24/7.
+
+19. S3 access from a VPC Lambda needs a VPC gateway endpoint or NAT. Without one, your S3 calls hang and time out. Looks like a code bug, isn't.
+
+20. CloudWatch log groups default to "Never expire" retention. Verbose Lambdas can rack up real cost in CloudWatch Logs alone. Set retention — 7, 14, or 30 days — on every log group you create.
+
+21. Lambda execution role is implicit on every action. Forgetting `s3:GetObject` or `kms:Decrypt` on the bucket's CMK is the most common "but it works locally" failure. CloudTrail tells you what was denied.
+
+22. Resource policy versus execution role are different layers. Resource policy says "who can *invoke* this Lambda." Execution role says "what this Lambda can *do*." Both must allow.
+
+23. X-Ray needs an SDK call *and* tracing enabled on the function *and* IAM permission. Three switches. People flip one and conclude X-Ray is broken.
+
+### Deployment, dependencies, runtimes
+
+24. The boto3 in the Python runtime lags pip. If you need a recent API, bundle current boto3 in your zip.
+
+25. Native wheels must match Lambda's runtime architecture. `pip install` on a Mac and zip-uploading `cryptography` is a classic foot-gun. Build in a Docker image matching the Lambda runtime.
+
+26. ARM64 saves about 20% at the same memory, but some wheels are still x86-only. Audit your deps before flipping.
+
+27. Layers are merge-ordered. Later layers overwrite earlier. A "base" layer for shared dependencies works. Conflicting layers silently shadow each other.
+
+28. Container-image deploys are cached on the Lambda host. The first cold start can be slow because of the image pull. Subsequent ones are normal. Keep images small even though the limit is 10 GB.
+
+### Time, scheduling, secrets
+
+29. EventBridge schedule (cron or rate) is always UTC. "9 AM" in your local time means something different in production. Use EventBridge Scheduler (launched 2022) for time-zone-aware schedules.
+
+30. Async invocations have a 6-hour event age. If retries fail past that, the event is silently dropped unless you've set a DLQ or on-failure destination.
+
+31. Secrets in env vars are visible to anyone with `lambda:GetFunctionConfiguration`. Encrypted at rest. Plaintext in the console. Use Secrets Manager or Parameter Store for actual secrets.
+
+The skim test: if you can re-state the cold-start split (Init / Handler), the 6 MB / 256 KB / 4 KB / 250 MB / 10 GB constants, and the difference between resource policy and execution role from memory — you'll handle most "tell me about Lambda" interview questions.
+
+---
+
+## 17. Adjacent — Glue, Prometheus, Grafana
+
+### AWS Glue
+
+Glue is a managed Spark-based ETL service. Lambda and Glue solve different problems.
+
+Runtime model. Lambda: serverless, up to 15 minutes, one handler at a time per env. Glue: managed Spark cluster, hours-long jobs, distributed compute.
+
+Data scale. Lambda: up to a few gigabytes comfortably. Glue: terabytes to petabytes natively.
+
+Language. Lambda: Python, Node, Java, Go, custom runtime. Glue: PySpark, Scala, plus Glue Studio for no-code.
+
+Startup time. Lambda: milliseconds when warm. Glue: 1 to 2 minutes to provision the Spark cluster.
+
+Cost model. Lambda: per request plus per millisecond. Glue: per DPU-hour ($0.44), billed per second with a 1-minute minimum on Glue 2.0 and later (older versions had a 10-minute minimum).
+
+Use Lambda for light transforms, event reactions, API backends. Use Glue for large-scale joins, aggregations, schema inference on a data lake.
+
+Key Glue concepts to know. DynamicFrame — Glue's DataFrame variant with schema flexibility. Glue Catalog — centralized metadata store for table schemas, also used by Athena. Job Bookmarks — Glue tracks processed S3 partitions to avoid reprocessing on incremental runs.
+
+The decision is usually straightforward. If the data fits in Lambda's memory and the job finishes in under 15 minutes, use Lambda. If you're joining multiple large S3 datasets or transforming daily partition files, use Glue.
+
+### Prometheus and Grafana
+
+Prometheus is a pull-based time-series metrics system. It scrapes HTTP `/metrics` endpoints on a schedule.
+
+The fundamental tension with Lambda: Lambda functions are ephemeral. There's no persistent HTTP endpoint to scrape. The function may be at zero concurrency between invocations.
+
+Three options for Lambda-to-Prometheus.
+
+EMF to CloudWatch to Grafana with the CloudWatch plugin. No Prometheus involved. Grafana reads directly from CloudWatch. Easiest for AWS-native stacks.
+
+Remote write to Amazon Managed Prometheus (AMP). The function pushes metrics to AMP via the Prometheus `remote_write` API at the end of each invocation. Grafana — or Amazon Managed Grafana — reads from AMP. Requires a Prometheus client library and SigV4 signing on the request.
+
+Push gateway. A persistent intermediate that Lambda pushes to. Prometheus scrapes the gateway. More infrastructure to manage, plus stale-metric risk if the push gateway isn't flushed between invocations.
+
+Grafana itself is a dashboarding layer. It doesn't store data — it queries data sources. CloudWatch is the data source most useful for Lambda observability. Built-in Grafana plugin. Queries CloudWatch Metrics and Logs Insights. Zero extra infrastructure. The standard choice for Lambda metrics: invocations, errors, duration, throttles, concurrent executions.
+
+For a Lambda-only stack with no existing Prometheus investment, the practical answer is: EMF for custom metrics, CloudWatch for the built-in Lambda metrics, Grafana connected to CloudWatch. No extra infrastructure. Dashboards in an hour.
+
+---
+
+## 18. The project — walking through `lambda_function.py`
+
+### What the function does, end to end
+
+In one paragraph: the function lists every PDF inside an S3 prefix. For each one, it generates a presigned download URL that expires in 15 minutes. It writes those (key, URL) pairs into a JSONL file in `/tmp` as it goes. When the listing is done, it uploads the JSONL to S3 as a manifest, generates one more presigned URL pointing to the manifest itself, deletes the local file, and returns the manifest URL plus the count.
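+
+Concretely, a successful invocation returns a shape like the following; the values are illustrative, the keys come straight from the code below:
+
+```python
+import json
+
+# Illustrative shape only; real values vary per run.
+example_response = {
+    "statusCode": 200,
+    "body": json.dumps({
+        "count": 50,                               # PDFs found under the prefix
+        "manifest_key": "manifests/<uuid>.jsonl",  # where the manifest landed
+        "manifest_url": "https://...",             # presigned GET, 15-minute expiry
+    }),
+}
+```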
+ +The use case: you want to ship a batch of files to someone who isn't on your AWS account. Send them one URL. They open it, get back a list of links, every link works for 15 minutes, then everything dies. + +Now let's walk through it. Top to bottom. + +### Imports and module-scope config + +```python +import asyncio +import json +import os +import uuid + +import aioboto3 +import aiofiles +``` + +Standard library first, third-party after. `aioboto3` is the async version of boto3 — async S3 calls, so we can overlap I/O. `aiofiles` is async filesystem access — same reason. + +```python +BUCKET = os.environ.get("BUCKET_NAME", "my-company-reports-bucket") +PREFIX = os.environ.get("PREFIX", "2026/04/") +EXPIRY = int(os.environ.get("URL_EXPIRY_SECONDS", "900")) +ENDPOINT = os.environ.get("S3_ENDPOINT_URL") or None +QUEUE_MAX = int(os.environ.get("QUEUE_MAX", "2000")) + +_DONE = object() +``` + +Five environment reads at module scope. Init phase. They run once per cold start, get cached as Python module attributes, and every warm invocation reuses them for free. + +`ENDPOINT` is the trick that lets this run against MinIO locally. When you run on real Lambda, you don't set the env var, the value is `None`, and aioboto3 talks to real S3. When you run locally with MinIO, you set it to `http://localhost:9000` and the same code talks to MinIO. The function doesn't know the difference. + +`_DONE` is a sentinel. A unique singleton that we'll put on the queue to signal "no more items coming." We'll get to why in a moment. The reason it's an `object()` and not a string — a string could theoretically collide with a real S3 key. An `object()` instance has a unique identity. Comparing with `is` — not `==` — is unambiguous. + +### The handler — minimal on purpose + +```python +def handler(event, context): + result = asyncio.run(_run()) + return {"statusCode": 200, "body": json.dumps(result)} +``` + +The handler is sync because Lambda's contract is sync. AWS calls `handler(event, context)` and waits for it to return. + +Inside, we open an asyncio event loop with `asyncio.run`, run our async coroutine, get back a result, wrap it in an API-Gateway-style response shape with `statusCode` and `body`. The response shape is a habit — useful when the function gets fronted by API Gateway later. A pure Lambda invoke doesn't need it, but it doesn't hurt. + +`asyncio.run` creates a fresh event loop per invocation. This is one of the small inefficiencies of doing async inside a sync Lambda handler. The cost is small — tens of microseconds — but it means async clients can't be shared across invocations the way sync boto3 clients could. + +Why async at all in Lambda? Because Lambda's billing model is per-millisecond of wall-clock time. Anything you can overlap, you save money on. Our function does a lot of S3 calls — listing pages, generating presigned URLs, writing files. While S3 is preparing the next page of results, we can already be presigning and writing the previous page. That overlap directly reduces duration, which directly reduces cost and latency. That's why async. + +### `_run()` — the actual work + +```python +async def _run(): + session = aioboto3.Session() + async with session.client("s3", endpoint_url=ENDPOINT) as s3: + queue: asyncio.Queue = asyncio.Queue(maxsize=QUEUE_MAX) + manifest_path = f"/tmp/{uuid.uuid4()}.jsonl" +``` + +Open an aioboto3 session. Create an S3 client, with the optional endpoint override for MinIO. 
The `async with` block makes sure the client is properly closed when we're done — connections cleaned up, session closed.
+
+Why is the session created inside `_run` instead of at module scope? Because aioboto3 async clients don't cleanly support cross-invocation reuse. The async context manager is tied to the event loop, and each invocation gets a fresh event loop via `asyncio.run`. Sync boto3 clients you'd put at module scope. Async ones, you create per invocation.
+
+Inside the `async with`, two things. An `asyncio.Queue` with a maximum size of `QUEUE_MAX` — that's 2000 by default. And a path in `/tmp` with a UUID in the filename.
+
+Why a queue at all? Because we want producer and consumer running concurrently. A queue is the standard channel for that.
+
+Why bounded? Because if the producer is faster than the consumer, an unbounded queue grows in memory. Lambda tops out at 10 GB of memory, and the default is just 128 MB; a function like this typically gets 256 or 512 MB. If we're scanning a bucket with a million PDFs and the producer loads them all into the queue before we presign even one, we OOM. The bounded queue gives us backpressure: when it's full, `await queue.put(...)` blocks until the consumer takes something off. Producer waits for consumer. Memory stays flat.
+
+Why 2000? Big enough that the producer doesn't block on a normal-sized run. Small enough that even at 100 bytes per key, the queue is at most 200 KB of memory. Comfortable margin.
+
+Why the UUID in the manifest path? Because `/tmp` persists across warm invocations on the same environment. Two invocations back to back, both writing to a fixed path like `/tmp/manifest.jsonl`, would collide. With `uuid4`, no collision possible.
+
+### The producer
+
+```python
+        async def producer():
+            paginator = s3.get_paginator("list_objects_v2")
+            async for page in paginator.paginate(Bucket=BUCKET, Prefix=PREFIX):
+                for obj in page.get("Contents", []) or []:
+                    key = obj["Key"]
+                    if key.lower().endswith(".pdf"):
+                        await queue.put(key)
+            await queue.put(_DONE)
+```
+
+Why is `producer` defined inside `_run`? Two reasons. 1: it's a closure. It captures `s3` and `queue` from the enclosing scope without us having to pass them as arguments. Cleaner. 2: it's a private implementation detail of `_run` — nobody else needs to call it. Defining it inside makes that scope explicit.
+
+What does it do? It uses S3's `list_objects_v2` operation through a paginator. S3 returns at most 1000 objects per page. The paginator hides that — you `async for page in paginator.paginate(...)` and it transparently calls the next page when needed.
+
+For each object on each page, check if it ends in `.pdf` (case-insensitive). If yes, put it on the queue.
+
+When the paginator is exhausted — no more pages — put the `_DONE` sentinel on the queue. That tells the consumer "I'm done, you can stop reading."
+
+Why a sentinel and not, say, closing the queue? Because `asyncio.Queue` doesn't have a "close" method. The standard pattern for "no more items" is the sentinel. The consumer checks `if item is _DONE` and breaks.
+
+Note that `await queue.put(key)` will *block* if the queue is full. The producer pauses there until the consumer takes something off. That's the backpressure I mentioned. Memory bounded.
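+
+The mechanics are easy to see outside Lambda. A self-contained toy (not project code) with a queue bound of 2 and a slow consumer: the producer visibly stalls on `put` until a slot frees up.
+
+```python
+import asyncio
+
+async def main():
+    q: asyncio.Queue = asyncio.Queue(maxsize=2)  # tiny bound to force backpressure
+
+    async def producer():
+        for i in range(6):
+            await q.put(i)  # blocks here whenever the queue is full
+            print(f"put {i} (qsize={q.qsize()})")
+        await q.put(None)  # sentinel: no more items
+
+    async def consumer():
+        while True:
+            item = await q.get()
+            if item is None:
+                break
+            await asyncio.sleep(0.1)  # slow consumer; the producer must wait
+            print(f"got {item}")
+
+    await asyncio.gather(producer(), consumer())
+
+asyncio.run(main())
+```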
+ +### The consumer + +```python + async def consumer(): + count = 0 + async with aiofiles.open(manifest_path, "w") as f: + while True: + item = await queue.get() + if item is _DONE: + break + url = await s3.generate_presigned_url( + "get_object", + Params={"Bucket": BUCKET, "Key": item}, + ExpiresIn=EXPIRY, + ) + await f.write(json.dumps({"key": item, "url": url}) + "\n") + count += 1 + return count +``` + +Same closure pattern. Captures `queue`, `manifest_path`, `s3` from the enclosing scope. + +Open the manifest file for writing, async, in `/tmp`. The `async with` makes sure it's flushed and closed when we exit. + +Loop forever. Take items off the queue. If the item is the sentinel, break. Otherwise it's a PDF key. Generate a presigned URL for it — 15 minutes by default. Write a JSONL line: a JSON object with `key` and `url`, plus a newline. Increment the count. + +When the loop breaks, close the file (via the `async with`), return the count. + +`generate_presigned_url` is a local computation, not a network call. It takes your AWS credentials, your bucket name, your key, the expiry, and your region, and produces a signed URL deterministically. No HTTP request. Fast. + +Why JSONL — JSON Lines — and not a JSON array? Because JSONL streams. You can write one line at a time without buffering the whole array in memory. The reader can process one line at a time. If the manifest grows to gigabytes, JSONL stays usable. + +### Running them together + +```python + prod_task = asyncio.create_task(producer()) + count = await consumer() + await prod_task +``` + +This is the concurrency. `asyncio.create_task(producer())` schedules the producer coroutine to run on the event loop, returns immediately with a task handle. The producer is now running in the background. + +`count = await consumer()` runs the consumer in the foreground. It blocks until the consumer returns, which happens when the consumer sees the sentinel. + +`await prod_task` makes sure the producer task has fully completed and any exceptions get raised. By the time the consumer sees the sentinel, the producer has put it on the queue, so `prod_task` should be done — but awaiting it makes that guarantee explicit and propagates errors. + +Why this two-task structure? Because we want overlap. While S3 is preparing the next page of LIST results — network round trip — the consumer is presigning and writing the previous page. If we did it sequentially — list everything, then presign everything — we'd add the listing latency and the presigning latency. With overlap, we add the larger of the two. + +For a small number of files, the difference is negligible. For a thousand or ten thousand files, async + queue cuts wall-clock time noticeably. Less duration, less cost. + +### Uploading the manifest + +```python + manifest_key = f"manifests/{uuid.uuid4()}.jsonl" + async with aiofiles.open(manifest_path, "rb") as f: + body = await f.read() + await s3.put_object( + Bucket=BUCKET, + Key=manifest_key, + Body=body, + ContentType="application/x-ndjson", + ) +``` + +Generate an S3 key for the manifest under `manifests/`, with another UUID. Read the local /tmp file as bytes. Upload it with `put_object`, setting the content type to `application/x-ndjson` — that's the registered MIME type for newline-delimited JSON. + +Why not use `s3.upload_file` instead of read + put_object? Because `upload_file` doesn't have a clean async equivalent in aioboto3 that handles the multipart logic the same way. 
For files this size — hundreds of kilobytes to a few megabytes — read-then-put is fine. For very large files we'd want multipart upload. + +### Generating the manifest URL and cleaning up + +```python + manifest_url = await s3.generate_presigned_url( + "get_object", + Params={"Bucket": BUCKET, "Key": manifest_key}, + ExpiresIn=EXPIRY, + ) + + os.unlink(manifest_path) + + return { + "count": count, + "manifest_key": manifest_key, + "manifest_url": manifest_url, + } +``` + +Generate a presigned URL for the manifest itself — 15 minutes again. Delete the local file from /tmp so it doesn't accumulate across warm invocations on the same environment. Return the count, the S3 key, and the URL. + +The handler wraps that in `{"statusCode": 200, "body": json.dumps(result)}` and returns. Done. + +### Why this design? — answers to the interview questions you missed + +Why presigned URLs, not return the data directly? Because the response is small — just a few hundred bytes — and the recipient doesn't need an AWS account to use the URL. The URL is signed by your credentials, expires in 15 minutes, and works for anyone who has it. + +Why upload the manifest to S3 and return a URL to *it*, instead of returning the manifest contents in the response body? Because of the 6 MB sync response cap. Ten thousand presigned URLs in JSONL is around 3 to 5 MB. Twenty thousand blows the cap, and the cap is silent — the function succeeds, the caller gets a 413 with no warning. The manifest-in-S3 pattern has no upper bound. + +Why async, not sync? Two reasons. 1: we want to overlap S3 LIST calls with presigning and file writes. Async + queue is the standard pattern for that. 2: even though presigning is local, we still have the LIST round trips and the final upload, both of which benefit from being non-blocking. + +Why a producer and consumer instead of one loop that does both? Because the producer is bursty — when a page comes back, it has up to 1000 keys to dump on the queue. The consumer is steady. Decoupling them with a queue means the producer can race ahead while the consumer steadily drains, instead of LIST-then-presign-then-LIST-then-presign serially. + +Why a bounded queue? For backpressure. Without the bound, the producer can outrun the consumer and exhaust memory. With the bound, when the queue fills, the producer's `await queue.put(...)` blocks until the consumer takes something off. Memory stays flat regardless of how many files we're scanning. + +Why a sentinel and not closing the queue? Because asyncio.Queue doesn't have a close method. The sentinel is the standard "I'm done" signal. The consumer checks `if item is _DONE` and breaks the loop. + +Why nested functions? Because they're closures over `s3`, `queue`, `manifest_path` from the enclosing scope. We don't have to pass those as arguments. They're also private implementation details of `_run` — defining them inside makes that scope explicit. + +Why UUID in the /tmp filename? Because /tmp persists across warm invocations on the same environment. A fixed filename collides between back-to-back warm runs. UUID guarantees uniqueness. + +Why `_DONE = object()` instead of a string sentinel? Because an `object()` instance has a unique identity that can't possibly collide with any real S3 key. Comparing with `is` (identity, not equality) is unambiguous. + +Why `os.unlink(manifest_path)` at the end? Because /tmp persists across warm invocations and is at most 10 GB. 
If the function ran a thousand times on the same warm env without cleanup, /tmp (512 MB by default) would fill and subsequent invocations would fail.
+
+### Cold start vs warm — what you'd see in CloudWatch
+
+First invocation, cold start. The REPORT line shows something like:
+
+```
+REPORT RequestId: ... Duration: 312.45 ms Init Duration: 423.12 ms
+```
+
+Init Duration: about 400 ms. That covers importing aioboto3 and aiofiles, reading the five env vars. Heavy because aioboto3 pulls in aiobotocore, which pulls in botocore.
+
+Duration: about 300 ms. That's the actual scan: listing the bucket, presigning 50 PDFs, writing the manifest, uploading it.
+
+Second invocation within 30 seconds — warm:
+
+```
+REPORT RequestId: ... Duration: 287.91 ms
+```
+
+No Init Duration line. We jumped straight to the handler, so the roughly 420 ms of init never happened; the handler itself also shaved about 25 ms thanks to already-warm connections.
+
+For a function that runs once a day, every invocation is cold. Init Duration matters. For a function that runs every few seconds, almost everything is warm. Init Duration is irrelevant.
+
+### What happens if it times out
+
+The default function timeout is 3 seconds. Almost certainly not enough — set it explicitly to something like 30 or 60 seconds for this function. Maximum is 15 minutes.
+
+If the function does time out, Lambda kills the process. The execution environment is still alive but the invocation is over. The local /tmp file may or may not have been deleted, depending on how far we got. If we wrote the manifest to S3 before the timeout, it's there. If not, the partial work is lost.
+
+The function isn't quite idempotent in the strict sense — re-running it produces a fresh manifest with new UUIDs and new presigned URLs. The previous manifest stays in S3. If "exactly one manifest per logical job" were a requirement, we'd add a dedup table — DynamoDB with the request ID as the key — to skip re-runs.
+
+### How would you scale this
+
+Two natural scaling moves.
+
+1 — fan out by prefix. Wrap this function in a Step Functions Map state. The orchestrator passes a list of prefixes. Each map iteration runs one Lambda for one prefix. Concurrency cap controlled by the Map state's `MaxConcurrency`, not by Lambda's account quota.
+
+2 — go async with S3 events. Skip the LIST entirely. Subscribe the function to S3 ObjectCreated events filtered to `*.pdf`. The function fires once per upload, handles one file at a time, no producer/consumer needed because there's nothing to enumerate. Way simpler. Different use case — that's "process new files as they arrive," not "scan the existing bucket."
+
+For the existing-bucket scan, the current design is right.
+
+### What I'd change before production
+
+A few things.
+
+1: make `BUCKET` and `PREFIX` come from the event payload, not from environment variables. Currently they're set at deploy time. If you want the same function to scan different prefixes on different invocations, the event-driven version is more flexible.
+
+2: enable `ReportBatchItemFailures` if this becomes part of an SQS-fed pipeline later. Currently it's not, but it's good defensive design.
+
+3: add structured logging. JSON to stdout, with `request_id`, `bucket`, `prefix`, `count`. Logs Insights can then aggregate.
+
+4: emit an EMF metric for `count`. Free CloudWatch metric, no additional API calls. Lets you dashboard "PDFs processed per invocation" over time.
+
+5: explicit error handling on the producer. Currently if `paginator.paginate` raises, the producer task fails, the consumer keeps waiting on `queue.get` forever, and the function times out. Better: wrap the producer body in a try/finally that always puts `_DONE` on the queue, so the consumer always exits.
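+
+A sketch of that fix, reusing the function's own names:
+
+```python
+        async def producer():
+            try:
+                paginator = s3.get_paginator("list_objects_v2")
+                async for page in paginator.paginate(Bucket=BUCKET, Prefix=PREFIX):
+                    for obj in page.get("Contents", []) or []:
+                        key = obj["Key"]
+                        if key.lower().endswith(".pdf"):
+                            await queue.put(key)
+            finally:
+                # Runs on success or failure: the consumer always gets its
+                # sentinel, and `await prod_task` still re-raises any error.
+                await queue.put(_DONE)
+```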
+
+6: shorten the imports. `aioboto3` adds 200+ ms to the cold start. If cold start matters, consider sync boto3 — the function isn't actually doing enough I/O concurrency to make async pay off until file counts get large.
+
+Those are the six things. None of them mean the current design is wrong; they're refinements for moving from "weekend project" to "production service."
+
+---
+
+That's everything in the notes. Thirty-one pitfalls, eighteen sections, one project, one set of answers to the questions that tripped you up last time.
+
+Tuesday, you'll know it.
diff --git a/docs/viewer.html b/docs/viewer.html
new file mode 100644
index 0000000..214c382
--- /dev/null
+++ b/docs/viewer.html
@@ -0,0 +1,101 @@
+[docs/viewer.html: 101 lines of HTML lost to markup stripping; recoverable page title: "Graph Viewer"]
+ + + diff --git a/invoke.py b/invoke.py new file mode 100644 index 0000000..50fb2dc --- /dev/null +++ b/invoke.py @@ -0,0 +1,15 @@ +import json +import os + +os.environ.setdefault("BUCKET_NAME", "my-company-reports-bucket") +os.environ.setdefault("PREFIX", "2026/04/") +os.environ.setdefault("S3_ENDPOINT_URL", "http://localhost:9000") +os.environ.setdefault("AWS_ACCESS_KEY_ID", "minioadmin") +os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "minioadmin") +os.environ.setdefault("AWS_REGION", "us-east-1") + +from lambda_function import handler # noqa: E402 + +if __name__ == "__main__": + response = handler({}, None) + print(json.dumps(response, indent=2)) diff --git a/lambda_function.py b/lambda_function.py new file mode 100644 index 0000000..3d56525 --- /dev/null +++ b/lambda_function.py @@ -0,0 +1,79 @@ +import asyncio +import json +import os +import uuid + +import aioboto3 +import aiofiles + +BUCKET = os.environ.get("BUCKET_NAME", "my-company-reports-bucket") +PREFIX = os.environ.get("PREFIX", "2026/04/") +EXPIRY = int(os.environ.get("URL_EXPIRY_SECONDS", "900")) +ENDPOINT = os.environ.get("S3_ENDPOINT_URL") or None +QUEUE_MAX = int(os.environ.get("QUEUE_MAX", "2000")) + +_DONE = object() + + +async def _run(): + session = aioboto3.Session() + async with session.client("s3", endpoint_url=ENDPOINT) as s3: + queue: asyncio.Queue = asyncio.Queue(maxsize=QUEUE_MAX) + manifest_path = f"/tmp/{uuid.uuid4()}.jsonl" + + async def producer(): + paginator = s3.get_paginator("list_objects_v2") + async for page in paginator.paginate(Bucket=BUCKET, Prefix=PREFIX): + for obj in page.get("Contents", []) or []: + key = obj["Key"] + if key.lower().endswith(".pdf"): + await queue.put(key) + await queue.put(_DONE) + + async def consumer(): + count = 0 + async with aiofiles.open(manifest_path, "w") as f: + while True: + item = await queue.get() + if item is _DONE: + break + url = await s3.generate_presigned_url( + "get_object", + Params={"Bucket": BUCKET, "Key": item}, + ExpiresIn=EXPIRY, + ) + await f.write(json.dumps({"key": item, "url": url}) + "\n") + count += 1 + return count + + prod_task = asyncio.create_task(producer()) + count = await consumer() + await prod_task + + manifest_key = f"manifests/{uuid.uuid4()}.jsonl" + async with aiofiles.open(manifest_path, "rb") as f: + body = await f.read() + await s3.put_object( + Bucket=BUCKET, + Key=manifest_key, + Body=body, + ContentType="application/x-ndjson", + ) + manifest_url = await s3.generate_presigned_url( + "get_object", + Params={"Bucket": BUCKET, "Key": manifest_key}, + ExpiresIn=EXPIRY, + ) + + os.unlink(manifest_path) + + return { + "count": count, + "manifest_key": manifest_key, + "manifest_url": manifest_url, + } + + +def handler(event, context): + result = asyncio.run(_run()) + return {"statusCode": 200, "body": json.dumps(result)} diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..84c7310 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,3 @@ +aioboto3>=15.0 +aiofiles>=23.2 +boto3>=1.40 diff --git a/seed.py b/seed.py new file mode 100644 index 0000000..4a01085 --- /dev/null +++ b/seed.py @@ -0,0 +1,76 @@ +import os +import sys + +import boto3 +from botocore.client import Config +from botocore.exceptions import ClientError + +BUCKET = os.environ.get("BUCKET_NAME", "my-company-reports-bucket") +PREFIX = os.environ.get("PREFIX", "2026/04/") +ENDPOINT = os.environ.get("S3_ENDPOINT_URL", "http://localhost:9000") +DECOY_EXTS = (".txt", ".csv", ".json") + + +def _client(): + return boto3.client( + "s3", + 
endpoint_url=ENDPOINT, + aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID", "minioadmin"), + aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY", "minioadmin"), + region_name=os.environ.get("AWS_REGION", "us-east-1"), + config=Config(signature_version="s3v4"), + ) + + +def _ensure_bucket(s3, name): + try: + s3.head_bucket(Bucket=name) + except ClientError: + s3.create_bucket(Bucket=name) + + +def _walk(source_dir): + for root, _, files in os.walk(source_dir): + for name in files: + yield os.path.join(root, name) + + +def main(): + source_dir = sys.argv[1] if len(sys.argv) > 1 else os.environ.get("SOURCE_DIR") + if not source_dir: + print("usage: SOURCE_DIR= python seed.py (or pass as argv[1])", file=sys.stderr) + sys.exit(2) + if not os.path.isdir(source_dir): + print(f"not a directory: {source_dir}", file=sys.stderr) + sys.exit(2) + + s3 = _client() + _ensure_bucket(s3, BUCKET) + + pdf_n = decoy_n = 0 + for path in _walk(source_dir): + lower = path.lower() + is_pdf = lower.endswith(".pdf") + is_decoy = lower.endswith(DECOY_EXTS) + if not (is_pdf or is_decoy): + continue + + rel = os.path.relpath(path, source_dir).replace(os.sep, "/") + key = f"{PREFIX}{rel}" + try: + s3.upload_file(path, BUCKET, key) + except (ClientError, OSError) as exc: + print(f" skip {path}: {exc}", file=sys.stderr) + continue + if is_pdf: + pdf_n += 1 + else: + decoy_n += 1 + if (pdf_n + decoy_n) % 100 == 0: + print(f" uploaded {pdf_n} pdfs / {decoy_n} decoys ...") + + print(f"done: {pdf_n} pdfs and {decoy_n} decoys uploaded to s3://{BUCKET}/{PREFIX}") + + +if __name__ == "__main__": + main()