Files
sysmonstm/infra/aws/terraform/lambda.tf
2025-12-29 14:40:06 -03:00

204 lines
5.2 KiB
HCL

# Lambda Functions for Data Processing Pipeline
# These are optional and enabled via enable_lambda_pipeline variable
# SQS Queue for buffering metrics
# Primary queue buffering incoming metrics for the aggregator Lambda.
resource "aws_sqs_queue" "metrics" {
  count = var.enable_lambda_pipeline ? 1 : 0

  name = "${var.project_name}-metrics"

  # AWS recommends a visibility timeout of at least 6x the function timeout
  # for queues used as Lambda event sources, so an in-flight batch is not
  # re-delivered while the invocation (plus retries/batching) is still running.
  # The previous 2x multiplier risked duplicate processing of slow batches.
  visibility_timeout_seconds = var.lambda_timeout * 6

  message_retention_seconds = 86400 # 24 hours

  # After 3 failed receive attempts, messages are parked in the DLQ for inspection.
  redrive_policy = jsonencode({
    deadLetterTargetArn = aws_sqs_queue.metrics_dlq[0].arn
    maxReceiveCount     = 3
  })
}
# Dead-letter queue for metrics that fail processing repeatedly
# (see the redrive_policy on aws_sqs_queue.metrics).
resource "aws_sqs_queue" "metrics_dlq" {
count = var.enable_lambda_pipeline ? 1 : 0
name = "${var.project_name}-metrics-dlq"
# Keep failed messages for the SQS maximum so they can be inspected/replayed.
message_retention_seconds = 1209600 # 14 days
}
# S3 Bucket for metric backups
# S3 Bucket for metric backups
# bucket_prefix lets AWS append a unique suffix, avoiding global-name collisions.
resource "aws_s3_bucket" "metrics" {
count = var.enable_s3_backup ? 1 : 0
bucket_prefix = "${var.project_name}-metrics-"
}
# Lifecycle policy: tier aging metric backups into cheaper storage classes
# and expire them after one year.
resource "aws_s3_bucket_lifecycle_configuration" "metrics" {
  count  = var.enable_s3_backup ? 1 : 0
  bucket = aws_s3_bucket.metrics[0].id

  rule {
    id     = "archive-old-metrics"
    status = "Enabled"

    # AWS provider v4+ requires an explicit filter (or prefix) on every rule;
    # an empty filter applies the rule to all objects in the bucket.
    filter {}

    transition {
      days          = 30
      storage_class = "STANDARD_IA"
    }

    transition {
      days          = 90
      storage_class = "GLACIER"
    }

    expiration {
      days = 365
    }
  }
}
# IAM Role for Lambda
# IAM Role for Lambda
# Shared execution role assumed by both the aggregator and compactor functions.
resource "aws_iam_role" "lambda" {
count = var.enable_lambda_pipeline ? 1 : 0
# name_prefix avoids collisions when multiple environments share an account.
name_prefix = "${var.project_name}-lambda-"
# Trust policy: only the Lambda service may assume this role.
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Principal = {
Service = "lambda.amazonaws.com"
}
}
]
})
}
# Inline policy for the Lambda execution role: CloudWatch Logs, consuming
# from the metrics queue, and (optionally) reading/writing backup objects.
resource "aws_iam_role_policy" "lambda" {
  count = var.enable_lambda_pipeline ? 1 : 0
  name  = "lambda-policy"
  role  = aws_iam_role.lambda[0].id

  # The S3 statement is only emitted when the backup bucket exists.
  # The previous version fell back to Resource = "*" when enable_s3_backup
  # was false, which granted s3:PutObject/s3:GetObject on EVERY bucket in
  # the account — a needless privilege-escalation surface.
  policy = jsonencode({
    Version = "2012-10-17"
    Statement = concat(
      [
        {
          Effect = "Allow"
          Action = [
            "logs:CreateLogGroup",
            "logs:CreateLogStream",
            "logs:PutLogEvents"
          ]
          Resource = "arn:aws:logs:*:*:*"
        },
        {
          Effect = "Allow"
          Action = [
            "sqs:ReceiveMessage",
            "sqs:DeleteMessage",
            "sqs:GetQueueAttributes"
          ]
          Resource = aws_sqs_queue.metrics[0].arn
        }
      ],
      var.enable_s3_backup ? [
        {
          Effect = "Allow"
          Action = [
            "s3:PutObject",
            "s3:GetObject"
          ]
          Resource = "${aws_s3_bucket.metrics[0].arn}/*"
        }
      ] : []
    )
  })
}
# Lambda function for metric aggregation
# Lambda function for metric aggregation
# Consumes batches from the metrics SQS queue (see sqs_trigger mapping below)
# and writes results to the TimescaleDB host.
resource "aws_lambda_function" "aggregator" {
count = var.enable_lambda_pipeline ? 1 : 0
function_name = "${var.project_name}-aggregator"
role = aws_iam_role.lambda[0].arn
handler = "main.handler"
runtime = "python3.11"
timeout = var.lambda_timeout
memory_size = var.lambda_memory_size
# Placeholder - will be deployed via CI/CD
# NOTE: placeholder.zip must exist at plan time; filebase64sha256 reads it locally.
filename = "${path.module}/../lambdas/aggregator/placeholder.zip"
source_code_hash = filebase64sha256("${path.module}/../lambdas/aggregator/placeholder.zip")
environment {
variables = {
# Private IP of the monitoring EC2 instance (defined elsewhere in this module).
TIMESCALE_HOST = aws_instance.sysmonstm.private_ip
LOG_LEVEL = "INFO"
}
}
lifecycle {
# CI/CD owns the deployed code; Terraform must not revert it to the placeholder.
ignore_changes = [filename, source_code_hash]
}
}
# Wire the metrics SQS queue to the aggregator Lambda.
resource "aws_lambda_event_source_mapping" "sqs_trigger" {
  count = var.enable_lambda_pipeline ? 1 : 0

  event_source_arn = aws_sqs_queue.metrics[0].arn
  function_name    = aws_lambda_function.aggregator[0].arn

  # For SQS event sources, a batch size greater than 10 requires
  # maximum_batching_window_in_seconds >= 1; without it the mapping
  # fails to create (SQS delivers at most 10 messages per poll otherwise).
  batch_size                         = 100
  maximum_batching_window_in_seconds = 5

  scaling_config {
    # Cap concurrent Lambda pollers so the DB isn't overwhelmed (minimum is 2).
    maximum_concurrency = 5
  }
}
# CloudWatch Event for scheduled compaction
# CloudWatch Event for scheduled compaction
# Fires hourly; target and invoke permission are defined below.
resource "aws_cloudwatch_event_rule" "compactor" {
count = var.enable_lambda_pipeline ? 1 : 0
name = "${var.project_name}-compactor-schedule"
description = "Trigger metric compaction every hour"
schedule_expression = "rate(1 hour)"
}
# Hourly compaction Lambda, invoked by the CloudWatch schedule above.
# Compacts metrics in TimescaleDB and optionally archives to S3.
resource "aws_lambda_function" "compactor" {
count = var.enable_lambda_pipeline ? 1 : 0
function_name = "${var.project_name}-compactor"
role = aws_iam_role.lambda[0].arn
handler = "main.handler"
runtime = "python3.11"
# Fixed sizing (unlike the aggregator, which is variable-driven):
# compaction is a batch job with predictable resource needs.
timeout = 300
memory_size = 512
# Placeholder - real code is deployed via CI/CD (see lifecycle below).
filename = "${path.module}/../lambdas/compactor/placeholder.zip"
source_code_hash = filebase64sha256("${path.module}/../lambdas/compactor/placeholder.zip")
environment {
variables = {
TIMESCALE_HOST = aws_instance.sysmonstm.private_ip
# Empty string signals "no S3 archiving" to the function when backups are off.
S3_BUCKET = var.enable_s3_backup ? aws_s3_bucket.metrics[0].bucket : ""
LOG_LEVEL = "INFO"
}
}
lifecycle {
# CI/CD owns the deployed code; Terraform must not revert it to the placeholder.
ignore_changes = [filename, source_code_hash]
}
}
# Attach the compactor Lambda as the target of the hourly schedule rule.
resource "aws_cloudwatch_event_target" "compactor" {
count = var.enable_lambda_pipeline ? 1 : 0
rule = aws_cloudwatch_event_rule.compactor[0].name
target_id = "compactor-lambda"
arn = aws_lambda_function.compactor[0].arn
}
# Resource-based policy allowing EventBridge (CloudWatch Events) to invoke
# the compactor; scoped to this specific rule via source_arn.
resource "aws_lambda_permission" "compactor_cloudwatch" {
count = var.enable_lambda_pipeline ? 1 : 0
statement_id = "AllowCloudWatchInvoke"
action = "lambda:InvokeFunction"
function_name = aws_lambda_function.compactor[0].function_name
principal = "events.amazonaws.com"
source_arn = aws_cloudwatch_event_rule.compactor[0].arn
}