diff --git a/README.md b/README.md
index e264bdc3d5..9e70df6d14 100644
--- a/README.md
+++ b/README.md
@@ -155,6 +155,8 @@ Join our discord community via [this invite link](https://discord.gg/bxgXW8jJGh)
 | [key\_name](#input\_key\_name) | Key pair name | `string` | `null` | no |
 | [kms\_key\_arn](#input\_kms\_key\_arn) | Optional CMK Key ARN to be used for Parameter Store. This key must be in the current account. | `string` | `null` | no |
 | [lambda\_architecture](#input\_lambda\_architecture) | AWS Lambda architecture. Lambda functions using Graviton processors ('arm64') tend to have better price/performance than 'x86\_64' functions. | `string` | `"arm64"` | no |
+| [lambda\_event\_source\_mapping\_batch\_size](#input\_lambda\_event\_source\_mapping\_batch\_size) | Maximum number of records to pass to the lambda function in a single batch for the event source mapping. When not set, the AWS default of 10 events will be used. | `number` | `10` | no |
+| [lambda\_event\_source\_mapping\_maximum\_batching\_window\_in\_seconds](#input\_lambda\_event\_source\_mapping\_maximum\_batching\_window\_in\_seconds) | Maximum amount of time to gather records before invoking the lambda function, in seconds. AWS requires this to be greater than 0 if batch\_size is greater than 10. Defaults to 0. | `number` | `0` | no |
 | [lambda\_principals](#input\_lambda\_principals) | (Optional) add extra principals to the role created for execution of the lambda, e.g. for local testing. | <pre>list(object({<br>    type = string<br>    identifiers = list(string)<br>  }))</pre> | `[]` | no |
 | [lambda\_runtime](#input\_lambda\_runtime) | AWS Lambda runtime. | `string` | `"nodejs22.x"` | no |
 | [lambda\_s3\_bucket](#input\_lambda\_s3\_bucket) | S3 bucket from which to specify lambda functions. This is an alternative to providing local files directly. | `string` | `null` | no |
diff --git a/lambdas/functions/control-plane/src/lambda.test.ts b/lambdas/functions/control-plane/src/lambda.test.ts
index 2c54a4d541..c6f9f24c1d 100644
--- a/lambdas/functions/control-plane/src/lambda.test.ts
+++ b/lambdas/functions/control-plane/src/lambda.test.ts
@@ -70,19 +70,33 @@ vi.mock('@aws-github-runner/aws-powertools-util');
 vi.mock('@aws-github-runner/aws-ssm-util');
 
 describe('Test scale up lambda wrapper.', () => {
-  it('Do not handle multiple record sets.', async () => {
-    await testInvalidRecords([sqsRecord, sqsRecord]);
+  it('Do not handle empty record sets.', async () => {
+    const sqsEventMultipleRecords: SQSEvent = {
+      Records: [],
+    };
+
+    await expect(scaleUpHandler(sqsEventMultipleRecords, context)).resolves.not.toThrow();
   });
 
-  it('Do not handle empty record sets.', async () => {
-    await testInvalidRecords([]);
+  it('Ignores non-sqs event sources.', async () => {
+    const record = {
+      ...sqsRecord,
+      eventSource: 'aws:non-sqs',
+    };
+
+    const sqsEventMultipleRecordsNonSQS: SQSEvent = {
+      Records: [record],
+    };
+
+    await expect(scaleUpHandler(sqsEventMultipleRecordsNonSQS, context)).resolves.not.toThrow();
+    expect(scaleUp).toHaveBeenCalledWith([]);
   });
 
   it('Scale without error should resolve.', async () => {
     const mock = vi.fn(scaleUp);
     mock.mockImplementation(() => {
       return new Promise((resolve) => {
-        resolve();
+        resolve([]);
       });
     });
     await expect(scaleUpHandler(sqsEvent, context)).resolves.not.toThrow();
@@ -104,28 +118,137 @@ describe('Test scale up lambda wrapper.', () => {
     vi.mocked(scaleUp).mockImplementation(mock);
     await expect(scaleUpHandler(sqsEvent, context)).rejects.toThrow(error);
   });
-});
 
-async function testInvalidRecords(sqsRecords: SQSRecord[]) {
-  const mock = vi.fn(scaleUp);
-  const logWarnSpy = vi.spyOn(logger, 'warn');
-  mock.mockImplementation(() => {
-    return new Promise((resolve) => {
-      resolve();
+  describe('Batch processing', () => {
+    beforeEach(() => {
+      vi.clearAllMocks();
+    });
+
+    const createMultipleRecords = (count: number, eventSource = 'aws:sqs'): SQSRecord[] => {
+      return Array.from({ length: count }, (_, i) => ({
+        ...sqsRecord,
+        eventSource,
+        messageId: `message-${i}`,
+        body: JSON.stringify({
+          ...body,
+          id: i + 1,
+        }),
+      }));
+    };
+
+    it('Should handle multiple SQS records in a single invocation', async () => {
+      const records = createMultipleRecords(3);
+      const multiRecordEvent: SQSEvent = { Records: records };
+
+      const mock = vi.fn(scaleUp);
+      mock.mockImplementation(() => Promise.resolve([]));
+      vi.mocked(scaleUp).mockImplementation(mock);
+
+      await expect(scaleUpHandler(multiRecordEvent, context)).resolves.not.toThrow();
+      expect(scaleUp).toHaveBeenCalledWith(
+        expect.arrayContaining([
+          expect.objectContaining({ messageId: 'message-0' }),
+          expect.objectContaining({ messageId: 'message-1' }),
+          expect.objectContaining({ messageId: 'message-2' }),
+        ]),
+      );
+    });
+
+    it('Should return batch item failures for rejected messages', async () => {
+      const records = createMultipleRecords(3);
+      const multiRecordEvent: SQSEvent = { Records: records };
+
+      const mock = vi.fn(scaleUp);
+      mock.mockImplementation(() => Promise.resolve(['message-1', 'message-2']));
+      vi.mocked(scaleUp).mockImplementation(mock);
+
+      const result = await scaleUpHandler(multiRecordEvent, context);
+      expect(result).toEqual({
+        batchItemFailures: [{ itemIdentifier: 'message-1' }, { itemIdentifier: 'message-2' }],
+      });
+    });
+
+    it('Should filter out non-SQS event sources', async () => {
+      const sqsRecords = createMultipleRecords(2, 'aws:sqs');
+      const nonSqsRecords = createMultipleRecords(1, 'aws:sns');
+      const mixedEvent: SQSEvent = {
+        Records: [...sqsRecords, ...nonSqsRecords],
+      };
+
+      const mock = vi.fn(scaleUp);
+      mock.mockImplementation(() => Promise.resolve([]));
+      vi.mocked(scaleUp).mockImplementation(mock);
+
+      await scaleUpHandler(mixedEvent, context);
+      expect(scaleUp).toHaveBeenCalledWith(
+        expect.arrayContaining([
+          expect.objectContaining({ messageId: 'message-0' }),
+          expect.objectContaining({ messageId: 'message-1' }),
+        ]),
+      );
+      expect(scaleUp).not.toHaveBeenCalledWith(
+        expect.arrayContaining([expect.objectContaining({ messageId: 'message-2' })]),
+      );
+    });
+
+    it('Should sort messages by retry count', async () => {
+      const records = [
+        {
+          ...sqsRecord,
+          messageId: 'high-retry',
+          body: JSON.stringify({ ...body, retryCounter: 5 }),
+        },
+        {
+          ...sqsRecord,
+          messageId: 'low-retry',
+          body: JSON.stringify({ ...body, retryCounter: 1 }),
+        },
+        {
+          ...sqsRecord,
+          messageId: 'no-retry',
+          body: JSON.stringify({ ...body }),
+        },
+      ];
+      const multiRecordEvent: SQSEvent = { Records: records };
+
+      const mock = vi.fn(scaleUp);
+      mock.mockImplementation((messages) => {
+        // Verify messages are sorted by retry count (ascending)
+        expect(messages[0].messageId).toBe('no-retry');
+        expect(messages[1].messageId).toBe('low-retry');
+        expect(messages[2].messageId).toBe('high-retry');
+        return Promise.resolve([]);
+      });
+      vi.mocked(scaleUp).mockImplementation(mock);
+
+      await scaleUpHandler(multiRecordEvent, context);
+    });
+
+    it('Should return all failed messages when scaleUp throws non-ScaleError', async () => {
+      const records = createMultipleRecords(2);
+      const multiRecordEvent: SQSEvent = { Records: records };
+
+      const mock = vi.fn(scaleUp);
+      mock.mockImplementation(() => Promise.reject(new Error('Generic error')));
+      vi.mocked(scaleUp).mockImplementation(mock);
+
+      const result = await scaleUpHandler(multiRecordEvent, context);
+      expect(result).toEqual({ batchItemFailures: [] });
+    });
+
+    it('Should throw when scaleUp throws ScaleError', async () => {
+      const records = createMultipleRecords(2);
+      const multiRecordEvent: SQSEvent = { Records: records };
+
+      const error = new ScaleError('Critical scaling error');
+      const mock = vi.fn(scaleUp);
+      mock.mockImplementation(() => Promise.reject(error));
+      vi.mocked(scaleUp).mockImplementation(mock);
+
+      await expect(scaleUpHandler(multiRecordEvent, context)).rejects.toThrow(error);
    });
  });
-  const sqsEventMultipleRecords: SQSEvent = {
-    Records: sqsRecords,
-  };
-
-  await expect(scaleUpHandler(sqsEventMultipleRecords, context)).resolves.not.toThrow();
-
-  expect(logWarnSpy).toHaveBeenCalledWith(
-    expect.stringContaining(
-      'Event ignored, only one record at the time can be handled, ensure the lambda batch size is set to 1.',
-    ),
-  );
-}
+});
 
 describe('Test scale down lambda wrapper.', () => {
   it('Scaling down no error.', async () => {
diff --git a/lambdas/functions/control-plane/src/lambda.ts b/lambdas/functions/control-plane/src/lambda.ts
index 3e3ab90557..266fdfc7a1 100644
--- a/lambdas/functions/control-plane/src/lambda.ts
+++ b/lambdas/functions/control-plane/src/lambda.ts
@@ -1,34 +1,72 @@
 import middy from '@middy/core';
 import { logger, setContext } from '@aws-github-runner/aws-powertools-util';
 import { captureLambdaHandler, tracer } from '@aws-github-runner/aws-powertools-util';
-import { Context, SQSEvent } from 'aws-lambda';
+import { Context, type SQSBatchItemFailure, type SQSBatchResponse, SQSEvent } from 'aws-lambda';
 
 import { PoolEvent, adjust } from './pool/pool';
 import ScaleError from './scale-runners/ScaleError';
 import { scaleDown } from './scale-runners/scale-down';
-import { scaleUp } from './scale-runners/scale-up';
+import { type ActionRequestMessage, type ActionRequestMessageSQS, scaleUp } from './scale-runners/scale-up';
 import { SSMCleanupOptions, cleanSSMTokens } from './scale-runners/ssm-housekeeper';
 import { checkAndRetryJob } from './scale-runners/job-retry';
 
-export async function scaleUpHandler(event: SQSEvent, context: Context): Promise<void> {
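The body of the new `scaleUpHandler` is cut off in the hunk above. Purely as a reading aid, here is a minimal TypeScript sketch of a batch-aware handler that would satisfy the tests in `lambda.test.ts`. The parsing and sorting details are assumptions inferred from the tests, not the actual diff body; in particular, `ActionRequestMessageSQS` is assumed to be `ActionRequestMessage` plus the SQS `messageId`, and `scaleUp` is assumed to resolve with the IDs of messages that should be retried.

```typescript
// Hypothetical sketch only — reconstructed from the behavior the tests assert.
import { logger, setContext } from '@aws-github-runner/aws-powertools-util';
import { Context, SQSBatchItemFailure, SQSBatchResponse, SQSEvent } from 'aws-lambda';

import ScaleError from './scale-runners/ScaleError';
import { ActionRequestMessageSQS, scaleUp } from './scale-runners/scale-up';

export async function scaleUpHandler(event: SQSEvent, context: Context): Promise<SQSBatchResponse> {
  setContext(context, 'lambda.ts');

  // Only genuine SQS records are processed; other event sources are dropped.
  const messages: ActionRequestMessageSQS[] = event.Records.filter((r) => r.eventSource === 'aws:sqs')
    // Keep the SQS messageId with each parsed body so failures can be reported per item.
    .map((r) => ({ ...JSON.parse(r.body), messageId: r.messageId }))
    // Messages that have been retried the least go first (stable sort).
    .sort((a, b) => (a.retryCounter ?? 0) - (b.retryCounter ?? 0));

  try {
    const failedIds = await scaleUp(messages);
    // Only the listed messages are redelivered by SQS; the rest are deleted.
    const batchItemFailures: SQSBatchItemFailure[] = failedIds.map((id) => ({ itemIdentifier: id }));
    return { batchItemFailures };
  } catch (e) {
    if (e instanceof ScaleError) {
      // Capacity-style errors fail the whole invocation, so SQS retries the batch.
      throw e;
    }
    logger.warn(`Ignoring error: ${(e as Error).message}`);
    // Per the tests, other errors drop the batch without reporting item failures.
    return { batchItemFailures: [] };
  }
}
```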
diff --git a/modules/multi-runner/runners.tf b/modules/multi-runner/runners.tf
index 9f5d1bb456..e36ed7324f 100644
--- a/modules/multi-runner/runners.tf
+++ b/modules/multi-runner/runners.tf
@@ -57,28 +57,30 @@ module "runners" {
   metadata_options     = each.value.runner_config.runner_metadata_options
   credit_specification = each.value.runner_config.credit_specification
 
-  enable_runner_binaries_syncer    = each.value.runner_config.enable_runner_binaries_syncer
-  lambda_s3_bucket                 = var.lambda_s3_bucket
-  runners_lambda_s3_key            = var.runners_lambda_s3_key
-  runners_lambda_s3_object_version = var.runners_lambda_s3_object_version
-  lambda_runtime                   = var.lambda_runtime
-  lambda_architecture              = var.lambda_architecture
-  lambda_zip                       = var.runners_lambda_zip
-  lambda_scale_up_memory_size      = var.scale_up_lambda_memory_size
-  lambda_timeout_scale_up          = var.runners_scale_up_lambda_timeout
-  lambda_scale_down_memory_size    = var.scale_down_lambda_memory_size
-  lambda_timeout_scale_down        = var.runners_scale_down_lambda_timeout
-  lambda_subnet_ids                = var.lambda_subnet_ids
-  lambda_security_group_ids        = var.lambda_security_group_ids
-  lambda_tags                      = var.lambda_tags
-  tracing_config                   = var.tracing_config
-  logging_retention_in_days        = var.logging_retention_in_days
-  logging_kms_key_id               = var.logging_kms_key_id
-  enable_cloudwatch_agent          = each.value.runner_config.enable_cloudwatch_agent
-  cloudwatch_config                = try(coalesce(each.value.runner_config.cloudwatch_config, var.cloudwatch_config), null)
-  runner_log_files                 = each.value.runner_config.runner_log_files
-  runner_group_name                = each.value.runner_config.runner_group_name
-  runner_name_prefix               = each.value.runner_config.runner_name_prefix
+  enable_runner_binaries_syncer                                   = each.value.runner_config.enable_runner_binaries_syncer
+  lambda_s3_bucket                                                = var.lambda_s3_bucket
+  runners_lambda_s3_key                                           = var.runners_lambda_s3_key
+  runners_lambda_s3_object_version                                = var.runners_lambda_s3_object_version
+  lambda_runtime                                                  = var.lambda_runtime
+  lambda_architecture                                             = var.lambda_architecture
+  lambda_zip                                                      = var.runners_lambda_zip
+  lambda_scale_up_memory_size                                     = var.scale_up_lambda_memory_size
+  lambda_event_source_mapping_batch_size                          = var.lambda_event_source_mapping_batch_size
+  lambda_event_source_mapping_maximum_batching_window_in_seconds = var.lambda_event_source_mapping_maximum_batching_window_in_seconds
+  lambda_timeout_scale_up                                         = var.runners_scale_up_lambda_timeout
+  lambda_scale_down_memory_size                                   = var.scale_down_lambda_memory_size
+  lambda_timeout_scale_down                                       = var.runners_scale_down_lambda_timeout
+  lambda_subnet_ids                                               = var.lambda_subnet_ids
+  lambda_security_group_ids                                       = var.lambda_security_group_ids
+  lambda_tags                                                     = var.lambda_tags
+  tracing_config                                                  = var.tracing_config
+  logging_retention_in_days                                       = var.logging_retention_in_days
+  logging_kms_key_id                                              = var.logging_kms_key_id
+  enable_cloudwatch_agent                                         = each.value.runner_config.enable_cloudwatch_agent
+  cloudwatch_config                                               = try(coalesce(each.value.runner_config.cloudwatch_config, var.cloudwatch_config), null)
+  runner_log_files                                                = each.value.runner_config.runner_log_files
+  runner_group_name                                               = each.value.runner_config.runner_group_name
+  runner_name_prefix                                              = each.value.runner_config.runner_name_prefix
 
   scale_up_reserved_concurrent_executions = each.value.runner_config.scale_up_reserved_concurrent_executions
diff --git a/modules/multi-runner/variables.tf b/modules/multi-runner/variables.tf
index b138205459..301eabfc0b 100644
--- a/modules/multi-runner/variables.tf
+++ b/modules/multi-runner/variables.tf
@@ -714,3 +714,15 @@ variable "user_agent" {
   type    = string
   default = "github-aws-runners"
 }
+
+variable "lambda_event_source_mapping_batch_size" {
+  description = "Maximum number of records to pass to the lambda function in a single batch for the event source mapping. When not set, the AWS default of 10 events will be used."
+  type        = number
+  default     = 10
+}
+
+variable "lambda_event_source_mapping_maximum_batching_window_in_seconds" {
+  description = "Maximum amount of time to gather records before invoking the lambda function, in seconds. AWS requires this to be greater than 0 if batch_size is greater than 10. Defaults to 0."
+  type        = number
+  default     = 0
+}
diff --git a/modules/runners/README.md b/modules/runners/README.md
index f7dd7ecb88..9eed4ca1b1 100644
--- a/modules/runners/README.md
+++ b/modules/runners/README.md
@@ -173,6 +173,8 @@ yarn run dist
 | [key\_name](#input\_key\_name) | Key pair name | `string` | `null` | no |
 | [kms\_key\_arn](#input\_kms\_key\_arn) | Optional CMK Key ARN to be used for Parameter Store. | `string` | `null` | no |
 | [lambda\_architecture](#input\_lambda\_architecture) | AWS Lambda architecture. Lambda functions using Graviton processors ('arm64') tend to have better price/performance than 'x86\_64' functions. | `string` | `"arm64"` | no |
+| [lambda\_event\_source\_mapping\_batch\_size](#input\_lambda\_event\_source\_mapping\_batch\_size) | Maximum number of records to pass to the lambda function in a single batch for the event source mapping. When not set, the AWS default of 10 events will be used. | `number` | `10` | no |
+| [lambda\_event\_source\_mapping\_maximum\_batching\_window\_in\_seconds](#input\_lambda\_event\_source\_mapping\_maximum\_batching\_window\_in\_seconds) | Maximum amount of time to gather records before invoking the lambda function, in seconds. AWS requires this to be greater than 0 if batch\_size is greater than 10. Defaults to 0. | `number` | `0` | no |
 | [lambda\_runtime](#input\_lambda\_runtime) | AWS Lambda runtime. | `string` | `"nodejs22.x"` | no |
 | [lambda\_s3\_bucket](#input\_lambda\_s3\_bucket) | S3 bucket from which to specify lambda functions. This is an alternative to providing local files directly. | `string` | `null` | no |
 | [lambda\_scale\_down\_memory\_size](#input\_lambda\_scale\_down\_memory\_size) | Memory size limit in MB for scale down lambda. | `number` | `512` | no |
diff --git a/modules/runners/job-retry.tf b/modules/runners/job-retry.tf
index e51c3903d4..130992667f 100644
--- a/modules/runners/job-retry.tf
+++ b/modules/runners/job-retry.tf
@@ -3,30 +3,32 @@ locals {
   job_retry_enabled = var.job_retry != null && var.job_retry.enable ? true : false
 
   job_retry = {
-    prefix                      = var.prefix
-    tags                        = local.tags
-    aws_partition               = var.aws_partition
-    architecture                = var.lambda_architecture
-    runtime                     = var.lambda_runtime
-    security_group_ids          = var.lambda_security_group_ids
-    subnet_ids                  = var.lambda_subnet_ids
-    kms_key_arn                 = var.kms_key_arn
-    lambda_tags                 = var.lambda_tags
-    log_level                   = var.log_level
-    logging_kms_key_id          = var.logging_kms_key_id
-    logging_retention_in_days   = var.logging_retention_in_days
-    metrics                     = var.metrics
-    role_path                   = var.role_path
-    role_permissions_boundary   = var.role_permissions_boundary
-    s3_bucket                   = var.lambda_s3_bucket
-    s3_key                      = var.runners_lambda_s3_key
-    s3_object_version           = var.runners_lambda_s3_object_version
-    zip                         = var.lambda_zip
-    tracing_config              = var.tracing_config
-    github_app_parameters       = var.github_app_parameters
-    enable_organization_runners = var.enable_organization_runners
-    sqs_build_queue             = var.sqs_build_queue
-    ghes_url                    = var.ghes_url
+    prefix                                                          = var.prefix
+    tags                                                            = local.tags
+    aws_partition                                                   = var.aws_partition
+    architecture                                                    = var.lambda_architecture
+    runtime                                                         = var.lambda_runtime
+    security_group_ids                                              = var.lambda_security_group_ids
+    subnet_ids                                                      = var.lambda_subnet_ids
+    kms_key_arn                                                     = var.kms_key_arn
+    lambda_tags                                                     = var.lambda_tags
+    log_level                                                       = var.log_level
+    logging_kms_key_id                                              = var.logging_kms_key_id
+    logging_retention_in_days                                       = var.logging_retention_in_days
+    metrics                                                         = var.metrics
+    role_path                                                       = var.role_path
+    role_permissions_boundary                                       = var.role_permissions_boundary
+    s3_bucket                                                       = var.lambda_s3_bucket
+    s3_key                                                          = var.runners_lambda_s3_key
+    s3_object_version                                               = var.runners_lambda_s3_object_version
+    zip                                                             = var.lambda_zip
+    tracing_config                                                  = var.tracing_config
+    github_app_parameters                                           = var.github_app_parameters
+    enable_organization_runners                                     = var.enable_organization_runners
+    sqs_build_queue                                                 = var.sqs_build_queue
+    ghes_url                                                        = var.ghes_url
+    lambda_event_source_mapping_batch_size                          = var.lambda_event_source_mapping_batch_size
+    lambda_event_source_mapping_maximum_batching_window_in_seconds = var.lambda_event_source_mapping_maximum_batching_window_in_seconds
   }
 }
diff --git a/modules/runners/job-retry/README.md b/modules/runners/job-retry/README.md
index 5276db9d60..f2e078ac52 100644
--- a/modules/runners/job-retry/README.md
+++ b/modules/runners/job-retry/README.md
@@ -42,7 +42,7 @@ The module is an inner module and used by the runner module when the opt-in feat
 | Name | Description | Type | Default | Required |
 |------|-------------|------|---------|:--------:|
-| [config](#input\_config) | Configuration for the spot termination watcher lambda function. | <pre>object({<br>    aws_partition = optional(string, null)<br>    architecture = optional(string, null)<br>    enable_organization_runners = bool<br>    environment_variables = optional(map(string), {})<br>    ghes_url = optional(string, null)<br>    user_agent = optional(string, null)<br>    github_app_parameters = object({<br>      key_base64 = map(string)<br>      id = map(string)<br>    })<br>    kms_key_arn = optional(string, null)<br>    lambda_tags = optional(map(string), {})<br>    log_level = optional(string, null)<br>    logging_kms_key_id = optional(string, null)<br>    logging_retention_in_days = optional(number, null)<br>    memory_size = optional(number, null)<br>    metrics = optional(object({<br>      enable = optional(bool, false)<br>      namespace = optional(string, null)<br>      metric = optional(object({<br>        enable_github_app_rate_limit = optional(bool, true)<br>        enable_job_retry = optional(bool, true)<br>      }), {})<br>    }), {})<br>    prefix = optional(string, null)<br>    principals = optional(list(object({<br>      type = string<br>      identifiers = list(string)<br>    })), [])<br>    queue_encryption = optional(object({<br>      kms_data_key_reuse_period_seconds = optional(number, null)<br>      kms_master_key_id = optional(string, null)<br>      sqs_managed_sse_enabled = optional(bool, true)<br>    }), {})<br>    role_path = optional(string, null)<br>    role_permissions_boundary = optional(string, null)<br>    runtime = optional(string, null)<br>    security_group_ids = optional(list(string), [])<br>    subnet_ids = optional(list(string), [])<br>    s3_bucket = optional(string, null)<br>    s3_key = optional(string, null)<br>    s3_object_version = optional(string, null)<br>    sqs_build_queue = object({<br>      url = string<br>      arn = string<br>    })<br>    tags = optional(map(string), {})<br>    timeout = optional(number, 30)<br>    tracing_config = optional(object({<br>      mode = optional(string, null)<br>      capture_http_requests = optional(bool, false)<br>      capture_error = optional(bool, false)<br>    }), {})<br>    zip = optional(string, null)<br>  })</pre> | n/a | yes |
+| [config](#input\_config) | Configuration for the spot termination watcher lambda function. | <pre>object({<br>    aws_partition = optional(string, null)<br>    architecture = optional(string, null)<br>    enable_organization_runners = bool<br>    environment_variables = optional(map(string), {})<br>    ghes_url = optional(string, null)<br>    user_agent = optional(string, null)<br>    github_app_parameters = object({<br>      key_base64 = map(string)<br>      id = map(string)<br>    })<br>    kms_key_arn = optional(string, null)<br>    lambda_event_source_mapping_batch_size = optional(number, 10)<br>    lambda_event_source_mapping_maximum_batching_window_in_seconds = optional(number, 0)<br>    lambda_tags = optional(map(string), {})<br>    log_level = optional(string, null)<br>    logging_kms_key_id = optional(string, null)<br>    logging_retention_in_days = optional(number, null)<br>    memory_size = optional(number, null)<br>    metrics = optional(object({<br>      enable = optional(bool, false)<br>      namespace = optional(string, null)<br>      metric = optional(object({<br>        enable_github_app_rate_limit = optional(bool, true)<br>        enable_job_retry = optional(bool, true)<br>      }), {})<br>    }), {})<br>    prefix = optional(string, null)<br>    principals = optional(list(object({<br>      type = string<br>      identifiers = list(string)<br>    })), [])<br>    queue_encryption = optional(object({<br>      kms_data_key_reuse_period_seconds = optional(number, null)<br>      kms_master_key_id = optional(string, null)<br>      sqs_managed_sse_enabled = optional(bool, true)<br>    }), {})<br>    role_path = optional(string, null)<br>    role_permissions_boundary = optional(string, null)<br>    runtime = optional(string, null)<br>    security_group_ids = optional(list(string), [])<br>    subnet_ids = optional(list(string), [])<br>    s3_bucket = optional(string, null)<br>    s3_key = optional(string, null)<br>    s3_object_version = optional(string, null)<br>    sqs_build_queue = object({<br>      url = string<br>      arn = string<br>    })<br>    tags = optional(map(string), {})<br>    timeout = optional(number, 30)<br>    tracing_config = optional(object({<br>      mode = optional(string, null)<br>      capture_http_requests = optional(bool, false)<br>      capture_error = optional(bool, false)<br>    }), {})<br>    zip = optional(string, null)<br>  })</pre> | n/a | yes |
 
 ## Outputs
diff --git a/modules/runners/job-retry/main.tf b/modules/runners/job-retry/main.tf
index 9561c7db71..612c515f8c 100644
--- a/modules/runners/job-retry/main.tf
+++ b/modules/runners/job-retry/main.tf
@@ -44,9 +44,10 @@ module "job_retry" {
 }
 
 resource "aws_lambda_event_source_mapping" "job_retry" {
-  event_source_arn = aws_sqs_queue.job_retry_check_queue.arn
-  function_name    = module.job_retry.lambda.function.arn
-  batch_size       = 1
+  event_source_arn                   = aws_sqs_queue.job_retry_check_queue.arn
+  function_name                      = module.job_retry.lambda.function.arn
+  batch_size                         = var.config.lambda_event_source_mapping_batch_size
+  maximum_batching_window_in_seconds = var.config.lambda_event_source_mapping_maximum_batching_window_in_seconds
 }
 
 resource "aws_lambda_permission" "job_retry" {
diff --git a/modules/runners/job-retry/variables.tf b/modules/runners/job-retry/variables.tf
index 4a8fe19fbf..f40bec1ba7 100644
--- a/modules/runners/job-retry/variables.tf
+++ b/modules/runners/job-retry/variables.tf
@@ -11,6 +11,8 @@ variable "config" {
     'user_agent': Optional User-Agent header for GitHub API requests.
     'github_app_parameters': Parameter Store for GitHub App Parameters.
     'kms_key_arn': Optional CMK Key ARN instead of using the default AWS managed key.
+    `lambda_event_source_mapping_batch_size`: Maximum number of records to pass to the lambda function in a single batch for the event source mapping. When not set, the AWS default will be used.
+    `lambda_event_source_mapping_maximum_batching_window_in_seconds`: Maximum amount of time to gather records before invoking the lambda function, in seconds. AWS requires this to be greater than 0 if batch_size is greater than 10.
     `lambda_principals`: Add extra principals to the role created for execution of the lambda, e.g. for local testing.
     `lambda_tags`: Map of tags that will be added to created resources. By default resources will be tagged with name and environment.
     `log_level`: Logging level for lambda logging. Valid values are 'silly', 'trace', 'debug', 'info', 'warn', 'error', 'fatal'.
@@ -45,12 +47,14 @@ variable "config" {
       key_base64 = map(string)
       id         = map(string)
     })
-    kms_key_arn               = optional(string, null)
-    lambda_tags               = optional(map(string), {})
-    log_level                 = optional(string, null)
-    logging_kms_key_id        = optional(string, null)
-    logging_retention_in_days = optional(number, null)
-    memory_size               = optional(number, null)
+    kms_key_arn                                                     = optional(string, null)
+    lambda_event_source_mapping_batch_size                          = optional(number, 10)
+    lambda_event_source_mapping_maximum_batching_window_in_seconds = optional(number, 0)
+    lambda_tags                                                     = optional(map(string), {})
+    log_level                                                       = optional(string, null)
+    logging_kms_key_id                                              = optional(string, null)
+    logging_retention_in_days                                       = optional(number, null)
+    memory_size                                                     = optional(number, null)
     metrics = optional(object({
       enable    = optional(bool, false)
       namespace = optional(string, null)
diff --git a/modules/runners/scale-up.tf b/modules/runners/scale-up.tf
index ad96c496a4..fad37af288 100644
--- a/modules/runners/scale-up.tf
+++ b/modules/runners/scale-up.tf
@@ -87,9 +87,11 @@ resource "aws_cloudwatch_log_group" "scale_up" {
 }
 
 resource "aws_lambda_event_source_mapping" "scale_up" {
-  event_source_arn = var.sqs_build_queue.arn
-  function_name    = aws_lambda_function.scale_up.arn
-  batch_size       = 1
+  event_source_arn                   = var.sqs_build_queue.arn
+  function_name                      = aws_lambda_function.scale_up.arn
+  function_response_types            = ["ReportBatchItemFailures"]
+  batch_size                         = var.lambda_event_source_mapping_batch_size
+  maximum_batching_window_in_seconds = var.lambda_event_source_mapping_maximum_batching_window_in_seconds
 }
 
 resource "aws_lambda_permission" "scale_runners_lambda" {
diff --git a/modules/runners/variables.tf b/modules/runners/variables.tf
index f70e80b9cc..1960d51946 100644
--- a/modules/runners/variables.tf
+++ b/modules/runners/variables.tf
@@ -761,3 +761,23 @@ variable "user_agent" {
   type    = string
   default = null
 }
+
+variable "lambda_event_source_mapping_batch_size" {
+  description = "Maximum number of records to pass to the lambda function in a single batch for the event source mapping. When not set, the AWS default of 10 events will be used."
+  type        = number
+  default     = 10
+  validation {
+    condition     = var.lambda_event_source_mapping_batch_size >= 1 && var.lambda_event_source_mapping_batch_size <= 1000
+    error_message = "The batch size for the lambda event source mapping must be between 1 and 1000."
+  }
+}
+
+variable "lambda_event_source_mapping_maximum_batching_window_in_seconds" {
+  description = "Maximum amount of time to gather records before invoking the lambda function, in seconds. AWS requires this to be greater than 0 if batch_size is greater than 10. Defaults to 0."
+  type        = number
+  default     = 0
+  validation {
+    condition     = var.lambda_event_source_mapping_maximum_batching_window_in_seconds >= 0 && var.lambda_event_source_mapping_maximum_batching_window_in_seconds <= 300
+    error_message = "Maximum batching window must be between 0 and 300 seconds."
+  }
+}
diff --git a/variables.tf b/variables.tf
index 975aa19b1d..d066702256 100644
--- a/variables.tf
+++ b/variables.tf
@@ -1007,3 +1007,19 @@ variable "user_agent" {
   type    = string
   default = "github-aws-runners"
 }
+
+variable "lambda_event_source_mapping_batch_size" {
+  description = "Maximum number of records to pass to the lambda function in a single batch for the event source mapping. When not set, the AWS default of 10 events will be used."
+  type        = number
+  default     = 10
+}
+
+variable "lambda_event_source_mapping_maximum_batching_window_in_seconds" {
+  description = "Maximum amount of time to gather records before invoking the lambda function, in seconds. AWS requires this to be greater than 0 if batch_size is greater than 10. Defaults to 0."
+  type        = number
+  default     = 0
+  validation {
+    condition     = var.lambda_event_source_mapping_maximum_batching_window_in_seconds >= 0 && var.lambda_event_source_mapping_maximum_batching_window_in_seconds <= 300
+    error_message = "Maximum batching window must be between 0 and 300 seconds."
+  }
+}
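The `function_response_types = ["ReportBatchItemFailures"]` flag on the scale-up mapping above changes the delete semantics of the queue: on a successful invocation, Lambda deletes every record in the batch except those the handler reports back. A generic sketch of that contract (standard AWS partial-batch behavior, not code from this module):

```typescript
import { SQSBatchResponse } from 'aws-lambda';

// With ReportBatchItemFailures enabled, records whose messageId appears in
// batchItemFailures become visible on the queue again and are redelivered;
// all other records in the batch are deleted. An empty list acknowledges the
// whole batch, while a thrown error retries the whole batch.
function toBatchResponse(failedMessageIds: string[]): SQSBatchResponse {
  return {
    batchItemFailures: failedMessageIds.map((id) => ({ itemIdentifier: id })),
  };
}
```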