├── apply.sh
├── destroy.sh
├── upload_files.sh
├── upload_file.sh
├── download_rendered.sh
├── blender-lambda-producer
│   ├── get_frames.py
│   ├── Dockerfile
│   └── producer_function.py
├── main.tf
├── blender-lambda-consumer
│   ├── render_frame.py
│   ├── Dockerfile
│   └── consumer_function.py
├── render.sh
├── blender-lambda.tfvars
├── outputs.tf
├── .gitignore
├── LICENSE
├── consumer.tf
├── common.tf
├── README.md
├── producer.tf
├── vars.tf
└── .terraform.lock.hcl

/apply.sh:
--------------------------------------------------------------------------------
terraform apply -var-file=blender-lambda.tfvars
--------------------------------------------------------------------------------
/destroy.sh:
--------------------------------------------------------------------------------
set -e
terraform destroy -var-file=blender-lambda.tfvars
--------------------------------------------------------------------------------
/upload_files.sh:
--------------------------------------------------------------------------------
for file in "$@"
do
  ./upload_file.sh "$file"
done
--------------------------------------------------------------------------------
/upload_file.sh:
--------------------------------------------------------------------------------
FILENAME=$1
BUCKET=$(terraform output -raw lambda_bucket_name)

aws s3 cp "$FILENAME" "s3://$BUCKET/$FILENAME"
--------------------------------------------------------------------------------
/download_rendered.sh:
--------------------------------------------------------------------------------
BUCKET=$(terraform output -raw lambda_bucket_name)

aws s3 cp "s3://$BUCKET/" . --recursive --exclude "*" --include "rendered*"
--------------------------------------------------------------------------------
/blender-lambda-producer/get_frames.py:
--------------------------------------------------------------------------------
import bpy

# Print the scene's frame range; the producer parses it from Blender's stdout.
scene = bpy.context.scene
if scene:
    print('Scene found.')
    print(f"Frame range: {scene.frame_start}-{scene.frame_end}")
--------------------------------------------------------------------------------
/main.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 3.48.0"
    }
  }

  required_version = "~> 1.0"
}

provider "aws" {
  region = var.aws_region

  default_tags {
    tags = var.default_tags
  }
}
--------------------------------------------------------------------------------
/blender-lambda-consumer/render_frame.py:
--------------------------------------------------------------------------------
import sys
import bpy

# Everything after "--" on the Blender command line belongs to this script.
argv = sys.argv
argv = argv[argv.index("--") + 1:]

input_file = argv[0]
output_file = argv[1]
frame_number = int(argv[2])

bpy.ops.wm.open_mainfile(filepath=input_file, load_ui=False)

bpy.context.scene.frame_set(frame_number)
bpy.context.scene.render.filepath = output_file

bpy.ops.render.render(write_still=True)
--------------------------------------------------------------------------------
/blender-lambda-producer/Dockerfile:
--------------------------------------------------------------------------------
FROM nytimes/blender:3.1-cpu-ubuntu18.04

ARG FUNCTION_DIR="/home/app"
RUN mkdir -p $FUNCTION_DIR
WORKDIR ${FUNCTION_DIR}

RUN pip install boto3
RUN pip install awslambdaric --target ${FUNCTION_DIR}

# Run the Lambda runtime interface client with Blender's bundled Python.
ENTRYPOINT [ "/bin/3.1/python/bin/python3.10", "-m", "awslambdaric" ]
CMD [ "producer_function.handler" ]

COPY *.py ${FUNCTION_DIR}/
RUN chmod 755 $(find . -type d) && \
    chmod 644 $(find . -type f)
--------------------------------------------------------------------------------
/blender-lambda-consumer/Dockerfile:
--------------------------------------------------------------------------------
FROM nytimes/blender:3.1-cpu-ubuntu18.04

ARG FUNCTION_DIR="/home/app"
RUN mkdir -p $FUNCTION_DIR
WORKDIR ${FUNCTION_DIR}

RUN pip install boto3
RUN pip install awslambdaric --target ${FUNCTION_DIR}

# Run the Lambda runtime interface client with Blender's bundled Python.
ENTRYPOINT [ "/bin/3.1/python/bin/python3.10", "-m", "awslambdaric" ]
CMD [ "consumer_function.handler" ]

COPY *.py ${FUNCTION_DIR}/
RUN chmod 755 $(find . -type d) && \
    chmod 644 $(find . -type f)
--------------------------------------------------------------------------------
/render.sh:
--------------------------------------------------------------------------------
BLENDER_FILE=$1
shift
SUPPORT_FILES=$@

# Build a JSON array of the support file names.
SUPPORT_FILES_STRING="[ "
for SUPPORT_FILE in $SUPPORT_FILES
do
  SUPPORT_FILES_STRING="$SUPPORT_FILES_STRING\"$SUPPORT_FILE\","
done
SUPPORT_FILES_STRING="${SUPPORT_FILES_STRING%?} ]"

PUBLIC_URL=$(terraform output -raw public_url)
# Adjust the frame range to your scene, or remove frame_start/frame_end to use
# the range stored in the .blend file.
DATA='{ "file_name": "'$BLENDER_FILE'", "support_files": '$SUPPORT_FILES_STRING', "frame_start": 1, "frame_end": 165 }'

curl -s \
  -X POST \
  -H "Content-Type: application/json" \
  -d "$DATA" \
  "$PUBLIC_URL/render-job"
--------------------------------------------------------------------------------
/blender-lambda.tfvars:
--------------------------------------------------------------------------------
default_tags = {
  "project" = "blender-lambda"
}

producer_api_gateway_name = "blender-lambda-api"

producer_lambda_source_path = "./blender-lambda-producer"

producer_invocation_route_key = "POST /render-job"

producer_lambda_function_name = "blender-lambda-producer"

producer_ecr_repo = "blender-lambda-producer"

consumer_lambda_source_path = "./blender-lambda-consumer"

consumer_lambda_function_name = "blender-lambda-consumer"

consumer_ecr_repo = "blender-lambda-consumer"

queue_name = "blender-lambda-queue"
--------------------------------------------------------------------------------
/outputs.tf:
--------------------------------------------------------------------------------
output "lambda_bucket_name" {
  description = "Name of the S3 bucket that holds scene, support, and rendered files"
  value       = aws_s3_bucket.lambda_bucket.id
}

output "public_url" {
  description = "Base URL for API Gateway Stage"
  value       = aws_apigatewayv2_stage.lambda.invoke_url
}

output "consumer_image_url" {
  description = "Image URL for the consumer Lambda function"
  value       = aws_lambda_function.consumer_lambda.image_uri
}

output "producer_image_url" {
  description = "Image URL for the producer Lambda function"
  value       = aws_lambda_function.producer_lambda.image_uri
}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Local .terraform directories
**/.terraform/*

# .tfstate files
*.tfstate
*.tfstate.*

# Crash log files
crash.log

# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# passwords, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
#
*.tfvars

# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json

# Include override files you do wish to add to version control using negated pattern
#
# !example_override.tf

# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*

# Ignore CLI configuration files
.terraformrc
terraform.rc

# Temporary location
tmp/

# Python virtual environment
.venv/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2022 Alpha

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/consumer.tf:
--------------------------------------------------------------------------------
module "consumer_docker_image" {
  source = "terraform-aws-modules/lambda/aws//modules/docker-build"

  create_ecr_repo = true
  ecr_repo        = var.consumer_ecr_repo
  source_path     = abspath(var.consumer_lambda_source_path)
}

resource "aws_lambda_function" "consumer_lambda" {
  function_name = var.consumer_lambda_function_name

  package_type = "Image"
  image_uri    = module.consumer_docker_image.image_uri
  role         = aws_iam_role.lambda_exec.arn
  timeout      = var.consumer_timeout_seconds
  memory_size  = 3009

  environment {
    variables = {
      "QUEUE_NAME"     = aws_sqs_queue.queue.name,
      "S3_BUCKET_NAME" = aws_s3_bucket.lambda_bucket.id,
    }
  }
}

resource "aws_lambda_event_source_mapping" "event_source_mapping" {
  event_source_arn = aws_sqs_queue.queue.arn
  enabled          = true
  function_name    = aws_lambda_function.consumer_lambda.arn
  batch_size       = 1
}


resource "aws_cloudwatch_log_group" "consumer_lambda_log_group" {
  name = "/aws/lambda/${aws_lambda_function.consumer_lambda.function_name}"

  retention_in_days = 30
}
--------------------------------------------------------------------------------
/common.tf:
--------------------------------------------------------------------------------
resource "aws_s3_bucket" "lambda_bucket" {
  bucket        = var.lambda_bucket
  acl           = "private"
  force_destroy = true
}

resource "aws_iam_role" "lambda_exec" {
  name = "lambda_role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17",
    Statement = [{
      Action = "sts:AssumeRole",
      Effect = "Allow",
      Sid    = "",
      Principal = {
        Service = "lambda.amazonaws.com"
      }
    }]
  })
}

data "aws_iam_policy_document" "lambda_policy_document" {
  statement {
    sid = "LambdaPolicyQueueAccess"
    actions = [
      "sqs:ReceiveMessage",
      "sqs:DeleteMessage",
      "sqs:GetQueueAttributes",
      "sqs:SendMessage",
      "sqs:GetQueueUrl"
    ]
    resources = [
      aws_sqs_queue.queue.arn
    ]
  }

  statement {
    sid = "LambdaPolicyS3Access"
    actions = [
      "s3:GetObject",
      "s3:PutObject",
      "s3:DeleteObject",
      "s3:ListBucket",
      "s3:GetBucketLocation",
      "s3:GetBucketPolicy",
      "s3:GetBucketTagging",
      "s3:GetBucketVersioning",
      "s3:GetBucketWebsite",
      "s3:GetLifecycleConfiguration",
    ]
    resources = [
      aws_s3_bucket.lambda_bucket.arn,
      format("%s%s", aws_s3_bucket.lambda_bucket.arn, "/*")
    ]
  }
}

resource "aws_iam_policy" "lambda_policy" {
  name   = "lambda_policy"
  policy = data.aws_iam_policy_document.lambda_policy_document.json
}

resource "aws_iam_role_policy_attachment" "lambda_sqs_policy" {
  role       = aws_iam_role.lambda_exec.name
  policy_arn = aws_iam_policy.lambda_policy.arn
}

resource "aws_iam_role_policy_attachment" "lambda_basic_policy" {
  role       = aws_iam_role.lambda_exec.name
  policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
}

resource "aws_sqs_queue" "queue" {
  name = var.queue_name

  visibility_timeout_seconds = var.consumer_timeout_seconds
}
--------------------------------------------------------------------------------
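Note: common.tf above wires the producer and consumer together through the SQS queue and grants both functions access to the shared bucket. For reference, a single consumer run can be exercised without the producer by enqueueing one message by hand. This is an illustrative sketch, not part of the repository: the queue name matches blender-lambda.tfvars, and `scene.blend` is a placeholder for a file already uploaded to the bucket. The message body mirrors what producer_function.queue_render_jobs sends.

```sh
# Illustrative smoke test: enqueue one render job by hand.
QUEUE_URL=$(aws sqs get-queue-url --queue-name blender-lambda-queue --query QueueUrl --output text)
aws sqs send-message \
  --queue-url "$QUEUE_URL" \
  --message-body '{ "file_name": "scene.blend", "frame": 1, "support_files": [] }'
```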
/blender-lambda-consumer/consumer_function.py:
--------------------------------------------------------------------------------
import boto3
import json
import logging
import os
import sys

# from https://gist.github.com/niranjv/fb95e716151642e8ca553b0e38dd152e
logger = logging.getLogger()
for h in logger.handlers:
    logger.removeHandler(h)
h = logging.StreamHandler(sys.stdout)
FORMAT = '[%(levelname)s] %(message)s'
h.setFormatter(logging.Formatter(FORMAT))
logger.addHandler(h)
logger.setLevel(logging.INFO)

S3_BUCKET_NAME = os.environ['S3_BUCKET_NAME']
LOCAL_RENDER_FILE = '/tmp/render_file.blend'


def handler(event, context):
    try:
        # The event source mapping uses batch_size = 1 (see consumer.tf), so
        # there is exactly one record per invocation.
        received_body = event['Records'][0]['body']
        record = json.loads(received_body)

        file_name = record['file_name']
        frame = record['frame']
        support_files = record['support_files']

        logger.info(f'Received message for file: {file_name} and frame: {frame}')

        retrieve_files_from_s3(file_name, support_files)

        frame_str = str(frame).zfill(4)
        output_file = f'/tmp/rendered_{frame_str}.png'
        render_frame(frame, output_file)

        upload_file_to_s3(output_file)

        logger.info('Done.')
    except Exception as e:
        logger.exception(e)
        raise


def render_frame(frame, output_file):
    logger.info(f'Rendering frame: {frame}')

    # Fail loudly if Blender exits with an error, instead of failing later on upload.
    status = os.system(f"blender -b -P render_frame.py -- {LOCAL_RENDER_FILE} {output_file} {frame}")
    if status != 0:
        raise Exception(f'Blender render command failed (status {status}) for frame {frame}')

    logger.info(f'Rendering frame: {frame} done')


def retrieve_files_from_s3(file_name, support_files):
    logger.info(f'Retrieving file: {file_name} from S3 bucket: {S3_BUCKET_NAME}')

    s3 = boto3.resource('s3')
    bucket = s3.Bucket(S3_BUCKET_NAME)
    bucket.download_file(file_name, LOCAL_RENDER_FILE)

    logger.info(f'Retrieving file: {file_name} from S3 bucket: {S3_BUCKET_NAME} done')

    for file in support_files:
        logger.info(f'Retrieving file: {file} from S3 bucket: {S3_BUCKET_NAME}')

        bucket.download_file(file, f'/tmp/{file}')

        logger.info(f'Retrieving file: {file} from S3 bucket: {S3_BUCKET_NAME} done')


def upload_file_to_s3(file_name):
    logger.info(f'Uploading file: {file_name} to S3 bucket: {S3_BUCKET_NAME}')

    s3 = boto3.resource('s3')
    bucket = s3.Bucket(S3_BUCKET_NAME)
    bucket.upload_file(file_name, os.path.basename(file_name))

    logger.info(f'Uploading file: {file_name} to S3 bucket: {S3_BUCKET_NAME} done')
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Blender on Lambda

Render Blender scenes on the distributed serverless cloud!

This repository is based on the architecture laid out by [TAS-PC](https://github.com/AlphaGit/tas-pc).

This repository aids in the following tasks:

- Setting up cloud infrastructure in AWS for rendering Blender scenes
- Executing multiple concurrent jobs for rendering each frame
- Uploading and downloading scene and support files
- Removing infrastructure

## A few notes on the chosen architecture

The infrastructure is serverless, built on AWS Lambda functions. These are billed per use and covered by a monthly free tier.
AWS Lambda does not currently support GPU rendering, but it scales easily to many concurrent executions, which shortens total render time.

## How to use

1. Create an AWS account.
2. Download and install the AWS CLI, then configure your credentials (`aws configure`).
3. Download Terraform.
4. Clone this repository.
5. Create a `blender-lambda.tfvars` file for configuration. Use these values as a starting point, and modify them as needed.
```tf
default_tags = {
  "project" = "blender-lambda"
}

producer_api_gateway_name = "blender-lambda-api"

producer_lambda_source_path = "./blender-lambda-producer"

producer_invocation_route_key = "POST /render-job"

producer_lambda_function_name = "blender-lambda-producer"

producer_ecr_repo = "blender-lambda-producer"

consumer_lambda_source_path = "./blender-lambda-consumer"

consumer_lambda_function_name = "blender-lambda-consumer"

consumer_ecr_repo = "blender-lambda-consumer"

queue_name = "blender-lambda-queue"
```
6. Execute `./apply.sh` and accept the changes to create the infrastructure in your AWS account.
7. Execute `./upload_files.sh <files...>` to upload your files (the scene file and any supporting media files).
8. Execute `./render.sh <scene file> <support files...>` to start the render job.
9. Wait.
10. Execute `./download_rendered.sh` to download the results of the render.
11. Execute `./destroy.sh` to remove all the infrastructure from your AWS account.


## More information

This work is based on these previous investigations:

- [TAS-PC](https://blog.alphasmanifesto.com/2021/11/22/tas-pc/)
- [Rendering Blender Scenes in the cloud with AWS Lambda](https://blog.theodo.com/2021/08/blender-serverless-lambda/), by JR Beaudoin
- [Blender-docker](https://github.com/nytimes/rd-blender-docker) by the NYTimes Research Team
--------------------------------------------------------------------------------
/producer.tf:
--------------------------------------------------------------------------------
module "producer_docker_image" {
  source = "terraform-aws-modules/lambda/aws//modules/docker-build"

  create_ecr_repo = true
  ecr_repo        = var.producer_ecr_repo
  source_path     = abspath(var.producer_lambda_source_path)
}

resource "aws_lambda_function" "producer_lambda" {
  function_name = var.producer_lambda_function_name

  package_type = "Image"
  image_uri    = module.producer_docker_image.image_uri
  role         = aws_iam_role.lambda_exec.arn
  timeout      = 120

  environment {
    variables = {
      "QUEUE_NAME"     = aws_sqs_queue.queue.name,
      "S3_BUCKET_NAME" = aws_s3_bucket.lambda_bucket.id,
    }
  }
}

resource "aws_cloudwatch_log_group" "producer_lambda_log_group" {
  name = "/aws/lambda/${aws_lambda_function.producer_lambda.function_name}"

  retention_in_days = 30
}

resource "aws_apigatewayv2_api" "lambda" {
  name          = var.producer_api_gateway_name
  protocol_type = "HTTP"
}

resource "aws_apigatewayv2_stage" "lambda" {
  api_id = aws_apigatewayv2_api.lambda.id

  name        = var.producer_apigateway_stage_name
  auto_deploy = true

  access_log_settings {
    destination_arn = aws_cloudwatch_log_group.api_gw.arn

    format = jsonencode({
      requestId               = "$context.requestId",
      sourceIp                = "$context.identity.sourceIp",
      requestTime             = "$context.requestTime",
      protocol                = "$context.protocol",
      httpMethod              = "$context.httpMethod",
      resourcePath            = "$context.resourcePath",
      routeKey                = "$context.routeKey",
      status                  = "$context.status",
      responseLength          = "$context.responseLength",
      integrationErrorMessage = "$context.integrationErrorMessage",
    })
  }
}

resource "aws_apigatewayv2_integration" "queue" {
  api_id = aws_apigatewayv2_api.lambda.id

  integration_uri    = aws_lambda_function.producer_lambda.invoke_arn
  integration_type   = "AWS_PROXY"
  integration_method = "POST"
}

resource "aws_apigatewayv2_route" "queue" {
  api_id = aws_apigatewayv2_api.lambda.id

  route_key = var.producer_invocation_route_key
  target    = "integrations/${aws_apigatewayv2_integration.queue.id}"
}

resource "aws_cloudwatch_log_group" "api_gw" {
  name = "/aws/api_gw/${aws_apigatewayv2_api.lambda.name}"

  retention_in_days = 30
}

resource "aws_lambda_permission" "api_gw" {
  statement_id  = "AllowExecutionFromAPIGateway"
  action        = "lambda:InvokeFunction"
  function_name = aws_lambda_function.producer_lambda.function_name
  principal     = "apigateway.amazonaws.com"

  source_arn = "${aws_apigatewayv2_api.lambda.execution_arn}/*/*"
}
--------------------------------------------------------------------------------
/vars.tf:
--------------------------------------------------------------------------------
# variable "aws_account_id" {
#   description = "AWS Account ID"

#   type = string
# }

variable "aws_region" {
  description = "AWS region for all resources"

  type    = string
  default = "us-east-1"
}

variable "producer_ecr_repo" {
  description = "ECR repository name for the producer function"

  type = string
}

variable "consumer_ecr_repo" {
  description = "ECR repository name for the consumer function"

  type = string
}

variable "lambda_bucket" {
  description = "Bucket used to exchange scene, support, and rendered files"

  type    = string
  default = "temp-lambda-archive-bucket"
}

variable "default_tags" {
  type        = map(string)
  description = "Default tags to apply to all resources"
  default     = {}
}

variable "producer_lambda_function_name" {
  description = "Name of the producer lambda function"

  type    = string
  default = "producer-lambda-function"
}

variable "producer_api_gateway_name" {
  description = "Name of the producer api gateway"

  type    = string
  default = "producer-api-gateway"
}

variable "producer_lambda_source_path" {
  description = "Path to the producer lambda source"

  type    = string
  default = "./producer_function"
}

variable "producer_lambda_runtime" {
  description = "Runtime for the producer lambda"

  type    = string
  default = "python3.8"
}

variable "producer_lambda_handler" {
  description = "Handler for the producer lambda"

  type    = string
  default = "producer_function.lambda_handler"
}

variable "producer_apigateway_stage_name" {
  description = "Name of the API gateway stage for the producer lambda"

  type    = string
  default = "prod"
}

variable "producer_invocation_route_key" {
  description = "Route key for the producer lambda"

  type    = string
  default = "POST /queue"
}

"consumer_lambda_function_name" { 89 | description = "Name of the consumer lambda" 90 | 91 | type = string 92 | default = "consumer-lambda-function" 93 | } 94 | 95 | variable "consumer_lambda_runtime" { 96 | description = "Runtime for the consumer lambda" 97 | 98 | type = string 99 | default = "python3.8" 100 | } 101 | 102 | variable "consumer_lambda_handler" { 103 | description = "Handler for the consumer lambda" 104 | 105 | type = string 106 | default = "consumer_function.lambda_handler" 107 | } 108 | 109 | variable "consumer_lambda_source_path" { 110 | description = "Path to the consumer lambda source" 111 | 112 | type = string 113 | default = "./consumer_function" 114 | } 115 | 116 | variable "queue_name" { 117 | description = "Name of the queue" 118 | 119 | type = string 120 | default = "producer-consumer-queue" 121 | } 122 | 123 | variable "consumer_timeout_seconds" { 124 | description = "Timeout for the consumer lambda" 125 | 126 | type = number 127 | default = 900 128 | } -------------------------------------------------------------------------------- /.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/archive" { 5 | version = "2.2.0" 6 | constraints = "~> 2.2.0" 7 | hashes = [ 8 | "h1:2K5LQkuWRS2YN1/YoNaHn9MAzjuTX8Gaqy6i8Mbfv8Y=", 9 | "zh:06bd875932288f235c16e2237142b493c2c2b6aba0e82e8c85068332a8d2a29e", 10 | "zh:0c681b481372afcaefddacc7ccdf1d3bb3a0c0d4678a526bc8b02d0c331479bc", 11 | "zh:100fc5b3fc01ea463533d7bbfb01cb7113947a969a4ec12e27f5b2be49884d6c", 12 | "zh:55c0d7ddddbd0a46d57c51fcfa9b91f14eed081a45101dbfc7fd9d2278aa1403", 13 | "zh:73a5dd68379119167934c48afa1101b09abad2deb436cd5c446733e705869d6b", 14 | "zh:841fc4ac6dc3479981330974d44ad2341deada8a5ff9e3b1b4510702dfbdbed9", 15 | "zh:91be62c9b41edb137f7f835491183628d484e9d6efa82fcb75cfa538c92791c5", 16 | "zh:acd5f442bd88d67eb948b18dc2ed421c6c3faee62d3a12200e442bfff0aa7d8b", 17 | "zh:ad5720da5524641ad718a565694821be5f61f68f1c3c5d2cfa24426b8e774bef", 18 | "zh:e63f12ea938520b3f83634fc29da28d92eed5cfbc5cc8ca08281a6a9c36cca65", 19 | "zh:f6542918faa115df46474a36aabb4c3899650bea036b5f8a5e296be6f8f25767", 20 | ] 21 | } 22 | 23 | provider "registry.terraform.io/hashicorp/aws" { 24 | version = "3.48.0" 25 | constraints = "~> 3.48.0" 26 | hashes = [ 27 | "h1:HY/knz06L5OaxmLsuOevFA6PgF7eJKoQSZLlX2IqubU=", 28 | "zh:1de9f52bc5c254fc021a4fdb285fca5cf7665e9eda890ac24aa7af8469654cc9", 29 | "zh:2faf10c36dfaf6a97fb6a4c877ae9be61cb6bb81ee666f3455f156116b20c7a4", 30 | "zh:3dad064853e24c0854c3e47c67f9e77c11319e52f0f3525a2583db13a272af6d", 31 | "zh:56e9363542b5c745110b83f3904524669bb801f62db928d42860202ba3f48b51", 32 | "zh:591088a86f9c9826d1b6918964386ca04b3d4e521efea9a5f00152d134162664", 33 | "zh:baf5afc4b38b4bc9010123b1251fd7af8b68828fda22b67bd4e4f631bd19671a", 34 | "zh:d3963400ef625433ea7d1fbabb564ac0aafaabb67c138f4a2954f05813f4cbd5", 35 | "zh:d67856355bc746924bbdfcd4709afe9ebf8ccd5092fb10d6cb5fa1f19e6c2f43", 36 | "zh:db99519a33a12b4f5965bf7127ed92759242e694b5ab1680aab6345d0102a200", 37 | "zh:e44aea91718a15b96d903f60ac8cf5f913c6828c860035993c9bd1b872d84159", 38 | "zh:fc25605dafb055d9138d90a837f5ae9ee96bcd4f8a2a89c4180573f6ea8fda66", 39 | ] 40 | } 41 | 42 | provider "registry.terraform.io/kreuzwerker/docker" { 43 | version = "2.15.0" 44 | constraints = ">= 2.8.0" 45 | hashes = [ 46 | "h1:n8oH2KcSW5r6XnG15fXo2fknsKY6zbWFpzvFSTh7jqw=", 47 
| "zh:0241e5c7b66c14aa54e367dfe380fbde8388d3254cbe8a70717c12f71897e82b", 48 | "zh:0f162f0a01ffe9eec32e78dfc2a5b05a373230c41f041439efa3f4b68903fdcb", 49 | "zh:1c222c1425fbb0367154bcb8e4d87b19d6eae133fbb341f73102fa3b300f34bf", 50 | "zh:679206433e31d8fa69d95167af6d2cd762218e89811b735ee20bd8da19f97854", 51 | "zh:a16baab045bc7a709a9767816304cc5548aa2ee0b72c0eee49e3826e6a46a3fd", 52 | "zh:a29c4e304a6a7faf5b651a61a91a1aa2e837591cff049fbe1c747b6319e43956", 53 | "zh:bed7a69dbb9a024aecfac840ca2ac2f1527062f3d9c8c718e4e81d464b5ad329", 54 | "zh:c22aa10806de216b6aa0b36a2c1114a9fdaa5b47925aaad3333de3ce24cc52c9", 55 | "zh:d034295663d8a39035fd6fdf0488b72544b13f48acaee797af967343248676f8", 56 | "zh:d9001dfeac0db1799d8ab9d04be090522529baae0dba7f7e82b71f2168f05582", 57 | "zh:d9f3eb7ef8f256eb9148d72bd4a00e34f3be5570484598e26b603cbdc5eed923", 58 | "zh:ef573f1e0f000127fa9f569c8ee320f03ba1d302a6fbf9aac8300efa6fc6f4b2", 59 | "zh:ef7e246b4205202161b4119c10a1282f74243e029508184132731f9f6f6d9f4c", 60 | ] 61 | } 62 | -------------------------------------------------------------------------------- /blender-lambda-producer/producer_function.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple 2 | 3 | import json 4 | import os 5 | import subprocess 6 | import boto3 7 | import re 8 | import logging 9 | import sys 10 | 11 | # from https://gist.github.com/niranjv/fb95e716151642e8ca553b0e38dd152e 12 | logger = logging.getLogger() 13 | for h in logger.handlers: 14 | logger.removeHandler(h) 15 | h = logging.StreamHandler(sys.stdout) 16 | FORMAT = '[%(levelname)s] %(message)s' 17 | h.setFormatter(logging.Formatter(FORMAT)) 18 | logger.addHandler(h) 19 | logger.setLevel(logging.INFO) 20 | 21 | QUEUE_NAME = os.environ['QUEUE_NAME'] 22 | S3_BUCKET_NAME = os.environ['S3_BUCKET_NAME'] 23 | LOCAL_RENDER_FILE = '/tmp/render_file.blend' 24 | 25 | s3_bucket = boto3.resource('s3').Bucket(S3_BUCKET_NAME) 26 | sqs_queue = boto3.resource('sqs').get_queue_by_name(QueueName=QUEUE_NAME) 27 | 28 | def handler(event, context): 29 | logger.info('Starting producer lambda function') 30 | 31 | try: 32 | render_request = json.loads(event['body']) 33 | assert_request_is_valid(render_request) 34 | except Exception as exception: 35 | return get_response(status_code=400, body={ 'error': str(exception) }) 36 | 37 | try: 38 | file_name = render_request['file_name'] 39 | retrieve_file_from_s3(file_name) 40 | 41 | support_files = render_request['support_files'] if 'support_files' in render_request else [] 42 | for support_file in support_files: 43 | check_s3_file_exists(support_file) 44 | 45 | (frame_start, frame_end) = get_frame_range(render_request) 46 | 47 | queue_render_jobs(file_name, frame_start, frame_end, support_files) 48 | 49 | logger.info(f'Finished producing lambda function') 50 | 51 | return get_response(body={ 52 | 'file_name': file_name, 53 | 'jobs_queued': frame_end - frame_start + 1 54 | }) 55 | except Exception as exception: 56 | logger.exception(exception) 57 | return get_response(status_code=500, body={ 'error': str(exception) }) 58 | 59 | 60 | def get_frame_range(render_request: dict) -> Tuple[int, int]: 61 | if 'frame_start' in render_request and 'frame_end' in render_request: 62 | return (int(render_request['frame_start']), int(render_request['frame_end'])) 63 | 64 | logger.info(f'Getting frame range from {LOCAL_RENDER_FILE}') 65 | proc = subprocess.Popen(['blender', '-b', LOCAL_RENDER_FILE, '-P', 'get_frames.py'], stdout=subprocess.PIPE) 66 | (out, err) = proc.communicate() 
    logger.debug(f'get_frames output: {out}')
    logger.debug(f'get_frames error: {err}')

    # get_frames.py prints "Frame range: <start>-<end>"; extract it from Blender's stdout.
    matches = re.findall(r'Frame range: (\d+-\d+)', out.decode('utf-8'))
    if len(matches) == 0:
        raise Exception('No frame range found in file, output found: ' + out.decode('utf-8'))
    (file_frame_start, file_frame_end) = matches[0].split('-')

    if (not file_frame_start or not file_frame_end):
        raise Exception('Failed to get frame range from file.')

    return (int(file_frame_start), int(file_frame_end))


def queue_render_jobs(file_name, frame_start, frame_end, support_files):
    # One SQS message per frame; each message triggers one consumer invocation.
    for frame in range(frame_start, frame_end + 1):
        message = json.dumps({
            'file_name': file_name,
            'frame': frame,
            'support_files': support_files
        })
        logger.debug('Sending message to queue: ' + message)
        sqs_queue.send_message(MessageBody=message)


def check_s3_file_exists(file_name):
    try:
        s3_bucket.Object(file_name).load()
    except Exception as exception:
        raise Exception(f'File {file_name} does not exist in S3 bucket {S3_BUCKET_NAME}') from exception
    logger.info(f'File {file_name} exists in S3 bucket {S3_BUCKET_NAME}')


def retrieve_file_from_s3(file_name):
    logger.info(f'Retrieving file {file_name} from S3 bucket {S3_BUCKET_NAME} to {LOCAL_RENDER_FILE}')
    s3_bucket.download_file(file_name, LOCAL_RENDER_FILE)


def assert_request_is_valid(render_request: dict) -> None:
    logger.debug(f'Validating request {render_request}')

    if not isinstance(render_request, dict):
        raise TypeError(f"data is not valid JSON, instead is {str(type(render_request))}")

    if 'file_name' not in render_request:
        raise TypeError("'file_name' parameter is missing")

    if not isinstance(render_request['file_name'], str):
        raise TypeError("'file_name' must be a string")

    if 'frame_start' in render_request:
        if not isinstance(render_request['frame_start'], int):
            raise TypeError('frame_start must be an integer')

    if 'frame_end' in render_request:
        if not isinstance(render_request['frame_end'], int):
            raise TypeError('frame_end must be an integer')

    if 'frame_start' in render_request and 'frame_end' in render_request and render_request['frame_start'] > render_request['frame_end']:
        raise ValueError('frame_start must be less than or equal to frame_end')

    logger.debug('Request is valid.')


def get_response(status_code=200, body={}, headers={ 'Content-Type': 'application/json' }):
    return {
        'statusCode': status_code,
        'body': json.dumps(body),
        'headers': headers
    }
--------------------------------------------------------------------------------
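For reference, this is what a request to the `POST /render-job` endpoint (the one render.sh drives, README step 8) looks like. A minimal sketch, assuming the stack is deployed and the scene was already uploaded; `scene.blend` is a placeholder, and `frame_start`/`frame_end` may be omitted, in which case the producer reads the range from the .blend file itself:

```sh
# Illustrative request; scene.blend is a placeholder for your uploaded file.
curl -s \
  -X POST \
  -H "Content-Type: application/json" \
  -d '{ "file_name": "scene.blend", "support_files": [], "frame_start": 1, "frame_end": 10 }' \
  "$(terraform output -raw public_url)/render-job"
# Expected response shape (see get_response in producer_function.py):
# {"file_name": "scene.blend", "jobs_queued": 10}
```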