├── .cargo └── config.toml ├── .gitignore ├── .rustfmt.toml ├── Cargo.toml ├── LICENSE ├── README.md ├── deploy-proxy.sh ├── env-vars-aws.sh ├── env-vars-lambda.sh ├── img ├── emulator-launch.png ├── lambda-debugger-components.png ├── lambda-debugger-usecase.png └── sqs-queues.png ├── lambda-debugger ├── Cargo.toml ├── env-vars-emulator.sh ├── src │ ├── config.rs │ ├── handlers │ │ ├── lambda_error.rs │ │ ├── lambda_response.rs │ │ ├── mod.rs │ │ └── next_invocation.rs │ ├── main.rs │ ├── sqs.rs │ └── types.rs └── test-payload.json ├── proxy-lambda ├── Cargo.toml └── src │ └── main.rs └── test-lambda ├── Cargo.toml └── src └── main.rs /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [target.aarch64-unknown-linux-gnu] 2 | linker = "aarch64-linux-gnu-gcc" 3 | 4 | [target.aarch64-unknown-linux-musl] 5 | linker = "aarch64-linux-gnu-gcc" -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | *.zip 4 | -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | edition = "2021" 2 | # https://github.com/rust-lang/rustfmt/blob/master/Configurations.md#merge_imports 3 | # imports_granularity = "Crate" 4 | # https://github.com/rust-lang/rustfmt/blob/master/Configurations.md#max_width 5 | max_width = 120 -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = ["proxy-lambda", "lambda-debugger", "test-lambda"] 3 | 4 | resolver = "2" 5 | 6 | [workspace.dependencies] 7 | lambda_runtime = { version = "0.12", default-features = false } 8 | tokio = { version = "1.14" } 9 | serde = { version = "1", features = 
["derive"] } 10 | serde_json = "1" 11 | tracing = { version = "0.1", features = ["log"] } 12 | tracing-subscriber = { version = "0.3", features = ["env-filter"] } 13 | 14 | [profile.release] 15 | lto = true 16 | codegen-units = 1 17 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [2021] [Max Voskob] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # AWS Lambda Runtime Emulator for Local and Remote Debugging 2 | 3 | This emulator allows running Lambda functions locally with either a local payload from a file or a remote payload from AWS as if the local lambda was running there. 4 | 5 | ## Installation and usage 6 | 7 | ```bash 8 | cargo install lambda-debugger 9 | ``` 10 | 11 | ### Debugging with local payload 12 | 13 | Use this method for simple use cases where a single static payload is sufficient. 14 | 15 | 1. Save your payload into a file, e.g. save `{"command": "echo"}` into `test-payload.json` file 16 | 2. Start the emulator with the payload file name as its only param, e.g. `cargo lambda-debugger test-payload.json` 17 | 3. Add env vars printed by the emulator and start your lambda with `cargo run` in a separate terminal 18 | 19 | The lambda will connect to the emulator and receive the payload. 20 | You can re-run your lambda with the same payload as many times as needed. 
21 | 22 | ## Debugging with remote payload 23 | 24 | Use this method to get dynamic payload from other AWS services or when you need to send back a dynamic response, e.g. to process a request triggered by a user action on a website involving API Gateway as in the following diagram: 25 | 26 | ![function needed debugging](./img/lambda-debugger-usecase.png) 27 | 28 | __Remote debugging configuration__ 29 | 30 | This project provides the tools necessary to bring the AWS payload to your local machine in real-time, run the lambda and send back the response as if the local lambda was running on AWS. 31 | 32 | - _proxy-lambda_ forwards Lambda requests and responses between AWS and your development machine in real time 33 | - _lambda-debugger_ provides Lambda APIs to run a lambda function locally and exchange payloads with AWS 34 | 35 | ![function debugged locally](./img/lambda-debugger-components.png) 36 | 37 | ### Limitations 38 | 39 | This Lambda emulator does not provide the full runtime capabilities of AWS: 40 | 41 | * no environment constraints, e.g. memory or execution time 42 | * panics are not reported back to AWS 43 | * no concurrent request handling 44 | * no support for X-Trace or Extensions APIs 45 | * no stream responses 46 | * smaller maximum payload size 47 | 48 | ## Getting started with remote debugging 49 | 50 | - create _request_ and _response_ queues in SQS with IAM permissions 51 | - deploy _proxy-lambda_ in place of the function you need to debug 52 | - run the emulator locally as a binary or with `cargo run` 53 | - run your lambda locally with `cargo run` 54 | 55 | ### SQS configuration 56 | 57 | Create two SQS queues with an identical configuration: 58 | 59 | - `proxy_lambda_req` for requests to be sent from AWS to your local lambda under debugging. ___Required___. 60 | - `proxy_lambda_resp` if you want responses from your local lambda to be returned to the caller. _Optional_. 
61 | 62 | See _Advanced setup_ section for more info on how to customize queue names and other settings. 63 | 64 | Recommended queue settings: 65 | 66 | - **Queue type**: Standard 67 | - **Maximum message size**: 256 KB 68 | - **Default visibility timeout**: 10 Seconds 69 | - **Message retention period**: 1 Hour 70 | - **Receive message wait time**: 20 Seconds 71 | 72 | This IAM policy grants _proxy-lambda_ access to the queues. 73 | It assumes that you already have sufficient privileges to access Lambda and SQS from your local machine. 74 | 75 | ```json 76 | { 77 | "Version": "2012-10-17", 78 | "Statement": [ 79 | { 80 | "Effect": "Allow", 81 | "Principal": { 82 | "AWS": "arn:aws:iam::512295225992:role/lambda_basic" 83 | }, 84 | "Action": [ 85 | "sqs:DeleteMessage", 86 | "sqs:GetQueueAttributes", 87 | "sqs:ReceiveMessage", 88 | "sqs:SendMessage" 89 | ], 90 | "Resource": "arn:aws:sqs:us-east-1:512295225992:proxy_lambda_req" 91 | } 92 | ] 93 | } 94 | ``` 95 | #### Modifying the policy with your IDs 96 | 97 | You need to replace _Principal_ and _Resource_ IDs with your values before adding the above policy to your queues: 98 | 99 | - _Principal_ - is the IAM role your lambda assumes (check Lambda's Permissions Config tab in the AWS console to find the value) 100 | - _Resource_ - the ARN of the queue the policy is attached to (see the queue details page in the AWS console to find the value) 101 | 102 | Use different _Resource_ values for _request_ and _response_ queues: 103 | 104 | - `arn:aws:sqs:[your_region]:[your_aws_account]:proxy_lambda_req` for `proxy_lambda_req` queue 105 | - `arn:aws:sqs:[your_region]:[your_aws_account]:proxy_lambda_resp` for `proxy_lambda_resp` queue 106 | 107 | 108 | ### Building and deploying _proxy-lambda_ 109 | 110 | The _proxy-lambda_ function should be deployed to AWS in place of the function you want to debug. 
111 | 112 | Replace the following parts of the bash script below with your values before running it from the project root: 113 | - _target_ - the architecture of the lambda function on AWS, e.g. `x86_64-unknown-linux-gnu` 114 | - _region_ - the region of the lambda function, e.g. `us-east-1` 115 | - _name_ - the name of the lambda function you want to replace with the proxy, e.g. `my-lambda` 116 | 117 | ``` 118 | target=x86_64-unknown-linux-gnu 119 | region=us-east-1 120 | name=my-lambda 121 | 122 | cargo build --release --target $target 123 | cp ./target/$target/release/proxy-lambda ./bootstrap && zip proxy.zip bootstrap && rm bootstrap 124 | aws lambda update-function-code --region $region --function-name $name --zip-file fileb://proxy.zip 125 | ``` 126 | 127 | A deployed _proxy-lambda_ should return _OK_ or time out waiting for a response if you run it with a test event from the AWS console. 128 | Check CloudWatch logs for a detailed execution report. 129 | 130 | ### Debugging 131 | 132 | __Pre-requisites:__ 133 | - _proxy-lambda_ was deployed to AWS 134 | - SQS queues were created with the recommended access policies 135 | 136 | ![list of sqs queues](/img/sqs-queues.png) 137 | 138 | __Launching the local lambda:__ 139 | - run `cargo lambda-debugger` in a separate terminal 140 | - add environmental variables from the prompt printed by the emulator 141 | - start your lambda with `cargo run` in the same terminal where you added the env vars 142 | 143 | ![launch example](/img/emulator-launch.png) 144 | 145 | __Debugging:__ 146 | - trigger the event on AWS as part of your normal data flow, e.g. 
by a user action on a webpage 147 | - the emulator should display the lambda payload and forward it to your local lambda for processing 148 | - debug as needed 149 | 150 | __Success, failure and replay:__ 151 | 152 | - successful responses are sent back to the caller if the response queue is configured (`proxy_lambda_resp`) 153 | - panics or handler errors are not sent back to AWS 154 | - the same incoming SQS message is reused until the lambda completes successfully 155 | - _lambda-debugger_ deletes the request message from `proxy_lambda_req` queue when the local lambda completes successfully 156 | - _proxy-lambda_ deletes the response message from `proxy_lambda_resp` queue after forwarding it to the caller, e.g. to API Gateway 157 | - _proxy-lambda_ purges `proxy_lambda_resp` queue before sending a new request to `proxy_lambda_resp` 158 | - you have to purge `proxy_lambda_req` queue manually to delete stale requests 159 | 160 | If the local lambda fails, terminates or panics, you can make changes to its code and run it again to reuse the same incoming payload from the request queue. 161 | 162 | 163 | ## Advanced remote debugging setup 164 | 165 | ### Custom SQS queue names 166 | 167 | By default, _proxy-lambda_ and the local _lambda-debugger_ attempt to connect to `proxy_lambda_req` and `proxy_lambda_resp` queues in the same region. 168 | 169 | Provide these env vars to _proxy-lambda_ and _lambda-debugger_ if your queue names differ from the defaults: 170 | - `PROXY_LAMBDA_REQ_QUEUE_URL` - _request_ queue, e.g. https://sqs.us-east-1.amazonaws.com/512295225992/debug_request 171 | - `PROXY_LAMBDA_RESP_QUEUE_URL` - _response_ queue, e.g. https://sqs.us-east-1.amazonaws.com/512295225992/debug_response 172 | 173 | ### Late responses 174 | 175 | Debugging the local lambda may take longer than the AWS service is willing to wait. 
176 | For example, _proxy-lambda_ function can be configured to wait for up to 15 minutes, but the AWS API Gateway wait time is limited to 30 seconds. 177 | 178 | Assume that it took you 5 minutes to fix the lambda code and return the correct response. 179 | If _proxy-lambda_ was configured to wait for that long it would still forward the response to the API Gateway which timed out 4.5 min earlier. 180 | In that case, you may need to trigger another request for it to complete successfully end-to-end. 181 | 182 | ### Not waiting for responses from local lambda 183 | 184 | It may be inefficient to have _proxy-lambda_ waiting for a response from the local lambda because it takes too long or no response is necessary. 185 | Both _proxy-lambda_ and _lambda-debugger_ would not expect a response if the response queue is inaccessible. 186 | 187 | Option 1: delete _proxy_lambda_resp_ queue 188 | 189 | Option 2: add `PROXY_LAMBDA_RESP_QUEUE_URL` env var with no value to _proxy-lambda_ and _lambda-debugger_ 190 | 191 | Option 3: make _proxy_lambda_resp_ queue inaccessible by changing its IAM policy. 192 | E.g. change the resource name from the correct queue name `"Resource": "arn:aws:sqs:us-east-1:512295225992:proxy_lambda_resp"` to a non-existent name like this `"Resource": "arn:aws:sqs:us-east-1:512295225992:proxy_lambda_resp_BLOCKED"`. 193 | Both _proxy-lambda_ and _lambda-debugger_ treat the access error as a hint to not expect a response. 194 | 195 | ### Canceling long _proxy-lambda_ wait 196 | 197 | If your _proxy-lambda_ is configured to expect a long debugging time, e.g. 30 minutes, you may want to cancel the wait for a rerun. 198 | Since it is impossible to kill a running lambda instance on AWS, the easiest way to cancel the wait is to send a random message to `proxy_lambda_resp` queue via the AWS console. 199 | The waiting _proxy-lambda_ will forward it to the caller and become available for a new request. 
200 | 201 | ### Large payloads and data compression 202 | 203 | The size of the SQS payload is [limited to 262,144 bytes by SQS](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/quotas-messages.html) while [Lambda allows up to 6MB](https://docs.aws.amazon.com/lambda/latest/dg/gettingstarted-limits.html). 204 | _proxy-lambda_ and _lambda-debugger_ compress oversized payloads using [flate2 crate](https://crates.io/crates/flate2) and send them as an encoded Base58 string to get around that limitation. 205 | 206 | The data compression can take up to a minute in debug mode. It is significantly faster with release builds. 207 | 208 | ### Logging 209 | 210 | Both _proxy-lambda_ and _lambda-debugger_ use `RUST_LOG` env var to set the logging level and filters. 211 | If `RUST_LOG` is not present or is empty, both crates log at the _INFO_ level and suppress logging from their dependencies. 212 | See [https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#example-syntax] for more info. 
213 | 214 | Examples of `RUST_LOG` values: 215 | - `error` - log errors only from all crates and dependencies 216 | - `warn,lambda_debugger=info` - _INFO_ level for the _lambda-debugger_, _WARN_ level for everything else 217 | - `proxy=debug` - detailed logging in _proxy-lambda_ -------------------------------------------------------------------------------- /deploy-proxy.sh: -------------------------------------------------------------------------------- 1 | target=x86_64-unknown-linux-gnu 2 | region=us-east-1 3 | lambda=my-lambda 4 | crate=proxy-lambda 5 | 6 | cargo build --release --target $target 7 | cp ./target/$target/release/$crate ./bootstrap && zip proxy.zip bootstrap && rm bootstrap 8 | aws lambda update-function-code --region $region --function-name $lambda --zip-file fileb://proxy.zip 9 | 10 | # Available targets: 11 | # x86_64-unknown-linux-gnu 12 | # x86_64-unknown-linux-musl 13 | # aarch64-unknown-linux-gnu 14 | # aarch64-unknown-linux-musl -------------------------------------------------------------------------------- /env-vars-aws.sh: -------------------------------------------------------------------------------- 1 | # This file is a sample list of all environmental variables exported from AWS Lambda. 2 | # 3 | # To generate your own list: 4 | # 1. deploy proxy-lambda function to AWS 5 | # 2. run a test event in the console 6 | # 3. copy your list of environmental variables from the CloudWatch log 7 | # 8 | # This list contains more variables than what is returned by the AWS CLI when you deploy the function. 9 | # See env-lambda.sh for the subset of vars required to run the runtime emulator. 
10 | 11 | export AWS_DEFAULT_REGION=us-east-1 12 | export AWS_LAMBDA_FUNCTION_MEMORY_SIZE=128 13 | export AWS_LAMBDA_FUNCTION_NAME=lambda-debug-proxy 14 | export AWS_LAMBDA_FUNCTION_VERSION=$LATEST 15 | export AWS_LAMBDA_INITIALIZATION_TYPE=on-demand 16 | export AWS_LAMBDA_LOG_FORMAT=Text 17 | export AWS_LAMBDA_LOG_GROUP_NAME=/aws/lambda/lambda-debug-proxy 18 | export AWS_LAMBDA_LOG_STREAM_NAME=2024/06/04/lambda-debug-proxy[$LATEST]c026cde3732340049aff322b9cc3c19b 19 | export AWS_LAMBDA_RUNTIME_API=127.0.0.1:9001 # This is the default value. Change it only if it conflicts with your local setup 20 | export AWS_REGION=us-east-1 21 | export AWS_XRAY_CONTEXT_MISSING=LOG_ERROR 22 | export AWS_XRAY_DAEMON_ADDRESS=169.254.79.129:2000 23 | export PROXY_LAMBDA_REQ_QUEUE_URL=https://sqs.us-east-1.amazonaws.com/512295225992/proxy_lambda_req # Replace with your own queue URL (the a/c number will be different!) 24 | export PROXY_LAMBDA_RESP_QUEUE_URL=https://sqs.us-east-1.amazonaws.com/512295225992/proxy_lambda_resp # Replace with your own queue URL (the a/c number will be different!) 25 | export LAMBDA_RUNTIME_DIR=/var/runtime 26 | export LAMBDA_TASK_ROOT=/var/task 27 | export LANG=en_US.UTF-8 28 | export LD_LIBRARY_PATH=/lib64:/usr/lib64:/var/runtime:/var/runtime/lib:/var/task:/var/task/lib:/opt/lib 29 | export PATH=/usr/local/bin:/usr/bin/:/bin:/opt/bin 30 | export TZ=:UTC 31 | export _AWS_XRAY_DAEMON_ADDRESS=169.254.79.129 32 | export _AWS_XRAY_DAEMON_PORT=2000 33 | export _HANDLER=hello.handler -------------------------------------------------------------------------------- /env-vars-lambda.sh: -------------------------------------------------------------------------------- 1 | # These env vars are required by the lambda function you are testing locally 2 | 3 | # Run this script in the same terminal window as the lambda to set the vars before its launch 4 | # or set them globally by running this script at startup, e.g. in .bashrc. 
5 | 6 | # Replace the values, if needed 7 | export AWS_LAMBDA_FUNCTION_VERSION=$LATEST 8 | export AWS_LAMBDA_FUNCTION_MEMORY_SIZE=128 9 | export AWS_LAMBDA_FUNCTION_NAME=my-lambda 10 | 11 | # Leave the AWS default (127.0.0.1:9001) unless it conflicts with your local setup. 12 | # It tells the lambda function where the runtime emulator is. 13 | # This value must match the value in env-emulator.sh file. 14 | export AWS_LAMBDA_RUNTIME_API=127.0.0.1:9001 -------------------------------------------------------------------------------- /img/emulator-launch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rimutaka/lambda-debugger-runtime-emulator/96c1db3d2202fe8c9c343a7302e1b72b18aecbaf/img/emulator-launch.png -------------------------------------------------------------------------------- /img/lambda-debugger-components.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rimutaka/lambda-debugger-runtime-emulator/96c1db3d2202fe8c9c343a7302e1b72b18aecbaf/img/lambda-debugger-components.png -------------------------------------------------------------------------------- /img/lambda-debugger-usecase.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rimutaka/lambda-debugger-runtime-emulator/96c1db3d2202fe8c9c343a7302e1b72b18aecbaf/img/lambda-debugger-usecase.png -------------------------------------------------------------------------------- /img/sqs-queues.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rimutaka/lambda-debugger-runtime-emulator/96c1db3d2202fe8c9c343a7302e1b72b18aecbaf/img/sqs-queues.png -------------------------------------------------------------------------------- /lambda-debugger/Cargo.toml: -------------------------------------------------------------------------------- 1 | 
[package] 2 | name = "lambda-debugger" 3 | version = "0.2.1" 4 | authors = ["rimutaka "] 5 | edition = "2021" 6 | description = "AWS Lambda Runtime Emulator for local and remote debugging" 7 | license = "Apache-2.0" 8 | repository = "https://github.com/rimutaka/lambda-debugger-runtime-emulator" 9 | categories = ["web-programming::http-server"] 10 | keywords = ["AWS", "Lambda", "API"] 11 | readme = "../README.md" 12 | 13 | [lib] 14 | # the lib is needed to export types for other crates 15 | name = "runtime_emulator_types" 16 | path = "src/types.rs" 17 | 18 | [[bin]] 19 | name = "cargo-lambda-debugger" # this name has to have cargo- prefix for cargo to recognize it 20 | path = "src/main.rs" 21 | 22 | [dependencies] 23 | tokio = { version = "1.16", features = [ 24 | "macros", 25 | "io-util", 26 | "sync", 27 | "rt-multi-thread", 28 | ] } 29 | 30 | serde.workspace = true 31 | serde_json.workspace = true 32 | tracing.workspace = true 33 | tracing-subscriber.workspace = true 34 | lambda_runtime.workspace = true 35 | flate2 = "1.0" 36 | bs58 = "0.5" 37 | aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } 38 | aws-sdk-sqs = "1.27" 39 | aws-types = "1.3" 40 | hyper = { version = "1", features = ["full"] } 41 | http-body-util = "0.1" 42 | hyper-util = { version = "0.1", features = ["full"] } 43 | hex = "0.4.3" 44 | uuid = { version = "1.8", features = ["v4", "fast-rng", "macro-diagnostics"] } 45 | regex = "1.10.5" 46 | lazy_static = "1.5.0" 47 | async_once = "0.2.6" 48 | -------------------------------------------------------------------------------- /lambda-debugger/env-vars-emulator.sh: -------------------------------------------------------------------------------- 1 | # Run this script in the terminal windows for the runtime emulator 2 | # if you need to provide custom values for queue URLs or the runtime API address. 3 | # Set them globally by running it at the system startup, e.g. in .bashrc for repeated use. 
4 | 5 | # These queues are how the emulator communicates with the proxy lambda on AWS 6 | # Replace with your own queue URLs if they are different from the default 7 | export PROXY_LAMBDA_REQ_QUEUE_URL=https://sqs.us-east-1.amazonaws.com/[your_aws_account]/proxy_lambda_req 8 | # The response queue can be omitted if you don't need to forward responses to the caller 9 | export PROXY_LAMBDA_RESP_QUEUE_URL=https://sqs.us-east-1.amazonaws.com/[your_aws_account]/proxy_lambda_resp 10 | 11 | # Leave the AWS default (127.0.0.1:9001) unless you have to change it 12 | # It tells the lambda function where the runtime emulator is 13 | export AWS_LAMBDA_RUNTIME_API=127.0.0.1:9001 14 | 15 | -------------------------------------------------------------------------------- /lambda-debugger/src/config.rs: -------------------------------------------------------------------------------- 1 | use crate::sqs::get_default_queues; 2 | use core::net::SocketAddrV4; 3 | use std::env::{args, var}; 4 | use std::net::Ipv4Addr; 5 | use std::str::FromStr; 6 | use tracing::{debug, info, warn}; 7 | 8 | const REQUIRED_ENV_VARS: &str = "export AWS_LAMBDA_FUNCTION_VERSION=$LATEST && export AWS_LAMBDA_FUNCTION_MEMORY_SIZE=128 && export AWS_LAMBDA_FUNCTION_NAME=my-lambda && export AWS_LAMBDA_RUNTIME_API=127.0.0.1:9001"; 9 | 10 | /// Payloads come from a local file, responses are not sent anywhere 11 | pub(crate) struct LocalConfig { 12 | /// Decoded payload from the local file. Can be anything as long as it's UTF-8 13 | pub payload: String, 14 | /// File name from which the payload was read, as provided in the param 15 | pub file_name: String, 16 | } 17 | 18 | /// Payloads come from SQS and may be sent back to SQS 19 | pub(crate) struct RemoteConfig { 20 | /// E.g. https://sqs.us-east-1.amazonaws.com/512295225992/proxy_lambda-req 21 | pub request_queue_url: String, 22 | /// E.g. https://sqs.us-east-1.amazonaws.com/512295225992/proxy-lambda-resp. 23 | /// No response is set if this property is None. 
24 | pub response_queue_url: Option, 25 | } 26 | 27 | /// A concrete type for either remote or local source of payloads 28 | pub(crate) enum PayloadSources { 29 | Local(LocalConfig), 30 | Remote(RemoteConfig), 31 | } 32 | 33 | pub(crate) struct Config { 34 | /// E.g. 127.0.0.1:9001 35 | pub lambda_api_listener: SocketAddrV4, 36 | /// Source and destination of request and response payloads 37 | pub sources: PayloadSources, 38 | } 39 | 40 | impl Config { 41 | /// Creates a new Config instance from environment variables and defaults. 42 | /// Uses default values where possible. 43 | /// Panics if the required environment variables are not set. 44 | pub async fn from_env() -> Self { 45 | // 127.0.0.1:9001 is the default endpoint used on AWS 46 | let listener_ip_str = var("AWS_LAMBDA_RUNTIME_API").unwrap_or_else(|_e| "127.0.0.1:9001".to_string()); 47 | 48 | let lambda_api_listener = match listener_ip_str.split_once(':') { 49 | Some((ip, port)) => { 50 | let listener_ip = std::net::Ipv4Addr::from_str(ip).expect( 51 | "Invalid IP address in AWS_LAMBDA_RUNTIME_API env var. Must be a valid IP4, e.g. 127.0.0.1", 52 | ); 53 | let listener_port = port.parse::().expect( 54 | "Invalid port number in AWS_LAMBDA_RUNTIME_API env var. Must be a valid port number, e.g. 
9001", 55 | ); 56 | SocketAddrV4::new(listener_ip, listener_port) 57 | } 58 | None => SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 9001), 59 | }; 60 | 61 | // attempt to extract payload from a local file if the file name is provided in the command line arguments 62 | // alternatively try to find remote queues 63 | // exit if no sources are set 64 | let sources = match get_local_payload() { 65 | Some(local_config) => { 66 | info!( 67 | "Listening on http://{}\n- payload from: {}\n", 68 | lambda_api_listener, local_config.file_name 69 | ); 70 | 71 | PayloadSources::Local(local_config) 72 | } 73 | None => match get_queues().await { 74 | Some(remote_config) => { 75 | info!( 76 | "Listening on http://{}\n- request queue: {}\n- response queue: {}\n", 77 | lambda_api_listener, 78 | remote_config.request_queue_url, 79 | remote_config.response_queue_url.clone().unwrap_or_else(String::new), 80 | ); 81 | 82 | PayloadSources::Remote(remote_config) 83 | } 84 | None => { 85 | panic!("No payload source is set.\nAdd payload file name as a param for local debugging or create request / response queues for remote debugging.\nSee ReadMe for more info."); 86 | } 87 | }, 88 | }; 89 | warn!("Add required env vars and start the lambda:\n{}\n", REQUIRED_ENV_VARS); 90 | 91 | Self { 92 | lambda_api_listener, 93 | sources, 94 | } 95 | } 96 | 97 | /// A shortcut for unwrapping the remote config. 98 | /// Panics if the config is not RemoteConfig. 99 | pub(crate) fn remote_config(&self) -> &RemoteConfig { 100 | // get the request queue URL from deep inside the config 101 | match &self.sources { 102 | PayloadSources::Remote(remote_config) => remote_config, 103 | _ => panic!("Invalid config: expected RemoteConfig. It's a bug."), 104 | } 105 | } 106 | } 107 | 108 | /// Returns URLs of the request and response queues, if they exist. 109 | /// Reads values from the environment variables or uses the defaults. 110 | /// Does not panic. 
111 | async fn get_queues() -> Option { 112 | // queue names from env vars have higher priority than the defaults 113 | let request_queue_url = var("PROXY_LAMBDA_REQ_QUEUE_URL").ok(); 114 | let response_queue_url = var("LAMBDA_PROXY_RESP_QUEUE_URL").ok(); 115 | 116 | // only get the default queue names if the env vars are not set because the call is expensive (SQS List Queues) 117 | let (default_req_queue, default_resp_queue) = if request_queue_url.is_none() || response_queue_url.is_none() { 118 | get_default_queues().await 119 | } else { 120 | (None, None) 121 | }; 122 | 123 | // choose between default and env var queues for request - at least one is required 124 | let request_queue_url = match request_queue_url { 125 | Some(v) => v, 126 | None => match default_req_queue { 127 | Some(v) => v, 128 | None => { 129 | return None; 130 | } 131 | }, 132 | }; 133 | 134 | // the response queue is optional 135 | let response_queue_url = match response_queue_url { 136 | Some(v) => Some(v), 137 | None => default_resp_queue, // this may also be None 138 | }; 139 | 140 | Some(RemoteConfig { 141 | request_queue_url, 142 | response_queue_url, 143 | }) 144 | } 145 | 146 | /// Extracts the payload from a local file if the file name is provided in the command line arguments. 147 | /// Panics if the payload cannot be read. 
148 | fn get_local_payload() -> Option { 149 | // the number of arguments depends on if this is a cargo command or a standalone executable 150 | // calculate where the params of the command are located inside the argument collection 151 | let param_idx = args().next().map_or_else( 152 | || 0, // this an impossible scenario because the very first argument is always the name of the executable 153 | |v| { 154 | if v.ends_with( 155 | &args() 156 | .nth(1) 157 | .map_or_else(|| "###".to_string(), |v| format!("cargo-{v}")), 158 | ) { 159 | 2 // invoked as a cargo command: `/home/mx/.cargo/bin/cargo-lambda-debugger lambda-debugger` 160 | } else { 161 | 1 // invoked as a standalone binary: `/home/mx/projects/gh-forks/lambda-runtime-emulator/target/debug/cargo-lambda-debugger` 162 | } 163 | }, 164 | ); 165 | debug!( 166 | "Param: {param_idx}, args: {}", 167 | std::env::args().collect::>().join(" ") 168 | ); 169 | 170 | // attempt to extract payload from a local file if the file name is provided in the command line arguments 171 | if let Some(payload_file) = args().nth(param_idx) { 172 | // cargo help lambda-debugger is equivalent to `/home/mx/.cargo/bin/cargo-lambda-debugger lambda-debugger --help` 173 | if &payload_file == "--help" { 174 | println!("AWS Lambda environment emulator for local and remote debugging."); 175 | println!("1. run `cargo lambda-debugger`"); 176 | println!("2. copy the env vars printed by the emulator"); 177 | println!("3. set the env vars in a separate terminal and start your lambda there with `cargo run`"); 178 | println!(); 179 | println!("With local payload: cargo lambda-debugger [payload_file], e.g. 
lambda_payload.json"); 180 | println!("With payload from AWS: cargo lambda-debugger"); 181 | println!(); 182 | println!("See https://github.com/rimutaka/lambda-debugger-runtime-emulator for more info."); 183 | 184 | std::process::exit(0); 185 | } 186 | 187 | // read the payload from the file 188 | match std::fs::read_to_string(payload_file.clone()) { 189 | Ok(payload) => Some(LocalConfig { 190 | payload, 191 | file_name: payload_file, 192 | }), 193 | 194 | // there is no point proceeding if the payload cannot be read 195 | Err(e) => { 196 | panic!("Failed to read payload from {}\n{:?}", payload_file, e) 197 | } 198 | } 199 | } else { 200 | None 201 | } 202 | } 203 | -------------------------------------------------------------------------------- /lambda-debugger/src/handlers/lambda_error.rs: -------------------------------------------------------------------------------- 1 | use super::{empty, BLOCK_NEXT_INVOCATION}; 2 | use http_body_util::{combinators::BoxBody, BodyExt}; 3 | use hyper::body::Bytes; 4 | use hyper::Error; 5 | use hyper::{Request, Response}; 6 | use tracing::{debug, error, info}; 7 | 8 | pub(crate) async fn handler(req: Request) -> Response> { 9 | // Initialization error (https://docs.aws.amazon.com/lambda/latest/dg/runtimes-api.html#runtimes-api-initerror) and 10 | // Invocation error (https://docs.aws.amazon.com/lambda/latest/dg/runtimes-api.html#runtimes-api-invokeerror) 11 | // are rolled together into a single handler because it is not clear how to handle errors 12 | // and if the error should be propagated upstream 13 | let resp = match req.into_body().collect().await { 14 | Ok(v) => v.to_bytes(), 15 | Err(e) => panic!("Failed to read lambda response: {:?}", e), 16 | }; 17 | 18 | match String::from_utf8(resp.as_ref().to_vec()) { 19 | Ok(v) => { 20 | info!("Lambda error: {v}"); 21 | } 22 | Err(e) => { 23 | error!( 24 | "Non-UTF-8 error response from Lambda. 
{:?}\n{}", 25 | e, 26 | hex::encode(resp.as_ref()) 27 | ); 28 | } 29 | } 30 | 31 | // block the next invocation to prevent an infinite loop of reruns 32 | if let Ok(mut w) = BLOCK_NEXT_INVOCATION.write() { 33 | debug!("Blocking the next invocation"); 34 | *w = true; 35 | } else { 36 | error!("Write deadlock on BLOCK_NEXT_INVOCATION. It's a bug"); 37 | } 38 | 39 | // lambda allows for more informative error responses, but this may be enough for now 40 | Response::builder() 41 | .status(hyper::StatusCode::INTERNAL_SERVER_ERROR) 42 | .body(empty()) 43 | .expect("Failed to create a response") 44 | } 45 | -------------------------------------------------------------------------------- /lambda-debugger/src/handlers/lambda_response.rs: -------------------------------------------------------------------------------- 1 | use super::{empty, BLOCK_NEXT_INVOCATION, LOCAL_REQUEST_ID}; 2 | use crate::sqs; 3 | use http_body_util::{combinators::BoxBody, BodyExt}; 4 | use hyper::body::Bytes; 5 | use hyper::Error; 6 | use hyper::Request; 7 | use hyper::Response; 8 | use regex::Regex; 9 | use std::sync::OnceLock; 10 | use tracing::{debug, error, info}; 11 | 12 | /// Contains compiled regex for extracting the receipt handle from the URL. 13 | static RECEIPT_REGEX: OnceLock = OnceLock::new(); 14 | 15 | /// Handles an invocation response the local lambda when it successfully completed processing. 16 | /// We forward the response to the SQS queue where it is picked up by the remote proxy lambda 17 | /// that forwards it to the original caller, e.g. API Gateway. 18 | /// See https://docs.aws.amazon.com/lambda/latest/dg/runtimes-api.html#runtimes-api-response 19 | /// 20 | /// Lambda invocations are async in nature - the lambda picks up an invocation as a response from the runtime, 21 | /// does the processing and then sends another request to the runtime with the invocation/request ID in the URL. 
22 | pub(crate) async fn handler(req: Request) -> Response> { 23 | // The regex extracts the receipt handle from the path, e.g. /runtime/invocation/[aws-req-id]/response 24 | // where the request ID in the URL is the receipt handle for SQS - it is not the actual lambda request ID. 25 | // We need to store the receipt handle somewhere and placing it into the request-id param seems like an easy way to do it 26 | // because the local lambda will return it with the response. 27 | // The receipt handle can be a long string with /, - and other non-alphanumeric characters. 28 | 29 | let regex = RECEIPT_REGEX.get_or_init(|| { 30 | Regex::new(r"/runtime/invocation/(.+)/response").expect("Invalid response URL regex. It's a bug.") 31 | }); 32 | let receipt_handle = regex 33 | .captures(req.uri().path()) 34 | .unwrap_or_else(|| panic!("URL parsing regex failed on: {:?}. It' a bug", req.uri())) 35 | .get(1) 36 | .unwrap_or_else(|| { 37 | panic!( 38 | "Request URL does not conform to /runtime/invocation/AwsRequestId/response: {:?}", 39 | req.uri() 40 | ) 41 | }) 42 | .as_str() 43 | .to_owned(); 44 | 45 | // convert the lambda response to bytes 46 | let response = match req.into_body().collect().await { 47 | Ok(v) => v.to_bytes(), 48 | Err(e) => panic!("Failed to read lambda response: {:?}", e), 49 | }; 50 | 51 | let sqs_payload = match String::from_utf8(response.as_ref().to_vec()) { 52 | Ok(v) => v, 53 | Err(e) => { 54 | panic!( 55 | "Non-UTF-8 response from Lambda. {:?}\n{}", 56 | e, 57 | hex::encode(response.as_ref()) 58 | ); 59 | } 60 | }; 61 | 62 | info!("Lambda response: {sqs_payload}"); 63 | 64 | // only send responses back to SQS if the request came from SQS 65 | if receipt_handle == LOCAL_REQUEST_ID { 66 | // block the next invocation to prevent an infinite loop of reruns 67 | if let Ok(mut w) = BLOCK_NEXT_INVOCATION.write() { 68 | debug!("Blocking the next invocation"); 69 | *w = true; 70 | } else { 71 | error!("Write deadlock on BLOCK_NEXT_INVOCATION. 
It's a bug"); 72 | } 73 | } else { 74 | sqs::send_output(sqs_payload, receipt_handle).await; 75 | } 76 | 77 | Response::builder() 78 | .status(hyper::StatusCode::OK) 79 | .body(empty()) 80 | .expect("Failed to create a response") 81 | } 82 | -------------------------------------------------------------------------------- /lambda-debugger/src/handlers/mod.rs: -------------------------------------------------------------------------------- 1 | use http_body_util::{combinators::BoxBody, BodyExt, Empty, Full}; 2 | use hyper::body::Bytes; 3 | use std::sync::RwLock; 4 | 5 | pub(crate) mod lambda_error; 6 | pub(crate) mod lambda_response; 7 | pub(crate) mod next_invocation; 8 | 9 | /// A request ID substitute for local file payloads. 10 | /// No SQS responses are sent back to AWS for this request ID. 11 | pub(crate) const LOCAL_REQUEST_ID: &str = "local-request-id"; 12 | 13 | /// Is set to TRUE if the next invocation will be using the same payload resulting 14 | /// in an infinite loop. It happens with SUCCESS responses for local payloads and all ERROR responses. 15 | /// It is set while processing the response (success or error). 16 | /// Once an invocation is blocked, it is reset to FALSE to let the next invocation can go ahead. 17 | pub(crate) static BLOCK_NEXT_INVOCATION: RwLock = RwLock::new(false); 18 | 19 | /// Returns an empty response body. 20 | pub(crate) fn empty() -> BoxBody { 21 | Empty::::new().map_err(|never| match never {}).boxed() 22 | } 23 | 24 | /// Returns an response body with contents of `chunk` which can be some type convertible into Bytes, e.g. &str. 
25 | pub(crate) fn full>(chunk: T) -> BoxBody { 26 | Full::new(chunk.into()).map_err(|never| match never {}).boxed() 27 | } 28 | -------------------------------------------------------------------------------- /lambda-debugger/src/handlers/next_invocation.rs: -------------------------------------------------------------------------------- 1 | use super::{full, BLOCK_NEXT_INVOCATION, LOCAL_REQUEST_ID}; 2 | use crate::config::PayloadSources; 3 | use crate::sqs; 4 | use crate::CONFIG; 5 | use http_body_util::combinators::BoxBody; 6 | use hyper::body::Bytes; 7 | use hyper::Error; 8 | use hyper::Response; 9 | use tokio::time::{sleep, Duration}; 10 | use tracing::{error, info, warn}; 11 | 12 | /// Handles _next invocation_ request from the local lambda. 13 | /// It blocks on SQS and waits indefinitely for the next SQS message to arrive. 14 | /// The first message in the queue is passed back onto the local lambda. 15 | /// See https://docs.aws.amazon.com/lambda/latest/dg/runtimes-api.html#runtimes-api-next 16 | pub(crate) async fn handler() -> Response> { 17 | // check if the current invocation is a re-run and should be blocked 18 | block_if_rerun().await; 19 | 20 | // check if there is a payload file name in the command line arguments 21 | let config = CONFIG.get().await; 22 | 23 | // return local payload from the file if was provided 24 | if let PayloadSources::Local(local_config) = &config.sources { 25 | info!("Lambda request: sending payload from file"); 26 | 27 | return Response::builder() 28 | .status(hyper::StatusCode::OK) 29 | .header("lambda-runtime-aws-request-id", LOCAL_REQUEST_ID) 30 | .header("lambda-runtime-deadline-ms", "2035313041000") // 2034 31 | .header("lambda-runtime-invoked-function-arn", "from-local-payload") 32 | .header( 33 | "lambda-runtime-trace-id", 34 | "Root=0-00000000-000000000000000000000000;Parent=0000000000000000;Sampled=0;Lineage=00000000:0", 35 | ) 36 | .body(full(local_config.payload.clone())) 37 | .expect("Failed to create a 
response"); 38 | }; 39 | 40 | // get the next SQS message or wait for it to arrive 41 | // this call will block until a message is available 42 | let sqs_message = sqs::get_input().await; 43 | 44 | info!("Lambda request:\n{}", sqs_message.payload); 45 | 46 | Response::builder() 47 | .status(hyper::StatusCode::OK) 48 | .header("lambda-runtime-aws-request-id", sqs_message.receipt_handle) 49 | .header("lambda-runtime-deadline-ms", sqs_message.ctx.deadline) 50 | .header( 51 | "lambda-runtime-invoked-function-arn", 52 | sqs_message.ctx.invoked_function_arn, 53 | ) 54 | .header( 55 | "lambda-runtime-trace-id", 56 | sqs_message.ctx.xray_trace_id.unwrap_or_else(|| { 57 | "Root=0-00000000-000000000000000000000000;Parent=0000000000000000;Sampled=0;Lineage=00000000:0" 58 | .to_owned() 59 | }), 60 | ) 61 | .body(full(sqs_message.payload)) 62 | .expect("Failed to create a response") 63 | } 64 | 65 | /// Checks BLOCK_NEXT_INVOCATION global flag and 66 | /// blocks the current thread if the current invocation should be blocked. 67 | async fn block_if_rerun() { 68 | // create a local copy of the blocking flag 69 | let block = if let Ok(block) = BLOCK_NEXT_INVOCATION.read() { 70 | *block 71 | } else { 72 | error!("Read deadlock on BLOCK_NEXT_INVOCATION. It's a bug"); 73 | false 74 | }; 75 | 76 | // unblock the next invocation 77 | if block { 78 | if let Ok(mut w) = BLOCK_NEXT_INVOCATION.write() { 79 | *w = false; 80 | } else { 81 | error!("Write deadlock on BLOCK_NEXT_INVOCATION. 
It's a bug"); 82 | } 83 | } 84 | 85 | // sleep for a month to prevent a rerun 86 | if block { 87 | warn!("Restart your lambda for a rerun"); 88 | sleep(Duration::from_secs(31563000)).await; 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /lambda-debugger/src/main.rs: -------------------------------------------------------------------------------- 1 | use async_once::AsyncOnce; 2 | use config::Config; 3 | use http_body_util::combinators::BoxBody; 4 | use hyper::body::Bytes; 5 | use hyper::server::conn::http1; 6 | use hyper::service::service_fn; 7 | use hyper::{Method, Request, Response}; 8 | use hyper_util::rt::TokioIo; 9 | use lazy_static::lazy_static; 10 | use std::str::FromStr; 11 | use tokio::net::TcpListener; 12 | use tracing::{debug, info, warn}; 13 | use tracing_subscriber::filter::Directive; 14 | use tracing_subscriber::EnvFilter; 15 | 16 | mod config; 17 | mod handlers; 18 | mod sqs; 19 | 20 | // Cannot use std::OnceCell because it does not support async initialization 21 | lazy_static! { 22 | pub(crate) static ref CONFIG: AsyncOnce = AsyncOnce::new(async { Config::from_env().await }); 23 | } 24 | 25 | /// The handler function converted into a Tower service to run in the background 26 | /// and serve the incoming HTTP requests from the local lambda. 27 | async fn lambda_api_handler( 28 | req: Request, 29 | ) -> Result>, hyper::Error> { 30 | debug!("Request URL: {:?}", req.uri()); 31 | 32 | if req.method() == Method::GET && req.uri().path().ends_with("/invocation/next") { 33 | return Ok(handlers::next_invocation::handler().await); 34 | } 35 | 36 | if req.method() != Method::POST { 37 | // There should be no other GET request types other than the above. 
38 | panic!("Invalid GET request: {:?}", req); 39 | } 40 | 41 | if req.uri().path().ends_with("/response") { 42 | return Ok(handlers::lambda_response::handler(req).await); 43 | } 44 | 45 | if req.uri().path().ends_with("/error") { 46 | return Ok(handlers::lambda_error::handler(req).await); 47 | } 48 | 49 | // this should not be happening unless there is a bug or someone is sending requests manually 50 | warn!("Unknown request type: {:?}", req); 51 | Ok(handlers::lambda_error::handler(req).await) 52 | } 53 | 54 | #[tokio::main] 55 | async fn main() -> Result<(), Box> { 56 | init_tracing(); 57 | let config = CONFIG.get().await; 58 | 59 | // bind to a TCP port and start a loop to continuously accept incoming connections 60 | let listener = TcpListener::bind(config.lambda_api_listener).await?; 61 | 62 | loop { 63 | let (stream, _) = listener.accept().await?; 64 | let io = TokioIo::new(stream); 65 | 66 | // Spawn a tokio task to serve multiple connections concurrently 67 | tokio::task::spawn(async move { 68 | // bind the incoming connection to lambda_api_handler service 69 | if let Err(err) = http1::Builder::new() 70 | // `service_fn` comes from Tower, convert the handler function into a service 71 | .serve_connection(io, service_fn(lambda_api_handler)) 72 | .await 73 | { 74 | debug!("TCP error: {:?}", err); 75 | info!("Lambda disconnected\n") 76 | } 77 | }); 78 | } 79 | } 80 | 81 | /// Initializes the tracing from RUST_LOG env var if present or sets minimal logging: 82 | /// - INFO for the emulator 83 | /// - ERROR for everything else 84 | fn init_tracing() { 85 | // find out the name of the binary to set the default logging filter 86 | let binary_name = std::env::current_exe() 87 | .expect("Cannot get the path to the current executable") 88 | .file_name() 89 | .map(|name| name.to_string_lossy().to_string()) 90 | .expect("Cannot get the file name of the current executable") 91 | // this replace is needed because tracing uses target names with underscores, e.g. 
`cargo_lambda_emulator` 92 | .replace('-', "_"); 93 | 94 | tracing_subscriber::fmt() 95 | .with_env_filter( 96 | EnvFilter::builder() 97 | .with_default_directive( 98 | Directive::from_str(&[&binary_name, "=info"].concat()) 99 | .expect("Invalid logging filter. It's a bug."), 100 | ) 101 | .from_env_lossy(), 102 | ) 103 | .with_ansi(true) 104 | .with_target(false) 105 | .compact() 106 | .init(); 107 | } 108 | -------------------------------------------------------------------------------- /lambda-debugger/src/sqs.rs: -------------------------------------------------------------------------------- 1 | use crate::CONFIG; 2 | use async_once::AsyncOnce; 3 | use aws_sdk_sqs::{types::Message, Client as SqsClient}; 4 | use flate2::read::GzEncoder; 5 | use flate2::Compression; 6 | use lambda_runtime::Context as Ctx; 7 | use lazy_static::lazy_static; 8 | use runtime_emulator_types::RequestPayload; 9 | use std::io::prelude::*; 10 | use tokio::time::{sleep, Duration}; 11 | use tracing::{info, warn}; 12 | 13 | // Cannot use OnceCell because it does not support async initialization 14 | lazy_static! { 15 | pub(crate) static ref SQS_CLIENT: AsyncOnce = 16 | AsyncOnce::new(async { SqsClient::new(&aws_config::load_from_env().await) }); 17 | } 18 | 19 | /// A parsed SQS message. 20 | /// The parsing is limited to extracting the data we need and passing the rest to the runtime. 
21 | #[derive(Debug)] 22 | pub(crate) struct SqsMessage { 23 | pub payload: String, 24 | /// the message receipt is needed to delete the message from the queue later 25 | pub receipt_handle: String, 26 | /// From the context 27 | pub ctx: Ctx, 28 | } 29 | 30 | /// Reads a message from the specified SQS queue and returns the payload as Lambda structures 31 | pub(crate) async fn get_input() -> SqsMessage { 32 | let config = CONFIG.get().await; 33 | let client = SQS_CLIENT.get().await; 34 | 35 | // time to wait for the next message in seconds 36 | // set to 0 to begin with a friendly message logic 37 | let mut wait_time = 0; 38 | 39 | // start listening to the response 40 | loop { 41 | // try to get the next message and wait for it to arrive if none is ready 42 | // sleep for a bit on error before retrying 43 | let resp = match client 44 | .receive_message() 45 | .max_number_of_messages(1) 46 | .set_queue_url(Some(config.remote_config().request_queue_url.clone())) 47 | .set_wait_time_seconds(Some(wait_time)) 48 | .send() 49 | .await 50 | { 51 | Ok(v) => v, 52 | Err(e) => { 53 | warn!("Failed to get messages: {}", e); 54 | sleep(Duration::from_millis(5000)).await; 55 | continue; 56 | } 57 | }; 58 | 59 | // wait until a message arrives or the function is killed by AWS 60 | if resp.messages.is_none() { 61 | // print a friendly reminder to send an event 62 | if wait_time == 0 { 63 | info!("Lambda connected. Waiting for an incoming event from AWS."); 64 | wait_time = 20; 65 | } 66 | 67 | continue; 68 | } 69 | 70 | // SQS returns an empty list returns when the queue wait time expires 71 | let mut msgs = resp.messages.expect("Failed to get list of messages"); 72 | 73 | // extract the payload and the receipt handle 74 | let (payload, receipt_handle) = if let Some(msg) = msgs.pop() { 75 | match msg { 76 | Message { 77 | body: Some(body), 78 | receipt_handle: Some(receipt_handle), 79 | .. 80 | } => (body, receipt_handle), 81 | _ => panic!("Invalid SQS message. 
Missing body or receipt: {:?}", msg), 82 | } 83 | } else { 84 | // no messages in the queue 85 | continue; 86 | }; 87 | 88 | // the SQS payload contains event and context that need to be extracted 89 | // there is no way to pass the context to the lambda, but we can at least log it 90 | // the payload that is passed to the lambda is in event property 91 | 92 | // { 93 | // "event": { "command": "value1", "key2": "value2", "key3": "value3" }, 94 | // "ctx": 95 | // { 96 | // "request_id": "4850539c-6316-4af1-9c47-8771cb3baeb1", 97 | // "deadline": 1718071341165, 98 | // "invoked_function_arn": "arn:aws:lambda:us-east-1:512295225992:function:lambda-debug-proxy", 99 | // "xray_trace_id": "Root=1-6667af77-3f5a28b931d7678525d90593;Parent=66ab8e86299a69bc;Sampled=0;Lineage=8af230b3:0", 100 | // "client_context": null, 101 | // "identity": null, 102 | // "env_config": 103 | // { 104 | // "function_name": "lambda-debug-proxy", 105 | // "memory": 128, 106 | // "version": "$LATEST", 107 | // "log_stream": "2024/06/11/lambda-debug-proxy[$LATEST]b1de3d3cab074896b448859c52fa1a2d", 108 | // "log_group": "/aws/lambda/lambda-debug-proxy", 109 | // }, 110 | // }, 111 | // } 112 | 113 | let payload: RequestPayload = serde_json::from_str(&payload).expect("Failed to deserialize msg body"); 114 | let ctx = payload.ctx; 115 | 116 | let payload = serde_json::to_string(&payload.event).expect("event contents cannot be serialized"); 117 | 118 | // if we reached this point, we have a parsed SQS message 119 | // with the payload and the receipt handle 120 | // and should return it to the caller 121 | return SqsMessage { 122 | payload, 123 | receipt_handle, 124 | ctx, 125 | }; 126 | } 127 | } 128 | 129 | /// Returns URLs of the default request and response queues, if they exist. 
130 | pub(crate) async fn get_default_queues() -> (Option, Option) { 131 | let client = SQS_CLIENT.get().await; 132 | 133 | // example of the default request queue URL 134 | // https://sqs.us-east-1.amazonaws.com/512295225992/proxy_lambda_req 135 | 136 | // get the list of queues that start with the default queue prefix 137 | let resp = match client 138 | .list_queues() 139 | .set_queue_name_prefix(Some("proxy_lambda_re".to_string())) 140 | .set_max_results(Some(100)) 141 | .send() 142 | .await 143 | { 144 | Ok(v) => v, 145 | Err(e) => { 146 | panic!("Failed to get list of SQS queues: {}", e); 147 | } 148 | }; 149 | 150 | // output containers 151 | let mut req_queue = None; 152 | let mut resp_queue = None; 153 | 154 | // match queue names against the default names 155 | if let Some(queue_urls) = resp.queue_urls { 156 | for url in queue_urls { 157 | if url.ends_with("/proxy_lambda_req") { 158 | req_queue = Some(url); 159 | } else if url.ends_with("/proxy_lambda_resp") { 160 | resp_queue = Some(url); 161 | } 162 | } 163 | } 164 | 165 | (req_queue, resp_queue) 166 | } 167 | 168 | /// Send back the response and delete the message from the queue. 
169 | pub(crate) async fn send_output(response: String, receipt_handle: String) { 170 | let config = CONFIG.get().await; 171 | let client = SQS_CLIENT.get().await; 172 | 173 | let response_queue_url = match &config.remote_config().response_queue_url { 174 | Some(v) => v.clone(), 175 | None => { 176 | info!("Response dropped: no response queue configured"); 177 | return; 178 | } 179 | }; 180 | 181 | let response = compress_output(response); 182 | 183 | // SQS messages must be shorter than 262144 bytes 184 | if response.len() < 262144 { 185 | if let Err(e) = client 186 | .send_message() 187 | .set_message_body(Some(response)) 188 | .set_queue_url(Some(response_queue_url)) 189 | .send() 190 | .await 191 | { 192 | panic!("Failed to send SQS response: {}", e); 193 | }; 194 | } else { 195 | info!( 196 | " Response dropped: message size {}B, max allowed by SQS is 262,144 bytes", 197 | response.len() 198 | ); 199 | } 200 | 201 | // delete the request msg from the queue so it cannot be replayed again 202 | if let Err(e) = client 203 | .delete_message() 204 | .set_queue_url(Some(config.remote_config().request_queue_url.to_string())) 205 | .set_receipt_handle(Some(receipt_handle)) 206 | .send() 207 | .await 208 | { 209 | panic!("Failed to send SQS response: {}", e); 210 | }; 211 | 212 | info!("Response sent and request deleted from the queue"); 213 | } 214 | 215 | /// Compresses and encodes the output as Base58 if the message is larger than what is 216 | /// allowed in SQS (262,144 bytes) 217 | fn compress_output(response: String) -> String { 218 | // is it small enough to fit in? 219 | if response.len() < 262144 { 220 | return response; 221 | } 222 | 223 | info!( 224 | "Message size: {}B, max allowed: 262144B. 
Compressing...", 225 | response.len() 226 | ); 227 | 228 | // try to decompress the body 229 | let mut gzipper = GzEncoder::new(response.as_bytes(), Compression::fast()); 230 | let mut gzipped: Vec = Vec::new(); 231 | let compressed_len = match gzipper.read_to_end(&mut gzipped) { 232 | Ok(v) => v, 233 | Err(e) => { 234 | // this may not be the best option - returning an error may be more appropriate 235 | panic!("Failed to gzip the payload: {}", e); 236 | } 237 | }; 238 | 239 | // encode to base58 240 | let response = bs58::encode(&gzipped).into_string(); 241 | 242 | info!("Compressed: {}, encoded: {}", compressed_len, response.len()); 243 | 244 | response 245 | } 246 | -------------------------------------------------------------------------------- /lambda-debugger/src/types.rs: -------------------------------------------------------------------------------- 1 | use lambda_runtime::Context; 2 | use serde::{Deserialize, Serialize}; 3 | use serde_json::Value; 4 | 5 | /// A local implementation of lambda_runtime::LambdaEvent. 6 | /// It replicates LambdaEvent because we need Ser/Deser traits not implemented for LambdaEvent. 7 | #[derive(Deserialize, Debug, Serialize)] 8 | pub struct RequestPayload { 9 | pub event: Value, // using Value to extract some fields and pass the rest to the runtime 10 | pub ctx: Context, 11 | } 12 | -------------------------------------------------------------------------------- /lambda-debugger/test-payload.json: -------------------------------------------------------------------------------- 1 | { 2 | "command": "echo" 3 | } -------------------------------------------------------------------------------- /proxy-lambda/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "proxy-lambda" 3 | version = "0.2.0" 4 | authors = ["rimutaka "] 5 | edition = "2021" 6 | description = "A proxy AWS Lambda function for forwarding invocations to a locally run lambda for debugging. 
To be deployed to AWS in place of the function you wan to debug. Requires SQS access." 7 | license = "Apache-2.0" 8 | repository = "https://github.com/rimutaka/lambda-debug-proxy" 9 | categories = ["web-programming::http-server"] 10 | keywords = ["AWS", "Lambda", "API"] 11 | readme = "../../README.md" 12 | 13 | [dependencies] 14 | lambda-debugger = { path = "../lambda-debugger" } 15 | tokio = { workspace = true, features = [ 16 | "macros", 17 | "io-util", 18 | "sync", 19 | "rt-multi-thread", 20 | ] } 21 | serde.workspace = true 22 | serde_json.workspace = true 23 | tracing.workspace = true 24 | tracing-subscriber.workspace = true 25 | lambda_runtime.workspace = true 26 | aws-config = { version = "1.1.7", features = ["behavior-version-latest"] } 27 | aws-sdk-sqs = "1.27" 28 | aws-types = "1.3" 29 | flate2 = "1.0" 30 | bs58 = "0.5" 31 | -------------------------------------------------------------------------------- /proxy-lambda/src/main.rs: -------------------------------------------------------------------------------- 1 | use aws_sdk_sqs::Client as SqsClient; 2 | use flate2::read::GzDecoder; 3 | use lambda_runtime::{service_fn, Error, LambdaEvent}; 4 | use runtime_emulator_types::RequestPayload; 5 | use serde_json::Value; 6 | use std::env::var; 7 | use std::io::Read; 8 | use std::str::FromStr; 9 | use tracing::{debug, error, info}; 10 | use tracing_subscriber::{filter::Directive, EnvFilter}; 11 | 12 | #[tokio::main] 13 | async fn main() -> Result<(), Error> { 14 | // initialize the tracing from RUST_LOG env var if present or sets minimal logging: 15 | // - INFO for the proxy 16 | // - ERROR for everything else 17 | tracing_subscriber::fmt() 18 | .with_env_filter( 19 | EnvFilter::builder() 20 | .with_default_directive( 21 | Directive::from_str("proxy_lambda=info").expect("Invalid logging filter. 
It's a bug."),
22 | )
23 | .from_env_lossy(),
24 | )
25 | .with_ansi(false)
26 | .without_time()
27 | .compact()
28 | .init();
29 | 
30 | print_env_vars();
31 | 
32 | if let Err(e) = lambda_runtime::run(service_fn(my_handler)).await {
33 | error!("Runtime error: {:?}", e);
34 | return Err(Error::from(e));
35 | }
36 | 
37 | Ok(())
38 | }
39 | 
40 | async fn my_handler(event: LambdaEvent) -> Result {
41 | let (event, ctx) = event.into_parts();
42 | 
43 | info!(
44 | "Event:\r{}",
45 | serde_json::to_string(&event).unwrap_or_else(|e| format!("Could not serialize event payload: {:?}", e))
46 | );
47 | info!(
48 | "Context:\r{}",
49 | serde_json::to_string(&ctx).unwrap_or_else(|e| format!("Could not serialize context: {:?}", e))
50 | );
51 | 
52 | // cloned up front because `ctx` is moved into RequestPayload further down
53 | let invoked_function_arn = ctx.invoked_function_arn.clone();
54 | 
55 | // check if the request queue URL was specified via an env var
56 | // if not, use the default queue URL
57 | let request_queue_url = match var("PROXY_LAMBDA_REQ_QUEUE_URL") {
58 | Ok(v) => v,
59 | Err(_e) => {
60 | // the env var does not exist - try to use the default queue URL
61 | // there shouldn't be any other env var errors, so the error type can be ignored
62 | let arn = invoked_function_arn.split(':').collect::>();
63 | // arn example: arn:aws:lambda:us-east-1:512295225992:function:my-lambda
64 | if arn.len() != 7 {
65 | error!(
66 | "ARN should have 7 parts, but it has {}: {}",
67 | arn.len(),
68 | ctx.invoked_function_arn
69 | );
70 | return Err(Error::from("Context error"));
71 | }
72 | 
73 | debug!(
74 | "Sending to default proxy_lambda_req queue name. Use PROXY_LAMBDA_REQ_QUEUE_URL env var to specify a different queue."
75 | );
76 | 
77 | // example: https://sqs.us-east-1.amazonaws.com/512295225992/proxy_lambda_req
78 | format!("https://sqs.{}.amazonaws.com/{}/proxy_lambda_req", arn[3], arn[4])
79 | }
80 | };
81 | 
82 | debug!("ReqQ URL: {}", request_queue_url);
83 | 
84 | let client = SqsClient::new(&aws_config::load_from_env().await);
85 | 
86 | // Sending part
87 | let request_payload = RequestPayload { event, ctx };
88 | 
89 | let message_body = match serde_json::to_string(&request_payload) {
90 | Ok(v) => v,
91 | Err(e) => {
92 | error!("Failed to serialize event + context: {:?}", e);
93 | return Err(Error::from(e));
94 | }
95 | };
96 | 
97 | debug!("Message body: {}", message_body);
98 | 
99 | let send_result = match client
100 | .send_message()
101 | .set_message_body(Some(message_body))
102 | .set_queue_url(Some(request_queue_url.to_string()))
103 | .send()
104 | .await
105 | {
106 | Ok(v) => v,
107 | Err(e) => {
108 | error!("Error sending message: {:?}", e);
109 | return Err(Error::from("Failed to send message"));
110 | }
111 | };
112 | 
113 | let msg_id = send_result.message_id.unwrap_or_default();
114 | debug!("Sent with ID: {}", msg_id);
115 | 
116 | // This proxy should wait for a response from the local lambda if there is a response queue.
117 | // To determine if there is a response queue the proxy checks for the env var and tries to purge it.
118 | // If no env var is set, the proxy tries to purge the default queue. 
119 | // Exit with OK if the env var does not exist and the default queue does not exist or gives this lambda no access
120 | let response_queue_url = match var("PROXY_LAMBDA_RESP_QUEUE_URL") {
121 | Ok(response_queue_url) => {
122 | debug!("RespQ URL from env var: {}", response_queue_url);
123 | // clear the response queue to avoid getting a stale message from a previously timed out request
124 | purge_response_queue(&client, &response_queue_url).await?;
125 | response_queue_url
126 | }
127 | Err(_) => {
128 | // queue env var does not exist - try to construct the default queue URL out of the lambda ARN
129 | let arn = invoked_function_arn.split(':').collect::>();
130 | // arn example: arn:aws:lambda:us-east-1:512295225992:function:my-lambda
// NOTE(review): this duplicates the ARN parsing/validation in the request-queue branch
// above — a shared helper would keep the two from drifting apart.
131 | 
132 | if arn.len() != 7 {
133 | error!(
134 | "ARN should have 7 parts, but it has {}: {}",
135 | arn.len(),
136 | invoked_function_arn
137 | );
138 | return Err(Error::from("Context error"));
139 | }
140 | 
141 | // sample SQS URL https://sqs.us-east-1.amazonaws.com/512295225992/proxy_lambda_resp
142 | let response_queue_url = format!("https://sqs.{}.amazonaws.com/{}/proxy_lambda_resp", arn[3], arn[4]);
143 | 
144 | debug!("RespQ URL from default: {}", response_queue_url);
145 | debug!("Use PROXY_LAMBDA_RESP_QUEUE_URL env var to specify a different queue");
146 | 
147 | // if this call fails it may mean the queue does not exist or is misconfigured
148 | // take this as the signal to not wait for a response
149 | if let Err(_e) = purge_response_queue(&client, &response_queue_url).await {
150 | info!("No response queue is configured");
151 | return Ok(Value::Null);
152 | };
153 | 
154 | response_queue_url
155 | }
156 | };
157 | 
158 | // wait for a response until one arrives or this lambda is timed out by AWS
159 | info!(
160 | "Waiting for a response from the local lambda via {}",
161 | response_queue_url
162 | );
163 | loop {
164 | debug!("20s loop");
165 | let resp = match client
166 | .receive_message()
167 | .max_number_of_messages(1)
168 | .set_queue_url(Some(response_queue_url.to_string()))
169 | .set_wait_time_seconds(Some(20))
170 | .send()
171 | .await
172 | {
173 | Ok(v) => v,
174 | Err(e) => {
175 | error!("Error receiving messages: {:?}", e);
176 | return Err(Error::from("Failed to receive messages"));
177 | }
178 | };
179 | 
180 | // wait until a message arrives or the function is killed by AWS
181 | // an empty list returns when the queue wait time expires
182 | let mut msgs = match resp.messages {
183 | Some(v) => v,
184 | None => {
185 | debug!("No messages yet: message list is None");
186 | continue;
187 | }
188 | };
189 | if msgs.is_empty() {
190 | debug!("No messages yet: empty message list");
191 | continue;
192 | } else {
193 | debug!("Received {} messages", msgs.len());
194 | }
195 | 
196 | // message arrived - grab its handle for future reference
197 | let receipt_handle = match msgs[0].receipt_handle.as_ref() {
198 | Some(v) => v,
199 | None => {
200 | return Err(Error::from("Failed to get msg receipt"));
201 | }
202 | }
203 | .to_owned();
204 | 
205 | let body = match match msgs.pop() {
206 | Some(v) => v,
207 | None => {
208 | return Err(Error::from(
209 | "msgs Vec should have been pre-checked for is_empty(). 
It's a bug.",
210 | ));
211 | }
212 | }
213 | .body
214 | {
215 | Some(v) => v,
216 | None => {
217 | return Err(Error::from("Failed to get message body"));
218 | }
219 | };
220 | 
221 | let body = decode_maybe_binary(body)?;
222 | 
223 | // delete it from the queue so it's not picked up again
224 | match client
225 | .delete_message()
226 | .set_queue_url(Some(response_queue_url.to_string()))
227 | .set_receipt_handle(Some(receipt_handle))
228 | .send()
229 | .await
230 | {
231 | Ok(v) => v,
232 | Err(e) => {
233 | error!("Error deleting messages: {:?}", e);
234 | return Err(Error::from("Error deleting messages"));
235 | }
236 | };
237 | debug!("Message deleted");
238 | info!("Response from the local lambda:\r{}", body);
239 | 
240 | // return the contents of the message as JSON Value
241 | return Ok(Value::from_str(&body)?);
242 | }
243 | }
244 | 
245 | /// Checks if the message is a Base58 encoded compressed text and either decodes/decompresses it
246 | /// or returns as-is if it's not encoded/compressed. 
247 | fn decode_maybe_binary(body: String) -> Result {
248 | // check for presence of { at the beginning of the doc to determine if it's JSON or Base58
249 | if body.is_empty() || body.trim_start().starts_with('{') || body.trim() == "null" {
250 | // looks like JSON - return as-is
251 | return Ok(body);
252 | }
253 | 
254 | debug!("Response payload before decoding:\r{}", body);
255 | 
256 | // try to decode base58
257 | let body_decoded = match bs58::decode(&body).into_vec() {
258 | Ok(v) => v,
259 | Err(e) => {
260 | error!("Failed to decode from maybe base58: {:?}", e);
261 | return Err(Error::from("Failed to decode from maybe base58"));
262 | }
263 | };
264 | 
265 | // try to decompress the body
266 | let mut decoder = GzDecoder::new(body_decoded.as_slice());
267 | let mut decoded: Vec = Vec::new();
268 | let len = match decoder.read_to_end(&mut decoded) {
269 | Ok(v) => v,
270 | Err(e) => {
271 | error!("Failed to decompress the payload: {:?}", e);
272 | return Err(Error::from("Failed to decompress the payload"));
273 | }
274 | };
275 | 
276 | info!("Decoded {} bytes of binary response", len);
277 | 
278 | // return the bytes converted into a UTF-8 string or an error (String::from_utf8 is strict, not lossy)
279 | match String::from_utf8(decoded) {
280 | Ok(v) => Ok(v),
281 | Err(e) => {
282 | error!("Failed to convert decompressed payload to UTF8: {:?}", e);
283 | Err(Error::from("Failed to convert decompressed payload to UTF8"))
284 | }
285 | }
286 | }
287 | 
/// Drains and deletes any stale messages sitting in the response queue so a previously
/// timed-out request cannot be mistaken for the current response. Errors out if the
/// queue is unreachable, which callers treat as "no response queue configured".
288 | async fn purge_response_queue(client: &SqsClient, response_queue_url: &str) -> Result<(), Error> {
289 | debug!("Purging the queue, one msg at a time.");
290 | loop {
291 | let resp = match client
292 | .receive_message()
293 | .max_number_of_messages(10)
294 | .set_queue_url(Some(response_queue_url.to_string()))
295 | .set_wait_time_seconds(Some(0))
296 | .send()
297 | .await
298 | {
299 | Ok(v) => v,
300 | Err(e) => {
301 | debug!("Error receiving stale messages for deletion: {:?}", e);
302 | return Err(Error::from("Error receiving 
messages"));
303 | }
304 | };
305 | 
306 | // an empty list returns when the queue wait time expires
307 | let msgs = match resp.messages {
308 | Some(v) => v,
309 | None => {
310 | debug!("No stale messages in response queue");
311 | return Ok(());
312 | }
313 | };
314 | 
315 | if msgs.is_empty() {
316 | debug!("No stale messages (resp.messages.is_empty)");
317 | return Ok(());
318 | }
319 | 
320 | info!("Deleting {} stale messages", msgs.len());
321 | 
322 | for msg in msgs {
323 | // delete it from the queue
324 | match client
325 | .delete_message()
326 | .set_queue_url(Some(response_queue_url.to_string()))
327 | .set_receipt_handle(msg.receipt_handle)
328 | .send()
329 | .await
330 | {
331 | Ok(v) => v,
332 | Err(e) => {
333 | error!("Error deleting messages: {:?}", e);
334 | return Err(Error::from("Error deleting messages"));
335 | }
336 | };
337 | debug!("Message deleted");
338 | }
339 | }
340 | }
341 | 
342 | /// Prints all environment variables to the log in the form of `export KEY=VALUE key2=value2`
343 | fn print_env_vars() {
344 | let mut env_vars = Vec::::with_capacity(30);
345 | env_vars.push(" export".to_string()); // the space at the front is needed to keep EXPORT as the first item of the array
346 | for (key, value) in std::env::vars() {
347 | match key.as_str() {
348 | "AWS_ACCESS_KEY_ID" | "AWS_SECRET_ACCESS_KEY" | "AWS_SESSION_TOKEN" => {
349 | // do not log sensitive vars
350 | }
351 | _ => {
352 | env_vars.push(format!("{}={}", key, value));
353 | }
354 | }
355 | }
356 | 
357 | // the list is easier to deal with when sorted
358 | env_vars.sort();
359 | 
360 | info!("AWS env vars:\r{}", env_vars.join(" ").trim());
361 | }
362 | 
--------------------------------------------------------------------------------
/test-lambda/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "test-lambda"
3 | version = "0.2.0"
4 | authors = ["rimutaka "]
5 | edition = "2021"
6 | description = "A sample 
lambda function for testing the runtime emulator"
7 | license = "Apache-2.0"
8 | repository = "https://github.com/rimutaka/lambda-debug-proxy"
9 | categories = ["web-programming::http-server"]
10 | keywords = ["AWS", "Lambda", "API"]
11 | readme = "../README.md"
12 | 
13 | [dependencies]
14 | lambda_runtime = {workspace = true, features = ["tracing"]}
15 | tokio = { version = "1.16", features = [
16 | "macros",
17 | "io-util",
18 | "sync",
19 | "rt-multi-thread",
20 | ] }
21 | serde.workspace = true
22 | serde_json.workspace = true
23 | tracing.workspace = true
24 | tracing-subscriber.workspace = true
--------------------------------------------------------------------------------
/test-lambda/src/main.rs:
--------------------------------------------------------------------------------
1 | /// This is a basic lambda for testing the emulator locally.
2 | use lambda_runtime::{service_fn, Error, LambdaEvent, Runtime};
3 | use serde::{Deserialize, Serialize};
4 | use tracing::info;
5 | 
6 | #[derive(Deserialize, Debug)]
7 | struct Request {
8 | command: String,
9 | }
10 | 
11 | #[derive(Serialize)]
12 | struct Response {
13 | req_id: String,
14 | msg: String,
15 | }
16 | 
17 | #[tokio::main]
18 | async fn main() -> Result<(), Error> {
19 | // minimal logging to keep it simple
20 | // intended to run locally only
21 | tracing_subscriber::fmt()
22 | .without_time()
23 | .with_ansi(true) // the color codes work in the terminal only
24 | .with_target(false)
25 | .init();
26 | 
27 | // init the runtime directly to avoid the extra logging layer
28 | let runtime = Runtime::new(service_fn(my_handler));
29 | runtime.run().await?;
30 | 
31 | Ok(())
32 | }
33 | 
/// Echoes back a greeting with the request ID; swap the Ok for the commented-out Err
/// below to test the emulator's error path.
34 | pub(crate) async fn my_handler(event: LambdaEvent) -> Result {
35 | info!("Handler invoked");
36 | 
37 | let command = event.payload.command;
38 | 
39 | info!("Command received: {}", command);
40 | 
41 | Ok(Response {
42 | req_id: event.context.request_id,
43 | msg: "Hello from Rust!".to_string(),
44 | })
45 | 
46 | // uncomment (and comment out the Ok above) to simulate a handler failure:
// 
Err(Error::from("Error"))
47 | }
48 | --------------------------------------------------------------------------------