- Docker
- Your own AWS account
- AWS CLI (https://aws.amazon.com/cli/) + configuration
- SAM (https://aws.amazon.com/serverless/sam/)
- Rust toolchain (recommended https://rustup.rs/)
- Zig (https://ziglang.org/)
- Cargo Lambda (https://www.cargo-lambda.info/)
| // api/shared/src/middleware/digest.rs | |
| //! RFC 3230 Digest header middleware for AWS Lambda HTTP responses. | |
| //! | |
| //! This middleware computes the SHA-256 digest of lambda_http response bodies | |
| //! and adds it as a `Digest` header in the format specified by RFC 3230. | |
| //! | |
| //! # Example | |
| //! | |
| //! ```rust,ignore |
| [package] | |
| name = "send-requests" | |
| version = "0.1.0" | |
| edition = "2021" | |
| # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html | |
| [dependencies] | |
| chrono = "0.4.38" | |
| clap = { version = "4.5.4", features = ["derive"] } |
| { | |
| "Version": "2012-10-17", | |
| "Statement": { | |
| "Effect": "Deny", | |
| "Action": "*", | |
| "Resource": "*", | |
| "Condition": { | |
| "DateLessThan": {"aws:TokenIssueTime": "2014-05-07T23:47:00Z"} | |
| } | |
| } |
// Polyfill for Promise.withResolvers (ES2024) on runtimes that lack it:
// returns a new promise together with its resolve/reject functions,
// avoiding the usual executor-closure boilerplate.
if (typeof Promise.withResolvers === 'undefined') {
  Promise.withResolvers = function () {
    const out = {}
    out.promise = new Promise((res, rej) => {
      out.resolve = res
      out.reject = rej
    })
    return out
  }
}
| [package] | |
| name = "learning-nom" | |
| version = "0.1.0" | |
| edition = "2021" | |
| # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html | |
| [dependencies] | |
| nom = "7.1.2" |
A quick demo that shows how JSDoc annotations can help you to get autocompletion and type checking without having to write (and transpile!) TypeScript code.
The video version is available here: https://youtube.com/shorts/60zUEc_vs1o
For more AWS tutorials, follow us at @AWSBites
Request batching is a Node.js pattern that can be used to optimise how a web server handles identical concurrent requests.
It allows the request to be processed only once for all the clients concurrently requesting the same information. This can save expensive round trips to backend services and can prevent backend services from being overloaded with many concurrent identical requests. In a way, it's a more specialised version of micro-caching.
This can lead to significant performance improvement in cases where there are many concurrent users requesting the same page.
// Demonstrates labeled break/continue used as goto-like jumps.
// Prints the even numbers from 0 to 100 inclusive (51 values), then exits.
let x = 0
LABEL1: do {
  console.log(x)
  x = x + 2
  // JUMP TO THE END OF THE DO-WHILE - A FORWARDS GOTO
  if (x > 100) break LABEL1
  // JUMP TO THE START OF THE DO WHILE - A BACKWARDS GOTO...
  // (redundant here, since `while (true)` would loop anyway — kept to
  // illustrate that `continue LABEL1` jumps back to the loop condition)
  if (x <= 100) continue LABEL1
} while (true)