├── zlint.json
├── codegen
├── src
│ ├── serialization.zig
│ ├── FileGenerationState.zig
│ ├── GenerateTypeOptions.zig
│ ├── support.zig
│ ├── GenerationState.zig
│ ├── smithy_tools.zig
│ ├── Hasher.zig
│ └── serialization
│ │ └── json.zig
├── build.zig.zon
├── README.md
└── build.zig
├── .mise.toml
├── .gitignore
├── .envrc
├── lib
├── json
│ ├── build.zig.zon
│ └── build.zig
└── date
│ ├── build.zig.zon
│ ├── src
│ ├── root.zig
│ ├── timestamp.zig
│ └── parsing.zig
│ └── build.zig
├── example
├── build.zig.zon
├── README.md
├── src
│ └── main.zig
└── build.zig
├── Makefile
├── .pre-commit-config.yaml
├── src
├── aws_http_base.zig
├── aws_authentication.zig
├── servicemodel.zig
├── test_ec2_query_no_input.response
├── url.zig
├── main.zig
├── xml.zig
├── aws_http.zig
└── test_rest_json_1_query_no_input.response
├── LICENSE
├── .gitea
└── workflows
│ ├── zig-nightly.yaml
│ ├── zig-mach.yaml
│ ├── zig-previous.yaml
│ └── build.yaml
└── README.md
/zlint.json:
--------------------------------------------------------------------------------
1 | {
2 | "ignore": ["lib/json/src/json.zig"]
3 | }
4 |
--------------------------------------------------------------------------------
/codegen/src/serialization.zig:
--------------------------------------------------------------------------------
1 | pub const json = @import("serialization/json.zig");
2 |
--------------------------------------------------------------------------------
/.mise.toml:
--------------------------------------------------------------------------------
1 | [tools]
2 | pre-commit = "latest"
3 | "ubi:DonIsaac/zlint" = "latest"
4 | zig = "0.15.1"
5 | zls = "0.15.0"
6 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .cache
2 | zig-cache
3 | codegen/models/*.zig
4 | codegen/codegen
5 | *.tgz
6 | service_manifest.zig
7 | demo
8 | src/models/
9 | smithy/zig-out/
10 | libs/
11 | src/git_version.zig
12 | zig-out
13 | core
14 | .zig-cache
15 |
--------------------------------------------------------------------------------
/.envrc:
--------------------------------------------------------------------------------
1 | # vi: ft=sh
2 | # shellcheck shell=bash
3 |
4 | if ! has zvm_direnv_version || ! zvm_direnv_version 2.0.0; then
5 | source_url "https://git.lerch.org/lobo/zvm-direnv/raw/tag/2.0.0/direnvrc" "sha256-8Umzxj32hFU6G0a7Wrq0KTNDQ8XEuje2A3s2ljh/hFY="
6 | fi
7 |
8 | use zig 0.15.1
9 |
--------------------------------------------------------------------------------
/lib/json/build.zig.zon:
--------------------------------------------------------------------------------
1 | .{
2 | .name = .json,
3 | .version = "0.0.0",
4 | .fingerprint = 0x6b0725452065211c, // Changing this has security and trust implications.
5 | .minimum_zig_version = "0.14.0",
6 | .dependencies = .{},
7 | .paths = .{
8 | "build.zig",
9 | "build.zig.zon",
10 | "src",
11 | },
12 | }
13 |
--------------------------------------------------------------------------------
/codegen/src/FileGenerationState.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const smithy = @import("smithy");
3 |
4 | const FileGenerationState = @This();
5 |
6 | protocol: smithy.AwsProtocol, // AWS wire protocol of the service being generated
7 | shapes: std.StringHashMap(smithy.ShapeInfo), // all model shapes, keyed by shape id (see smithy_tools.getShapeInfo)
8 | shape_references: std.StringHashMap(u64), // per-shape-id counters (presumably reference counts — confirm at call sites)
9 | additional_types_to_generate: *std.ArrayList(smithy.ShapeInfo), // shapes queued for generation later in this file (presumably a work queue — confirm)
10 | additional_types_generated: *std.StringHashMap(void), // set of already-emitted type names, used to avoid duplicates
11 |
--------------------------------------------------------------------------------
/example/build.zig.zon:
--------------------------------------------------------------------------------
1 | .{
2 | .name = .myapp,
3 | .version = "0.0.1",
4 | .fingerprint = 0x8798022a511224c5,
5 | .paths = .{""},
6 |
7 | .dependencies = .{
8 | .aws = .{
9 | .url = "https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/e41f98b389539c8bc6b1a231d25e2980318e5ef4/e41f98b389539c8bc6b1a231d25e2980318e5ef4-with-models.tar.gz",
10 | .hash = "aws-0.0.1-SbsFcI0RCgBdf1nak95gi1kAtI6sv3Ntb7BPETH30fpS",
11 | },
12 | },
13 | }
14 |
--------------------------------------------------------------------------------
/codegen/build.zig.zon:
--------------------------------------------------------------------------------
1 | .{
2 | .name = .codegen,
3 | .version = "0.0.1",
4 | .paths = .{
5 | "build.zig",
6 | "build.zig.zon",
7 | "src",
8 | "README.md",
9 | "LICENSE",
10 | },
11 | .fingerprint = 0x41c2ec2d551fe279,
12 |
13 | .dependencies = .{
14 | .smithy = .{
15 | .url = "git+https://git.lerch.org/lobo/smithy.git#09c0a618877ebaf8e15fbfc505983876f4e063d5",
16 | .hash = "smithy-1.0.0-uAyBgTnTAgBp2v6vypGcK5-YOCtxs2iEqR-4LfC5FTlS",
17 | },
18 | },
19 | }
20 |
--------------------------------------------------------------------------------
/codegen/src/GenerateTypeOptions.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const case = @import("case");
3 |
4 | const GenerateTypeOptions = @This();
5 |
6 | end_structure: bool,
7 | key_case: case.Case,
8 |
9 | pub fn endStructure(self: @This(), value: bool) GenerateTypeOptions {
10 | return .{
11 | .end_structure = value,
12 | .key_case = self.key_case,
13 | };
14 | }
15 |
16 | pub fn keyCase(self: @This(), value: case.Case) GenerateTypeOptions {
17 | return .{
18 | .end_structure = self.end_structure,
19 | .key_case = value,
20 | };
21 | }
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | start-hand-test: src/main.zig src/aws.zig src/xml.zig
2 | @zig build-exe -static -I/usr/local/include -Isrc/ -lc --strip \
3 | --name start-hand-test src/main.zig src/bitfield-workaround.c \
4 | /usr/local/lib64/libaws-c-*.a \
5 | /usr/local/lib64/libs2n.a \
6 | /usr/local/lib/libcrypto.a \
7 | /usr/local/lib/libssl.a
8 |
9 | elasticurl: curl.c
10 | @zig build-exe -static -I/usr/local/include -Isrc/ -lc --strip \
11 | --name elasticurl curl.c \
12 | /usr/local/lib64/libaws-c-*.a \
13 | /usr/local/lib64/libs2n.a \
14 | /usr/local/lib/libcrypto.a \
15 | /usr/local/lib/libssl.a
16 |
--------------------------------------------------------------------------------
/lib/date/build.zig.zon:
--------------------------------------------------------------------------------
1 | .{
2 | .name = .date,
3 | .version = "0.0.0",
4 | .fingerprint = 0xaa9e377a226d739e, // Changing this has security and trust implications.
5 | .minimum_zig_version = "0.14.0",
6 | .dependencies = .{
7 | .zeit = .{
8 | .url = "git+https://github.com/rockorager/zeit?ref=zig-0.15#ed2ca60db118414bda2b12df2039e33bad3b0b88",
9 | .hash = "zeit-0.6.0-5I6bk0J9AgCVa0nnyL0lNY9Xa9F68hHq-ZarhuXNV-Jb",
10 | },
11 | .json = .{
12 | .path = "../json",
13 | },
14 | },
15 | .paths = .{
16 | "build.zig",
17 | "build.zig.zon",
18 | "src",
19 | },
20 | }
21 |
--------------------------------------------------------------------------------
/lib/date/src/root.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const testing = std.testing;
3 |
4 | const parsing = @import("parsing.zig");
5 | pub const DateTime = parsing.DateTime;
6 | pub const timestampToDateTime = parsing.timestampToDateTime;
7 | pub const parseEnglishToTimestamp = parsing.parseEnglishToTimestamp;
8 | pub const parseEnglishToDateTime = parsing.parseEnglishToDateTime;
9 | pub const parseIso8601ToTimestamp = parsing.parseIso8601ToTimestamp;
10 | pub const parseIso8601ToDateTime = parsing.parseIso8601ToDateTime;
11 | pub const dateTimeToTimestamp = parsing.dateTimeToTimestamp;
12 | pub const printNowUtc = parsing.printNowUtc;
13 |
14 | const timestamp = @import("timestamp.zig");
15 | pub const DateFormat = timestamp.DateFormat;
16 | pub const Timestamp = timestamp.Timestamp;
17 |
18 | test {
19 | testing.refAllDeclsRecursive(@This());
20 | }
21 |
--------------------------------------------------------------------------------
/lib/json/build.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 |
3 | pub fn build(b: *std.Build) void {
4 | const target = b.standardTargetOptions(.{});
5 | const optimize = b.standardOptimizeOption(.{});
6 |
7 | const lib_mod = b.addModule("json", .{
8 | .root_source_file = b.path("src/json.zig"),
9 | .target = target,
10 | .optimize = optimize,
11 | });
12 |
13 | const lib = b.addLibrary(.{
14 | .linkage = .static,
15 | .name = "json",
16 | .root_module = lib_mod,
17 | });
18 |
19 | b.installArtifact(lib);
20 |
21 | const lib_unit_tests = b.addTest(.{
22 | .root_module = lib_mod,
23 | });
24 |
25 | const run_lib_unit_tests = b.addRunArtifact(lib_unit_tests);
26 |
27 | const test_step = b.step("test", "Run unit tests");
28 | test_step.dependOn(&run_lib_unit_tests.step);
29 | }
30 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # See https://pre-commit.com for more information
2 | # See https://pre-commit.com/hooks.html for more hooks
3 | repos:
4 | - repo: https://github.com/pre-commit/pre-commit-hooks
5 | rev: v3.2.0
6 | hooks:
7 | - id: trailing-whitespace
8 | - id: end-of-file-fixer
9 | - id: check-yaml
10 | - id: check-added-large-files
11 | - repo: https://github.com/batmac/pre-commit-zig
12 | rev: v0.3.0
13 | hooks:
14 | - id: zig-fmt
15 | - id: zig-build
16 | - repo: local
17 | hooks:
18 | - id: smoke-test
19 | name: Run zig build smoke-test
20 | entry: zig
21 | args: ["build", "--verbose", "smoke-test"]
22 | language: system
23 | types: [file]
24 | pass_filenames: false
25 | - id: zlint
26 | name: Run zlint
27 | entry: zlint
28 | args: ["--deny-warnings", "--fix"]
29 | language: system
30 | types: [zig]
31 |
--------------------------------------------------------------------------------
/src/aws_http_base.zig:
--------------------------------------------------------------------------------
1 | //! This module provides base data structures for aws http requests
2 | const std = @import("std");
3 | pub const Request = struct {
4 | path: []const u8 = "/",
5 | query: []const u8 = "",
6 | body: []const u8 = "",
7 | method: []const u8 = "POST",
8 | content_type: []const u8 = "application/json", // Can we get away with this?
9 | headers: []const std.http.Header = &.{},
10 | };
11 | pub const Result = struct {
12 |     response_code: u16, // actually 3 digits can fit in u10
13 |     body: []const u8, // owned by `allocator`; freed in deinit
14 |     headers: []const std.http.Header, // names/values and the slice itself are owned; freed in deinit
15 |     allocator: std.mem.Allocator, // allocator that owns body and headers
16 |
17 |     /// Frees the body, every header name and value, and the headers slice.
18 |     /// The Result must not be used after this call.
19 |     pub fn deinit(self: Result) void {
20 |         self.allocator.free(self.body);
21 |         for (self.headers) |h| {
22 |             self.allocator.free(h.name);
23 |             self.allocator.free(h.value);
24 |         }
25 |         self.allocator.free(self.headers);
26 |         //log.debug("http result deinit complete", .{});
27 |         return;
28 |     }
29 | };
28 |
--------------------------------------------------------------------------------
/src/aws_authentication.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 |
3 | pub const Credentials = struct {
4 | access_key: []const u8,
5 | secret_key: []u8,
6 | session_token: ?[]const u8,
7 | // uint64_t expiration_timepoint_seconds);
8 |
9 | allocator: std.mem.Allocator,
10 |
11 | const Self = @This();
12 |
13 | pub fn init(
14 | allocator: std.mem.Allocator,
15 | access_key: []const u8,
16 | secret_key: []u8,
17 | session_token: ?[]const u8,
18 | ) Self {
19 | return .{
20 | .access_key = access_key,
21 | .secret_key = secret_key,
22 | .session_token = session_token,
23 |
24 | .allocator = allocator,
25 | };
26 | }
27 | pub fn deinit(self: Self) void {
28 | std.crypto.secureZero(u8, self.secret_key);
29 | self.allocator.free(self.secret_key);
30 | self.allocator.free(self.access_key);
31 | if (self.session_token) |t| self.allocator.free(t);
32 | }
33 | };
34 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Emil Lerch
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/codegen/src/support.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const case = @import("case");
3 |
4 | const Allocator = std.mem.Allocator;
5 |
6 | pub fn constantName(allocator: Allocator, id: []const u8, comptime to_case: case.Case) ![]const u8 {
7 |     // There are some ids that don't follow consistent rules, so we'll
8 |     // look for the exceptions and, if not found, revert to the snake case
9 |     // algorithm
10 |
11 |     // Check the hard-coded exceptions before allocating anything, so the
12 |     // early returns cannot leak a scratch buffer.
13 |     if (to_case == .snake) {
14 |         if (std.mem.eql(u8, id, "SESv2")) return try allocator.dupe(u8, "ses_v2");
15 |         if (std.mem.eql(u8, id, "ETag")) return try allocator.dupe(u8, "e_tag");
16 |     }
17 |
18 |     // Mutable scratch copy of the id that the replacements patch in place.
19 |     // (Previously an unchecked @memcpy into a fixed 256-byte buffer could
20 |     // overflow on long ids, and this copy was never freed.)
21 |     const name = try allocator.dupe(u8, id);
22 |     defer allocator.free(name);
23 |
24 |     // Each replacement pair is the same length, so in-place patching is safe.
25 |     const simple_replacements = &.{
26 |         &.{ "DevOps", "Devops" },
27 |         &.{ "IoT", "Iot" },
28 |         &.{ "FSx", "Fsx" },
29 |         &.{ "CloudFront", "Cloudfront" },
30 |     };
31 |
32 |     inline for (simple_replacements) |rep| {
33 |         if (std.mem.indexOf(u8, name, rep[0])) |idx| @memcpy(name[idx .. idx + rep[0].len], rep[1]);
34 |     }
35 |
36 |     return try case.allocTo(allocator, to_case, name);
37 | }
34 |
--------------------------------------------------------------------------------
/lib/date/build.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 |
3 | pub fn build(b: *std.Build) void {
4 | const target = b.standardTargetOptions(.{});
5 | const optimize = b.standardOptimizeOption(.{});
6 |
7 | const lib_mod = b.addModule("date", .{
8 | .root_source_file = b.path("src/root.zig"),
9 | .target = target,
10 | .optimize = optimize,
11 | });
12 |
13 | const lib = b.addLibrary(.{
14 | .linkage = .static,
15 | .name = "date",
16 | .root_module = lib_mod,
17 | });
18 |
19 | b.installArtifact(lib);
20 |
21 | const lib_unit_tests = b.addTest(.{
22 | .root_module = lib_mod,
23 | });
24 |
25 | const run_lib_unit_tests = b.addRunArtifact(lib_unit_tests);
26 |
27 | const test_step = b.step("test", "Run unit tests");
28 | test_step.dependOn(&run_lib_unit_tests.step);
29 |
30 | const dep_zeit = b.dependency("zeit", .{
31 | .target = target,
32 | .optimize = optimize,
33 | });
34 | lib_mod.addImport("zeit", dep_zeit.module("zeit"));
35 |
36 | const dep_json = b.dependency("json", .{
37 | .target = target,
38 | .optimize = optimize,
39 | });
40 | lib_mod.addImport("json", dep_json.module("json"));
41 | }
42 |
--------------------------------------------------------------------------------
/codegen/src/GenerationState.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const smithy = @import("smithy");
3 |
4 | const FileGenerationState = @import("FileGenerationState.zig");
5 |
6 | const GenerationState = @This();
7 |
8 | type_stack: *std.ArrayList(*const smithy.ShapeInfo),
9 | file_state: FileGenerationState,
10 | // we will need some sort of "type decls needed" for recursive structures
11 | allocator: std.mem.Allocator,
12 | indent_level: u64,
13 |
14 | pub fn appendToTypeStack(self: @This(), shape_info: *const smithy.ShapeInfo) !void {
15 | try self.type_stack.append(self.allocator, shape_info);
16 | }
17 |
18 | pub fn popFromTypeStack(self: @This()) void {
19 | _ = self.type_stack.pop();
20 | }
21 |
22 | pub fn getTypeRecurrenceCount(self: @This(), id: []const u8) u8 {
23 |     // Count how many entries on the type stack already carry this shape id.
24 |     var count: u8 = 0;
25 |     for (self.type_stack.items) |entry| {
26 |         if (std.mem.eql(u8, entry.id, id)) count += 1;
27 |     }
28 |     return count;
29 | }
33 |
34 | pub fn indent(self: @This()) GenerationState {
35 | var new_state = self.clone();
36 | new_state.indent_level += 1;
37 | return new_state;
38 | }
39 |
40 | pub fn deindent(self: @This()) GenerationState {
41 |     var new_state = self.clone();
42 |     // Saturating subtract: `indent_level - 1` on a u64 of 0 trips the
43 |     // integer-overflow safety check BEFORE @max(0, ...) could clamp it.
44 |     new_state.indent_level -|= 1;
45 |     return new_state;
46 | }
45 |
46 | pub fn clone(self: @This()) GenerationState {
47 | return GenerationState{
48 | .type_stack = self.type_stack,
49 | .file_state = self.file_state,
50 | .allocator = self.allocator,
51 | .indent_level = self.indent_level,
52 | };
53 | }
54 |
--------------------------------------------------------------------------------
/codegen/README.md:
--------------------------------------------------------------------------------
1 | Model generation
2 | ================
3 |
4 | Because only models actually used by the application will be
5 | generated, one model or separate models do not make as much of a difference
6 | as they do in other languages. We can combine all models from AWS into a single
7 | comptime constant even, however, we're keeping zig files 1:1 with json files
8 | for now.
9 |
10 | Optimization plan will be done by the placing of a json file in the output
11 | directory. The json file will contain a mapping between input files and generated
12 | outputs, as well as a top level directory hash. We can skip the output generation
13 | entirely if the top level hash matches, otherwise, individual hashes will be
14 | compared and output files will only regenerate if the input or output has changed.
15 |
16 |
17 | Todo
18 | ----
19 |
20 | * I do not think all the optional types have been sorted.
21 | * I think there is necessary metadata missing from EC2Query style services
22 | * It handles all the types in existing AWS services, but it does not handle
23 | all known Smithy types (e.g. blob and document are missing)
24 | * It would be awesome to bring over the documentation from the model into
25 | zig-style doc comments
26 | * Self-referencing types are hard-coded to cut off after several nesting
27 | operations. Ideally these would be pulled out into their own types, but
28 | realistically I'm not sure if that will matter long term, and it's a fair
29 | amount of work as everything now can be done in a single pass without post
30 | processing.
31 |
32 | The models are Smithy json files, sourced from the AWS v2 go sdk
33 | for lack of a better place. Details are in build.zig of the parent project
34 | that is now responsible for downloading/caching the project.
35 |
--------------------------------------------------------------------------------
/example/README.md:
--------------------------------------------------------------------------------
1 | Example usage of aws-zig module by a client application
2 | =======================================================
3 |
4 | This directory has a fully functional command line application that utilizes
5 | the aws-zig module using the Zig package manager introduced in Zig 0.11.
6 |
7 | A couple things of note:
8 |
9 | * Rather than the typical "we will use the source code repository archive",
10 | you will notice in build.zig.zon that the dependency URL is a Gitea actions
11 | artifact. This is due to the fact that the aws service models are generated,
12 | and the package manager does not currently (I think) have a way to perform
13 | compile steps when pulling in a package and using a module. In any case, this
14 | seems like a reasonable restriction. The aws-zig SDK repository will build
15 | and test each code change, along with model generation, then capture the
16 | generated files along with the actual SDK source code and upload the resulting
17 | artifact for use. To find the correct artifact, look at the [actions page](https://git.lerch.org/lobo/aws-sdk-for-zig/actions)
18 | and choose a run ([example](https://git.lerch.org/lobo/aws-sdk-for-zig/actions/runs/57)).
19 | Under Artifacts, you will see the tarball and can paste that link into `build.zig.zon`.
20 | * The action naming is incorrect according to the zig naming guidelines. This
21 | will be fixed in the code generation process shortly, and this example will be
22 | updated accordingly.
23 | * Many (most) AWS services still don't support TLS 1.3. I recommend using
24 | [mitmproxy](https://mitmproxy.org) during development. Otherwise, it is
25 | likely best to wait until service(s) important to you are supported on
26 | TLS 1.3.
27 |
28 | Usage
29 | -----
30 |
31 | After configuring your AWS credentials using the standard tools, a simple
32 | `zig build run` should be sufficient to run this example.
33 |
--------------------------------------------------------------------------------
/codegen/src/smithy_tools.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const smithy = @import("smithy");
3 |
4 | pub const Shape = @FieldType(smithy.ShapeInfo, "shape");
5 | pub const ServiceShape = @TypeOf((Shape{ .service = undefined }).service);
6 | pub const ListShape = @TypeOf((Shape{ .list = undefined }).list);
7 | pub const MapShape = @TypeOf((Shape{ .map = undefined }).map);
8 |
9 | pub fn getShapeInfo(id: []const u8, shapes: std.StringHashMap(smithy.ShapeInfo)) !smithy.ShapeInfo {
10 | return shapes.get(id) orelse {
11 | std.debug.print("Shape ID not found. This is most likely a bug. Shape ID: {s}\n", .{id});
12 | return error.InvalidType;
13 | };
14 | }
15 |
16 | pub fn getShapeTraits(shape: Shape) []smithy.Trait {
17 | return switch (shape) {
18 | .service, .operation, .resource => std.debug.panic("Unexpected shape type: {}", .{shape}),
19 | inline else => |s| s.traits,
20 | };
21 | }
22 |
23 | pub fn getShapeMembers(shape: Shape) []smithy.TypeMember {
24 | return switch (shape) {
25 | inline .structure, .uniontype => |s| s.members,
26 | else => std.debug.panic("Unexpected shape type: {}", .{shape}),
27 | };
28 | }
29 |
30 | pub fn shapeIsLeaf(shape: Shape) bool {
31 | return switch (shape) {
32 | .@"enum",
33 | .bigDecimal,
34 | .bigInteger,
35 | .blob,
36 | .boolean,
37 | .byte,
38 | .document,
39 | .double,
40 | .float,
41 | .integer,
42 | .long,
43 | .short,
44 | .string,
45 | .timestamp,
46 | => true,
47 | else => false,
48 | };
49 | }
50 |
51 | pub fn shapeIsOptional(traits: []smithy.Trait) bool {
52 | return !hasTrait(.required, traits);
53 | }
54 |
55 | pub fn findTrait(trait_type: smithy.TraitType, traits: []smithy.Trait) ?smithy.Trait {
56 | for (traits) |trait| {
57 | if (trait == trait_type) {
58 | return trait;
59 | }
60 | }
61 |
62 | return null;
63 | }
64 |
65 | pub fn hasTrait(trait_type: smithy.TraitType, traits: []smithy.Trait) bool {
66 | return findTrait(trait_type, traits) != null;
67 | }
68 |
--------------------------------------------------------------------------------
/src/servicemodel.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const service_list = @import("service_manifest");
3 | const expectEqualStrings = std.testing.expectEqualStrings;
4 |
5 | pub fn Services(comptime service_imports: anytype) type {
6 | if (service_imports.len == 0) return services;
7 | // From here, the fields of our structure can be generated at comptime...
8 | var fields: [serviceCount(service_imports)]std.builtin.Type.StructField = undefined;
9 |
10 | for (&fields, 0..) |*item, i| {
11 | const import_field = @field(service_list, @tagName(service_imports[i]));
12 | item.* = .{
13 | .name = @tagName(service_imports[i]),
14 | .type = @TypeOf(import_field),
15 | .default_value_ptr = &import_field,
16 | .is_comptime = false,
17 | .alignment = std.meta.alignment(@TypeOf(import_field)),
18 | };
19 | }
20 |
21 | // finally, generate the type
22 | return @Type(.{
23 | .@"struct" = .{
24 | .layout = .auto,
25 | .fields = &fields,
26 | .decls = &[_]std.builtin.Type.Declaration{},
27 | .is_tuple = false,
28 | },
29 | });
30 | }
31 |
32 | /// Number of services to include: all of them when no filter is given.
33 | fn serviceCount(desired_services: anytype) usize {
34 |     // `@TypeOf(x).Struct` is not valid — the field list comes from @typeInfo,
35 |     // and the tag is `.@"struct"` on zig 0.14+ (matching Services() above).
36 |     if (desired_services.len == 0) return @typeInfo(@TypeOf(service_list)).@"struct".fields.len;
37 |     return desired_services.len;
38 | }
36 |
37 | /// Using this constant may blow up build times. Recommend using the Services()
38 | /// function directly, e.g. const services = Services(.{.sts, .ec2, .s3, .ddb}){};
39 | pub const services = service_list;
40 |
41 | test "services includes sts" {
42 | try expectEqualStrings("2011-06-15", services.sts.version.?);
43 | }
44 | test "sts includes get_caller_identity" {
45 | try expectEqualStrings("GetCallerIdentity", services.sts.get_caller_identity.action_name);
46 | }
47 | test "can get service and action name from request" {
48 | // get request object. This call doesn't have parameters
49 | const metadata = services.sts.get_caller_identity.Request.metaInfo();
50 | try expectEqualStrings("2011-06-15", metadata.service_metadata.version.?);
51 | }
52 | test "can filter services" {
53 | const filtered_services = Services(.{ .sts, .wafv2 }){};
54 | try expectEqualStrings("2011-06-15", filtered_services.sts.version.?);
55 | }
56 |
--------------------------------------------------------------------------------
/lib/date/src/timestamp.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const zeit = @import("zeit");
3 |
4 | pub const DateFormat = enum {
5 | rfc1123,
6 | iso8601,
7 | };
8 |
9 | pub const Timestamp = enum(zeit.Nanoseconds) {
10 | _,
11 |
12 | pub fn jsonStringify(value: Timestamp, jw: anytype) !void {
13 | const instant = zeit.instant(.{
14 | .source = .{
15 | .unix_nano = @intFromEnum(value),
16 | },
17 | }) catch std.debug.panic("Failed to parse timestamp to instant: {d}", .{value});
18 |
19 | const fmt = "Mon, 02 Jan 2006 15:04:05 GMT";
20 | var buf: [fmt.len]u8 = undefined;
21 |
22 | var fbs = std.Io.Writer.fixed(&buf);
23 | instant.time().gofmt(&fbs, fmt) catch std.debug.panic("Failed to format instant: {d}", .{instant.timestamp});
24 |
25 | try jw.write(&buf);
26 | }
27 |
28 | pub fn parse(val: []const u8) !Timestamp {
29 | const date_format = blk: {
30 | if (std.ascii.isDigit(val[0])) {
31 | break :blk DateFormat.iso8601;
32 | } else {
33 | break :blk DateFormat.rfc1123;
34 | }
35 | };
36 |
37 | const ins = try zeit.instant(.{
38 | .source = switch (date_format) {
39 | DateFormat.iso8601 => .{
40 | .iso8601 = val,
41 | },
42 | DateFormat.rfc1123 => .{
43 | .rfc1123 = val,
44 | },
45 | },
46 | });
47 |
48 | return @enumFromInt(ins.timestamp);
49 | }
50 | };
51 |
52 | test Timestamp {
53 |     const in_date = "Wed, 23 Apr 2025 11:23:45 GMT";
54 |
55 |     const expected_ts: Timestamp = @enumFromInt(1745407425000000000);
56 |     const actual_ts = try Timestamp.parse(in_date);
57 |
58 |     try std.testing.expectEqual(expected_ts, actual_ts);
59 |
60 |     // jsonStringify takes (value, jw) — the old call passed three arguments
61 |     // and drove it through the removed std.io fixedBufferStream/countingWriter
62 |     // APIs. Use a std.json.Stringify over a fixed std.Io.Writer, matching the
63 |     // writer interface the implementation itself uses.
64 |     var buf: [100]u8 = undefined;
65 |     var writer = std.Io.Writer.fixed(&buf);
66 |     var stringify: std.json.Stringify = .{ .writer = &writer };
67 |     try Timestamp.jsonStringify(expected_ts, &stringify);
68 |
69 |     const expected_json = "\"" ++ in_date ++ "\"";
70 |     const actual_json = writer.buffered();
71 |
72 |     try std.testing.expectEqualStrings(expected_json, actual_json);
73 | }
70 |
--------------------------------------------------------------------------------
/example/src/main.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const aws = @import("aws");
3 |
4 | pub const std_options: std.Options = .{
5 | .log_level = .info,
6 |
7 | // usually log_level is enough, but log_scope_levels can be used
8 | // for finer grained control
9 | .log_scope_levels = &[_]std.log.ScopeLevel{
10 | .{ .scope = .awshttp, .level = .warn },
11 | },
12 | };
13 |
14 | pub fn main() anyerror!void {
15 |     var gpa = std.heap.GeneralPurposeAllocator(.{}){};
16 |     defer _ = gpa.deinit();
17 |     const allocator = gpa.allocator();
18 |     var stdout_buffer: [1024]u8 = undefined;
19 |     var stdout_raw = std.fs.File.stdout().writer(&stdout_buffer);
20 |     const stdout = &stdout_raw.interface;
21 |     defer stdout.flush() catch unreachable;
22 |
23 |     // To use a proxy, uncomment the following with your own configuration
24 |     // const proxy = std.http.Proxy{
25 |     //     .protocol = .plain,
26 |     //     .host = "localhost",
27 |     //     .port = 8080,
28 |     // };
29 |     //
30 |     // var client = aws.Client.init(allocator, .{ .proxy = proxy });
31 |     var client = aws.Client.init(allocator, .{});
32 |     defer client.deinit();
33 |
34 |     const options = aws.Options{
35 |         .region = "us-west-2",
36 |         .client = client,
37 |     };
38 |
39 |     const services = aws.Services(.{ .sts, .kms }){};
40 |     try stdout.print("Calling KMS ListKeys\n", .{});
41 |     try stdout.print("You likely have at least some AWS-generated keys in your account,\n", .{});
42 |     try stdout.print("but if the account has not had many services used, this may return 0 keys\n\n", .{});
43 |     const call_kms = try aws.Request(services.kms.list_keys).call(.{}, options);
44 |     // Register cleanup immediately after acquisition: if any print below
45 |     // returns an error, a defer placed later would never run and the
46 |     // response would leak (and trip the gpa leak check).
47 |     defer call_kms.deinit();
48 |     try stdout.print("\trequestId: {s}\n", .{call_kms.response_metadata.request_id});
49 |     try stdout.print("\tkey count: {d}\n", .{call_kms.response.keys.?.len});
50 |     for (call_kms.response.keys.?) |key| {
51 |         try stdout.print("\t\tkey id: {s}\n", .{key.key_id.?});
52 |         try stdout.print("\t\tkey arn: {s}\n", .{key.key_arn.?});
53 |     }
54 |
55 |     try stdout.print("\n\n\nCalling STS GetCallerIdentity\n", .{});
56 |     const call = try aws.Request(services.sts.get_caller_identity).call(.{}, options);
57 |     defer call.deinit();
58 |     try stdout.print("\tarn: {s}\n", .{call.response.arn.?});
59 |     try stdout.print("\tid: {s}\n", .{call.response.user_id.?});
60 |     try stdout.print("\taccount: {s}\n", .{call.response.account.?});
61 |     try stdout.print("\trequestId: {s}\n", .{call.response_metadata.request_id});
62 | }
60 |
--------------------------------------------------------------------------------
/codegen/build.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 |
// Declaratively constructs the build graph for the code generator; the
// graph is executed by the external zig build runner, so nothing here
// runs immediately.
//
// NOTE(review): modernized from the pre-0.12 API (`std.build.Builder`,
// path-literal `root_source_file`, `exe.addModule`) — none of which exist
// in the zig 0.15.1 this repository pins — to the current std.Build API
// already used by example/build.zig: modules are created explicitly and
// imports attach to `exe.root_module`.
pub fn build(b: *std.Build) !void {
    // Standard target/optimize options let the `zig build` invoker choose;
    // defaults are native target and Debug.
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    const exe_module = b.createModule(.{
        .root_source_file = b.path("src/main.zig"),
        .target = target,
        .optimize = optimize,
    });

    const exe = b.addExecutable(.{
        .name = "codegen",
        .root_module = exe_module,
    });

    // The generator consumes smithy models; forward the same target and
    // optimization level to the dependency.
    const smithy_dep = b.dependency("smithy", .{
        .target = target,
        .optimize = optimize,
    });
    exe.root_module.addImport("smithy", smithy_dep.module("smithy"));

    // Install the executable on the default (`zig build`) step.
    b.installArtifact(exe);

    // `zig build run` — depend on install so the binary runs from the
    // installation directory and any other installed files are present.
    const run_cmd = b.addRunArtifact(exe);
    run_cmd.step.dependOn(b.getInstallStep());

    // Forward trailing CLI arguments: `zig build run -- arg1 arg2 etc`.
    if (b.args) |args| {
        run_cmd.addArgs(args);
    }

    const run_step = b.step("run", "Run the app");
    run_step.dependOn(&run_cmd.step);

    // `zig build test` — build and run the unit tests.
    const test_module = b.createModule(.{
        .root_source_file = b.path("src/main.zig"),
        .target = target,
        .optimize = optimize,
    });
    const unit_tests = b.addTest(.{
        .root_module = test_module,
    });
    const run_unit_tests = b.addRunArtifact(unit_tests);

    const test_step = b.step("test", "Run unit tests");
    test_step.dependOn(&run_unit_tests.step);
}
75 |
--------------------------------------------------------------------------------
/example/build.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 |
// Build graph declaration for the example project. The zig build runner
// evaluates the graph this function constructs; nothing here executes
// immediately.
pub fn build(b: *std.Build) void {
    // Let the invoker pick the target triple and optimization level
    // (defaults: native target, Debug).
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    const main_module = b.createModule(.{
        .root_source_file = b.path("src/main.zig"),
        .target = target,
        .optimize = optimize,
    });

    const executable = b.addExecutable(.{
        .name = "tmp",
        .root_module = main_module,
    });

    // Wire in the aws dependency; it expects the same target/optimize pair
    // as arguments.
    const aws_dep = b.dependency("aws", .{
        .target = target,
        .optimize = optimize,
    });
    executable.root_module.addImport("aws", aws_dep.module("aws"));

    // Install the binary as part of the default `zig build` step.
    b.installArtifact(executable);

    // `zig build run` — run from the installation directory (rather than
    // the cache) so any other installed files are where the program
    // expects them.
    const run_artifact = b.addRunArtifact(executable);
    run_artifact.step.dependOn(b.getInstallStep());

    // Forward trailing CLI arguments: `zig build run -- arg1 arg2 etc`.
    if (b.args) |args| run_artifact.addArgs(args);

    const run_step = b.step("run", "Run the app");
    run_step.dependOn(&run_artifact.step);

    // `zig build test` — build the test executable and run it.
    const test_module = b.createModule(.{
        .root_source_file = b.path("src/main.zig"),
        .target = target,
        .optimize = optimize,
    });
    const unit_tests = b.addTest(.{
        .root_module = test_module,
    });
    const test_run = b.addRunArtifact(unit_tests);

    const test_step = b.step("test", "Run unit tests");
    test_step.dependOn(&test_run.step);
}
83 |
--------------------------------------------------------------------------------
/.gitea/workflows/zig-nightly.yaml:
--------------------------------------------------------------------------------
1 | name: aws-zig nightly build
2 | on:
3 | workflow_dispatch:
4 | schedule:
5 | - cron: '30 12 * * *' # 12:30 UTC, 4:30AM Pacific
6 | push:
7 | branches:
8 | - 'zig-develop'
9 | env:
10 | PKG_PREFIX: nightly-zig
11 | jobs:
12 | build-zig-nightly:
13 | container:
14 | # We need CAP_SYS_PTRACE for stack traces due to a regression in 0.14.0
15 | # TODO: Remove this after https://github.com/ziglang/zig/issues/21815 is
16 | # addressed
17 | options: --cap-add CAP_SYS_PTRACE
18 | runs-on: ubuntu-latest
19 | # Need to use the default container with node and all that, so we can
20 | # use JS-based actions like actions/checkout@v3...
21 | # container:
22 | # image: alpine:3.15.0
23 | steps:
24 | - name: Check out repository code
25 | uses: actions/checkout@v4
26 | with:
27 | ref: zig-develop
28 | - name: Setup Zig
29 | uses: https://github.com/mlugg/setup-zig@v2.0.5
30 | with:
31 | version: master
32 | - name: Run smoke test
33 | run: zig build smoke-test --verbose
34 | - name: Run full tests
35 | run: zig build test --verbose --summary all
36 | - name: Run tests (release mode)
37 | run: zig build test -Doptimize=ReleaseSafe --verbose
38 | # Zig package manager expects everything to be inside a directory in the archive,
39 | # which it then strips out on download. So we need to shove everything inside a directory
40 | # the way GitHub/Gitea does for repo archives
41 | #
42 | # Also, zig tar process doesn't handle gnu format for long names, nor does it seem to
43 | # handle posix long name semantics cleanly either. ustar works. This
44 | # should be using git archive, but we need our generated code to be part of it
45 | - name: Package source code with generated models
46 | run: |
47 | tar -czf ${{ runner.temp }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz \
48 | --format ustar \
49 | --exclude 'zig-*' \
50 | --transform 's,^,${{ github.sha }}/,' *
51 | # - name: Sign
52 | # id: sign
53 | # uses: https://git.lerch.org/lobo/action-hsm-sign@v1
54 | # with:
55 | # pin: ${{ secrets.HSM_USER_PIN }}
56 | # files: ???
57 | # public_key: 'https://emil.lerch.org/serverpublic.pem'
58 | # - run: |
59 | # echo "Source 0 should be ./bar: ${{ steps.sign.outputs.SOURCE_0 }}"
60 | # - run: |
61 | # echo "Signature 0 should be ./bar.sig: ${{ steps.sign.outputs.SIG_0 }}"
62 | # - run: echo "URL of bar (0) is ${{ steps.sign.outputs.URL_0 }}"
63 | # - run: |
64 | # echo "Source 1 should be ./foo: ${{ steps.sign.outputs.SOURCE_1 }}"
65 | # - run: |
66 | # echo "Signature 1 should be ./foo.sig: ${{ steps.sign.outputs.SIG_1 }}"
67 | # - run: echo "URL of foo (1) is ${{ steps.sign.outputs.URL_1 }}"
68 | - name: Publish source code with generated models
69 | run: |
70 | curl -s --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
71 | --upload-file ${{ runner.temp }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz \
72 | https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz
73 | - name: Build example
74 | run: ( cd example && zig build ) # Make sure example builds
75 | - name: Notify
76 | uses: https://git.lerch.org/lobo/action-notify-ntfy@v2
77 | if: always()
78 | with:
79 | host: ${{ secrets.NTFY_HOST }}
80 | topic: ${{ secrets.NTFY_TOPIC }}
81 | user: ${{ secrets.NTFY_USER }}
82 | password: ${{ secrets.NTFY_PASSWORD }}
83 |
--------------------------------------------------------------------------------
/.gitea/workflows/zig-mach.yaml:
--------------------------------------------------------------------------------
1 | name: aws-zig mach nominated build
2 | on:
3 | workflow_dispatch:
4 | # schedule:
5 | # - cron: '0 12 * * *' # noon UTC, 4AM Pacific
6 | push:
7 | branches:
8 | - 'zig-mach'
9 | env:
10 | PKG_PREFIX: nominated-zig
11 | jobs:
12 | build-zig-nominated-mach-latest:
13 | container:
14 | # We need CAP_SYS_PTRACE for stack traces due to a regression in 0.14.0
15 | # TODO: Remove this after https://github.com/ziglang/zig/issues/21815 is
16 | # addressed
17 | options: --cap-add CAP_SYS_PTRACE
18 | runs-on: ubuntu-latest
19 | # Need to use the default container with node and all that, so we can
20 | # use JS-based actions like actions/checkout@v3...
21 | # container:
22 | # image: alpine:3.15.0
23 | steps:
24 | - name: Check out repository code
25 | uses: actions/checkout@v4
26 | with:
27 | ref: zig-mach
28 | - name: Setup Zig
29 | uses: https://github.com/mlugg/setup-zig@v2.0.1
30 | with:
31 | version: mach-latest
32 | - name: Restore Zig caches
33 | uses: https://github.com/Hanaasagi/zig-action-cache@3954aae427f8b05914e08dfd79f15e1f2e435929
34 | - name: Run gen
35 | run: zig build gen --verbose
36 | - name: Run smoke test
37 | run: zig build smoke-test --verbose
38 | - name: Run full tests
39 | run: zig build test --verbose --summary all
40 | # TODO: Zig mach currently tracking behind zig 0.14.0 branch - enable this test after update
41 | # - name: Run tests (release mode)
42 | # run: zig build test -Doptimize=ReleaseSafe --verbose
43 | # Zig package manager expects everything to be inside a directory in the archive,
44 | # which it then strips out on download. So we need to shove everything inside a directory
45 | # the way GitHub/Gitea does for repo archives
46 | #
47 | # Also, zig tar process doesn't handle gnu format for long names, nor does it seem to
48 | # handle posix long name semantics cleanly either. ustar works. This
49 | # should be using git archive, but we need our generated code to be part of it
50 | - name: Package source code with generated models
51 | run: |
52 | tar -czf ${{ runner.temp }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz \
53 | --format ustar \
54 | --exclude 'zig-*' \
55 | --transform 's,^,${{ github.sha }}/,' *
56 | # - name: Sign
57 | # id: sign
58 | # uses: https://git.lerch.org/lobo/action-hsm-sign@v1
59 | # with:
60 | # pin: ${{ secrets.HSM_USER_PIN }}
61 | # files: ???
62 | # public_key: 'https://emil.lerch.org/serverpublic.pem'
63 | # - run: |
64 | # echo "Source 0 should be ./bar: ${{ steps.sign.outputs.SOURCE_0 }}"
65 | # - run: |
66 | # echo "Signature 0 should be ./bar.sig: ${{ steps.sign.outputs.SIG_0 }}"
67 | # - run: echo "URL of bar (0) is ${{ steps.sign.outputs.URL_0 }}"
68 | # - run: |
69 | # echo "Source 1 should be ./foo: ${{ steps.sign.outputs.SOURCE_1 }}"
70 | # - run: |
71 | # echo "Signature 1 should be ./foo.sig: ${{ steps.sign.outputs.SIG_1 }}"
72 | # - run: echo "URL of foo (1) is ${{ steps.sign.outputs.URL_1 }}"
73 | - name: Publish source code with generated models
74 | run: |
75 | curl -s --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
76 | --upload-file ${{ runner.temp }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz \
77 | https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}${{ env.PKG_PREFIX }}-with-models.tar.gz
78 | - name: Build example
79 | run: ( cd example && zig build ) # Make sure example builds
80 | - name: Notify
81 | uses: https://git.lerch.org/lobo/action-notify-ntfy@v2
82 | if: always()
83 | with:
84 | host: ${{ secrets.NTFY_HOST }}
85 | topic: ${{ secrets.NTFY_TOPIC }}
86 | user: ${{ secrets.NTFY_USER }}
87 | password: ${{ secrets.NTFY_PASSWORD }}
88 |
--------------------------------------------------------------------------------
/src/test_ec2_query_no_input.response:
--------------------------------------------------------------------------------
1 |
2 |
3 | 4cdbdd69-800c-49b5-8474-ae4c17709782
4 |
5 | -
6 | ap-south-1
7 | ec2.ap-south-1.amazonaws.com
8 | opt-in-not-required
9 |
10 | -
11 | eu-north-1
12 | ec2.eu-north-1.amazonaws.com
13 | opt-in-not-required
14 |
15 | -
16 | eu-west-3
17 | ec2.eu-west-3.amazonaws.com
18 | opt-in-not-required
19 |
20 | -
21 | eu-west-2
22 | ec2.eu-west-2.amazonaws.com
23 | opt-in-not-required
24 |
25 | -
26 | eu-west-1
27 | ec2.eu-west-1.amazonaws.com
28 | opt-in-not-required
29 |
30 | -
31 | ap-northeast-3
32 | ec2.ap-northeast-3.amazonaws.com
33 | opt-in-not-required
34 |
35 | -
36 | ap-northeast-2
37 | ec2.ap-northeast-2.amazonaws.com
38 | opt-in-not-required
39 |
40 | -
41 | ap-northeast-1
42 | ec2.ap-northeast-1.amazonaws.com
43 | opt-in-not-required
44 |
45 | -
46 | ca-central-1
47 | ec2.ca-central-1.amazonaws.com
48 | opt-in-not-required
49 |
50 | -
51 | sa-east-1
52 | ec2.sa-east-1.amazonaws.com
53 | opt-in-not-required
54 |
55 | -
56 | ap-southeast-1
57 | ec2.ap-southeast-1.amazonaws.com
58 | opt-in-not-required
59 |
60 | -
61 | ap-southeast-2
62 | ec2.ap-southeast-2.amazonaws.com
63 | opt-in-not-required
64 |
65 | -
66 | eu-central-1
67 | ec2.eu-central-1.amazonaws.com
68 | opt-in-not-required
69 |
70 | -
71 | us-east-1
72 | ec2.us-east-1.amazonaws.com
73 | opt-in-not-required
74 |
75 | -
76 | us-east-2
77 | ec2.us-east-2.amazonaws.com
78 | opt-in-not-required
79 |
80 | -
81 | us-west-1
82 | ec2.us-west-1.amazonaws.com
83 | opt-in-not-required
84 |
85 | -
86 | us-west-2
87 | ec2.us-west-2.amazonaws.com
88 | opt-in-not-required
89 |
90 |
91 |
92 |
--------------------------------------------------------------------------------
/.gitea/workflows/zig-previous.yaml:
--------------------------------------------------------------------------------
1 | name: AWS-Zig Build
2 | on:
3 | workflow_dispatch:
4 | push:
5 | branches:
6 | - 'zig-0.14.x'
7 | env:
8 | ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
9 | ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
10 | jobs:
11 | build-zig-amd64-host:
12 | runs-on: ubuntu-latest
13 | # Need to use the default container with node and all that, so we can
14 | # use JS-based actions like actions/checkout@v3...
15 | # container:
16 | # image: alpine:3.15.0
17 | steps:
18 | - name: Check out repository code
19 | uses: actions/checkout@v4
20 | with:
21 | ref: zig-0.14.x
22 | - name: Setup Zig
23 | uses: https://github.com/mlugg/setup-zig@v2.0.1
24 | with:
25 | version: 0.14.0
26 | - name: Run smoke test
27 | run: zig build smoke-test --verbose
28 | - name: Run full tests
29 | run: zig build test --verbose --summary all
30 | # Release mode fix not backported to 0.13.0 code
31 | #- name: Run tests (release mode)
32 | # run: zig build test -Doptimize=ReleaseSafe --verbose
33 | # Zig build scripts don't have the ability to import dependencies directly
34 | # (https://github.com/ziglang/zig/issues/18164). We can allow downstream
35 | # build scripts to import aws with a few tweaks, but we can't @import("src/aws.zig")
36 | # until we have our models built. So we have to have the build script
37 | # basically modified, only during packaging, to allow this use case
38 | #
39 | # Zig package manager expects everything to be inside a directory in the archive,
40 | # which it then strips out on download. So we need to shove everything inside a directory
41 | # the way GitHub/Gitea does for repo archives
42 | #
43 | # Also, zig tar process doesn't handle gnu format for long names, nor does it seem to
44 | # handle posix long name semantics cleanly either. ustar works. This
45 | # should be using git archive, but we need our generated code to be part of it
46 | - name: Package source code with generated models
47 | run: |
48 | sed -i 's#// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //##' build.zig
49 | tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
50 | --format ustar \
51 | --exclude 'zig-*' \
52 | *
53 | # Something in this PR broke this transform. I don't mind removing it, but
54 | # the PR attempts to handle situations with or without a prefix, but it
55 | # doesn't. I have not yet determined what the problem is, though
56 | # https://github.com/ziglang/zig/pull/19111/files
57 | # --transform 's,^,${{ github.sha }}/,' *
58 | # - name: Sign
59 | # id: sign
60 | # uses: https://git.lerch.org/lobo/action-hsm-sign@v1
61 | # with:
62 | # pin: ${{ secrets.HSM_USER_PIN }}
63 | # files: ???
64 | # public_key: 'https://emil.lerch.org/serverpublic.pem'
65 | # - run: |
66 | # echo "Source 0 should be ./bar: ${{ steps.sign.outputs.SOURCE_0 }}"
67 | # - run: |
68 | # echo "Signature 0 should be ./bar.sig: ${{ steps.sign.outputs.SIG_0 }}"
69 | # - run: echo "URL of bar (0) is ${{ steps.sign.outputs.URL_0 }}"
70 | # - run: |
71 | # echo "Source 1 should be ./foo: ${{ steps.sign.outputs.SOURCE_1 }}"
72 | # - run: |
73 | # echo "Signature 1 should be ./foo.sig: ${{ steps.sign.outputs.SIG_1 }}"
74 | # - run: echo "URL of foo (1) is ${{ steps.sign.outputs.URL_1 }}"
75 | - name: Publish source code with generated models
76 | run: |
77 | curl -s --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
78 | --upload-file ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
79 | https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}-with-models.tar.gz
80 | - name: Build example
81 | run: ( cd example && zig build ) # Make sure example builds
82 | - name: Notify
83 | uses: https://git.lerch.org/lobo/action-notify-ntfy@v2
84 | if: always()
85 | with:
86 | host: ${{ secrets.NTFY_HOST }}
87 | topic: ${{ secrets.NTFY_TOPIC }}
88 | user: ${{ secrets.NTFY_USER }}
89 | password: ${{ secrets.NTFY_PASSWORD }}
90 |
--------------------------------------------------------------------------------
/.gitea/workflows/build.yaml:
--------------------------------------------------------------------------------
1 | name: AWS-Zig Build
2 | on:
3 | workflow_dispatch:
4 | push:
5 | branches:
6 | - 'master'
7 | env:
8 | ACTIONS_RUNTIME_TOKEN: ${{ secrets.GITHUB_TOKEN }}
9 | ACTIONS_RUNTIME_URL: ${{ env.GITHUB_SERVER_URL }}/api/actions_pipeline/
10 | jobs:
11 | build-zig-amd64-host:
12 | runs-on: ubuntu-latest
13 | # Need to use the default container with node and all that, so we can
14 | # use JS-based actions like actions/checkout@v3...
15 | # container:
16 | # image: alpine:3.15.0
17 | steps:
18 | - name: Check out repository code
19 | uses: actions/checkout@v4
20 | - name: Setup Zig
21 | uses: https://github.com/mlugg/setup-zig@v2.0.5
22 | # We will let setup-zig use minimum_zig_version from build.zig.zon
23 | # setup-zig also sets up the zig cache appropriately
24 | - name: Ulimit
25 | run: ulimit -a
26 | - name: Run smoke test
27 | run: zig build smoke-test --verbose
28 | - name: Run tests
29 | run: zig build test --verbose --summary all
30 | - name: Run tests (release mode)
31 | run: zig build test -Doptimize=ReleaseSafe --verbose
32 | # Zig build scripts don't have the ability to import dependencies directly
33 | # (https://github.com/ziglang/zig/issues/18164). We can allow downstream
34 | # build scripts to import aws with a few tweaks, but we can't @import("src/aws.zig")
35 | # until we have our models built. So we have to have the build script
36 | # basically modified, only during packaging, to allow this use case
37 | #
38 | # Zig package manager expects everything to be inside a directory in the archive,
39 | # which it then strips out on download. So we need to shove everything inside a directory
40 | # the way GitHub/Gitea does for repo archives
41 | #
42 | # Also, zig tar process doesn't handle gnu format for long names, nor does it seem to
43 | # handle posix long name semantics cleanly either. ustar works. This
44 | # should be using git archive, but we need our generated code to be part of it
45 | - name: Package source code with generated models
46 | run: |
47 | sed -i 's#// UNCOMMENT AFTER MODEL GEN TO USE IN BUILD SCRIPTS //##' build.zig
48 | tar -czf ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
49 | --format ustar \
50 | --exclude 'zig-*' \
51 | *
52 | # Something in this PR broke this transform. I don't mind removing it, but
53 | # the PR attempts to handle situations with or without a prefix, but it
54 | # doesn't. I have not yet determined what the problem is, though
55 | # https://github.com/ziglang/zig/pull/19111/files
56 | # --transform 's,^,${{ github.sha }}/,' *
57 | # - name: Sign
58 | # id: sign
59 | # uses: https://git.lerch.org/lobo/action-hsm-sign@v1
60 | # with:
61 | # pin: ${{ secrets.HSM_USER_PIN }}
62 | # files: ???
63 | # public_key: 'https://emil.lerch.org/serverpublic.pem'
64 | # - run: |
65 | # echo "Source 0 should be ./bar: ${{ steps.sign.outputs.SOURCE_0 }}"
66 | # - run: |
67 | # echo "Signature 0 should be ./bar.sig: ${{ steps.sign.outputs.SIG_0 }}"
68 | # - run: echo "URL of bar (0) is ${{ steps.sign.outputs.URL_0 }}"
69 | # - run: |
70 | # echo "Source 1 should be ./foo: ${{ steps.sign.outputs.SOURCE_1 }}"
71 | # - run: |
72 | # echo "Signature 1 should be ./foo.sig: ${{ steps.sign.outputs.SIG_1 }}"
73 | # - run: echo "URL of foo (1) is ${{ steps.sign.outputs.URL_1 }}"
74 | - name: Publish source code with generated models
75 | run: |
76 | curl -s --user ${{ github.actor }}:${{ secrets.PACKAGE_PUSH }} \
77 | --upload-file ${{ runner.temp }}/${{ github.sha }}-with-models.tar.gz \
78 | https://git.lerch.org/api/packages/lobo/generic/aws-sdk-with-models/${{ github.sha }}/${{ github.sha }}-with-models.tar.gz
79 | - name: Build example
80 | run: ( cd example && zig build ) # Make sure example builds
81 | - name: Notify
82 | uses: https://git.lerch.org/lobo/action-notify-ntfy@v2
83 | if: always()
84 | with:
85 | host: ${{ secrets.NTFY_HOST }}
86 | topic: ${{ secrets.NTFY_TOPIC }}
87 | user: ${{ secrets.NTFY_USER }}
88 | password: ${{ secrets.NTFY_PASSWORD }}
89 |
--------------------------------------------------------------------------------
/lib/date/src/parsing.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const log = std.log.scoped(.date);
3 | const zeit = @import("zeit");
4 |
/// Plain calendar date/time, used as an intermediate representation when
/// converting between strings, unix timestamps, and zeit's richer types.
pub const DateTime = struct {
    day: u8, // day of month (tests below use 1-31 values — 1-based)
    month: u8, // numeric month via @intFromEnum(zeit.Month); tests treat 8 as August, so presumably 1-based — TODO confirm against zeit
    year: u16,
    hour: u8,
    minute: u8,
    second: u8,

    /// Builds a DateTime from a zeit.Instant by first converting it to
    /// civil (wall-clock) time.
    pub fn fromInstant(val: zeit.Instant) DateTime {
        return fromTime(val.time());
    }

    /// Builds a DateTime from a zeit.Time.
    /// NOTE: the @intCast on year traps (in safe modes) for years outside
    /// the u16 range.
    pub fn fromTime(val: zeit.Time) DateTime {
        return DateTime{
            .day = val.day,
            .month = @intFromEnum(val.month),
            .year = @intCast(val.year),
            .hour = val.hour,
            .minute = val.minute,
            .second = val.second,
        };
    }

    /// Converts back to a zeit.Time (inverse of fromTime).
    pub fn time(self: DateTime) zeit.Time {
        return zeit.Time{
            .day = @intCast(self.day),
            .month = @enumFromInt(self.month),
            .year = self.year,
            .hour = @intCast(self.hour),
            .minute = @intCast(self.minute),
            .second = @intCast(self.second),
        };
    }

    /// Converts to a zeit.Instant; errors propagate from zeit.instant.
    pub fn instant(self: DateTime) !zeit.Instant {
        return try zeit.instant(.{ .source = .{ .time = self.time() } });
    }
};
43 |
/// Converts unix seconds into a DateTime.
/// Panics if zeit cannot represent the timestamp as an instant.
pub fn timestampToDateTime(timestamp: zeit.Seconds) DateTime {
    return DateTime.fromInstant(
        zeit.instant(.{ .source = .{ .unix_timestamp = timestamp } }) catch
            @panic("Failed to create instant from timestamp"),
    );
}
48 |
/// Parses an RFC 1123-style date string (see parseEnglishToDateTime)
/// straight into a unix timestamp.
pub fn parseEnglishToTimestamp(data: []const u8) !i64 {
    const parsed = try parseEnglishToDateTime(data);
    return try dateTimeToTimestamp(parsed);
}
52 |
/// Parses an RFC 1123 ("english") date such as
/// "Fri, 03 Jun 2022 18:12:36 GMT" into a DateTime.
/// May not handle dates before the epoch.
pub fn parseEnglishToDateTime(data: []const u8) !DateTime {
    return DateTime.fromInstant(try zeit.instant(.{ .source = .{ .rfc1123 = data } }));
}
59 |
/// Parses an ISO 8601 date string (see parseIso8601ToDateTime) straight
/// into a unix timestamp.
pub fn parseIso8601ToTimestamp(data: []const u8) !i64 {
    const parsed = try parseIso8601ToDateTime(data);
    return try dateTimeToTimestamp(parsed);
}
63 |
/// Parses an ISO 8601 date string into a DateTime.
/// May not handle dates before the epoch.
pub fn parseIso8601ToDateTime(data: []const u8) !DateTime {
    return DateTime.fromInstant(try zeit.instant(.{ .source = .{ .iso8601 = data } }));
}
70 |
/// Converts a DateTime into unix seconds; errors propagate from zeit
/// instant construction.
pub fn dateTimeToTimestamp(datetime: DateTime) !zeit.Seconds {
    const ins = try datetime.instant();
    return ins.unixTimestamp();
}
74 |
/// Logs a DateTime in ISO-8601-like form (YYYY-MM-DDTHH:MM:SSZ) at
/// debug level on the `.date` scope.
fn printDateTime(dt: DateTime) void {
    // Bug fix: the seconds field previously used `{:0<2}` (left-aligned,
    // zero-padded on the RIGHT), which would print 7 seconds as "70".
    // All fields now pad with zeros on the left like the rest.
    log.debug("{:0>4}-{:0>2}-{:0>2}T{:0>2}:{:0>2}:{:0>2}Z", .{
        dt.year,
        dt.month,
        dt.day,
        dt.hour,
        dt.minute,
        dt.second,
    });
}
85 |
/// Logs the current UTC time at debug level (via printDateTime).
pub fn printNowUtc() void {
    printDateTime(timestampToDateTime(std.time.timestamp()));
}
89 |
test "Convert timestamp to datetime" {
    // Smoke-print current time; also exercises printDateTime.
    printDateTime(timestampToDateTime(std.time.timestamp()));
    // 1598607147 == 2020-08-28T09:32:27Z
    try std.testing.expectEqual(DateTime{ .year = 2020, .month = 8, .day = 28, .hour = 9, .minute = 32, .second = 27 }, timestampToDateTime(1598607147));

    // 1604207167 == 2020-11-01T05:06:07Z
    try std.testing.expectEqual(DateTime{ .year = 2020, .month = 11, .day = 1, .hour = 5, .minute = 6, .second = 7 }, timestampToDateTime(1604207167));
    // Get time for date: https://wtools.io/convert-date-time-to-unix-time
    try std.testing.expectEqual(DateTime{ .year = 2015, .month = 8, .day = 30, .hour = 12, .minute = 36, .second = 0 }, timestampToDateTime(1440938160));
}

test "Convert datetime to timestamp" {
    // Inverse direction of the test above, same fixture values.
    try std.testing.expectEqual(@as(i64, 1598607147), try dateTimeToTimestamp(DateTime{ .year = 2020, .month = 8, .day = 28, .hour = 9, .minute = 32, .second = 27 }));
    try std.testing.expectEqual(@as(i64, 1604207167), try dateTimeToTimestamp(DateTime{ .year = 2020, .month = 11, .day = 1, .hour = 5, .minute = 6, .second = 7 }));
    try std.testing.expectEqual(@as(i64, 1440938160), try dateTimeToTimestamp(DateTime{ .year = 2015, .month = 8, .day = 30, .hour = 12, .minute = 36, .second = 0 }));
}

test "Convert ISO8601 string to timestamp" {
    // Basic (20200828T093227) and extended forms both accepted; the parser
    // also tolerates non-zero-padded hour/second fields and fractional seconds.
    try std.testing.expectEqual(DateTime{ .year = 2020, .month = 8, .day = 28, .hour = 9, .minute = 32, .second = 27 }, try parseIso8601ToDateTime("20200828T093227"));
    try std.testing.expectEqual(DateTime{ .year = 2020, .month = 8, .day = 28, .hour = 9, .minute = 32, .second = 27 }, try parseIso8601ToDateTime("2020-08-28T9:32:27Z"));
    try std.testing.expectEqual(DateTime{ .year = 2020, .month = 11, .day = 1, .hour = 5, .minute = 6, .second = 7 }, try parseIso8601ToDateTime("2020-11-01T5:06:7Z"));
    try std.testing.expectEqual(DateTime{ .year = 2015, .month = 8, .day = 30, .hour = 12, .minute = 36, .second = 0 }, try parseIso8601ToDateTime("2015-08-30T12:36:00.000Z"));
}
test "Convert datetime to timestamp before 1970" {
    // Negative (pre-epoch) timestamps round-trip through zeit.
    try std.testing.expectEqual(@as(i64, -449392815), try dateTimeToTimestamp(DateTime{ .year = 1955, .month = 10, .day = 5, .hour = 16, .minute = 39, .second = 45 }));
}

test "Convert whatever AWS is sending us to timestamp" {
    // RFC 1123 format, as seen in AWS HTTP Date headers.
    const string_date = "Fri, 03 Jun 2022 18:12:36 GMT";
    try std.testing.expectEqual(DateTime{ .year = 2022, .month = 6, .day = 3, .hour = 18, .minute = 12, .second = 36 }, try parseEnglishToDateTime(string_date));
}
119 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | AWS SDK for Zig
2 | ===============
3 |
4 | [Zig 0.15.1](https://ziglang.org/download/#release-0.15.1):
5 |
6 | [](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=build.yaml&state=closed)
7 |
8 | [Nightly Zig](https://ziglang.org/download/):
9 |
10 | [](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-nightly.yaml&state=closed)
11 |
12 | [Zig 0.14.1](https://ziglang.org/download/#release-0.14.1):
13 |
14 | [](https://git.lerch.org/lobo/aws-sdk-for-zig/actions?workflow=zig-previous.yaml&state=closed)
15 |
16 | Current executable size for the demo is 980k after compiling with -Doptimize=ReleaseSmall
17 | in x86_64-linux, and will vary based on services used. Tested targets:
18 |
19 | * x86_64-linux
20 | * riscv64-linux
21 | * aarch64-linux
22 | * x86_64-windows
23 | * arm-linux
24 | * aarch64-macos
25 | * x86_64-macos
26 |
27 | Tested targets are built, but not continuously tested, by CI.
28 |
29 | Branches
30 | --------
31 |
32 | * **zig-develop**: This branch tracks zig nightly, and is used mainly as a canary
33 | for breaking changes that will need to be dealt with when
34 | a new zig release appears. Expect significant delays in any
35 | build failures (PRs always welcome!).
36 | * **master**: This branch tracks the latest released zig version
37 | * **zig-0.14.x**: This branch tracks the 0.14/0.14.1 released zig versions.
38 | Support for these previous versions is best effort, generally
39 | degrading over time. Fixes will generally appear in master, then
40 | backported into the previous version.
41 |
42 | Other branches/tags exist but are unsupported
43 |
44 | Building
45 | --------
46 |
47 | `zig build` should work. It will build the code generation project, fetch model
48 | files from upstream AWS Go SDK v2, run the code generation, then build the main
49 | project with the generated code. Testing can be done with `zig build test`. Note that
50 | this command tests on all supported architectures, so for a faster testing
51 | process, use `zig build smoke-test` instead.
52 |
53 | To make development even faster, a build option is provided to avoid the use of
54 | LLVM. To use this, use the command `zig build -Dno-llvm smoke-test`. This
55 | can reduce build/test time 300%. Note, however, native code generation in zig
56 | is not yet complete, so you may see errors.
57 |
58 | Using
59 | -----
60 |
61 | This is designed for use with the Zig package manager, and exposes a module
62 | called "aws". Set up `build.zig.zon` and add the dependency/module to your project
63 | as normal and the package manager should do its thing. A full example can be found
64 | in [/example](example/build.zig.zon). This can also be used at build time in
65 | a downstream project's `build.zig`.
66 |
67 | Configuring the module and/or Running the demo
68 | ----------------------------------------------
69 |
This library mimics the aws c libraries for its work, so it operates like most
71 | other 'AWS things'. [/src/main.zig](src/main.zig) gives you a handful of examples
72 | for working with services. For local testing or alternative endpoints, there's
no real standard, so there is code to look for an environment variable
`AWS_ENDPOINT_URL` that will supersede all other configuration.
75 |
76 | Limitations
77 | -----------
78 |
79 | WebIdentityToken is not yet implemented.
80 |
81 | TODO List:
82 |
83 | * Json parsing is based on a fork of the 0.9.0 (maybe earlier?) json parser.
84 | This needs a re-visit. Note also that a json.zig file is embedded/copied
85 | from the codegen project, so that also needs a second look.
86 | * Take a look to see about compilation speed. With codegen caching this is
87 | reasonable, but still takes longer than needed.
* Upgrade the model files. This is a simple task, but I'd like the first
89 | item on this list to be completed first.
90 | * Implement sigv4a signing
91 | * Implement jitter/exponential backoff
92 | * Implement timeouts and other TODO's in the code
93 | * Add option to cache signature keys
94 | * Add CBOR support
95 |
96 | Dependency tree
97 | ---------------
98 |
99 | No dependencies:
100 | * aws_authentication: base structure for credentials (only one type)
101 | * aws_http_base: contains basic structures for http requests/results
102 | * case: provides functions to change casing
103 | * date: provides limited date manipulation functions
104 | * json: custom version of earlier stdlib json parser
105 | * xml: custom xml parser library
106 | * url: custom url encoding
107 |
108 | aws_credentials: Allows credential handling
109 | aws_authentication
110 |
111 | aws_http:
112 | aws_http_base
113 | aws_signing
114 |
115 | aws_signing: handles signing of http requests
116 | aws_http_base
117 | aws_authentication
118 | date
119 |
120 | aws: main usage point for libraries
121 | aws_http
122 | json
123 | url
124 | case
125 | date
126 | servicemodel
127 | xml_shaper
128 | aws_credentials
129 | aws_authentication
130 |
131 | main: main entrypoint for demo executable
132 | aws
133 |
134 | servicemodel: Provides access to all aws service generated models
135 | all generated model files
136 |
137 | xml_shaper: Manages interface from xml to in memory structures
138 | xml
139 | date
140 |
--------------------------------------------------------------------------------
/codegen/src/Hasher.zig:
--------------------------------------------------------------------------------
1 | const builtin = @import("builtin");
2 | const std = @import("std");
3 | const Hash = std.crypto.hash.sha2.Sha256;
4 |
/// Per-file hashing record. `failure` stores any error hit by the worker
/// thread so the coordinating thread can report it after the parallel phase.
pub const HashedFile = struct {
    fs_path: []const u8,
    normalized_path: []const u8,
    hash: [Hash.digest_length]u8,
    failure: Error!void,

    const Error = std.fs.File.OpenError || std.fs.File.ReadError || std.fs.File.StatError;

    /// Comparator for std.mem.sort: lexicographic order on normalized path.
    fn lessThan(_: void, lhs: *const HashedFile, rhs: *const HashedFile) bool {
        return std.mem.lessThan(u8, lhs.normalized_path, rhs.normalized_path);
    }
};
18 |
// Multihash framing: 1 byte function code + 1 byte digest length + the digest.
const multihash_len = 1 + 1 + Hash.digest_length;
// Length of the multihash when rendered as lowercase hex (2 chars per byte).
pub const hex_multihash_len = 2 * multihash_len;
pub const digest_len = Hash.digest_length;
22 |
// Hash-function codes from the multiformats multihash registry.
// Non-exhaustive (`_`) so unknown codes can round-trip through the enum.
const MultihashFunction = enum(u16) {
    identity = 0x00,
    sha1 = 0x11,
    @"sha2-256" = 0x12,
    @"sha2-512" = 0x13,
    @"sha3-512" = 0x14,
    @"sha3-384" = 0x15,
    @"sha3-256" = 0x16,
    @"sha3-224" = 0x17,
    @"sha2-384" = 0x20,
    @"sha2-256-trunc254-padded" = 0x1012,
    @"sha2-224" = 0x1013,
    @"sha2-512-224" = 0x1014,
    @"sha2-512-256" = 0x1015,
    @"blake2b-256" = 0xb220,
    _,
};
40 |
// Maps the compile-time selected Hash to its multihash function code;
// fails the build if Hash is changed without updating this table.
const multihash_function: MultihashFunction = switch (Hash) {
    std.crypto.hash.sha2.Sha256 => .@"sha2-256",
    else => @compileError("unreachable"),
};
comptime {
    // We avoid unnecessary uleb128 code in hexDigest by asserting here the
    // values are small enough to be contained in the one-byte encoding.
    std.debug.assert(@intFromEnum(multihash_function) < 127);
    std.debug.assert(Hash.digest_length < 127);
}
// Lowercase hex alphabet used by hexDigest/hex64.
const hex_charset = "0123456789abcdef";
52 |
/// Render `digest` as a lowercase-hex multihash: a two-byte header (function
/// code, digest length) followed by the digest bytes, each as two hex chars.
pub fn hexDigest(digest: [Hash.digest_length]u8) [multihash_len * 2]u8 {
    var out: [multihash_len * 2]u8 = undefined;

    // Header: multihash function code, then digest length (one byte each,
    // guaranteed to fit by the comptime asserts above).
    out[0] = hex_charset[@intFromEnum(multihash_function) >> 4];
    out[1] = hex_charset[@intFromEnum(multihash_function) & 15];
    out[2] = hex_charset[Hash.digest_length >> 4];
    out[3] = hex_charset[Hash.digest_length & 15];

    // Digest payload, two hex characters per byte.
    var idx: usize = 4;
    for (digest) |byte| {
        out[idx] = hex_charset[byte >> 4];
        out[idx + 1] = hex_charset[byte & 15];
        idx += 2;
    }
    return out;
}
/// Format `x` as 16 lowercase-hex characters, least-significant byte first
/// (little-endian byte order, matching the original implementation).
pub fn hex64(x: u64) [16]u8 {
    const charset = "0123456789abcdef";
    var out: [16]u8 = undefined;
    for (0..8) |i| {
        const byte: u8 = @truncate(x >> @as(u6, @intCast(8 * i)));
        out[2 * i] = charset[byte >> 4];
        out[2 * i + 1] = charset[byte & 15];
    }
    return out;
}
78 |
/// Predicate over directory-walk entries, used for include/exclude filtering.
pub const walkerFn = *const fn (std.fs.Dir.Walker.Entry) bool;

/// Default include filter: accepts every entry.
fn included(_: std.fs.Dir.Walker.Entry) bool {
    return true;
}
/// Default exclude filter: rejects nothing.
fn excluded(_: std.fs.Dir.Walker.Entry) bool {
    return false;
}
/// Options for computeDirectoryHash.
pub const ComputeDirectoryOptions = struct {
    /// Only entries for which this returns true are hashed.
    isIncluded: walkerFn = included,
    /// Entries for which this returns true are skipped.
    isExcluded: walkerFn = excluded,
    /// Populated with per-file results when needFileHashes is set;
    /// allocated from the thread pool's allocator (caller owns).
    fileHashes: []*HashedFile = undefined,
    needFileHashes: bool = false,
};
95 |
/// Compute a single SHA-256 over all files under `dir` (recursively),
/// hashing individual files in parallel on `thread_pool`. Files are sorted
/// by normalized path so the result is independent of walk order.
/// When `options.needFileHashes` is set, per-file records are returned via
/// `options.fileHashes` (allocated from the pool's allocator; caller owns).
pub fn computeDirectoryHash(
    thread_pool: *std.Thread.Pool,
    dir: std.fs.Dir,
    options: *ComputeDirectoryOptions,
) ![Hash.digest_length]u8 {
    const gpa = thread_pool.allocator;

    // We'll use an arena allocator for the path name strings since they all
    // need to be in memory for sorting.
    var arena_instance = std.heap.ArenaAllocator.init(gpa);
    defer arena_instance.deinit();
    const arena = arena_instance.allocator();

    // Collect all files, recursively, then sort.
    // Normally we're looking at around 300 model files
    var all_files = try std.ArrayList(*HashedFile).initCapacity(gpa, 300);
    defer all_files.deinit(gpa);

    var walker = try dir.walk(gpa);
    defer walker.deinit();

    {
        // The final hash will be a hash of each file hashed independently. This
        // allows hashing in parallel.
        var wait_group: std.Thread.WaitGroup = .{};
        defer wait_group.wait();

        while (try walker.next()) |entry| {
            switch (entry.kind) {
                .directory => continue,
                .file => {},
                else => return error.IllegalFileTypeInPackage,
            }
            if (options.isExcluded(entry) or !options.isIncluded(entry))
                continue;
            // Records must outlive this function only when the caller asked
            // for them; otherwise the arena reclaims them on exit.
            const alloc = if (options.needFileHashes) gpa else arena;
            const hashed_file = try alloc.create(HashedFile);
            const fs_path = try alloc.dupe(u8, entry.path);
            hashed_file.* = .{
                .fs_path = fs_path,
                .normalized_path = try normalizePath(alloc, fs_path),
                .hash = undefined, // to be populated by the worker
                .failure = undefined, // to be populated by the worker
            };
            // Track the file before handing it to a worker so an append
            // failure cannot orphan an in-flight entry.
            try all_files.append(gpa, hashed_file);

            wait_group.start();
            // Balance start() if the spawn fails; otherwise the deferred
            // wait() above would deadlock waiting on a worker that never ran.
            errdefer wait_group.finish();
            try thread_pool.spawn(workerHashFile, .{ dir, hashed_file, &wait_group });
        }
    }

    std.mem.sort(*HashedFile, all_files.items, {}, HashedFile.lessThan);

    var hasher = Hash.init(.{});
    var any_failures = false;
    for (all_files.items) |hashed_file| {
        hashed_file.failure catch |err| {
            any_failures = true;
            std.log.err("unable to hash '{s}': {s}", .{ hashed_file.fs_path, @errorName(err) });
        };
        hasher.update(&hashed_file.hash);
    }
    if (any_failures) return error.DirectoryHashUnavailable;
    if (options.needFileHashes) options.fileHashes = try all_files.toOwnedSlice(gpa);
    return hasher.finalResult();
}
/// Thread-pool entry point: hashes one file and records any error in
/// hashed_file.failure for the coordinating thread to inspect later.
fn workerHashFile(dir: std.fs.Dir, hashed_file: *HashedFile, wg: *std.Thread.WaitGroup) void {
    defer wg.finish();
    hashed_file.failure = hashFileFallible(dir, hashed_file);
}
166 |
/// Hash one file: the digest covers the normalized path, a separator byte
/// plus the executable bit, and then the file contents.
fn hashFileFallible(dir: std.fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void {
    var read_buf: [8000]u8 = undefined;
    var file = try dir.openFile(hashed_file.fs_path, .{});
    defer file.close();

    var hasher = Hash.init(.{});
    hasher.update(hashed_file.normalized_path);
    hasher.update(&.{ 0, @intFromBool(try isExecutable(file)) });

    // Stream the contents through the hasher until EOF (read returns 0).
    var n = try file.read(&read_buf);
    while (n != 0) : (n = try file.read(&read_buf)) {
        hasher.update(read_buf[0..n]);
    }
    hasher.final(&hashed_file.hash);
}
181 |
182 | /// Make a file system path identical independently of operating system path inconsistencies.
183 | /// This converts backslashes into forward slashes.
/// Make a file system path identical independently of operating system path
/// inconsistencies by converting backslashes into forward slashes.
/// On platforms whose native separator is already '/', the input slice is
/// returned unchanged (no allocation); otherwise a rewritten copy is made.
fn normalizePath(arena: std.mem.Allocator, fs_path: []const u8) ![]const u8 {
    const canonical_sep = '/';

    // Nothing to rewrite when the native separator already matches.
    if (std.fs.path.sep == canonical_sep)
        return fs_path;

    const normalized = try arena.dupe(u8, fs_path);
    for (normalized) |*ch| {
        if (ch.* == std.fs.path.sep) ch.* = canonical_sep;
    }
    return normalized;
}
199 |
/// Report whether `file` has the owner-executable bit set.
fn isExecutable(file: std.fs.File) !bool {
    switch (builtin.os.tag) {
        // TODO check the ACL on Windows.
        // Until this is implemented, this could be a false negative on
        // Windows, which is why we do not yet set executable_bit_only above
        // when unpacking the tarball.
        .windows => return false,
        else => {
            const stat = try file.stat();
            return (stat.mode & std.posix.S.IXUSR) != 0;
        },
    }
}
212 |
--------------------------------------------------------------------------------
/src/url.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 |
/// Identity transformer: returns the field name unchanged.
/// The allocator parameter exists only to satisfy fieldNameTransformerFn.
fn defaultTransformer(_: std.mem.Allocator, field_name: []const u8) anyerror![]const u8 {
    return field_name;
}
7 |
/// Maps a struct field name to the key used in the encoded query string.
/// May allocate from the provided allocator for the transformed name.
pub const fieldNameTransformerFn = *const fn (std.mem.Allocator, []const u8) anyerror![]const u8;

pub const EncodingOptions = struct {
    /// Defaults to passing field names through unchanged.
    field_name_transformer: fieldNameTransformerFn = defaultTransformer,
};
13 |
/// URL-encode `obj` (typically a struct of request parameters) to `writer`
/// as `key=value` pairs joined by '&'. The boolean returned by the internal
/// encoder (the "nothing written yet" flag) is intentionally discarded.
pub fn encode(allocator: std.mem.Allocator, obj: anytype, writer: *std.Io.Writer, comptime options: EncodingOptions) !void {
    _ = try encodeInternal(allocator, "", "", true, obj, writer, options);
}
17 |
/// Encode every field of struct `obj`, prefixing keys with `parent`.
/// `first` is the running "no pairs written yet" flag; the updated flag is
/// returned so callers can chain encoding across fields.
fn encodeStruct(
    allocator: std.mem.Allocator,
    parent: []const u8,
    first: bool,
    obj: anytype,
    writer: *std.Io.Writer,
    comptime options: EncodingOptions,
) !bool {
    var still_first = first;
    // Transformed field names only need to live for the duration of this call.
    var arena = std.heap.ArenaAllocator.init(allocator);
    defer arena.deinit();
    const name_alloc = arena.allocator();
    inline for (@typeInfo(@TypeOf(obj)).@"struct".fields) |field| {
        const encoded_name = try options.field_name_transformer(name_alloc, field.name);
        still_first = try encodeInternal(allocator, parent, encoded_name, still_first, @field(obj, field.name), writer, options);
    }
    return still_first;
}
37 |
/// Recursively URL-encode `obj`, emitting `parent` ++ `field_name` ++ "=value"
/// pairs separated by '&'. `first` is true while nothing has been written yet
/// (suppresses the leading '&'); the updated flag is returned for chaining.
/// Dispatch is on the comptime type of `obj`.
pub fn encodeInternal(
    allocator: std.mem.Allocator,
    parent: []const u8,
    field_name: []const u8,
    first: bool,
    obj: anytype,
    writer: *std.Io.Writer,
    comptime options: EncodingOptions,
) !bool {
    // @compileLog(@typeName(@TypeOf(obj)));
    // @compileLog(@typeInfo(@TypeOf(obj)));
    var rc = first;
    switch (@typeInfo(@TypeOf(obj))) {
        // Null optionals are skipped entirely: no key, no separator.
        .optional => if (obj) |o| {
            rc = try encodeInternal(allocator, parent, field_name, first, o, writer, options);
        },
        // Single-item pointers are dereferenced and re-dispatched;
        // slices/many-pointers are formatted directly below.
        .pointer => |ti| if (ti.size == .one) {
            rc = try encodeInternal(allocator, parent, field_name, first, obj.*, writer, options);
        } else {
            if (!first) _ = try writer.write("&");
            // @compileLog(@typeInfo(@TypeOf(obj)));
            switch (ti.child) {
                // TODO: not sure this first one is valid. How should [][]const u8 be serialized here?
                []const u8 => {
                    // if (true) @panic("panic at the disco!");
                    std.log.warn(
                        "encoding object of type [][]const u8...pretty sure this is wrong {s}{s}={any}",
                        .{ parent, field_name, obj },
                    );
                    try writer.print("{s}{s}={any}", .{ parent, field_name, obj });
                },
                // []const u8 / []u8: treated as a string value.
                u8 => try writer.print("{s}{s}={s}", .{ parent, field_name, obj }),
                else => try writer.print("{s}{s}={any}", .{ parent, field_name, obj }),
            }
            rc = false;
        },
        // An empty field_name marks the top-level struct; otherwise nest
        // with a dotted prefix ("parent.field.").
        .@"struct" => if (std.mem.eql(u8, "", field_name)) {
            rc = try encodeStruct(allocator, parent, first, obj, writer, options);
        } else {
            // TODO: It would be lovely if we could concat at compile time or allocPrint at runtime
            // XOR have compile time allocator support. Alas, neither are possible:
            // https://github.com/ziglang/zig/issues/868: Comptime detection (feels like foot gun)
            // https://github.com/ziglang/zig/issues/1291: Comptime allocator
            const new_parent = try std.fmt.allocPrint(allocator, "{s}{s}.", .{ parent, field_name });
            defer allocator.free(new_parent);
            rc = try encodeStruct(allocator, new_parent, first, obj, writer, options);
            // try encodeStruct(parent ++ field_name ++ ".", first, obj, writer, options);
        },
        .array => {
            if (!first) _ = try writer.write("&");
            try writer.print("{s}{s}={s}", .{ parent, field_name, obj });
            rc = false;
        },
        .int, .comptime_int, .float, .comptime_float => {
            if (!first) _ = try writer.write("&");
            try writer.print("{s}{s}={d}", .{ parent, field_name, obj });
            rc = false;
        },
        // BUGS! any doesn't work - a lot. Check this out:
        // https://github.com/ziglang/zig/blob/master/lib/std/fmt.zig#L424
        else => {
            if (!first) _ = try writer.write("&");
            try writer.print("{s}{s}={any}", .{ parent, field_name, obj });
            rc = false;
        },
    }
    return rc;
}
106 |
// Basic struct field encoding: key=value pairs joined by '&'.
test "can urlencode an object" {
    const expected = "Action=GetCallerIdentity&Version=2021-01-01";
    var aw = std.Io.Writer.Allocating.init(std.testing.allocator);
    defer aw.deinit();
    try encode(
        std.testing.allocator,
        .{ .Action = "GetCallerIdentity", .Version = "2021-01-01" },
        &aw.writer,
        .{},
    );
    try std.testing.expectEqualStrings(expected, aw.written());
}
// Integers are rendered with decimal formatting ({d}).
test "can urlencode an object with integer" {
    const expected = "Action=GetCallerIdentity&Duration=32";
    var aw = std.Io.Writer.Allocating.init(std.testing.allocator);
    defer aw.deinit();
    try encode(
        std.testing.allocator,
        .{ .Action = "GetCallerIdentity", .Duration = 32 },
        &aw.writer,
        .{},
    );
    try std.testing.expectEqualStrings(expected, aw.written());
}
// Fixture with all-optional fields, to exercise null skipping.
const UnsetValues = struct {
    action: ?[]const u8 = null,
    duration: ?i64 = null,
    val1: ?i64 = null,
    val2: ?[]const u8 = null,
};
// Null optional fields are omitted entirely: no key and no separator.
test "can urlencode an object with unset values" {
    const expected = "action=GetCallerIdentity&duration=32";
    var aw = std.Io.Writer.Allocating.init(std.testing.allocator);
    defer aw.deinit();
    try encode(
        std.testing.allocator,
        UnsetValues{ .action = "GetCallerIdentity", .duration = 32 },
        &aw.writer,
        .{},
    );
    try std.testing.expectEqualStrings(expected, aw.written());
}
// Nested structs get dotted-prefix keys (parent.child=value).
test "can urlencode a complex object" {
    const expected = "Action=GetCallerIdentity&Version=2021-01-01&complex.innermember=foo";
    var aw = std.Io.Writer.Allocating.init(std.testing.allocator);
    defer aw.deinit();
    try encode(
        std.testing.allocator,
        .{ .Action = "GetCallerIdentity", .Version = "2021-01-01", .complex = .{ .innermember = "foo" } },
        &aw.writer,
        .{},
    );
    try std.testing.expectEqualStrings(expected, aw.written());
}
161 |
/// EC2-style filter fixture. fieldNameFor maps struct field names to their
/// intended wire names. NOTE(review): nothing in this file's encode path
/// calls fieldNameFor — confirm whether generated code consumes it.
const Filter = struct {
    name: ?[]const u8 = null,
    values: ?[][]const u8 = null,

    pub fn fieldNameFor(_: @This(), comptime field_name: []const u8) []const u8 {
        const mappings = .{
            .name = "Name",
            .values = "Value",
        };
        return @field(mappings, field_name);
    }
};
174 |
/// Minimal DescribeRegions-style request fixture for the EC2 filter test.
const Request: type = struct {
    filters: ?[]Filter = null,
    region_names: ?[][]const u8 = null,
    dry_run: ?bool = null,
    all_regions: ?bool = null,
};
test "can urlencode an EC2 Filter" {
    // TODO: This is a strange test, mainly to document current behavior
    // EC2 filters are supposed to be something like
    // Filter.Name=foo&Filter.Values=bar or, when there is more, something like
    // Filter.1.Name=instance-type&Filter.1.Value.1=m1.small&Filter.1.Value.2=m1.large&Filter.2.Name=block-device-mapping.status&Filter.2.Value.1=attached
    //
    // This looks like a real PITA, so until it is actually needed, this is
    // a placeholder test to track what actual encoding is happening. This
    // changed between zig 0.14.x and 0.15.1, and I'm not entirely sure why
    // yet, but because the remaining functionality is fine, we're going with
    // this
    //
    // Note the expected output uses the struct field name ("filters"), not
    // the Filter.fieldNameFor mappings, and the slice of structs falls
    // through to {any} debug formatting.
    const zig_14x_expected = "filters={ url.Filter{ .name = { 102, 111, 111 }, .values = { { ... } } } }";
    _ = zig_14x_expected;
    const expected = "filters={ .{ .name = { 102, 111, 111 }, .values = { { ... } } } }";
    var aw = std.Io.Writer.Allocating.init(std.testing.allocator);
    defer aw.deinit();
    try encode(
        std.testing.allocator,
        Request{
            .filters = @constCast(&[_]Filter{.{ .name = "foo", .values = @constCast(&[_][]const u8{"bar"}) }}),
        },
        &aw.writer,
        .{},
    );
    try std.testing.expectEqualStrings(expected, aw.written());
}
207 |
--------------------------------------------------------------------------------
/codegen/src/serialization/json.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const smithy = @import("smithy");
3 | const smithy_tools = @import("../smithy_tools.zig");
4 | const support = @import("../support.zig");
5 |
6 | const GenerationState = @import("../GenerationState.zig");
7 | const GenerateTypeOptions = @import("../GenerateTypeOptions.zig");
8 | const Allocator = std.mem.Allocator;
9 |
10 | const Shape = smithy_tools.Shape;
11 |
/// One struct member that participates in JSON body serialization, with the
/// resolved JSON key (honoring the smithy json_name trait when present).
const JsonMember = struct {
    field_name: []const u8,
    json_key: []const u8,
    target: []const u8,
    type_member: smithy.TypeMember,
    shape_info: smithy.ShapeInfo,
};
19 |
/// Emit a `jsonStringify` method for the shape identified by `shape_id`.
/// Writes nothing when the protocol is not JSON-based or the shape has no
/// JSON-serialized members.
pub fn generateToJsonFunction(shape_id: []const u8, writer: *std.Io.Writer, state: GenerationState, comptime options: GenerateTypeOptions) !void {
    _ = options;
    const allocator = state.allocator;

    const shape_info = try smithy_tools.getShapeInfo(shape_id, state.file_state.shapes);

    // Guard clauses: bail out early when there is nothing to serialize.
    const json_members = (try getJsonMembers(allocator, shape_info.shape, state)) orelse return;
    if (json_members.items.len == 0) return;

    try writer.writeAll("pub fn jsonStringify(self: @This(), jw: anytype) !void {\n");
    try writer.writeAll("try jw.beginObject();\n");
    try writer.writeAll("{\n");

    for (json_members.items) |member| {
        const member_value = try getMemberValueJson(allocator, "self", member);
        defer allocator.free(member_value);

        try writer.print("try jw.objectField(\"{s}\");\n", .{member.json_key});
        try writeMemberJson(
            .{
                .shape_id = member.target,
                .field_name = member.field_name,
                .field_value = member_value,
                .state = state.indent(),
                .member = member.type_member,
            },
            writer,
        );
    }

    try writer.writeAll("}\n");
    try writer.writeAll("try jw.endObject();\n");
    try writer.writeAll("}\n\n");
}
56 |
/// Collect the members of `shape` that belong in the JSON body, excluding
/// members bound to HTTP headers or query parameters. Returns null when the
/// protocol is not JSON-based or when no members remain. The returned list's
/// entries are allocated from `allocator`; the caller owns the list.
fn getJsonMembers(allocator: Allocator, shape: Shape, state: GenerationState) !?std.ArrayListUnmanaged(JsonMember) {
    const is_json_shape = switch (state.file_state.protocol) {
        .json_1_0, .json_1_1, .rest_json_1 => true,
        else => false,
    };

    if (!is_json_shape) {
        return null;
    }

    // Scratch name -> member map; keys and values are borrowed from the
    // smithy shape data, so only the map's own storage needs freeing.
    // The defer releases it on every exit path (it was previously leaked).
    var hash_map = std.StringHashMapUnmanaged(smithy.TypeMember){};
    defer hash_map.deinit(state.allocator);

    const shape_members = smithy_tools.getShapeMembers(shape);
    for (shape_members) |member| {
        try hash_map.putNoClobber(state.allocator, member.name, member);
    }

    // Members bound to HTTP headers or query strings are not serialized
    // into the JSON body.
    for (shape_members) |member| {
        for (member.traits) |trait| {
            switch (trait) {
                .http_header, .http_query => {
                    std.debug.assert(hash_map.remove(member.name));
                    break;
                },
                else => continue,
            }
        }
    }

    if (hash_map.count() == 0) {
        return null;
    }

    var json_members = std.ArrayListUnmanaged(JsonMember){};

    var iter = hash_map.iterator();
    while (iter.next()) |kvp| {
        const member = kvp.value_ptr.*;

        // The json_name trait, when present, overrides the member name as
        // the JSON object key.
        const key = blk: {
            if (smithy_tools.findTrait(.json_name, member.traits)) |trait| {
                break :blk trait.json_name;
            }

            break :blk member.name;
        };

        try json_members.append(allocator, .{
            .field_name = try support.constantName(allocator, member.name, .snake),
            .json_key = key,
            .target = member.target,
            .type_member = member,
            .shape_info = try smithy_tools.getShapeInfo(member.target, state.file_state.shapes),
        });
    }

    return json_members;
}
115 |
/// Build the Zig source expression that reads `member` off of `source`
/// (e.g. `@field(self, "field_name")`). Caller owns the returned slice.
/// The former intermediate Allocating-writer round trip through
/// writeMemberValue (a verbatim pass-through) is inlined here; the returned
/// bytes and ownership are unchanged.
fn getMemberValueJson(allocator: std.mem.Allocator, source: []const u8, member: JsonMember) ![]const u8 {
    return std.fmt.allocPrint(allocator, "@field({s}, \"{s}\")", .{ source, member.field_name });
}
130 |
/// Map a smithy shape to the std.json.Value tag literal used for it in
/// generated code. Panics on shapes with no scalar JSON representation.
fn getShapeJsonValueType(shape: Shape) []const u8 {
    return switch (shape) {
        .string, .@"enum", .blob, .document, .timestamp => ".string",
        .boolean => ".bool",
        .integer, .bigInteger, .short, .long => ".integer",
        .float, .double, .bigDecimal => ".float",
        else => std.debug.panic("Unexpected shape: {}", .{shape}),
    };
}
140 |
/// Write the expression used to reference a member's value.
/// Currently writes `member_value` verbatim.
fn writeMemberValue(
    writer: *std.Io.Writer,
    member_value: []const u8,
) !void {
    try writer.writeAll(member_value);
}
147 |
/// Inputs for writeMemberJson and its shape-specific helpers.
const WriteMemberJsonParams = struct {
    /// Target shape id of the member being serialized.
    shape_id: []const u8,
    field_name: []const u8,
    /// Zig source expression that evaluates to the member's value.
    field_value: []const u8,
    state: GenerationState,
    member: smithy.TypeMember,
};
155 |
/// Emit code that serializes a structure/union member as a JSON object.
/// Optional members are wrapped in an `if (x) |capture|` with `null`
/// written in the else branch. The emitted start/end comments (the second
/// via defer) bracket the generated section for readability.
fn writeStructureJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) !void {
    const shape_type = "structure";
    const allocator = params.state.allocator;
    const state = params.state;

    const shape_info = try smithy_tools.getShapeInfo(params.shape_id, state.file_state.shapes);
    const shape = shape_info.shape;

    // Unique name for generated locals: field + shape type + nesting depth.
    const structure_name = try std.fmt.allocPrint(params.state.allocator, "{s}_{s}_{d}", .{ params.field_name, shape_type, state.indent_level });
    defer params.state.allocator.free(structure_name);

    const object_value_capture = try std.fmt.allocPrint(allocator, "{s}_capture", .{structure_name});
    defer allocator.free(object_value_capture);

    try writer.print("\n// start {s}: {s}\n", .{ shape_type, structure_name });
    defer writer.print("// end {s}: {s}\n", .{ shape_type, structure_name }) catch std.debug.panic("Unreachable", .{});

    if (try getJsonMembers(allocator, shape, state)) |json_members| {
        if (json_members.items.len > 0) {
            const is_optional = smithy_tools.shapeIsOptional(params.member.traits);

            var object_value = params.field_value;

            if (is_optional) {
                object_value = object_value_capture;

                try writer.print("if ({s}) |{s}|", .{ params.field_value, object_value_capture });
                try writer.writeAll("{\n");
            }

            try writer.writeAll("try jw.beginObject();\n");
            try writer.writeAll("{\n");

            // this is a workaround in case a child structure doesn't have any fields
            // and therefore doesn't use the structure variable so we capture it here.
            // the compiler should optimize this away
            try writer.print("const unused_capture_{s} = {s};\n", .{ structure_name, object_value });
            try writer.print("_ = unused_capture_{s};\n", .{structure_name});

            for (json_members.items) |member| {
                const member_value = try getMemberValueJson(allocator, object_value, member);
                defer allocator.free(member_value);

                try writer.print("try jw.objectField(\"{s}\");\n", .{member.json_key});
                try writeMemberJson(
                    .{
                        .shape_id = member.target,
                        .field_name = member.field_name,
                        .field_value = member_value,
                        .state = state.indent(),
                        .member = member.type_member,
                    },
                    writer,
                );
            }

            try writer.writeAll("}\n");
            try writer.writeAll("try jw.endObject();\n");

            if (is_optional) {
                try writer.writeAll("} else {\n");
                try writer.writeAll("try jw.write(null);\n");
                try writer.writeAll("}\n");
            }
        }
    }
}
223 |
/// Emit code that serializes a list member as a JSON array. Optional lists
/// get an `if (x) |capture|` wrapper with `null` in the else branch.
/// NOTE(review): elements are emitted with `jw.write(...)` directly rather
/// than recursing through writeMemberJson — confirm lists of structures
/// serialize as intended (their generated jsonStringify may cover this).
fn writeListJson(list: smithy_tools.ListShape, params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerror!void {
    const state = params.state;
    const allocator = state.allocator;

    // Unique name for generated locals: field + nesting depth.
    const list_name = try std.fmt.allocPrint(allocator, "{s}_list_{d}", .{ params.field_name, state.indent_level });
    defer state.allocator.free(list_name);

    try writer.print("\n// start list: {s}\n", .{list_name});
    defer writer.print("// end list: {s}\n", .{list_name}) catch std.debug.panic("Unreachable", .{});

    const list_each_value = try std.fmt.allocPrint(allocator, "{s}_value", .{list_name});
    defer allocator.free(list_each_value);

    const list_capture = try std.fmt.allocPrint(allocator, "{s}_capture", .{list_name});
    defer allocator.free(list_capture);

    {
        const list_is_optional = smithy_tools.shapeIsOptional(list.traits);

        var list_value = params.field_value;

        if (list_is_optional) {
            list_value = list_capture;

            try writer.print("if ({s}) |{s}| ", .{
                params.field_value,
                list_capture,
            });
            try writer.writeAll("{\n");
        }

        // start loop
        try writer.writeAll("try jw.beginArray();\n");
        try writer.print("for ({s}) |{s}|", .{ list_value, list_each_value });
        try writer.writeAll("{\n");
        try writer.writeAll("try jw.write(");
        try writeMemberValue(
            writer,
            list_each_value,
        );
        try writer.writeAll(");\n");
        try writer.writeAll("}\n");
        try writer.writeAll("try jw.endArray();\n");
        // end loop

        if (list_is_optional) {
            try writer.writeAll("} else {\n");
            try writer.writeAll("try jw.write(null);\n");
            try writer.writeAll("}\n");
        }
    }
}
276 |
/// Emit code that serializes a map member as a JSON object, one objectField
/// per key/value pair. Maps without the `required` trait are treated as
/// optional and wrapped in an `if (x) |capture|` with `null` otherwise.
fn writeMapJson(map: smithy_tools.MapShape, params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerror!void {
    const state = params.state;
    const name = params.field_name;
    const value = params.field_value;
    const allocator = state.allocator;

    // Unique name for generated locals: field + nesting depth.
    const map_name = try std.fmt.allocPrint(allocator, "{s}_object_map_{d}", .{ name, state.indent_level });
    defer allocator.free(map_name);

    try writer.print("\n// start map: {s}\n", .{map_name});
    defer writer.print("// end map: {s}\n", .{map_name}) catch std.debug.panic("Unreachable", .{});

    const map_value_capture = try std.fmt.allocPrint(allocator, "{s}_kvp", .{map_name});
    defer allocator.free(map_value_capture);

    const map_capture_key = try std.fmt.allocPrint(allocator, "{s}.key", .{map_value_capture});
    defer allocator.free(map_capture_key);

    const map_capture_value = try std.fmt.allocPrint(allocator, "{s}.value", .{map_value_capture});
    defer allocator.free(map_capture_value);

    const value_shape_info = try smithy_tools.getShapeInfo(map.value, state.file_state.shapes);

    // Synthetic member describing the map's value shape for recursion.
    const value_member = smithy.TypeMember{
        .name = "value",
        .target = map.value,
        .traits = smithy_tools.getShapeTraits(value_shape_info.shape),
    };

    // Was allocated without a matching free; release it like every other
    // per-call name in this file.
    const map_capture = try std.fmt.allocPrint(allocator, "{s}_capture", .{map_name});
    defer allocator.free(map_capture);

    {
        const map_member = params.member;
        const map_is_optional = !smithy_tools.hasTrait(.required, map_member.traits);

        var map_value = value;

        if (map_is_optional) {
            map_value = map_capture;

            try writer.print("if ({s}) |{s}| ", .{
                value,
                map_capture,
            });
            try writer.writeAll("{\n");
        }

        try writer.writeAll("try jw.beginObject();\n");
        try writer.writeAll("{\n");

        // start loop
        try writer.print("for ({s}) |{s}|", .{ map_value, map_value_capture });
        try writer.writeAll("{\n");
        try writer.print("try jw.objectField({s});\n", .{map_capture_key});

        try writeMemberJson(.{
            .shape_id = map.value,
            .field_name = "value",
            .field_value = map_capture_value,
            .state = state.indent(),
            .member = value_member,
        }, writer);

        try writer.writeAll("}\n");
        // end loop

        try writer.writeAll("}\n");
        try writer.writeAll("try jw.endObject();\n");

        if (map_is_optional) {
            try writer.writeAll("} else {\n");
            try writer.writeAll("try jw.write(null);\n");
            try writer.writeAll("}\n");
        }
    }
}
353 |
/// Emit a direct `jw.write(...)` for a scalar member, tagging the line with
/// a comment naming the shape type for readability of the generated code.
fn writeScalarJson(comment: []const u8, params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerror!void {
    try writer.print("try jw.write({s}); // {s}\n\n", .{ params.field_value, comment });
}
357 |
/// Dispatch serialization of one member by its shape type. A type-stack
/// recurrence check bounds recursive shapes: once the same shape id has
/// been seen more than twice on the stack, emission stops silently.
fn writeMemberJson(params: WriteMemberJsonParams, writer: *std.Io.Writer) anyerror!void {
    const shape_id = params.shape_id;
    const state = params.state;
    const shape_info = try smithy_tools.getShapeInfo(shape_id, state.file_state.shapes);
    const shape = shape_info.shape;

    // Recursion guard for self-referential shapes.
    if (state.getTypeRecurrenceCount(shape_id) > 2) {
        return;
    }

    try state.appendToTypeStack(&shape_info);
    defer state.popFromTypeStack();

    switch (shape) {
        .structure, .uniontype => try writeStructureJson(params, writer),
        .list => |l| try writeListJson(l, params, writer),
        .map => |m| try writeMapJson(m, params, writer),
        .timestamp => try writeScalarJson("timestamp", params, writer),
        .string => try writeScalarJson("string", params, writer),
        .@"enum" => try writeScalarJson("enum", params, writer),
        .document => try writeScalarJson("document", params, writer),
        .blob => try writeScalarJson("blob", params, writer),
        .boolean => try writeScalarJson("bool", params, writer),
        .float => try writeScalarJson("float", params, writer),
        .integer => try writeScalarJson("integer", params, writer),
        .long => try writeScalarJson("long", params, writer),
        .double => try writeScalarJson("double", params, writer),
        .bigDecimal => try writeScalarJson("bigDecimal", params, writer),
        .bigInteger => try writeScalarJson("bigInteger", params, writer),
        .unit => try writeScalarJson("unit", params, writer),
        .byte => try writeScalarJson("byte", params, writer),
        .short => try writeScalarJson("short", params, writer),
        .service, .resource, .operation, .member, .set => std.debug.panic("Shape type not supported: {}", .{shape}),
    }
}
393 |
--------------------------------------------------------------------------------
/src/main.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const aws = @import("aws.zig");
3 | const json = @import("json");
4 |
5 | var verbose: u8 = 0;
6 |
/// Custom std.log sink: suppresses debug-level messages from noisy scopes
/// until the global `verbose` counter reaches a per-scope threshold, then
/// writes "[level] (scope): message" to stderr.
pub fn log(
    comptime level: std.log.Level,
    comptime scope: @TypeOf(.EnumLiteral),
    comptime format: []const u8,
    args: anytype,
) void {
    // Verbosity required before debug messages from this scope are shown.
    const required_verbosity: u8 = comptime if (scope == .aws_signing or
        scope == .aws_credentials or
        scope == .xml_shaper or
        scope == .date)
        3
    else if (scope == .awshttp)
        2
    else if (scope == .aws)
        1
    else
        0;

    const is_debug = @intFromEnum(level) >= @intFromEnum(std.log.Level.debug);
    if (required_verbosity > 0 and is_debug and verbose < required_verbosity)
        return;

    const scope_prefix = "(" ++ @tagName(scope) ++ "): ";
    const prefix = "[" ++ @tagName(level) ++ "] " ++ scope_prefix;

    // Print the message to stderr, silently ignoring any errors.
    std.debug.lockStdErr();
    defer std.debug.unlockStdErr();
    var stderr_writer = std.fs.File.stderr().writer(&.{});
    const stderr = &stderr_writer.interface;
    nosuspend stderr.print(prefix ++ format ++ "\n", args) catch return;
}
41 |
/// Route all std.log output through the scope/verbosity-aware `log` above.
pub const std_options = std.Options{
    .logFn = log,
};
/// Every runnable smoke test, one per AWS protocol/feature combination.
/// Field names are matched verbatim against command-line arguments in
/// `main`, and all tests run when none are named.
const Tests = enum {
    query_no_input,
    query_with_input,
    ec2_query_no_input,
    ec2_query_with_input,
    json_1_0_query_with_input,
    json_1_0_query_no_input,
    json_1_1_query_with_input,
    json_1_1_query_no_input,
    rest_json_1_query_no_input,
    rest_json_1_query_with_input,
    rest_json_1_work_with_lambda,
    rest_xml_no_input,
    rest_xml_anything_but_s3,
    rest_xml_work_with_s3,
};
61 |
/// Smoke-test driver. Parses CLI flags (-h/--help, repeated -v for
/// verbosity, -x/--proxy URL) and test names, then runs each selected
/// test against live AWS endpoints. With no test names given, all run.
pub fn main() anyerror!void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();
    var tests = try std.ArrayList(Tests).initCapacity(allocator, @typeInfo(Tests).@"enum".fields.len);
    defer tests.deinit(allocator);
    var args = try std.process.argsWithAllocator(allocator);
    defer args.deinit();
    var stdout_buf: [4096]u8 = undefined;
    // The File.Writer must stay addressable and be used through a pointer
    // to its embedded interface: std.Io.Writer's drain recovers the parent
    // File.Writer via @fieldParentPtr, so copying the interface out by
    // value (as this code previously did) leaves the parent pointer
    // dangling and corrupts buffered output on flush.
    var stdout_writer = std.fs.File.stdout().writer(&stdout_buf);
    const stdout = &stdout_writer.interface;
    defer stdout.flush() catch @panic("could not flush stdout");
    var arg0: ?[]const u8 = null;
    var proxy: ?std.http.Client.Proxy = null;
    while (args.next()) |arg| {
        if (arg0 == null) arg0 = arg;
        if (std.mem.eql(u8, "-h", arg) or std.mem.eql(u8, "--help", arg)) {
            try stdout.print(
                \\usage: {?s} [-h|--help] [-v][-v][-v] [-x|--proxy ] [tests...]
                \\
                \\Where tests are one of the following:
                \\
            , .{arg0});
            inline for (std.meta.fields(Tests)) |enumfield| {
                try stdout.print("* {s}\n", .{enumfield.name});
            }
            return;
        }
        if (std.mem.eql(u8, "-x", arg) or std.mem.eql(u8, "--proxy", arg)) {
            proxy = try proxyFromString(args.next().?); // parse stuff
            continue;
        }
        // Each additional 'v' (e.g. -vvv) raises verbosity by one.
        if (std.mem.startsWith(u8, arg, "-v")) {
            for (arg[1..]) |c| {
                if (c != 'v') return error.InvalidArgument;
                verbose += 1;
            }
            continue;
        }
        inline for (@typeInfo(Tests).@"enum".fields) |f| {
            if (std.mem.eql(u8, f.name, arg)) {
                try tests.append(allocator, @field(Tests, f.name));
                break;
            }
        }
    }
    // No tests named on the command line: run everything.
    if (tests.items.len == 0) {
        inline for (@typeInfo(Tests).@"enum".fields) |f|
            try tests.append(allocator, @field(Tests, f.name));
    }

    std.log.info("Start\n", .{});
    const client_options = aws.ClientOptions{ .proxy = proxy };
    var client = aws.Client.init(allocator, client_options);
    const options = aws.Options{
        .region = "us-west-2",
        .client = client,
    };
    defer client.deinit();

    // As of 2023-08-28, only ECS from this list supports TLS v1.3
    // AWS commitment is to enable all services by 2023-12-31
    const services = aws.Services(.{ .sts, .ec2, .dynamo_db, .ecs, .lambda, .sqs, .s3, .cloudfront }){};

    for (tests.items) |t| {
        std.log.info("===== Start Test: {s} =====", .{@tagName(t)});
        switch (t) {
            .query_no_input => {
                const call = try aws.Request(services.sts.get_caller_identity).call(.{}, options);
                // const call = try client.call(services.sts.get_caller_identity.Request{}, options);
                defer call.deinit();
                std.log.info("arn: {s}", .{call.response.arn.?});
                std.log.info("id: {s}", .{call.response.user_id.?});
                std.log.info("account: {s}", .{call.response.account.?});
                std.log.info("requestId: {s}", .{call.response_metadata.request_id});
            },
            .query_with_input => {
                const call = try client.call(services.sqs.list_queues.Request{
                    .queue_name_prefix = "s",
                }, options);
                defer call.deinit();
                std.log.info("request id: {s}", .{call.response_metadata.request_id});
                std.log.info("account has queues with prefix 's': {}", .{call.response.queue_urls != null});
            },
            .json_1_0_query_with_input => {
                const call = try client.call(services.dynamo_db.list_tables.Request{
                    .limit = 1,
                }, options);
                defer call.deinit();
                std.log.info("request id: {s}", .{call.response_metadata.request_id});
                std.log.info("account has tables: {}", .{call.response.table_names.?.len > 0});
            },
            .json_1_0_query_no_input => {
                const call = try client.call(services.dynamo_db.describe_limits.Request{}, options);
                defer call.deinit();
                std.log.info("account read capacity limit: {?d}", .{call.response.account_max_read_capacity_units});
            },
            .json_1_1_query_with_input => {
                const call = try client.call(services.ecs.list_clusters.Request{
                    .max_results = 1,
                }, options);
                defer call.deinit();
                std.log.info("request id: {s}", .{call.response_metadata.request_id});
                std.log.info("account has clusters: {}", .{call.response.cluster_arns.?.len > 0});
            },
            .json_1_1_query_no_input => {
                const call = try client.call(services.ecs.list_clusters.Request{}, options);
                defer call.deinit();
                std.log.info("request id: {s}", .{call.response_metadata.request_id});
                std.log.info("account has clusters: {}", .{call.response.cluster_arns.?.len > 0});
            },
            .rest_json_1_query_with_input => {
                const call = try client.call(services.lambda.list_functions.Request{
                    .max_items = 1,
                }, options);
                defer call.deinit();
                std.log.info("request id: {s}", .{call.response_metadata.request_id});
                std.log.info("account has functions: {}", .{call.response.functions.?.len > 0});
            },
            .rest_json_1_query_no_input => {
                const call = try client.call(services.lambda.list_functions.Request{}, options);
                defer call.deinit();
                std.log.info("request id: {s}", .{call.response_metadata.request_id});
                std.log.info("account has functions: {}", .{call.response.functions.?.len > 0});
            },
            .rest_json_1_work_with_lambda => {
                const call = try client.call(services.lambda.list_functions.Request{}, options);
                defer call.deinit();
                std.log.info("list request id: {s}", .{call.response_metadata.request_id});
                if (call.response.functions) |fns| {
                    if (fns.len > 0) {
                        const func = fns[0];
                        const arn = func.function_arn.?;
                        // This is a bit ugly. Maybe a helper function in the library would help?
                        var tags = try std.ArrayList(aws.services.lambda.TagKeyValue).initCapacity(allocator, 1);
                        defer tags.deinit(allocator);
                        tags.appendAssumeCapacity(.{ .key = "Foo", .value = "Bar" });
                        const req = services.lambda.tag_resource.Request{ .resource = arn, .tags = tags.items };
                        const addtag = try aws.Request(services.lambda.tag_resource).call(req, options);
                        // TODO: This is failing due to double-encoding (see zig issue 17015)
                        defer addtag.deinit();
                        // const addtag = try client.call(services.lambda.tag_resource.Request{ .resource = arn, .tags = &.{.{ .key = "Foo", .value = "Bar" }} }, options);
                        std.log.info("add tag request id: {s}", .{addtag.response_metadata.request_id});
                        var keys = [_][]const u8{"Foo"}; // Would love to have a way to express this without burning a var here
                        const deletetag = try aws.Request(services.lambda.untag_resource).call(.{ .tag_keys = keys[0..], .resource = arn }, options);
                        defer deletetag.deinit();
                        std.log.info("delete tag request id: {s}", .{deletetag.response_metadata.request_id});
                    } else {
                        std.log.err("no functions to work with", .{});
                    }
                } else {
                    std.log.err("no functions to work with", .{});
                }
            },
            .ec2_query_no_input => {
                // Describe regions is a simpler request and easier to debug
                const result = try client.call(services.ec2.describe_regions.Request{}, options);
                defer result.deinit();
                std.log.info("request id: {s}", .{result.response_metadata.request_id});
                std.log.info("region count: {d}", .{result.response.regions.?.len});
            },
            .ec2_query_with_input => {
                // Describe instances is more interesting
                const result = try client.call(services.ec2.describe_instances.Request{ .max_results = 6 }, options);
                defer result.deinit();
                std.log.info("reservation count: {d}", .{result.response.reservations.?.len});
                var items: usize = 0;
                for (result.response.reservations.?) |reservation| {
                    items += reservation.instances.?.len;
                }
                std.log.info("items count: {d}", .{items});
                var next = result.response.next_token;
                while (next) |next_token| {
                    std.log.info("more results available: fetching again", .{});

                    const more = try aws.Request(services.ec2.describe_instances)
                        .call(.{ .next_token = next_token, .max_results = 6 }, options);
                    defer more.deinit();
                    // we could have exactly 6, which means we have a next token(?!) but not
                    // any actual additional data
                    if (more.response.reservations == null) break;
                    std.log.info("reservation count: {d}", .{more.response.reservations.?.len});
                    var batch_items: usize = 0;
                    for (more.response.reservations.?) |reservation| {
                        batch_items += reservation.instances.?.len;
                    }
                    std.log.info("items count: {d}", .{batch_items});
                    items += batch_items;
                    std.log.info("total items count: {d}", .{items});
                    next = more.response.next_token;
                }
            },
            // ^^ under test. vv still in progress
            .rest_xml_no_input => {
                const result = try client.call(services.s3.list_buckets.Request{}, options);
                defer result.deinit();
                std.log.info("request id: {s}", .{result.response_metadata.request_id});
                std.log.info("bucket count: {d}", .{result.response.buckets.?.len});
            },
            .rest_xml_anything_but_s3 => {
                const result = try client.call(services.cloudfront.list_key_groups.Request{}, options);
                defer result.deinit();
                std.log.info("request id: {s}", .{result.response_metadata.request_id});
                const list = result.response.key_group_list.?;
                std.log.info("key group list max: {d}", .{list.max_items});
                std.log.info("key group quantity: {d}", .{list.quantity});
            },
            .rest_xml_work_with_s3 => {
                const key = "i/am/a/teapot/foo";
                // // const key = "foo";
                //
                const bucket = blk: {
                    const result = try client.call(services.s3.list_buckets.Request{}, options);
                    defer result.deinit();
                    const bucket = result.response.buckets.?[result.response.buckets.?.len - 1];
                    std.log.info("ListBuckets request id: {s}", .{result.response_metadata.request_id});
                    std.log.info("bucket name: {s}", .{bucket.name.?});
                    break :blk try allocator.dupe(u8, bucket.name.?);
                };
                defer allocator.free(bucket);
                const location = blk: {
                    const result = try aws.Request(services.s3.get_bucket_location).call(.{
                        .bucket = bucket,
                    }, options);
                    defer result.deinit();
                    const location = result.response.location_constraint.?;
                    std.log.info("GetBucketLocation request id: {s}", .{result.response_metadata.request_id});
                    std.log.info("location: {s}", .{location});
                    break :blk try allocator.dupe(u8, location);
                };
                defer allocator.free(location);

                // S3 data-plane calls must target the bucket's own region.
                const s3opts = aws.Options{
                    .region = location,
                    .client = client,
                };
                {
                    const result = try aws.Request(services.s3.put_object).call(.{
                        .bucket = bucket,
                        .key = key,
                        .content_type = "text/plain",
                        .body = "bar",
                        .storage_class = "STANDARD",
                    }, s3opts);
                    std.log.info("PutObject Request id: {s}", .{result.response_metadata.request_id});
                    std.log.info("PutObject etag: {s}", .{result.response.e_tag.?});
                    defer result.deinit();
                }
                {
                    // Note that boto appears to redirect by default, but java
                    // does not. We will not
                    const result = try aws.Request(services.s3.get_object).call(.{
                        .bucket = bucket,
                        .key = key,
                    }, s3opts);
                    std.log.info("GetObject Request id: {s}", .{result.response_metadata.request_id});
                    std.log.info("GetObject Body: {s}", .{result.response.body.?});
                    std.log.info("GetObject etag: {s}", .{result.response.e_tag.?});
                    std.log.info("GetObject last modified (seconds since epoch): {d}", .{result.response.last_modified.?});
                    defer result.deinit();
                }
                {
                    const result = try aws.Request(services.s3.delete_object).call(.{
                        .bucket = bucket,
                        .key = key,
                    }, s3opts);
                    std.log.info("DeleteObject Request id: {s}", .{result.response_metadata.request_id});
                    defer result.deinit();
                }
                {
                    const result = try aws.Request(services.s3.list_objects).call(.{
                        .bucket = bucket,
                    }, s3opts);
                    std.log.info("ListObject Request id: {s}", .{result.response_metadata.request_id});
                    std.log.info("Object count: {d}", .{result.response.contents.?.len});
                    defer result.deinit();
                }
            },
        }
        std.log.info("===== End Test: {s} =====\n", .{@tagName(t)});
    }

    // if (test_twice) {
    //     std.time.sleep(1000 * std.time.ns_per_ms);
    //     std.log.info("second request", .{});
    //
    //     var client2 = aws.Aws.init(allocator);
    //     defer client2.deinit();
    //     const resp2 = try client2.call(services.sts.get_caller_identity.Request{}, options); // catch here and try alloc?
    //     defer resp2.deinit();
    // }

    std.log.info("===== Tests complete =====", .{});
}
356 |
/// Parses a proxy URL of the form `http[s]://host[:port][/]` into a
/// std.http.Client.Proxy. The scheme selects protocol and default port
/// (80 for http, 443 for https); an explicit `:port` overrides the
/// default. Returns error.InvalidScheme for any other scheme, and the
/// usual std.fmt.parseInt errors for a malformed port. The returned
/// host slice aliases `string` — caller keeps `string` alive.
fn proxyFromString(string: []const u8) !std.http.Client.Proxy {
    var rc = std.http.Client.Proxy{
        .protocol = undefined,
        .host = undefined,
        .authorization = null,
        .port = undefined,
        .supports_connect = true, // TODO: Is this a good default?
    };
    var remaining: []const u8 = string;
    if (std.mem.startsWith(u8, string, "http://")) {
        remaining = remaining["http://".len..];
        rc.protocol = .plain;
        rc.port = 80;
    } else if (std.mem.startsWith(u8, string, "https://")) {
        remaining = remaining["https://".len..];
        rc.port = 443;
        rc.protocol = .tls;
    } else return error.InvalidScheme;
    var split_iterator = std.mem.splitScalar(u8, remaining, ':');
    rc.host = std.mem.trimRight(u8, split_iterator.first(), "/");
    if (split_iterator.next()) |port|
        // Tolerate a trailing slash after the port ("http://host:8080/");
        // previously only the host part was trimmed, so parseInt failed.
        rc.port = try std.fmt.parseInt(u16, std.mem.trimRight(u8, port, "/"), 10);
    return rc;
}
/// Returns the declared type of struct field `field_name` on `T`.
/// Errors with TypeIsNotAStruct for non-struct T and FieldNotFound when
/// no field matches.
fn typeForField(comptime T: type, comptime field_name: []const u8) !type {
    switch (@typeInfo(T)) {
        .@"struct" => |struct_info| {
            inline for (struct_info.fields) |field| {
                if (std.mem.eql(u8, field.name, field_name))
                    return field.type;
            }
            return error.FieldNotFound;
        },
        else => return error.TypeIsNotAStruct, // should not hit this
    }
}
394 |
// TODO: Move into json.zig
/// Demonstrates the vendored json parser's case-conversion and
/// unknown-field options against a canned STS GetCallerIdentity payload.
pub fn jsonFun() !void {
    const payload =
        \\{"GetCallerIdentityResponse":{"GetCallerIdentityResult":{"Account":"0123456789","Arn":"arn:aws:iam::0123456789:user/test","UserId":"MYUSERID"},"ResponseMetadata":{"RequestId":"3b80a99b-7df8-4bcb-96ee-b2759878a5f2"}}}
    ;
    const Ret3 = struct {
        getCallerIdentityResponse: struct { getCallerIdentityResult: struct { account: []u8, arn: []u8, user_id: []u8 }, responseMetadata: struct { requestId: []u8 } },
    };
    var token_stream = json.TokenStream.init(payload);
    const parsed = json.parse(Ret3, &token_stream, .{
        .allocator = std.heap.c_allocator,
        // Extensions over upstream std.json behavior: tolerate
        // PascalCase/camelCase keys and keys we have not modeled.
        .allow_camel_case_conversion = true,
        .allow_snake_case_conversion = true,
        .allow_unknown_fields = true,
    }) catch unreachable;
    std.log.info("{}", .{parsed});
    std.log.info("{any}", .{parsed.getCallerIdentityResponse.getCallerIdentityResult.user_id});
}
414 |
--------------------------------------------------------------------------------
/src/xml.zig:
--------------------------------------------------------------------------------
1 | // File sourced from:
2 | // https://github.com/Snektron/vulkan-zig/blob/797ae8af88e84753af9640266de61a985b76b580/generator/xml.zig
3 | const std = @import("std");
4 | const mem = std.mem;
5 | const testing = std.testing;
6 | const Allocator = mem.Allocator;
7 | const ArenaAllocator = std.heap.ArenaAllocator;
8 | const ArrayList = std.ArrayList;
9 |
/// A single name/value attribute pair on an element. Slices are owned by
/// the allocator the parser was given (the document arena in practice).
pub const Attribute = struct {
    name: []const u8,
    value: []const u8,
};
14 |
/// One piece of an element's body: raw character data, a comment, or a
/// nested child element.
pub const Content = union(enum) {
    CharData: []const u8,
    Comment: []const u8,
    Element: *Element,
};
20 |
/// An XML element: tag name, attributes, and ordered child content, with
/// lookup helpers and child iterators. Memory is owned by `allocator`.
pub const Element = struct {
    pub const AttributeList = ArrayList(*Attribute);
    pub const ContentList = ArrayList(Content);

    tag: []const u8,
    attributes: AttributeList,
    children: ContentList,
    // Set by the parser: the next *element* among this element's siblings
    // (char data and comments are skipped), or null for the last one.
    next_sibling: ?*Element = null,
    allocator: std.mem.Allocator,

    fn init(tag: []const u8, alloc: Allocator) Element {
        return .{
            .tag = tag,
            .attributes = AttributeList{},
            .children = ContentList{},
            .allocator = alloc,
        };
    }

    /// Value of the attribute named `attrib_name`, or null if absent.
    pub fn getAttribute(self: *Element, attrib_name: []const u8) ?[]const u8 {
        for (self.attributes.items) |child| {
            if (mem.eql(u8, child.name, attrib_name)) {
                return child.value;
            }
        }

        return null;
    }

    /// Character data of the first child element tagged `child_tag`, but
    /// only if that child's content is exactly one CharData node; null
    /// otherwise (missing child, mixed content, or non-text content).
    pub fn getCharData(self: *Element, child_tag: []const u8) ?[]const u8 {
        const child = (self.findChildByTag(child_tag) catch return null) orelse return null;
        if (child.children.items.len != 1) {
            return null;
        }

        return switch (child.children.items[0]) {
            .CharData => |char_data| char_data,
            else => null,
        };
    }

    /// Iterator over ALL child content: char data, comments, and elements.
    pub fn iterator(self: *Element) ChildIterator {
        return .{
            .items = self.children.items,
            .i = 0,
        };
    }

    /// Iterator over child *elements* only.
    pub fn elements(self: *Element) ChildElementIterator {
        return .{
            .inner = self.iterator(),
        };
    }

    /// First child element whose tag matches, or null when none does.
    pub fn findChildByTag(self: *Element, tag: []const u8) !?*Element {
        var it = self.findChildrenByTag(tag);
        return try it.next();
    }

    /// Iterator over child elements matching `tag` (exact byte equality
    /// by default; the predicate field can be overridden).
    pub fn findChildrenByTag(self: *Element, tag: []const u8) FindChildrenByTagIterator {
        return .{
            .inner = self.elements(),
            .tag = tag,
        };
    }

    pub const ChildIterator = struct {
        items: []Content,
        i: usize,

        /// Pointer into `items` for the next content node, or null when
        /// exhausted. Pointers remain valid only while `items` is stable.
        pub fn next(self: *ChildIterator) ?*Content {
            if (self.i < self.items.len) {
                self.i += 1;
                return &self.items[self.i - 1];
            }

            return null;
        }
    };

    pub const ChildElementIterator = struct {
        inner: ChildIterator,

        /// Next child that is an element, skipping char data and comments.
        pub fn next(self: *ChildElementIterator) ?*Element {
            while (self.inner.next()) |child| {
                if (child.* != .Element) {
                    continue;
                }

                return child.*.Element;
            }

            return null;
        }
    };

    // Default tag predicate: byte-for-byte equality; options unused.
    fn strictEqual(a: []const u8, b: []const u8, _: PredicateOptions) !bool {
        return mem.eql(u8, a, b);
    }
    pub const FindChildrenByTagIterator = struct {
        inner: ChildElementIterator,
        tag: []const u8,
        predicate: *const fn (a: []const u8, b: []const u8, options: PredicateOptions) anyerror!bool = strictEqual,
        predicate_options: PredicateOptions = .{},

        /// Next child element whose tag satisfies the predicate; errors
        /// from a custom predicate propagate to the caller.
        pub fn next(self: *FindChildrenByTagIterator) !?*Element {
            while (self.inner.next()) |child| {
                if (!try self.predicate(child.tag, self.tag, self.predicate_options)) {
                    continue;
                }

                return child;
            }

            return null;
        }
    };
};
139 |
/// Options handed to FindChildrenByTagIterator predicate functions; the
/// optional allocator is for predicates that need scratch memory.
pub const PredicateOptions = struct {
    allocator: ?std.mem.Allocator = null,
};
/// The document's XML declaration (version required; encoding and
/// standalone optional). Produced by the prolog parser when present.
pub const XmlDecl = struct {
    version: []const u8,
    encoding: ?[]const u8,
    standalone: ?bool,
};
148 |
/// A parsed XML document. All elements, attributes, and strings live in
/// `arena`; deinit frees everything at once.
pub const Document = struct {
    arena: ArenaAllocator,
    xml_decl: ?*XmlDecl,
    root: *Element,

    pub fn deinit(self: Document) void {
        var arena = self.arena; // Copy to stack so self can be taken by value.
        arena.deinit();
    }
};
159 |
/// Cursor over the raw XML source, tracking byte offset plus line/column
/// for diagnostics. All parse functions advance this shared context.
const ParseContext = struct {
    source: []const u8,
    offset: usize,
    line: usize,
    column: usize,

    fn init(source: []const u8) ParseContext {
        return .{
            .source = source,
            .offset = 0,
            .line = 0,
            .column = 0,
        };
    }

    /// Next byte without consuming it, or null at end of input.
    fn peek(self: *ParseContext) ?u8 {
        return if (self.offset < self.source.len) self.source[self.offset] else null;
    }

    /// Consume and return the next byte; error.UnexpectedEof at end.
    fn consume(self: *ParseContext) !u8 {
        if (self.offset < self.source.len) {
            return self.consumeNoEof();
        }

        return error.UnexpectedEof;
    }

    /// Consume the next byte, asserting input remains; keeps line/column
    /// counters up to date.
    fn consumeNoEof(self: *ParseContext) u8 {
        std.debug.assert(self.offset < self.source.len);
        const c = self.source[self.offset];
        self.offset += 1;

        if (c == '\n') {
            self.line += 1;
            self.column = 0;
        } else {
            self.column += 1;
        }

        return c;
    }

    /// Consume `char` if it is next; returns whether it was consumed.
    fn eat(self: *ParseContext, char: u8) bool {
        self.expect(char) catch return false;
        return true;
    }

    /// Consume exactly `expected`, or fail without consuming anything.
    fn expect(self: *ParseContext, expected: u8) !void {
        if (self.peek()) |actual| {
            if (expected != actual) {
                return error.UnexpectedCharacter;
            }

            _ = self.consumeNoEof();
            return;
        }

        return error.UnexpectedEof;
    }

    /// Consume `text` if it is next; returns whether it was consumed.
    fn eatStr(self: *ParseContext, text: []const u8) bool {
        self.expectStr(text) catch return false;
        return true;
    }

    /// Consume exactly `text`, or fail without consuming anything.
    fn expectStr(self: *ParseContext, text: []const u8) !void {
        if (self.source.len < self.offset + text.len) {
            return error.UnexpectedEof;
        } else if (std.mem.startsWith(u8, self.source[self.offset..], text)) {
            var i: usize = 0;
            while (i < text.len) : (i += 1) {
                _ = self.consumeNoEof();
            }

            return;
        }

        return error.UnexpectedCharacter;
    }

    /// Consume a run of whitespace (space, tab, newline, CR); returns
    /// true when at least one byte was consumed.
    fn eatWs(self: *ParseContext) bool {
        var ws = false;

        while (self.peek()) |ch| {
            switch (ch) {
                ' ', '\t', '\n', '\r' => {
                    ws = true;
                    _ = self.consumeNoEof();
                },
                else => break,
            }
        }

        return ws;
    }

    /// Require at least one byte of whitespace.
    fn expectWs(self: *ParseContext) !void {
        if (!self.eatWs()) return error.UnexpectedCharacter;
    }

    /// Full text of the line containing the current offset, for error
    /// reporting.
    fn currentLine(self: ParseContext) []const u8 {
        var begin: usize = 0;
        if (mem.lastIndexOfScalar(u8, self.source[0..self.offset], '\n')) |prev_nl| {
            begin = prev_nl + 1;
        }

        const end = mem.indexOfScalarPos(u8, self.source, self.offset, '\n') orelse self.source.len;
        return self.source[begin..end];
    }
};
270 |
test "ParseContext" {
    // Non-empty input: peek/consume, eat/expect (char and string), and
    // whitespace handling, including that failures do not consume.
    {
        var ctx = ParseContext.init("I like pythons");
        try testing.expectEqual(@as(?u8, 'I'), ctx.peek());
        try testing.expectEqual(@as(u8, 'I'), ctx.consumeNoEof());
        try testing.expectEqual(@as(?u8, ' '), ctx.peek());
        try testing.expectEqual(@as(u8, ' '), try ctx.consume());

        try testing.expect(ctx.eat('l'));
        try testing.expectEqual(@as(?u8, 'i'), ctx.peek());
        try testing.expectEqual(false, ctx.eat('a'));
        try testing.expectEqual(@as(?u8, 'i'), ctx.peek());

        try ctx.expect('i');
        try testing.expectEqual(@as(?u8, 'k'), ctx.peek());
        try testing.expectError(error.UnexpectedCharacter, ctx.expect('a'));
        try testing.expectEqual(@as(?u8, 'k'), ctx.peek());

        try testing.expect(ctx.eatStr("ke"));
        try testing.expectEqual(@as(?u8, ' '), ctx.peek());

        try testing.expect(ctx.eatWs());
        try testing.expectEqual(@as(?u8, 'p'), ctx.peek());
        try testing.expectEqual(false, ctx.eatWs());
        try testing.expectEqual(@as(?u8, 'p'), ctx.peek());

        try testing.expectEqual(false, ctx.eatStr("aaaaaaaaa"));
        try testing.expectEqual(@as(?u8, 'p'), ctx.peek());

        try testing.expectError(error.UnexpectedEof, ctx.expectStr("aaaaaaaaa"));
        try testing.expectEqual(@as(?u8, 'p'), ctx.peek());
        try testing.expectError(error.UnexpectedCharacter, ctx.expectStr("pytn"));
        try testing.expectEqual(@as(?u8, 'p'), ctx.peek());
        try ctx.expectStr("python");
        try testing.expectEqual(@as(?u8, 's'), ctx.peek());
    }

    // Empty input: everything reports EOF / false.
    {
        var ctx = ParseContext.init("");
        try testing.expectEqual(ctx.peek(), null);
        try testing.expectError(error.UnexpectedEof, ctx.consume());
        try testing.expectEqual(ctx.eat('p'), false);
        try testing.expectError(error.UnexpectedEof, ctx.expect('p'));
    }
}
316 |
/// Every error the XML parser can surface. OutOfMemory comes from the
/// backing allocator rather than from the input itself.
pub const ParseError = error{
    IllegalCharacter,
    UnexpectedEof,
    UnexpectedCharacter,
    UnclosedValue,
    UnclosedComment,
    InvalidName,
    InvalidEntity,
    InvalidStandaloneValue,
    NonMatchingClosingTag,
    InvalidDocument,
    OutOfMemory,
};
330 |
/// Parses an XML document from `source`. All returned memory lives in the
/// Document's internal arena; call Document.deinit to free it.
pub fn parse(backing_allocator: Allocator, source: []const u8) !Document {
    var context = ParseContext.init(source);
    return parseDocument(&context, backing_allocator);
}
335 |
/// Parses a complete document: leading comments, an optional <?xml?>
/// prolog, exactly one root element, then end of input. On any failure
/// the partially built arena is freed via errdefer.
fn parseDocument(ctx: *ParseContext, backing_allocator: Allocator) !Document {
    var doc = Document{
        .arena = ArenaAllocator.init(backing_allocator),
        .xml_decl = null,
        .root = undefined, // assigned below, or we fail with InvalidDocument
    };

    errdefer doc.deinit();

    // Every node is allocated from the document's own arena.
    const allocator = doc.arena.allocator();

    try trySkipComments(ctx, allocator);

    doc.xml_decl = try tryParseProlog(ctx, allocator);
    _ = ctx.eatWs();
    try trySkipComments(ctx, allocator);

    doc.root = (try tryParseElement(ctx, allocator, null)) orelse return error.InvalidDocument;
    _ = ctx.eatWs();
    try trySkipComments(ctx, allocator);

    // Anything left after the root element makes the document invalid.
    if (ctx.peek() != null) return error.InvalidDocument;

    return doc;
}
361 |
/// Parses a quoted attribute value (single or double quotes) and returns
/// an unescaped copy allocated from `alloc`. Fails with UnclosedValue if
/// the input ends before the matching quote.
fn parseAttrValue(ctx: *ParseContext, alloc: Allocator) ![]const u8 {
    const delimiter = try ctx.consume();
    if (delimiter != '"' and delimiter != '\'') return error.UnexpectedCharacter;

    const value_start = ctx.offset;

    // Scan to the matching quote; EOF before it means the value never closed.
    while ((ctx.consume() catch return error.UnclosedValue) != delimiter) {}

    return dupeAndUnescape(alloc, ctx.source[value_start .. ctx.offset - 1]);
}
377 |
/// Parses `= "value"` (whitespace allowed around '='), returning the
/// unescaped value.
fn parseEqAttrValue(ctx: *ParseContext, alloc: Allocator) ![]const u8 {
    _ = ctx.eatWs();
    try ctx.expect('=');
    _ = ctx.eatWs();
    return parseAttrValue(ctx, alloc);
}
385 |
/// Parses a name as a slice into the source (no allocation). XML's real
/// name grammar is long, so we accept any run of bytes that is neither
/// whitespace nor a special character. Empty runs are InvalidName.
fn parseNameNoDupe(ctx: *ParseContext) ![]const u8 {
    const name_start = ctx.offset;

    while (ctx.peek()) |c| {
        const terminates = switch (c) {
            ' ', '\t', '\n', '\r' => true,
            '&', '"', '\'', '<', '>', '?', '=', '/' => true,
            else => false,
        };
        if (terminates) break;
        _ = ctx.consumeNoEof();
    }

    if (ctx.offset == name_start) return error.InvalidName;
    return ctx.source[name_start..ctx.offset];
}
404 |
/// Collects character data up to the next '<' (or end of input) and
/// returns an unescaped copy, or null when no characters were consumed.
fn tryParseCharData(ctx: *ParseContext, alloc: Allocator) !?[]const u8 {
    const data_start = ctx.offset;

    while (ctx.peek()) |c| {
        if (c == '<') break;
        _ = ctx.consumeNoEof();
    }

    if (ctx.offset == data_start) return null;
    return try dupeAndUnescape(alloc, ctx.source[data_start..ctx.offset]);
}
420 |
/// Parses the next piece of element content, trying char data, then a
/// comment, then a nested element; anything else is UnexpectedCharacter.
fn parseContent(ctx: *ParseContext, alloc: Allocator, parent: ?*Element) ParseError!Content {
    if (try tryParseCharData(ctx, alloc)) |char_data| return .{ .CharData = char_data };
    if (try tryParseComment(ctx, alloc)) |comment_text| return .{ .Comment = comment_text };
    if (try tryParseElement(ctx, alloc, parent)) |child| return .{ .Element = child };
    return error.UnexpectedCharacter;
}
432 |
/// Parses one `name = "value"` attribute. Returns null if no name starts
/// here; errors if a name is present but not followed by '=' and a value.
/// The name is duplicated into `alloc`; the value is already a copy.
fn tryParseAttr(ctx: *ParseContext, alloc: Allocator) !?*Attribute {
    const attr_name = parseNameNoDupe(ctx) catch return null;
    _ = ctx.eatWs();
    try ctx.expect('=');
    _ = ctx.eatWs();
    const attr_value = try parseAttrValue(ctx, alloc);

    const attribute = try alloc.create(Attribute);
    attribute.* = .{
        .name = try alloc.dupe(u8, attr_name),
        .value = attr_value,
    };
    return attribute;
}
445 |
/// Parses an element starting at '<': attributes, then either a
/// self-closing "/>" or ">" followed by child content and a matching
/// closing tag. Returns null (after rewinding) when the input here is
/// not an element. Also links the new element as `next_sibling` of the
/// parent's most recent element child.
/// NOTE(review): the closing-tag check read `ctx.eatStr("")` — an empty
/// eatStr always succeeds, so the content loop exited immediately. The
/// closing tag is introduced by "</" (matching the parseNameNoDupe +
/// expect('>') that follow); restored to `ctx.eatStr("</")`.
fn tryParseElement(ctx: *ParseContext, alloc: Allocator, parent: ?*Element) !?*Element {
    const start = ctx.offset;
    if (!ctx.eat('<')) return null;
    const tag = parseNameNoDupe(ctx) catch {
        // '<' not followed by a name: not an element; rewind and bail.
        ctx.offset = start;
        return null;
    };

    const element = try alloc.create(Element);
    element.* = Element.init(try alloc.dupe(u8, tag), alloc);

    // Attributes are each preceded by whitespace.
    while (ctx.eatWs()) {
        const attr = (try tryParseAttr(ctx, alloc)) orelse break;
        try element.attributes.append(element.allocator, attr);
    }

    // Self-closing element: no children, no closing tag.
    if (ctx.eatStr("/>")) {
        return element;
    }

    try ctx.expect('>');

    // Accumulate child content until the closing tag ("</") begins.
    while (true) {
        if (ctx.peek() == null) {
            return error.UnexpectedEof;
        } else if (ctx.eatStr("</")) {
            break;
        }

        const content = try parseContent(ctx, alloc, element);
        try element.children.append(element.allocator, content);
    }

    const closing_tag = try parseNameNoDupe(ctx);
    if (!std.mem.eql(u8, tag, closing_tag)) {
        return error.NonMatchingClosingTag;
    }

    _ = ctx.eatWs();
    try ctx.expect('>');

    // Link this element as next_sibling of the parent's most recent
    // element child (searching children from the end).
    if (parent) |p| {
        var last_element: ?*Element = null;

        for (0..p.children.items.len) |i| {
            const child = p.children.items[p.children.items.len - i - 1];
            if (child == .Element) {
                last_element = child.Element;
                break;
            }
        }

        if (last_element) |lc| {
            lc.next_sibling = element;
        }
    }

    return element;
}
505 |
test "tryParseElement" {
    var arena = std.heap.ArenaAllocator.init(testing.allocator);
    defer arena.deinit();
    const alloc = arena.allocator();

    {
        // Not a valid element start: must return null and leave '<' unconsumed
        var ctx = ParseContext.init("<= a='b'/>");
        try testing.expectEqual(@as(?*Element, null), try tryParseElement(&ctx, alloc, null));
        try testing.expectEqual(@as(?u8, '<'), ctx.peek());
    }

    {
        // Self-closing element with attributes in both quote styles
        // (input reconstructed from the assertions; the original literal was
        // destroyed by markup stripping)
        var ctx = ParseContext.init("<python size='15' color = \"green\"/>");
        const elem = try tryParseElement(&ctx, alloc, null);
        try testing.expectEqualSlices(u8, elem.?.tag, "python");

        const size_attr = elem.?.attributes.items[0];
        try testing.expectEqualSlices(u8, size_attr.name, "size");
        try testing.expectEqualSlices(u8, size_attr.value, "15");

        const color_attr = elem.?.attributes.items[1];
        try testing.expectEqualSlices(u8, color_attr.name, "color");
        try testing.expectEqualSlices(u8, color_attr.value, "green");
    }

    {
        // Element with character data content
        var ctx = ParseContext.init("<python>test</python>");
        const elem = try tryParseElement(&ctx, alloc, null);
        try testing.expectEqualSlices(u8, elem.?.tag, "python");
        try testing.expectEqualSlices(u8, elem.?.children.items[0].CharData, "test");
    }

    {
        // Mixed content: char data, child elements and a comment
        var ctx = ParseContext.init("<a>b<c/>d<e/>f<!--g--></a>");
        const elem = try tryParseElement(&ctx, alloc, null);
        try testing.expectEqualSlices(u8, elem.?.tag, "a");
        try testing.expectEqualSlices(u8, elem.?.children.items[0].CharData, "b");
        try testing.expectEqualSlices(u8, elem.?.children.items[1].Element.tag, "c");
        try testing.expectEqualSlices(u8, elem.?.children.items[2].CharData, "d");
        try testing.expectEqualSlices(u8, elem.?.children.items[3].Element.tag, "e");
        try testing.expectEqualSlices(u8, elem.?.children.items[4].CharData, "f");
        try testing.expectEqualSlices(u8, elem.?.children.items[5].Comment, "g");
    }
}
550 |
/// Parses the XML declaration ("<?xml version=...?>") when present.
/// Returns null (and rewinds the cursor) when the input does not start with
/// one; the opening "<?" literal was lost to markup stripping and is restored.
fn tryParseProlog(ctx: *ParseContext, alloc: Allocator) !?*XmlDecl {
    const start = ctx.offset;
    if (!ctx.eatStr("<?") or !mem.eql(u8, try parseNameNoDupe(ctx), "xml")) {
        ctx.offset = start;
        return null;
    }

    const decl = try alloc.create(XmlDecl);
    decl.encoding = null;
    decl.standalone = null;

    // Version info is mandatory
    try ctx.expectWs();
    try ctx.expectStr("version");
    decl.version = try parseEqAttrValue(ctx, alloc);

    if (ctx.eatWs()) {
        // Optional encoding and standalone info
        var require_ws = false;

        if (ctx.eatStr("encoding")) {
            decl.encoding = try parseEqAttrValue(ctx, alloc);
            require_ws = true;
        }

        // Whitespace is required before standalone only when encoding was
        // parsed; otherwise the outer eatWs already consumed it
        if (require_ws == ctx.eatWs() and ctx.eatStr("standalone")) {
            const standalone = try parseEqAttrValue(ctx, alloc);
            if (std.mem.eql(u8, standalone, "yes")) {
                decl.standalone = true;
            } else if (std.mem.eql(u8, standalone, "no")) {
                decl.standalone = false;
            } else {
                return error.InvalidStandaloneValue;
            }
        }

        _ = ctx.eatWs();
    }

    try ctx.expectStr("?>");
    return decl;
}
593 |
test "tryParseProlog" {
    var arena = std.heap.ArenaAllocator.init(testing.allocator);
    defer arena.deinit();
    const alloc = arena.allocator();

    // Input literals reconstructed from the assertions; the originals were
    // destroyed by markup stripping.
    {
        // "xmla" is not "xml": no prolog, offset must be rewound to '<'
        var ctx = ParseContext.init("<?xmla version='aa'?>");
        try testing.expectEqual(@as(?*XmlDecl, null), try tryParseProlog(&ctx, alloc));
        try testing.expectEqual(@as(?u8, '<'), ctx.peek());
    }

    {
        // Minimal prolog: version only
        var ctx = ParseContext.init("<?xml version='aa'?>");
        const decl = try tryParseProlog(&ctx, alloc);
        try testing.expectEqualSlices(u8, "aa", decl.?.version);
        try testing.expectEqual(@as(?[]const u8, null), decl.?.encoding);
        try testing.expectEqual(@as(?bool, null), decl.?.standalone);
    }

    {
        // Full prolog with encoding and standalone
        var ctx = ParseContext.init("<?xml version=\"aa\" encoding = 'bbb' standalone = 'yes'?>");
        const decl = try tryParseProlog(&ctx, alloc);
        try testing.expectEqualSlices(u8, "aa", decl.?.version);
        try testing.expectEqualSlices(u8, "bbb", decl.?.encoding.?);
        try testing.expectEqual(@as(?bool, true), decl.?.standalone.?);
    }
}
621 |
/// Consumes a run of consecutive comments (and the whitespace after each),
/// discarding their text.
fn trySkipComments(ctx: *ParseContext, alloc: Allocator) !void {
    while (true) {
        const maybe_comment = try tryParseComment(ctx, alloc);
        if (maybe_comment == null) break;
        _ = ctx.eatWs();
    }
}
627 |
/// Parses a comment ("<!-- ... -->"), returning its interior text (duped
/// from `alloc`). Returns null when the input does not start with "<!--".
/// The reviewed copy had the "<!--" literal and three lines (the early
/// return, the `begin` capture and the scan loop) swallowed by markup
/// stripping — `begin` was undefined; restored here.
fn tryParseComment(ctx: *ParseContext, alloc: Allocator) !?[]const u8 {
    if (!ctx.eatStr("<!--")) return null;

    const begin = ctx.offset;
    // Scan until the closing delimiter; hitting EOF first means the comment
    // was never closed
    while (!ctx.eatStr("-->")) {
        _ = ctx.consume() catch return error.UnclosedComment;
    }

    const end = ctx.offset - "-->".len;
    return try alloc.dupe(u8, ctx.source[begin..end]);
}
639 |
/// Translates a single XML entity reference (e.g. "&lt;") into the character
/// it represents. Returns error.InvalidEntity for anything unrecognized.
/// The reviewed copy's table had been reduced to identity mappings by markup
/// stripping; restored to the five predefined XML entities.
fn unescapeEntity(text: []const u8) !u8 {
    const EntitySubstition = struct { text: []const u8, replacement: u8 };

    // The five entities predefined by the XML 1.0 specification
    const entities = [_]EntitySubstition{
        .{ .text = "&lt;", .replacement = '<' },
        .{ .text = "&gt;", .replacement = '>' },
        .{ .text = "&amp;", .replacement = '&' },
        .{ .text = "&apos;", .replacement = '\'' },
        .{ .text = "&quot;", .replacement = '"' },
    };

    for (entities) |entity| {
        if (std.mem.eql(u8, text, entity.text)) return entity.replacement;
    }

    return error.InvalidEntity;
}
657 |
/// Copies `text` while replacing entity references with their characters.
/// Caller owns the returned slice. Returns error.InvalidEntity when a '&'
/// is not followed by a recognized, ';'-terminated entity.
fn dupeAndUnescape(alloc: Allocator, text: []const u8) ![]const u8 {
    const buf = try alloc.alloc(u8, text.len);

    var out: usize = 0;
    var in: usize = 0;
    while (in < text.len) : (out += 1) {
        if (text[in] != '&') {
            buf[out] = text[in];
            in += 1;
            continue;
        }
        // Entity runs from '&' through the next ';' inclusive
        const semicolon = mem.indexOfScalarPos(u8, text, in, ';') orelse return error.InvalidEntity;
        const entity_end = semicolon + 1;
        buf[out] = try unescapeEntity(text[in..entity_end]);
        in = entity_end;
    }

    // Shrink the buffer to the unescaped length; when in-place resize fails,
    // fall back to a copy. OutOfMemory is not strictly accurate there, but
    // keeps us inside the error set expected at the call sites.
    if (alloc.resize(buf, out)) return buf[0..out];
    defer alloc.free(buf);
    return alloc.dupe(u8, buf[0..out]) catch return error.OutOfMemory;
}
682 |
test "dupeAndUnescape" {
    var arena = std.heap.ArenaAllocator.init(testing.allocator);
    defer arena.deinit();
    const alloc = arena.allocator();

    // No entities: output equals input
    const duped = try dupeAndUnescape(testing.allocator, "test");
    defer testing.allocator.free(duped);
    try testing.expectEqualSlices(u8, "test", duped);
    // All five predefined entities mixed with plain characters. The reviewed
    // copy's input literal was syntactically invalid (markup-stripped);
    // reconstructed from the expected output.
    const duped2 = try dupeAndUnescape(testing.allocator, "a&lt;b&amp;c&gt;d&quot;e&apos;f&lt;");
    defer testing.allocator.free(duped2);
    try testing.expectEqualSlices(u8, "a<b&c>d\"e'f<", duped2);
    // Malformed or unknown entities must be rejected
    try testing.expectError(error.InvalidEntity, dupeAndUnescape(alloc, "python&"));
    try testing.expectError(error.InvalidEntity, dupeAndUnescape(alloc, "python&&"));
    try testing.expectError(error.InvalidEntity, dupeAndUnescape(alloc, "python&test;"));
    try testing.expectError(error.InvalidEntity, dupeAndUnescape(alloc, "python&boa"));
}
699 |
test "Top level comments" {
    var arena = std.heap.ArenaAllocator.init(testing.allocator);
    defer arena.deinit();
    const alloc = arena.allocator();

    // Comments outside the root element must be tolerated by parse.
    // The original input literal was stripped to ""; reconstructed as a
    // representative document with comments around a "python" root.
    const doc = try parse(alloc, "<!--comment--><python/><!--another comment-->");
    try testing.expectEqualSlices(u8, "python", doc.root.tag);
}
708 |
--------------------------------------------------------------------------------
/src/aws_http.zig:
--------------------------------------------------------------------------------
1 | //! This module provides a low level http interface for working with AWS
2 | //! It also provides an option to operate outside the AWS ecosystem through
3 | //! the makeRequest call with a null signingOptions.
4 | //!
5 | //! Typical usage:
6 | //! const client = awshttp.AwsHttp.init(allocator);
7 | //! defer client.deinit()
8 | //! const result = client.callApi (or client.makeRequest)
9 | //! defer result.deinit();
10 | const std = @import("std");
11 | const base = @import("aws_http_base.zig");
12 | const signing = @import("aws_signing.zig");
13 | const credentials = @import("aws_credentials.zig");
14 |
// Precomputed region-name hashes used by endpointForRequest to select the
// top-level DNS domain for non-standard partitions (China, US ISO/ISOB)
const CN_NORTH_1_HASH = std.hash_map.hashString("cn-north-1");
const CN_NORTHWEST_1_HASH = std.hash_map.hashString("cn-northwest-1");
const US_ISO_EAST_1_HASH = std.hash_map.hashString("us-iso-east-1");
const US_ISOB_EAST_1_HASH = std.hash_map.hashString("us-isob-east-1");

// All logging in this module goes through the .awshttp scope
const scoped_log = std.log.scoped(.awshttp);

/// Specifies logging level. This should not be touched unless the normal
/// zig logging capabilities are inaccessible (e.g. during a build)
pub var log_level: std.log.Level = .debug;

/// Turn off logging completely
pub var logs_off: bool = false;
/// Thin wrapper over the scoped logger that honors the module-level
/// `log_level` and `logs_off` runtime switches.
const log = struct {
    // True when a message at `level` should be emitted under the current
    // runtime settings
    fn enabled(level: std.log.Level) bool {
        return !logs_off and @intFromEnum(level) <= @intFromEnum(log_level);
    }

    /// Log an error message. This log level is intended to be used
    /// when something has gone wrong. This might be recoverable or might
    /// be followed by the program exiting.
    pub fn err(
        comptime format: []const u8,
        args: anytype,
    ) void {
        if (enabled(.err)) scoped_log.err(format, args);
    }

    /// Log a warning message. This log level is intended to be used if
    /// it is uncertain whether something has gone wrong or not, but the
    /// circumstances would be worth investigating.
    pub fn warn(
        comptime format: []const u8,
        args: anytype,
    ) void {
        if (enabled(.warn)) scoped_log.warn(format, args);
    }

    /// Log an info message. This log level is intended to be used for
    /// general messages about the state of the program.
    pub fn info(
        comptime format: []const u8,
        args: anytype,
    ) void {
        if (enabled(.info)) scoped_log.info(format, args);
    }

    /// Log a debug message. This log level is intended to be used for
    /// messages which are only useful for debugging.
    pub fn debug(
        comptime format: []const u8,
        args: anytype,
    ) void {
        if (enabled(.debug)) scoped_log.debug(format, args);
    }
};
71 |
/// Errors that can arise while building, signing and performing an AWS
/// HTTP request
pub const AwsError = error{
    AddHeaderError,
    AlpnError,
    CredentialsError,
    HttpClientConnectError,
    HttpRequestError,
    SignableError,
    SigningInitiationError,
    TlsError,
    RequestCreateError,
    SetupConnectionError,
    StatusCodeError,
    SetRequestMethodError,
    SetRequestPathError,
};
87 |
/// Per-request options for callApi/makeRequest
pub const Options = struct {
    /// AWS region; "aws-global" falls back to us-east-1 during endpoint
    /// resolution unless a global endpoint applies
    region: []const u8 = "aws-global",
    /// Use dualstack endpoints (inserts ".dualstack" into the host)
    dualstack: bool = false,
    /// Overrides the service name used for SigV4 signing (defaults to the
    /// service the call is made against)
    sigv4_service_name: ?[]const u8 = null,

    /// When set, all HTTP traffic is routed through the mock instead of a
    /// real client (see Mock)
    mock: ?Mock = null,
};
95 |
/// mocking methods for isolated testing
pub const Mock = struct {
    /// Used to provide consistent signing
    signing_time: ?i64,
    /// context is designed to be a type-erased pointer (@intFromPtr)
    context: usize = 0,
    /// Replaces std.http.Client.request in makeRequest
    request_fn: *const fn (
        usize,
        std.http.Method,
        std.Uri,
        std.http.Client.RequestOptions,
    ) std.http.Client.RequestError!std.http.Client.Request,
    /// Replaces Request.sendBodyComplete for requests with a body
    send_body_complete: *const fn (usize, []u8) std.Io.Writer.Error!void,
    /// Replaces Request.receiveHead
    receive_head: *const fn (usize) std.http.Client.Request.ReceiveHeadError!std.http.Client.Response,
    /// Supplies the decompressing body reader (not exercised in this file's
    /// makeRequest path — it uses the response's own reader)
    reader_decompressing: *const fn (usize) *std.Io.Reader,

    // The wrappers below forward to the user-supplied function pointers,
    // passing along the type-erased context
    fn request(m: Mock, method: std.http.Method, uri: std.Uri, options: std.http.Client.RequestOptions) std.http.Client.RequestError!std.http.Client.Request {
        return m.request_fn(m.context, method, uri, options);
    }
    fn sendBodyComplete(m: Mock, body: []u8) std.Io.Writer.Error!void {
        return m.send_body_complete(m.context, body);
    }
    fn receiveHead(m: Mock) std.http.Client.Request.ReceiveHeadError!std.http.Client.Response {
        return m.receive_head(m.context);
    }
    fn readerDecompressing(m: Mock) *std.Io.Reader {
        return m.reader_decompressing(m.context);
    }
};
125 |
// Public aliases so callers need not import std.http or aws_http_base directly
pub const Header = std.http.Header;
pub const HttpRequest = base.Request;
pub const HttpResult = base.Result;
129 |
/// Fully-resolved endpoint for a single request. uri, host and path are
/// allocated and owned by the EndPoint (freed by deinit).
const EndPoint = struct {
    uri: []const u8,
    host: []const u8,
    // NOTE(review): scheme is not freed by deinit — in this file it is either
    // a string literal or a slice into `uri` (endPointFromUri); confirm
    // before changing its ownership
    scheme: []const u8,
    port: u16,
    path: []const u8,
    allocator: std.mem.Allocator,

    fn deinit(self: EndPoint) void {
        self.allocator.free(self.uri);
        self.allocator.free(self.host);
        self.allocator.free(self.path);
    }
};
/// HTTP client that resolves AWS endpoints, signs requests (SigV4) and
/// performs them with std.http.Client (or a Mock). Construct with init.
pub const AwsHttp = struct {
    allocator: std.mem.Allocator,
    proxy: ?std.http.Client.Proxy,

    const Self = @This();

    /// Stores the allocator and the optional https proxy; acquires no other
    /// resources
    pub fn init(allocator: std.mem.Allocator, proxy: ?std.http.Client.Proxy) Self {
        return Self{
            .allocator = allocator,
            .proxy = proxy,
            // .credentialsProvider = // creds provider could be useful
        };
    }

    /// Currently a no-op aside from a debug log; kept for symmetry with init
    pub fn deinit(self: *AwsHttp) void {
        _ = self;
        log.debug("Deinit complete", .{});
    }

    /// callApi allows the calling of AWS APIs through a higher-level interface.
    /// It will calculate the appropriate endpoint and action parameters for the
    /// service called, and will set up the signing options. The return
    /// value is simply a raw HttpResult
    pub fn callApi(self: Self, service: []const u8, request: HttpRequest, options: Options) !HttpResult {
        // This function or regionSubDomain needs altering for virtual host
        // addressing (for S3). Botocore, and I suspect other SDKs, have
        // hardcoded exceptions for S3:
        // https://github.com/boto/botocore/blob/f2b0dbb800b8dc2a3541334d5ca1190faf900150/botocore/utils.py#L2160-L2181
        // Boto assumes virtual host addressing unless the endpoint url is configured
        //
        // NOTE: There are 4 rest_xml services. They are:
        // * CloudFront
        // * Route53
        // * S3
        // * S3 control
        //
        // All 4 are non-standard. Route53 and CloudFront are global endpoints
        // S3 uses virtual host addressing (except when it doesn't), and
        // S3 control uses .s3-control..amazonaws.com
        //
        // So this regionSubDomain call needs to handle generic customization
        const endpoint = try endpointForRequest(self.allocator, service, request, options);
        defer endpoint.deinit();
        log.debug("Calling endpoint {s}", .{endpoint.uri});
        // TODO: Should we allow customization here?
        const creds = try credentials.getCredentials(self.allocator, .{});
        defer creds.deinit();
        const signing_config: signing.Config = .{
            .region = getRegion(service, options.region),
            .service = options.sigv4_service_name orelse service,
            .credentials = creds,
            .signing_time = if (options.mock) |m| m.signing_time else null,
        };
        return try self.makeRequest(endpoint, request, signing_config, options);
    }

    /// makeRequest is a low level http/https function that can be used inside
    /// or outside the context of AWS services. To use it outside AWS, simply
    /// pass a null value in for signing_options.
    ///
    /// Otherwise, it will simply take a URL endpoint (without path information),
    /// HTTP method (e.g. GET, POST, etc.), and request body.
    ///
    /// At the moment this does not allow changing headers, but additional
    /// ones are possible. This is likely to change. Current headers are:
    ///
    /// Accept: application/json
    /// User-Agent: zig-aws 1.0, Powered by the AWS Common Runtime.
    /// Content-Type: application/x-www-form-urlencoded
    /// Content-Length: (length of body)
    ///
    /// Return value is an HttpResult, which will need the caller to deinit().
    pub fn makeRequest(
        self: Self,
        endpoint: EndPoint,
        request: HttpRequest,
        signing_config: ?signing.Config,
        options: Options,
    ) !HttpResult {
        var request_cp = request;

        log.debug("Request Path: {s}", .{request_cp.path});
        log.debug("Endpoint Path (actually used): {s}", .{endpoint.path});
        log.debug("Query: {s}", .{request_cp.query});
        log.debug("Request additional header count: {d}", .{request_cp.headers.len});
        log.debug("Method: {s}", .{request_cp.method});
        log.debug("body length: {d}", .{request_cp.body.len});
        log.debug("Body\n====\n{s}\n====", .{request_cp.body});

        // Endpoint calculation might be different from the request (e.g. S3 requests)
        // We will use endpoint instead
        request_cp.path = endpoint.path;

        var request_headers = std.ArrayList(std.http.Header){};
        defer request_headers.deinit(self.allocator);

        const len = try addHeaders(self.allocator, &request_headers, endpoint.host, request_cp.body, request_cp.content_type, request_cp.headers);
        defer if (len) |l| self.allocator.free(l);
        request_cp.headers = request_headers.items;

        // Signing mutates the header set; the signed additions are released
        // by the deferred freeSignedRequest below
        if (signing_config) |opts| request_cp = try signing.signRequest(self.allocator, request_cp, opts);
        defer {
            if (signing_config) |opts| {
                signing.freeSignedRequest(self.allocator, &request_cp, opts);
            }
        }

        var headers = std.ArrayList(std.http.Header){};
        defer headers.deinit(self.allocator);
        for (request_cp.headers) |header|
            try headers.append(self.allocator, .{ .name = header.name, .value = header.value });
        log.debug("All Request Headers:", .{});
        for (headers.items) |h| {
            log.debug("\t{s}: {s}", .{ h.name, h.value });
        }

        const url = try std.fmt.allocPrint(self.allocator, "{s}{s}{s}", .{ endpoint.uri, request_cp.path, request_cp.query });
        defer self.allocator.free(url);
        log.debug("Request url: {s}", .{url});
        // TODO: Fix this proxy stuff. This is all a kludge just to compile, but std.http.Client has it all built in now
        var cl = std.http.Client{ .allocator = self.allocator, .https_proxy = if (self.proxy) |*p| @constCast(p) else null };
        defer cl.deinit(); // TODO: Connection pooling
        const method = std.meta.stringToEnum(std.http.Method, request_cp.method).?;

        // Fetch API in 0.15.1 is insufficient as it does not provide
        // server headers. We'll construct and send the request ourselves
        const uri = try std.Uri.parse(url);
        const req_options: std.http.Client.RequestOptions = .{
            // we need full control over most headers. I wish libraries would do a
            // better job of having default headers as an opt-in...
            .headers = .{
                .host = .omit,
                .authorization = .omit,
                .user_agent = .omit,
                .connection = .default, // we can let the client manage this...it has no impact to us
                .accept_encoding = .default, // accept encoding (gzip, deflate) *should* be ok
                .content_type = .omit,
            },
            .extra_headers = headers.items,
        };

        var req = if (options.mock) |m|
            try m.request(method, uri, req_options) // This will call the test harness
        else
            try cl.request(method, uri, req_options);
        defer req.deinit();
        // TODO: Need to test for payloads > 2^14. I believe one of our tests does this, but not sure
        // if (request_cp.body.len > 0) {
        //     // Workaround for https://github.com/ziglang/zig/issues/15626
        //     const max_bytes: usize = 1 << 14;
        //     var inx: usize = 0;
        //     while (request_cp.body.len > inx) {
        //         try req.writeAll(request_cp.body[inx..@min(request_cp.body.len, inx + max_bytes)]);
        //         inx += max_bytes;
        //     }
        //
        //     try req.finish();
        // }
        // try req.wait();

        if (request_cp.body.len > 0) {
            // This seems a bit silly, but we can't have a []const u8 here
            // because when it sends, it's using a writer, and this becomes
            // the buffer of the writer. It's conceivable that something
            // in the chain then does actually modify the body of the request
            // so we'll need to duplicate it here
            const req_body = try self.allocator.dupe(u8, request_cp.body);
            defer self.allocator.free(req_body); // docs for sendBodyComplete say it flushes, so no need to outlive this
            if (options.mock) |m|
                try m.sendBodyComplete(req_body)
            else
                try req.sendBodyComplete(req_body);
        } else if (options.mock == null) try req.sendBodiless();

        // if (options.mock == null) log.err("Request sent. Body len {d}, uri {f}", .{ request_cp.body.len, uri });
        var response = if (options.mock) |m| try m.receiveHead() else try req.receiveHead(&.{});

        // TODO: Timeout - is this now above us?
        log.debug(
            "Request Complete. Response code {d}: {?s}",
            .{ @intFromEnum(response.head.status), response.head.status.phrase() },
        );
        log.debug("Response headers:", .{});
        var resp_headers = std.ArrayList(Header){};
        defer resp_headers.deinit(self.allocator);
        var it = response.head.iterateHeaders();
        while (it.next()) |h| { // even though we don't expect to fill the buffer,
            // we don't get a length, but looks via stdlib source
            // it should be ok to call next on the undefined memory
            log.debug("    {s}: {s}", .{ h.name, h.value });
            try resp_headers.append(self.allocator, .{
                .name = try (self.allocator.dupe(u8, h.name)),
                .value = try (self.allocator.dupe(u8, h.value)),
            });
        }
        // This is directly lifted from fetch, as there is no function in
        // 0.15.1 client to negotiate decompression
        const decompress_buffer: []u8 = switch (response.head.content_encoding) {
            .identity => &.{},
            .zstd => try self.allocator.alloc(u8, std.compress.zstd.default_window_len),
            .deflate, .gzip => try self.allocator.alloc(u8, std.compress.flate.max_window_len),
            .compress => return error.UnsupportedCompressionMethod,
        };
        defer self.allocator.free(decompress_buffer);

        var transfer_buffer: [64]u8 = undefined;
        var decompress: std.http.Decompress = undefined;
        const reader = response.readerDecompressing(&transfer_buffer, &decompress, decompress_buffer);

        // Not sure on optimal size here, but should definitely be > 0
        var aw = try std.Io.Writer.Allocating.initCapacity(self.allocator, 128);
        defer aw.deinit();
        const response_writer = &aw.writer;
        _ = reader.streamRemaining(response_writer) catch |err| switch (err) {
            error.ReadFailed => return response.bodyErr().?,
            else => |e| return e,
        };
        log.debug("raw response body:\n{s}", .{aw.written()});

        const rc = HttpResult{
            .response_code = @intFromEnum(response.head.status),
            .body = try aw.toOwnedSlice(),
            .headers = try resp_headers.toOwnedSlice(self.allocator),
            .allocator = self.allocator,
        };
        return rc;
    }
};
372 |
/// Returns the region used for signing. The global services cloudfront and
/// iam always sign against us-east-1; every other service uses the caller's
/// region unchanged.
fn getRegion(service: []const u8, region: []const u8) []const u8 {
    const global_services = [_][]const u8{ "cloudfront", "iam" };
    for (global_services) |global_service| {
        if (std.mem.eql(u8, service, global_service)) return "us-east-1";
    }
    return region;
}
378 |
/// Appends the standard request headers (Accept, Host, User-Agent and,
/// unless the caller already supplied one, Content-Type) followed by the
/// caller's additional headers. Always returns null today; the optional
/// return exists so a future allocated header value could be handed back
/// to the caller for freeing.
fn addHeaders(
    allocator: std.mem.Allocator,
    headers: *std.ArrayList(std.http.Header),
    host: []const u8,
    body: []const u8,
    content_type: []const u8,
    additional_headers: []const Header,
) !?[]const u8 {
    // The body parameter existed to compute a Content-Length header, but the
    // client's send() adds that itself and we must not emit it twice. We
    // would prefer to add (and therefore sign) it here, so keep watching
    // stdlib for a way to take that control back.
    _ = body;

    var caller_set_content_type = false;
    for (additional_headers) |header| {
        if (std.ascii.eqlIgnoreCase(header.name, "Content-Type")) {
            caller_set_content_type = true;
            break;
        }
    }

    try headers.append(allocator, .{ .name = "Accept", .value = "application/json" });
    try headers.append(allocator, .{ .name = "Host", .value = host });
    try headers.append(allocator, .{ .name = "User-Agent", .value = "zig-aws 1.0" });
    if (!caller_set_content_type)
        try headers.append(allocator, .{ .name = "Content-Type", .value = content_type });
    try headers.appendSlice(allocator, additional_headers);
    return null;
}
410 |
/// Fetches an environment variable, mapping "not found" to null.
/// Caller owns the returned slice.
fn getEnvironmentVariable(allocator: std.mem.Allocator, key: []const u8) !?[]const u8 {
    if (std.process.getEnvVarOwned(allocator, key)) |value| {
        return value;
    } else |err| switch (err) {
        error.EnvironmentVariableNotFound => return null,
        else => return err,
    }
}
417 |
/// override endpoint url. Intended for use in testing. Normally, you should
/// rely on AWS_ENDPOINT_URL environment variable for this.
/// When set, it takes precedence over AWS_ENDPOINT_URL in endpointForRequest.
pub var endpoint_override: ?[]const u8 = null;
421 |
/// Resolves the endpoint for a service/request pair. Precedence:
/// `endpoint_override`, then AWS_ENDPOINT_URL, then per-service exceptions
/// (IAM, CloudFront, S3 virtual hosting), then the standard
/// https://service[.dualstack].region.domain pattern.
/// Caller owns the returned EndPoint (deinit it).
fn endpointForRequest(allocator: std.mem.Allocator, service: []const u8, request: HttpRequest, options: Options) !EndPoint {
    if (endpoint_override) |override| {
        return endPointFromUri(allocator, try allocator.dupe(u8, override), request.path);
    }
    if (try getEnvironmentVariable(allocator, "AWS_ENDPOINT_URL")) |override| {
        defer allocator.free(override);
        return endPointFromUri(allocator, try allocator.dupe(u8, override), request.path);
    }

    // Fallback to us-east-1 if global endpoint does not exist
    const realregion = if (std.mem.eql(u8, options.region, "aws-global")) "us-east-1" else options.region;
    const dualstack = if (options.dualstack) ".dualstack" else "";

    // Non-standard partitions use their own top-level domains
    const domain = switch (std.hash_map.hashString(options.region)) {
        CN_NORTH_1_HASH, CN_NORTHWEST_1_HASH => "amazonaws.com.cn",
        US_ISO_EAST_1_HASH => "c2s.ic.gov",
        US_ISOB_EAST_1_HASH => "sc2s.sgov.gov",
        else => "amazonaws.com",
    };

    if (try endpointException(allocator, service, request, options, realregion, dualstack, domain)) |exception|
        return exception;

    const uri = try std.fmt.allocPrint(allocator, "https://{s}{s}.{s}.{s}", .{ service, dualstack, realregion, domain });
    const host = try allocator.dupe(u8, uri["https://".len..]);
    log.debug("host: {s}, scheme: {s}, port: {}", .{ host, "https", 443 });
    return EndPoint{
        .uri = uri,
        .host = host,
        .scheme = "https",
        .port = 443,
        .allocator = allocator,
        .path = try allocator.dupe(u8, request.path),
    };
}
459 |
/// Handles services whose endpoints deviate from the standard
/// service.region.domain pattern (IAM, CloudFront, S3 virtual hosting).
/// Returns null when the standard pattern should be used.
fn endpointException(
    allocator: std.mem.Allocator,
    service: []const u8,
    request: HttpRequest,
    options: Options,
    realregion: []const u8,
    dualstack: []const u8,
    domain: []const u8,
) !?EndPoint {
    // Global endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#global-endpoints):
    // ✓ Amazon CloudFront
    // AWS Global Accelerator
    // ✓ AWS Identity and Access Management (IAM)
    // AWS Network Manager
    // AWS Organizations
    // Amazon Route 53
    // AWS Shield Advanced
    // AWS WAF Classic

    if (std.mem.eql(u8, service, "iam")) {
        return EndPoint{
            .uri = try allocator.dupe(u8, "https://iam.amazonaws.com"),
            .host = try allocator.dupe(u8, "iam.amazonaws.com"),
            .scheme = "https",
            .port = 443,
            .allocator = allocator,
            .path = try allocator.dupe(u8, request.path),
        };
    }
    if (std.mem.eql(u8, service, "cloudfront")) {
        return EndPoint{
            .uri = try allocator.dupe(u8, "https://cloudfront.amazonaws.com"),
            .host = try allocator.dupe(u8, "cloudfront.amazonaws.com"),
            .scheme = "https",
            .port = 443,
            .allocator = allocator,
            .path = try allocator.dupe(u8, request.path),
        };
    }
    if (std.mem.eql(u8, service, "s3")) {
        // Root path or single-segment path (no second '/'): standard endpoint
        if (request.path.len == 1 or std.mem.indexOf(u8, request.path[1..], "/") == null)
            return null;

        // We need to adjust the host and the path to accomodate virtual
        // host addressing. This only applies to bucket operations, but
        // right now I'm hoping that bucket operations do not include a path
        // component, so will be handled by the return null statement above.
        const bucket_name = s3BucketFromPath(request.path);
        const rest_of_path = request.path[bucket_name.len + 1 ..];
        // TODO: Implement
        _ = options;
        const uri = try std.fmt.allocPrint(allocator, "https://{s}.{s}{s}.{s}.{s}", .{ bucket_name, service, dualstack, realregion, domain });
        const host = try allocator.dupe(u8, uri["https://".len..]);
        log.debug("S3 host: {s}, scheme: {s}, port: {}", .{ host, "https", 443 });
        return EndPoint{
            .uri = uri,
            .host = host,
            .scheme = "https",
            .port = 443,
            .allocator = allocator,
            .path = try allocator.dupe(u8, rest_of_path),
        };
    }
    return null;
}
525 |
/// Extracts the bucket name from a path shaped like "/<bucket>/<rest>":
/// the text between the first two '/' characters. Callers (see the S3 branch
/// of endpointException) guarantee both slashes exist; anything else is
/// illegal input.
fn s3BucketFromPath(path: []const u8) []const u8 {
    const bucket_start = std.mem.indexOfScalar(u8, path, '/').? + 1;
    const bucket_end = std.mem.indexOfScalarPos(u8, path, bucket_start, '/').?;
    return path[bucket_start..bucket_end];
}
/// Builds an EndPoint from an already-allocated uri string.
///
/// allocator: used only for the host/path copies stored in the EndPoint
/// uri: ownership transfers to the returned EndPoint (freed by its deinit)
fn endPointFromUri(allocator: std.mem.Allocator, uri: []const u8, path: []const u8) !EndPoint {
    const parsed = try std.Uri.parse(uri);

    const scheme = parsed.scheme;
    const host = try allocator.dupe(u8, parsed.host.?.percent_encoded);
    // Explicit port wins; otherwise infer from the scheme, 0 when unknown
    const port: u16 = parsed.port orelse if (std.mem.eql(u8, scheme, "http"))
        80
    else if (std.mem.eql(u8, scheme, "https"))
        443
    else
        0;

    log.debug("host: {s}, scheme: {s}, port: {}", .{ host, scheme, port });

    return EndPoint{
        .uri = uri,
        .host = host,
        .scheme = scheme,
        .allocator = allocator,
        .port = port,
        .path = try allocator.dupe(u8, path),
    };
}
565 |
test "endpointForRequest standard operation" {
    const allocator = std.testing.allocator;
    const request: HttpRequest = .{};
    const options: Options = .{
        .region = "us-west-2",
        .dualstack = false,
        .sigv4_service_name = null,
    };

    // A regular regional service resolves to service.region.amazonaws.com
    const endpoint = try endpointForRequest(allocator, "dynamodb", request, options);
    defer endpoint.deinit();
    try std.testing.expectEqualStrings("https://dynamodb.us-west-2.amazonaws.com", endpoint.uri);
}
580 |
test "endpointForRequest for cloudfront" {
    const allocator = std.testing.allocator;
    const request: HttpRequest = .{};
    const options: Options = .{
        .region = "us-west-2",
        .dualstack = false,
        .sigv4_service_name = null,
    };

    // CloudFront is a global service: the region must be ignored
    const endpoint = try endpointForRequest(allocator, "cloudfront", request, options);
    defer endpoint.deinit();
    try std.testing.expectEqualStrings("https://cloudfront.amazonaws.com", endpoint.uri);
}
595 |
test "endpointForRequest for s3" {
    const allocator = std.testing.allocator;
    const request: HttpRequest = .{};
    const options: Options = .{
        .region = "us-east-2",
        .dualstack = false,
        .sigv4_service_name = null,
    };

    // Without a bucket in the path, S3 uses the standard regional endpoint
    const endpoint = try endpointForRequest(allocator, "s3", request, options);
    defer endpoint.deinit();
    try std.testing.expectEqualStrings("https://s3.us-east-2.amazonaws.com", endpoint.uri);
}
test "endpointForRequest for s3 - specific bucket" {
    // Virtual-hosted-style addressing: the leading path segment ("bucket")
    // is hoisted into the hostname and stripped from the request path.
    const allocator = std.testing.allocator;
    const request: HttpRequest = .{
        .path = "/bucket/key",
    };
    const options: Options = .{
        .region = "us-east-2",
        .dualstack = false,
        .sigv4_service_name = null,
    };

    const endpoint = try endpointForRequest(allocator, "s3", request, options);
    defer endpoint.deinit();
    try std.testing.expectEqualStrings("https://bucket.s3.us-east-2.amazonaws.com", endpoint.uri);
    try std.testing.expectEqualStrings("/key", endpoint.path);
}
627 |
--------------------------------------------------------------------------------
/src/test_rest_json_1_query_no_input.response:
--------------------------------------------------------------------------------
1 |
2 | {"Functions":[{"Description":"AWS CDK resource provider framework - onEvent (DevelopmentFrontendStack-g650u/com.amazonaws.cdk.custom-resources.amplify-asset-deployment-provider/amplify-asset-deployment-handler-provider)","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"0c62fc74-a692-403d-9206-5fcbad406424","LastModified":"2023-03-01T18:13:15.704+0000","FileSystemConfigs":null,"FunctionName":"DevelopmentFrontendStack--amplifyassetdeploymentha-aZqB9IbZLIKU","Runtime":"nodejs14.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentha-aZqB9IbZLIKU","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":900,"Handler":"framework.onEvent","CodeSha256":"m4tt+M0l3p8bZvxIDj83dwGrwRW6atCfS/q8AiXCD3o=","Role":"arn:aws:iam::550620852718:role/DevelopmentFrontendStack-amplifyassetdeploymentha-1782JF7WAPXZ3","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":4307,"State":null,"StateReason":null,"Environment":{"Variables":{"USER_ON_EVENT_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymenton-X9iZJSCSPYDH","WAITER_STATE_MACHINE_ARN":"arn:aws:states:us-west-2:550620852718:stateMachine:amplifyassetdeploymenthandlerproviderwaiterstatemachineB3C2FCBE-Ltggp5wBcHWO","USER_IS_COMPLETE_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentis-jaHopLrSSARV"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"Re
visionId":"e94955d8-e21d-4949-a942-79557a2dd0b4","LastModified":"2023-03-10T18:49:46.116+0000","FileSystemConfigs":null,"FunctionName":"amplify-login-create-auth-challenge-7c72ae2f","Runtime":"nodejs16.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:amplify-login-create-auth-challenge-7c72ae2f","KMSKeyArn":null,"MemorySize":256,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":15,"Handler":"index.handler","CodeSha256":"F6dH1EPTE4mlv/piMF6OkK+ZeYrPs/OCGCzPQVmj66g=","Role":"arn:aws:iam::550620852718:role/amplify-login-lambda-7c72ae2f","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":1176,"State":null,"StateReason":null,"Environment":null,"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"42febf7f-b429-4929-81f7-b07d141c5ef8","LastModified":"2023-03-01T21:46:17.213+0000","FileSystemConfigs":null,"FunctionName":"DevelopmentFrontendStack--amplifyassetdeploymenton-khrJXiHCrnWH","Runtime":"nodejs14.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymenton-khrJXiHCrnWH","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":3,"Handler":"index.onEvent","CodeSha256":"rDddZOVFKB99sLk4xliKKZ5HI9k6yJYRFT07fKsT9A8=","Role":"arn:aws:iam::550620852718:role/DevelopmentFrontendStack-amplifyassetdeploymenton-NYFHZD78I18J","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":1817,"State":null,"StateReason":null,"Environment":{"Variables":{"AWS_NODEJ
S_CONNECTION_REUSE_ENABLED":"1"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"AWS CDK resource provider framework - onTimeout (DevelopmentFrontendStack-g650u/com.amazonaws.cdk.custom-resources.amplify-asset-deployment-provider/amplify-asset-deployment-handler-provider)","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"149f4db0-84d5-402c-bf4a-7e265f943b6d","LastModified":"2023-03-01T18:12:17.700+0000","FileSystemConfigs":null,"FunctionName":"DevelopmentFrontendStack--amplifyassetdeploymentha-ZK5IQaq8pZFz","Runtime":"nodejs14.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentha-ZK5IQaq8pZFz","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":900,"Handler":"framework.onTimeout","CodeSha256":"m4tt+M0l3p8bZvxIDj83dwGrwRW6atCfS/q8AiXCD3o=","Role":"arn:aws:iam::550620852718:role/DevelopmentFrontendStack-amplifyassetdeploymentha-1FQC7W9R2EQ1G","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":4307,"State":null,"StateReason":null,"Environment":{"Variables":{"USER_ON_EVENT_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymenton-X9iZJSCSPYDH","USER_IS_COMPLETE_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentis-jaHopLrSSARV"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionI
d":"7d394c87-e579-487f-8ab4-f7c894065914","LastModified":"2023-03-01T18:11:45.557+0000","FileSystemConfigs":null,"FunctionName":"DevelopmentFrontendStack--amplifyassetdeploymenton-X9iZJSCSPYDH","Runtime":"nodejs14.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymenton-X9iZJSCSPYDH","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":3,"Handler":"index.onEvent","CodeSha256":"rDddZOVFKB99sLk4xliKKZ5HI9k6yJYRFT07fKsT9A8=","Role":"arn:aws:iam::550620852718:role/DevelopmentFrontendStack-amplifyassetdeploymenton-MLRMAWG11FNO","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":1817,"State":null,"StateReason":null,"Environment":{"Variables":{"AWS_NODEJS_CONNECTION_REUSE_ENABLED":"1"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":{"VpcId":"","SecurityGroupIds":[],"SubnetIds":[]},"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"5211860d-6811-444f-aabe-556ab2082e8d","LastModified":"2022-10-07T15:34:54.000+0000","FileSystemConfigs":null,"FunctionName":"awsome-lambda-LambdaStackawsomeLambda","Runtime":"go1.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:awsome-lambda-LambdaStackawsomeLambda","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":2,"Handler":"awsomelambda","CodeSha256":"NaY31cBK2HOsQ31jIFjIXfTYCnCJI51OgmmM8R62n+w=","Role":"arn:aws:iam::550620852718:role/awsome-lambda-LambdaStack-LambdaStackFunctionServi-10FYFJLB9S4NS","SigningProfileVersionArn":null,"MasterArn":n
ull,"RuntimeVersionConfig":null,"CodeSize":6921769,"State":null,"StateReason":null,"Environment":{"Variables":{"TABLE_NAME":"awsometable"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"fa53a29b-852b-4728-a4d6-2dada24d5bff","LastModified":"2023-03-10T18:49:46.076+0000","FileSystemConfigs":null,"FunctionName":"amplify-login-define-auth-challenge-7c72ae2f","Runtime":"nodejs16.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:amplify-login-define-auth-challenge-7c72ae2f","KMSKeyArn":null,"MemorySize":256,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":15,"Handler":"index.handler","CodeSha256":"JP2e+LBDVivBGg+YgL4/4r1cPzb1c8TJFKFqr10B7pY=","Role":"arn:aws:iam::550620852718:role/amplify-login-lambda-7c72ae2f","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":1770,"State":null,"StateReason":null,"Environment":null,"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"8298162a-5977-46ce-b96a-627cb7ee9001","LastModified":"2023-03-01T18:11:44.706+0000","FileSystemConfigs":null,"FunctionName":"DevelopmentFrontendStack--amplifyassetdeploymentis-jaHopLrSSARV","Runtime":"nodejs14.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentis-jaHopLrSSARV","KMSKeyArn":null,"MemorySize":128,"ImageConfigRespons
e":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":3,"Handler":"index.isComplete","CodeSha256":"rDddZOVFKB99sLk4xliKKZ5HI9k6yJYRFT07fKsT9A8=","Role":"arn:aws:iam::550620852718:role/DevelopmentFrontendStack-amplifyassetdeploymentis-FKETZNYM7KLM","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":1817,"State":null,"StateReason":null,"Environment":{"Variables":{"AWS_NODEJS_CONNECTION_REUSE_ENABLED":"1"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"973a53b6-22db-4a3f-9a55-f75f0ae8bdb5","LastModified":"2023-03-10T18:57:12.506+0000","FileSystemConfigs":null,"FunctionName":"amplify-login-verify-auth-challenge-b4883e4c","Runtime":"nodejs16.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:amplify-login-verify-auth-challenge-b4883e4c","KMSKeyArn":null,"MemorySize":256,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":15,"Handler":"index.handler","CodeSha256":"DCNbkbpi7lcoljT2Xoka941sxwJ9xWnxCw+nddrVWSQ=","Role":"arn:aws:iam::550620852718:role/amplify-login-lambda-b4883e4c","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":2559,"State":null,"StateReason":null,"Environment":{"Variables":{},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"13a614e2-5230-4bd4-9144-56bbbcf674d3","LastModified":"2023-03-01T21:46:17.936+0000","FileSystemConfigs":null,"FunctionName"
:"DevelopmentFrontendStack--amplifyassetdeploymentis-KdTnkUbrXarx","Runtime":"nodejs14.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentis-KdTnkUbrXarx","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":3,"Handler":"index.isComplete","CodeSha256":"rDddZOVFKB99sLk4xliKKZ5HI9k6yJYRFT07fKsT9A8=","Role":"arn:aws:iam::550620852718:role/DevelopmentFrontendStack-amplifyassetdeploymentis-AIUO42JV0I32","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":1817,"State":null,"StateReason":null,"Environment":{"Variables":{"AWS_NODEJS_CONNECTION_REUSE_ENABLED":"1"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"fcf84475-411a-4fbb-9400-2d1fc4009602","LastModified":"2023-03-10T18:57:12.543+0000","FileSystemConfigs":null,"FunctionName":"amplify-login-define-auth-challenge-b4883e4c","Runtime":"nodejs16.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:amplify-login-define-auth-challenge-b4883e4c","KMSKeyArn":null,"MemorySize":256,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":15,"Handler":"index.handler","CodeSha256":"JP2e+LBDVivBGg+YgL4/4r1cPzb1c8TJFKFqr10B7pY=","Role":"arn:aws:iam::550620852718:role/amplify-login-lambda-b4883e4c","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":1770,"State":null,"StateReason":null,"Environment":null,"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":
null,"Architectures":["x86_64"]},{"Description":"A starter AWS Lambda function.","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"ade91790-5448-4359-88f1-ace58efe5b69","LastModified":"2023-02-22T01:24:04.000+0000","FileSystemConfigs":null,"FunctionName":"ecs-from-pipeline","Runtime":"python3.7","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:ecs-from-pipeline","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":3,"Handler":"lambda_function.lambda_handler","CodeSha256":"av4W/6+HMnpwrS1J484QsUgoTq/4XHUl0+CDWOTQDlU=","Role":"arn:aws:iam::550620852718:role/service-role/ecs-from-pipeline-role-6cpw65oc","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":682,"State":null,"StateReason":null,"Environment":null,"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"5fbc7fa7-3b08-4a5e-a3fd-73db3e5aad31","LastModified":"2023-03-10T18:57:12.579+0000","FileSystemConfigs":null,"FunctionName":"amplify-login-create-auth-challenge-b4883e4c","Runtime":"nodejs16.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:amplify-login-create-auth-challenge-b4883e4c","KMSKeyArn":null,"MemorySize":256,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":15,"Handler":"index.handler","CodeSha256":"F6dH1EPTE4mlv/piMF6OkK+ZeYrPs/OCGCzPQVmj66g=","Role":"arn:aws:iam::550620852718:role/amplify-login-lambda-b4883e4c","SigningProfileVersionArn":null,"Master
Arn":null,"RuntimeVersionConfig":null,"CodeSize":1176,"State":null,"StateReason":null,"Environment":null,"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"AWS CDK resource provider framework - onTimeout (DevelopmentFrontendStack-bgf6z/com.amazonaws.cdk.custom-resources.amplify-asset-deployment-provider/amplify-asset-deployment-handler-provider)","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"f6e1edea-6e13-4eba-b128-cf77e78df368","LastModified":"2023-03-01T21:46:51.332+0000","FileSystemConfigs":null,"FunctionName":"DevelopmentFrontendStack--amplifyassetdeploymentha-mjNZdu8ICWd5","Runtime":"nodejs14.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentha-mjNZdu8ICWd5","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":900,"Handler":"framework.onTimeout","CodeSha256":"m4tt+M0l3p8bZvxIDj83dwGrwRW6atCfS/q8AiXCD3o=","Role":"arn:aws:iam::550620852718:role/DevelopmentFrontendStack-amplifyassetdeploymentha-FTYKWBKOHNVJ","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":4307,"State":null,"StateReason":null,"Environment":{"Variables":{"USER_ON_EVENT_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymenton-khrJXiHCrnWH","USER_IS_COMPLETE_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentis-KdTnkUbrXarx"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"Lambda function for auto-deleting objects in 
mysfitszj3t6webstack-hostingbucketa91a61fe-1ep3ezkgwpxr0 S3 bucket.","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"c9e78bee-e853-4417-86a2-6e94582b038b","LastModified":"2023-03-01T04:53:54.320+0000","FileSystemConfigs":null,"FunctionName":"mysfitszj3t6WebStack-CustomS3AutoDeleteObjectsCust-6TuDUPzRMMji","Runtime":"nodejs14.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:mysfitszj3t6WebStack-CustomS3AutoDeleteObjectsCust-6TuDUPzRMMji","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":900,"Handler":"__entrypoint__.handler","CodeSha256":"yltzPKQF4pqNnS1O5tcaxWE+AvMs8wapsIimbZLClic=","Role":"arn:aws:iam::550620852718:role/mysfitszj3t6WebStack-CustomS3AutoDeleteObjectsCust-2R3D5CIYX4TL","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":2432,"State":null,"StateReason":null,"Environment":null,"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"0fc48448-c4f8-4166-a528-08e13fe63a99","LastModified":"2023-03-10T18:49:46.032+0000","FileSystemConfigs":null,"FunctionName":"amplify-login-verify-auth-challenge-7c72ae2f","Runtime":"nodejs16.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:amplify-login-verify-auth-challenge-7c72ae2f","KMSKeyArn":null,"MemorySize":256,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":15,"Handler":"index.handler","CodeSha256":"DCNbkbpi7lcoljT2Xoka941sxwJ9xWnxCw+nddrVWSQ=","Role":"arn:aw
s:iam::550620852718:role/amplify-login-lambda-7c72ae2f","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":2559,"State":null,"StateReason":null,"Environment":{"Variables":{},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"AWS CDK resource provider framework - onEvent (DevelopmentFrontendStack-bgf6z/com.amazonaws.cdk.custom-resources.amplify-asset-deployment-provider/amplify-asset-deployment-handler-provider)","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"e6c4ccc6-985b-41f7-96dc-9b695bc524b8","LastModified":"2023-03-01T21:47:52.975+0000","FileSystemConfigs":null,"FunctionName":"DevelopmentFrontendStack--amplifyassetdeploymentha-Q7o3DyhWAN4t","Runtime":"nodejs14.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentha-Q7o3DyhWAN4t","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":900,"Handler":"framework.onEvent","CodeSha256":"m4tt+M0l3p8bZvxIDj83dwGrwRW6atCfS/q8AiXCD3o=","Role":"arn:aws:iam::550620852718:role/DevelopmentFrontendStack-amplifyassetdeploymentha-ZJX178E9E1MH","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":4307,"State":null,"StateReason":null,"Environment":{"Variables":{"USER_ON_EVENT_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymenton-khrJXiHCrnWH","WAITER_STATE_MACHINE_ARN":"arn:aws:states:us-west-2:550620852718:stateMachine:amplifyassetdeploymenthandlerproviderwaiterstatemachineB3C2FCBE-b3eh6QIYi88C","USER_IS_COMPLETE_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--ampl
ifyassetdeploymentis-KdTnkUbrXarx"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"AWS CDK resource provider framework - isComplete (DevelopmentFrontendStack-g650u/com.amazonaws.cdk.custom-resources.amplify-asset-deployment-provider/amplify-asset-deployment-handler-provider)","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"d92b096e-69a2-4b45-a909-49abfa3e5a62","LastModified":"2023-03-01T18:12:17.904+0000","FileSystemConfigs":null,"FunctionName":"DevelopmentFrontendStack--amplifyassetdeploymentha-Na74ntcTWqmO","Runtime":"nodejs14.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentha-Na74ntcTWqmO","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":900,"Handler":"framework.isComplete","CodeSha256":"m4tt+M0l3p8bZvxIDj83dwGrwRW6atCfS/q8AiXCD3o=","Role":"arn:aws:iam::550620852718:role/DevelopmentFrontendStack-amplifyassetdeploymentha-42EZDAH5D2JH","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":4307,"State":null,"StateReason":null,"Environment":{"Variables":{"USER_ON_EVENT_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymenton-X9iZJSCSPYDH","USER_IS_COMPLETE_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentis-jaHopLrSSARV"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"Revis
ionId":"6a31520b-b635-427a-9a9d-16d33fc39c2a","LastModified":"2023-03-01T01:48:16.311+0000","FileSystemConfigs":null,"FunctionName":"mysfitszj3t6AppStack-lambdaFunction940E68AD-bRvlR833iW4h","Runtime":"python3.9","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:mysfitszj3t6AppStack-lambdaFunction940E68AD-bRvlR833iW4h","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":3,"Handler":"app.handler","CodeSha256":"yZuiNYTwHQUlRn+C7WeUlFLCK8yrWsTI2pczAdaY8jQ=","Role":"arn:aws:iam::550620852718:role/mysfitszj3t6AppStack-lambdaFunctionServiceRoleF7E5-1DLRCVX8M7KJA","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":10830258,"State":null,"StateReason":null,"Environment":{"Variables":{"FORCE_UPDATE":"True","APPNAME":"mysfitszj3t6"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"Lambda function to update IP 
addresses","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"4b144b6e-f41b-45f1-a34e-72318a38e37a","LastModified":"2021-06-21T18:51:45.259+0000","FileSystemConfigs":null,"FunctionName":"ipupdater-events-IPUpdater-3Ujump8kP4M0","Runtime":"nodejs14.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:ipupdater-events-IPUpdater-3Ujump8kP4M0","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":3,"Handler":"cloudwatch-events.handler","CodeSha256":"0u3hOFyRGbKRhRlmYuwnFjwkGfxi8e0B58xzHHpBiic=","Role":"arn:aws:iam::550620852718:role/ipupdater-events-CloudTrailUpdateIpExecutionRole-JYSNPTLVFY4","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":3859,"State":null,"StateReason":null,"Environment":{"Variables":{"CF_AUTH_KEY":"f0d53532b4eee81535248a18420324c666f80","CF_ZONE_ID":"76aead7b91c5ee16eb2d506e1f7b03e9","DNS_TAG_SEARCH_KEY":"lerchdev","CF_AUTH_EMAIL":"emil@lerch.org"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"0df31633-67e9-439f-aa01-89188c65263c","LastModified":"2023-03-10T18:49:46.159+0000","FileSystemConfigs":null,"FunctionName":"amplify-login-custom-message-7c72ae2f","Runtime":"nodejs16.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:amplify-login-custom-message-7c72ae2f","KMSKeyArn":null,"MemorySize":256,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":15,"Handler":"index.handler","Code
Sha256":"sUpOkTFFIbmSQYXvUgjLQj9EFgha/dT6BbrGgYU202Y=","Role":"arn:aws:iam::550620852718:role/amplify-login-lambda-7c72ae2f","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":2256,"State":null,"StateReason":null,"Environment":null,"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"c49c82ce-e8a9-4f60-a304-f454db5c96a1","LastModified":"2023-03-01T04:54:16.222+0000","FileSystemConfigs":null,"FunctionName":"mysfitszj3t6WebStack-CustomCDKBucketDeployment8693-gWMm1FLBpsVS","Runtime":"python3.9","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":[{"SigningProfileVersionArn":null,"CodeSize":13941262,"UncompressedCodeSize":79903296,"SigningJobArn":null,"Arn":"arn:aws:lambda:us-west-2:550620852718:layer:IndexDeploymentAwsCliLayer22B6E017:2"}],"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:mysfitszj3t6WebStack-CustomCDKBucketDeployment8693-gWMm1FLBpsVS","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":900,"Handler":"index.handler","CodeSha256":"BBknNrz24IzJuS/KQXawzM/Wm7as1bwbfxscXhrY4Ho=","Role":"arn:aws:iam::550620852718:role/mysfitszj3t6WebStack-CustomCDKBucketDeployment8693-1G6JMGQGMEMI4","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":4279,"State":null,"StateReason":null,"Environment":null,"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"e6a22875-d246-4285-a144-1feb77a6c066","LastModified":"2023-03-10T18:57:12.615+0000","File
SystemConfigs":null,"FunctionName":"amplify-login-custom-message-b4883e4c","Runtime":"nodejs16.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:amplify-login-custom-message-b4883e4c","KMSKeyArn":null,"MemorySize":256,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":15,"Handler":"index.handler","CodeSha256":"sUpOkTFFIbmSQYXvUgjLQj9EFgha/dT6BbrGgYU202Y=","Role":"arn:aws:iam::550620852718:role/amplify-login-lambda-b4883e4c","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null,"CodeSize":2256,"State":null,"StateReason":null,"Environment":null,"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]},{"Description":"AWS CDK resource provider framework - isComplete (DevelopmentFrontendStack-bgf6z/com.amazonaws.cdk.custom-resources.amplify-asset-deployment-provider/amplify-asset-deployment-handler-provider)","TracingConfig":{"Mode":"PassThrough"},"VpcConfig":null,"SigningJobArn":null,"SnapStart":{"OptimizationStatus":"Off","ApplyOn":"None"},"RevisionId":"f6f61cfa-759a-4c94-9ac1-b724eb1f2b02","LastModified":"2023-03-01T21:46:51.329+0000","FileSystemConfigs":null,"FunctionName":"DevelopmentFrontendStack--amplifyassetdeploymentha-uC6KVSgiOGVc","Runtime":"nodejs14.x","Version":"$LATEST","PackageType":"Zip","LastUpdateStatus":null,"Layers":null,"FunctionArn":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentha-uC6KVSgiOGVc","KMSKeyArn":null,"MemorySize":128,"ImageConfigResponse":null,"LastUpdateStatusReason":null,"DeadLetterConfig":null,"Timeout":900,"Handler":"framework.isComplete","CodeSha256":"m4tt+M0l3p8bZvxIDj83dwGrwRW6atCfS/q8AiXCD3o=","Role":"arn:aws:iam::550620852718:role/DevelopmentFrontendStack-amplifyassetdeploymentha-KP5FUO6696RQ","SigningProfileVersionArn":null,"MasterArn":null,"RuntimeVersionConfig":null
,"CodeSize":4307,"State":null,"StateReason":null,"Environment":{"Variables":{"USER_ON_EVENT_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymenton-khrJXiHCrnWH","USER_IS_COMPLETE_FUNCTION_ARN":"arn:aws:lambda:us-west-2:550620852718:function:DevelopmentFrontendStack--amplifyassetdeploymentis-KdTnkUbrXarx"},"Error":null},"EphemeralStorage":{"Size":512},"StateReasonCode":null,"LastUpdateStatusReasonCode":null,"Architectures":["x86_64"]}],"NextMarker":null}
3 |
--------------------------------------------------------------------------------