├── .github
│   └── workflows
│       ├── ci.yml
│       └── release.yml
├── .gitignore
├── CHANGELOG.md
├── CODEOWNERS
├── Cargo.lock
├── Cargo.toml
├── LICENSE
├── README.md
├── assets
│   ├── s2-black.png
│   └── s2-white.png
├── cliff.toml
├── rust-toolchain.toml
└── src
    ├── account.rs
    ├── basin.rs
    ├── config.rs
    ├── error.rs
    ├── formats.rs
    ├── main.rs
    ├── ping.rs
    ├── stream.rs
    └── types.rs
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 | permissions:
3 |   contents: read
4 | on:
5 |   pull_request:
6 |   push:
7 |     branches:
8 |       - main
9 | env:
10 |   RUST_BACKTRACE: 1
11 |   CARGO_TERM_COLOR: always
12 |   CLICOLOR: 1
13 |   CARGO_INCREMENTAL: 0
14 |   CARGO_NET_GIT_FETCH_WITH_CLI: true
15 | concurrency:
16 |   group: "${{ github.workflow }}-${{ github.ref }}"
17 |   cancel-in-progress: true
18 | jobs:
19 |   ci:
20 |     permissions:
21 |       contents: none
22 |     name: CI
23 |     needs: lint
24 |     runs-on: ubuntu-latest
25 |     if: always()
26 |     steps:
27 |       - name: Failed
28 |         run: exit 1
29 |         if: contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'skipped')
30 |   test:
31 |     runs-on: ubuntu-latest
32 |     steps:
33 |       - name: checkout
34 |         uses: actions/checkout@v4
35 |       - name: install rust
36 |         uses: dtolnay/rust-toolchain@stable
37 |         with:
38 |           toolchain: stable
39 |           components: rustfmt, clippy
40 |       - name: install protoc
41 |         uses: arduino/setup-protoc@v3
42 |       - uses: Swatinem/rust-cache@v2
43 |       - name: Run cargo tests
44 |         run: cargo test
45 |   lint:
46 |     runs-on: ubuntu-latest
47 |     steps:
48 |       - name: checkout
49 |         uses: actions/checkout@v4
50 |       - name: install rust
51 |         uses: dtolnay/rust-toolchain@stable
52 |         with:
53 |           toolchain: stable
54 |           components: rustfmt, clippy
55 |       - name: install protoc
56 |         uses: arduino/setup-protoc@v3
57 |       - uses: Swatinem/rust-cache@v2
58 |       - name: Install cargo-sort
59 |         uses: baptiste0928/cargo-install@v3
60 |         with:
61 |           crate: cargo-sort
62 |           version: "^1.0"
63 |       - name: Check documentation
64 |         env:
65 |           RUSTDOCFLAGS: -D warnings
66 |         run: cargo doc --workspace --all-features --no-deps --document-private-items
67 |       - name: Check formatting
68 |         run: cargo fmt --all -- --check
69 |       - name: check clippy
70 |         run: cargo clippy --workspace --all-features --all-targets -- -D warnings --allow deprecated
71 |       - name: Check Cargo.toml sorting
72 |         run: cargo sort --workspace --check
73 |
--------------------------------------------------------------------------------
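
The `lint` job above doubles as a pre-push checklist; the same checks can be run locally (assuming `cargo-sort` is installed, e.g. via `cargo install cargo-sort`):

```bash
RUSTDOCFLAGS="-D warnings" cargo doc --workspace --all-features --no-deps --document-private-items
cargo fmt --all -- --check
cargo clippy --workspace --all-features --all-targets -- -D warnings --allow deprecated
cargo sort --workspace --check
```
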
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: release
2 | on:
3 |   push:
4 |     tags: ["[0-9]+.[0-9]+.[0-9]+*"]
5 |   workflow_dispatch:
6 | jobs:
7 |   build_binaries:
8 |     name: ${{ matrix.target }}
9 |     runs-on: ${{ matrix.os }}
10 |     strategy:
11 |       fail-fast: false
12 |       matrix:
13 |         include:
14 |           - os: ubuntu-24.04
15 |             target: aarch64-unknown-linux-gnu
16 |             deps: |
17 |               sudo apt-get update
18 |               sudo apt-get install -y gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
19 |             env:
20 |               CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-gcc
21 |           - os: ubuntu-24.04
22 |             target: x86_64-unknown-linux-gnu
23 |           - os: macos-latest
24 |             target: x86_64-apple-darwin
25 |           - os: macos-latest
26 |             target: aarch64-apple-darwin
27 |           - os: windows-latest
28 |             target: x86_64-pc-windows-msvc
29 |           - os: windows-latest
30 |             target: aarch64-pc-windows-msvc
31 |     steps:
32 |       - name: checkout
33 |         uses: actions/checkout@v4
34 |       - uses: actions-rust-lang/setup-rust-toolchain@v1
35 |         with:
36 |           rustflags: ""
37 |           target: ${{ matrix.target }}
38 |       - name: Install dependencies
39 |         if: matrix.deps != ''
40 |         run: ${{ matrix.deps }}
41 |         shell: bash
42 |       - name: install protoc
43 |         uses: arduino/setup-protoc@v3
44 |         with:
45 |           repo-token: ${{ secrets.GITHUB_TOKEN }}
46 |       - name: Set CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER
47 |         if: matrix.target == 'aarch64-unknown-linux-gnu'
48 |         run: echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc" >> $GITHUB_ENV
49 |       - name: Build
50 |         run: cargo build --release --target ${{ matrix.target }}
51 |       - name: Create pem and certificate.der files
52 |         if: matrix.os == 'macos-latest'
53 |         run: |
54 |           echo "${{ secrets.MACOS_PEM }}" | base64 -d -o macos.pem
55 |           echo "${{ secrets.MACOS_CERTIFICATE_DER }}" | base64 -d -o certificate.der
56 |       - name: Sign macos binary
57 |         if: matrix.os == 'macos-latest'
58 |         uses: indygreg/apple-code-sign-action@v1
59 |         with:
60 |           input_path: target/${{ matrix.target }}/release/s2
61 |           pem_file: macos.pem
62 |           certificate_der_file: certificate.der
63 |           sign: true
64 |           sign_args: "--code-signature-flags=runtime"
65 |       - name: Prepare artifacts
66 |         shell: bash
67 |         run: |
68 |           cd target/${{ matrix.target }}/release
69 |
70 |           if [ "${{ matrix.os }}" = "windows-latest" ];
71 |           then
72 |             7z a ../../../s2-${{ matrix.target }}.zip s2.exe
73 |           else
74 |             zip -r ../../../s2-${{ matrix.target }}.zip s2
75 |           fi
76 |       - name: App store connect api key
77 |         if: matrix.os == 'macos-latest'
78 |         run: echo "${{ secrets.APP_STORE_CONNECT_API_KEY }}" | base64 -d -o app_store_connect_api_key.json
79 |       - name: Notarize macos binary
80 |         if: matrix.os == 'macos-latest'
81 |         uses: indygreg/apple-code-sign-action@v1
82 |         with:
83 |           input_path: s2-${{ matrix.target }}.zip
84 |           sign: false
85 |           notarize: true
86 |           app_store_connect_api_key_json_file: app_store_connect_api_key.json
87 |       - name: upload artifacts
88 |         uses: actions/upload-artifact@v4
89 |         with:
90 |           name: ${{ matrix.target }}
91 |           path: |
92 |             *.zip
93 |           if-no-files-found: error
94 |
95 |   create_release:
96 |     needs: build_binaries
97 |     runs-on: ubuntu-24.04
98 |     if: github.event_name != 'workflow_dispatch'
99 |     permissions:
100 |       contents: write
101 |     steps:
102 |       - name: checkout
103 |         uses: actions/checkout@v4
104 |       - name: version
105 |         id: version
106 |         uses: SebRollen/toml-action@v1.2.0
107 |         with:
108 |           file: Cargo.toml
109 |           field: package.version
110 |       - uses: mindsers/changelog-reader-action@v2
111 |         id: changelog_reader
112 |         with:
113 |           version: ${{ steps.version.outputs.value }}
114 |       - name: install rust
115 |         uses: dtolnay/rust-toolchain@stable
116 |       - name: publish to crates.io
117 |         run: cargo publish --token ${{ secrets.CRATES_IO_TOKEN }}
118 |       - name: download artifacts
119 |         uses: actions/download-artifact@v4
120 |       - name: create release
121 |         uses: softprops/action-gh-release@v2
122 |         with:
123 |           files: |
124 |             **/*.tar.gz
125 |             **/*.zip
126 |           name: ${{ steps.version.outputs.value }}
127 |           body: ${{ steps.changelog_reader.outputs.changes }}
128 |
129 |   update_homebrew:
130 |     needs: [create_release, build_binaries]
131 |     runs-on: ubuntu-24.04
132 |     if: github.event_name != 'workflow_dispatch'
133 |     steps:
134 |       - name: checkout
135 |         uses: actions/checkout@v4
136 |       - name: version
137 |         id: version
138 |         uses: SebRollen/toml-action@v1.2.0
139 |         with:
140 |           file: Cargo.toml
141 |           field: package.version
142 |       - name: Download artifacts
143 |         uses: actions/download-artifact@v4
144 |       - name: sha256sum
145 |         run: |
146 |           LINUX_INTEL_SHA256=$(shasum -a 256 x86_64-unknown-linux-gnu/s2-x86_64-unknown-linux-gnu.zip | awk '{print $1}')
147 |           echo "LINUX_INTEL_SHA256=$LINUX_INTEL_SHA256" >> $GITHUB_ENV
148 |           LINUX_ARM_SHA256=$(shasum -a 256 aarch64-unknown-linux-gnu/s2-aarch64-unknown-linux-gnu.zip | awk '{print $1}')
149 |           echo "LINUX_ARM_SHA256=$LINUX_ARM_SHA256" >> $GITHUB_ENV
150 |           MAC_INTEL_SHA256=$(shasum -a 256 x86_64-apple-darwin/s2-x86_64-apple-darwin.zip | awk '{print $1}')
151 |           echo "MAC_INTEL_SHA256=$MAC_INTEL_SHA256" >> $GITHUB_ENV
152 |           MAC_ARM_SHA256=$(shasum -a 256 aarch64-apple-darwin/s2-aarch64-apple-darwin.zip | awk '{print $1}')
153 |           echo "MAC_ARM_SHA256=$MAC_ARM_SHA256" >> $GITHUB_ENV
154 |       - name: checkout into the formula repo
155 |         uses: actions/checkout@v4
156 |         with:
157 |           repository: 's2-streamstore/homebrew-s2'
158 |           token: ${{ secrets.HOMEBREW_PAT }}
159 |       - name: update formula
160 |         run: |
161 |           sed -i.bak "s/^  version \".*\"$/  version \"${{ steps.version.outputs.value }}\"/" s2.rb
162 |           sed -z -i -e 's/[0-9a-f]\{64\}/${{ env.MAC_INTEL_SHA256 }}/1' s2.rb
163 |           sed -z -i -e 's/[0-9a-f]\{64\}/${{ env.MAC_ARM_SHA256 }}/2' s2.rb
164 |           sed -z -i -e 's/[0-9a-f]\{64\}/${{ env.LINUX_INTEL_SHA256 }}/3' s2.rb
165 |           sed -z -i -e 's/[0-9a-f]\{64\}/${{ env.LINUX_ARM_SHA256 }}/4' s2.rb
166 |       - name: release
167 |         run: |
168 |           git config --global user.email "mehul@s2.dev"
169 |           git config --global user.name "Mehul Arora"
170 |           git add s2.rb
171 |           git commit -m "Update S2 to ${{ steps.version.outputs.value }}"
172 |           git push
173 |
--------------------------------------------------------------------------------
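
One detail in the `update formula` step deserves a note: with GNU sed, `-z` reads the whole file as a single record, so the trailing `/1` through `/4` flags address the Nth 64-character hex digest in the entire file rather than on each line. A small demo of the same trick on a hypothetical file (shortened patterns, not the workflow's actual command):

```bash
printf 'sha256 "aaaa"\nsha256 "bbbb"\n' > demo.rb
# -z slurps the file as one record; /2 rewrites only the second match overall
sed -z -i -e 's/[ab]\{4\}/cccc/2' demo.rb
cat demo.rb   # first line unchanged, second line now: sha256 "cccc"
```
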
/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | .idea
3 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | All notable changes to this project will be documented in this file.
4 |
5 | ## [0.16.0] - 2025-05-25
6 |
7 | ### Features
8 |
9 | - Add linger opt for append ([#148](https://github.com/s2-streamstore/s2-cli/issues/148))
10 | - Fencing token as string rather than base64-encoded bytes ([#150](https://github.com/s2-streamstore/s2-cli/issues/150))
11 |
12 | ### Miscellaneous Tasks
13 |
14 | - Default `read` to tailing rather than reading from head of stream ([#149](https://github.com/s2-streamstore/s2-cli/issues/149))
15 | - Updated `--format` names ([#151](https://github.com/s2-streamstore/s2-cli/issues/151))
16 |
17 | ## [0.15.0] - 2025-05-10
18 |
19 | ### Miscellaneous Tasks
20 |
21 | - Bump SDK version ([#146](https://github.com/s2-streamstore/s2-cli/issues/146))
22 |
23 | ## [0.14.0] - 2025-05-08
24 |
25 | ### Features
26 |
27 | - Support timestamping configs ([#143](https://github.com/s2-streamstore/s2-cli/issues/143))
28 |
29 | ## [0.13.2] - 2025-05-02
30 |
31 | ### Miscellaneous Tasks
32 |
33 | - `CHANGELOG` update
34 |
35 | ## [0.13.1] - 2025-05-02
36 |
37 | ### Miscellaneous Tasks
38 |
39 | - `Cargo.lock` update
40 |
41 | ## [0.13.0] - 2025-05-02
42 |
43 | ### Features
44 |
45 | - `tail` command ([#140](https://github.com/s2-streamstore/s2-cli/issues/140))
46 |
47 | ### Miscellaneous Tasks
48 |
49 | - Reorder fields for json format
50 |
51 | ## [0.12.0] - 2025-04-30
52 |
53 | ### Features
54 |
55 | - Support reading from timestamp or tail-offset ([#137](https://github.com/s2-streamstore/s2-cli/issues/137))
56 |
57 | ### Bug Fixes
58 |
59 | - Ping ([#138](https://github.com/s2-streamstore/s2-cli/issues/138))
60 | - `create_stream_on_read` for reconfigure basin ([#136](https://github.com/s2-streamstore/s2-cli/issues/136))
61 |
62 | ## [0.11.0] - 2025-04-15
63 |
64 | ### Features
65 |
66 | - Access token methods ([#133](https://github.com/s2-streamstore/s2-cli/issues/133))
67 |
68 | ### Miscellaneous Tasks
69 |
70 | - Release 0.11.0
71 | - Typed errors ([#135](https://github.com/s2-streamstore/s2-cli/issues/135))
72 |
73 | ## [0.10.0] - 2025-03-14
74 |
75 | ### Bug Fixes
76 |
77 | - `--create-stream-on-append` to accept explicit bool ([#131](https://github.com/s2-streamstore/s2-cli/issues/131))
78 |
79 | ## [0.9.0] - 2025-03-12
80 |
81 | ### Features
82 |
83 | - Auto-paginate for stream and basin list ([#128](https://github.com/s2-streamstore/s2-cli/issues/128))
84 |
85 | ### Bug Fixes
86 |
87 | - Ls to return fully qualified s2 uri ([#126](https://github.com/s2-streamstore/s2-cli/issues/126))
88 |
89 | ### Miscellaneous Tasks
90 |
91 | - Remove unused deps + bump sdk version ([#125](https://github.com/s2-streamstore/s2-cli/issues/125))
92 | - *(release)* Upgrade SDK ([#129](https://github.com/s2-streamstore/s2-cli/issues/129))
93 |
94 | ## [0.8.4] - 2025-02-05
95 |
96 | ### Bug Fixes
97 |
98 | - Improve output messages for command record appends ([#119](https://github.com/s2-streamstore/s2-cli/issues/119))
99 | - Metered bytes log ([#121](https://github.com/s2-streamstore/s2-cli/issues/121))
100 |
101 | ### Miscellaneous Tasks
102 |
103 | - Improve read cli command docs ([#117](https://github.com/s2-streamstore/s2-cli/issues/117))
104 | - Add uri args struct ([#120](https://github.com/s2-streamstore/s2-cli/issues/120))
105 |
106 | ## [0.8.3] - 2025-01-22
107 |
108 | ### Miscellaneous Tasks
109 |
110 | - Reflect the update to make list limit optional instead of a default of 0 ([#114](https://github.com/s2-streamstore/s2-cli/issues/114))
111 | - Minor upgrades
112 |
113 | ## [0.8.2] - 2025-01-21
114 |
115 | ### Miscellaneous Tasks
116 |
117 | - Update SDK to `0.8.0` ([#113](https://github.com/s2-streamstore/s2-cli/issues/113))
118 |
119 | ## [0.8.1] - 2025-01-16
120 |
121 | ### Miscellaneous Tasks
122 |
123 | - Update SDK to `0.7.0` ([#111](https://github.com/s2-streamstore/s2-cli/issues/111))
124 |
125 | ## [0.8.0] - 2025-01-13
126 |
127 | ### Features
128 |
129 | - Update fencing token to accept base64 instead of base16 ([#106](https://github.com/s2-streamstore/s2-cli/issues/106))
130 | - Support different formats for append ([#105](https://github.com/s2-streamstore/s2-cli/issues/105))
131 |
132 | ### Miscellaneous Tasks
133 |
134 | - Update clap CLI name ([#104](https://github.com/s2-streamstore/s2-cli/issues/104))
135 | - Update deps ([#108](https://github.com/s2-streamstore/s2-cli/issues/108))
136 |
137 | ## [0.7.0] - 2024-12-26
138 |
139 | ### Features
140 |
141 | - Only accept URIs in basin+stream args ([#100](https://github.com/s2-streamstore/s2-cli/issues/100))
142 | - `s2 ls` command to list basins or streams ([#102](https://github.com/s2-streamstore/s2-cli/issues/102))
143 |
144 | ### Miscellaneous Tasks
145 |
146 | - Inline path consts for consistency
147 |
148 | ## [0.6.4] - 2024-12-23
149 |
150 | ### Bug Fixes
151 |
152 | - Error/help messages ([#95](https://github.com/s2-streamstore/s2-cli/issues/95))
153 |
154 | ### Documentation
155 |
156 | - Update README S2 doc link ([#92](https://github.com/s2-streamstore/s2-cli/issues/92))
157 |
158 | ## [0.6.3] - 2024-12-19
159 |
160 | ### Documentation
161 |
162 | - Update README API link ([#89](https://github.com/s2-streamstore/s2-cli/issues/89))
163 |
164 | ### Miscellaneous Tasks
165 |
166 | - Upgrade SDK to `0.5.0` ([#90](https://github.com/s2-streamstore/s2-cli/issues/90))
167 |
168 | ## [0.6.2] - 2024-12-18
169 |
170 | ### Bug Fixes
171 |
172 | - Update output for reconfigure basin and create basin results ([#86](https://github.com/s2-streamstore/s2-cli/issues/86))
173 |
174 | ### Miscellaneous Tasks
175 |
176 | - Add `README.md` ([#83](https://github.com/s2-streamstore/s2-cli/issues/83))
177 |
178 | ## [0.6.1] - 2024-12-17
179 |
180 | ### Miscellaneous Tasks
181 |
182 | - Update cargo binary name to `s2` ([#84](https://github.com/s2-streamstore/s2-cli/issues/84))
183 | - *(release)* Upgrade SDK to 0.4.0 ([#85](https://github.com/s2-streamstore/s2-cli/issues/85))
184 | - *(release)* Upgrade SDK to 0.4.1 ([#87](https://github.com/s2-streamstore/s2-cli/issues/87))
185 |
186 | ## [0.6.0] - 2024-12-14
187 |
188 | ### Features
189 |
190 | - Support `s2://` URIs ([#74](https://github.com/s2-streamstore/s2-cli/issues/74))
191 | - Better display for ping stats ([#81](https://github.com/s2-streamstore/s2-cli/issues/81))
192 |
193 | ### Bug Fixes
194 |
195 | - Disable noisy description in help ([#79](https://github.com/s2-streamstore/s2-cli/issues/79))
196 |
197 | ### Miscellaneous Tasks
198 |
199 | - Remove unnecessary dependencies from `Cargo.toml` ([#80](https://github.com/s2-streamstore/s2-cli/issues/80))
200 |
201 | ## [0.5.2] - 2024-12-13
202 |
203 | ### Miscellaneous Tasks
204 |
205 | - Rename binary to s2 when releasing ([#76](https://github.com/s2-streamstore/s2-cli/issues/76))
206 |
207 | ## [0.5.1] - 2024-12-13
208 |
209 | ### Features
210 |
211 | - Homebrew sync ([#71](https://github.com/s2-streamstore/s2-cli/issues/71))
212 |
213 | ## [0.5.0] - 2024-12-11
214 |
215 | ### Bug Fixes
216 |
217 | - Use a different `std::thread::Thread` for `Stdin` IO ([#69](https://github.com/s2-streamstore/s2-cli/issues/69))
218 |
219 | ### Miscellaneous Tasks
220 |
221 | - Release to crates.io ([#68](https://github.com/s2-streamstore/s2-cli/issues/68))
222 |
223 | ## [0.4.0] - 2024-12-11
224 |
225 | ### Features
226 |
227 | - Allow append concurrency control on `fence` and `trim` too ([#60](https://github.com/s2-streamstore/s2-cli/issues/60))
228 | - Ping ([#48](https://github.com/s2-streamstore/s2-cli/issues/48)) ([#63](https://github.com/s2-streamstore/s2-cli/issues/63))
229 |
230 | ### Bug Fixes
231 |
232 | - Usage example
233 |
234 | ### Documentation
235 |
236 | - Clarify fencing token is in hex
237 |
238 | ### Miscellaneous Tasks
239 |
240 | - Mandatory read `start_seq_num` ([#58](https://github.com/s2-streamstore/s2-cli/issues/58))
241 | - Make all short args explicit ([#29](https://github.com/s2-streamstore/s2-cli/issues/29)) ([#59](https://github.com/s2-streamstore/s2-cli/issues/59))
242 | - Upgrade deps ([#64](https://github.com/s2-streamstore/s2-cli/issues/64))
243 | - Update cargo.toml ([#65](https://github.com/s2-streamstore/s2-cli/issues/65))
244 | - Rename to streamstore-cli ([#66](https://github.com/s2-streamstore/s2-cli/issues/66))
245 | - Description - Cargo.toml
246 | - Update README.md
247 |
248 | ## [0.3.0] - 2024-12-05
249 |
250 | ### Features
251 |
252 | - Return reconfigured stream ([#53](https://github.com/s2-streamstore/s2-cli/issues/53))
253 | - Stderr `CommandRecord` when reading ([#45](https://github.com/s2-streamstore/s2-cli/issues/45)) ([#55](https://github.com/s2-streamstore/s2-cli/issues/55))
254 | - Sign and notarize apple binaries ([#54](https://github.com/s2-streamstore/s2-cli/issues/54))
255 | - Flatten commands ([#52](https://github.com/s2-streamstore/s2-cli/issues/52)) ([#56](https://github.com/s2-streamstore/s2-cli/issues/56))
256 |
257 | ## [0.2.0] - 2024-12-05
258 |
259 | ### Features
260 |
261 | - Load endpoints `from_env()` ([#16](https://github.com/s2-streamstore/s2-cli/issues/16))
262 | - Display throughput for read session ([#25](https://github.com/s2-streamstore/s2-cli/issues/25))
263 | - Exercise limits for read session ([#27](https://github.com/s2-streamstore/s2-cli/issues/27))
264 | - Better error reporting ([#30](https://github.com/s2-streamstore/s2-cli/issues/30))
265 | - Appends with `fencing_token` and `match_seq_num` ([#38](https://github.com/s2-streamstore/s2-cli/issues/38))
266 | - Stream `fence` and `trim` commands ([#46](https://github.com/s2-streamstore/s2-cli/issues/46))
267 |
268 | ### Bug Fixes
269 |
270 | - Config env var precedence
271 | - Flush BufWriter ([#22](https://github.com/s2-streamstore/s2-cli/issues/22))
272 | - Handle common signals for streams ([#32](https://github.com/s2-streamstore/s2-cli/issues/32))
273 | - Optional `start_seq_num` in `StreamService/ReadSession` ([#42](https://github.com/s2-streamstore/s2-cli/issues/42))
274 | - Catch `ctrl-c` signal on windows ([#50](https://github.com/s2-streamstore/s2-cli/issues/50))
275 |
276 | ### Documentation
277 |
278 | - Consistency
279 | - Nits ([#19](https://github.com/s2-streamstore/s2-cli/issues/19))
280 |
281 | ### Miscellaneous Tasks
282 |
283 | - Rm `S2ConfigError::PathError` ([#17](https://github.com/s2-streamstore/s2-cli/issues/17))
284 | - Only attempt to load config from file if it exists ([#18](https://github.com/s2-streamstore/s2-cli/issues/18))
285 | - Rename binary to s2 ([#21](https://github.com/s2-streamstore/s2-cli/issues/21))
286 | - Set user-agent to s2-cli ([#23](https://github.com/s2-streamstore/s2-cli/issues/23)) ([#24](https://github.com/s2-streamstore/s2-cli/issues/24))
287 | - Create LICENSE
288 | - Update Cargo.toml with license
289 | - Update SDK ([#26](https://github.com/s2-streamstore/s2-cli/issues/26))
290 | - Sdk update ([#31](https://github.com/s2-streamstore/s2-cli/issues/31))
291 | - Update CLI to latest sdk ([#37](https://github.com/s2-streamstore/s2-cli/issues/37))
292 | - Upgrade SDK ([#41](https://github.com/s2-streamstore/s2-cli/issues/41))
293 | - Upgrade sdk version ([#43](https://github.com/s2-streamstore/s2-cli/issues/43))
294 | - Update SDK ([#47](https://github.com/s2-streamstore/s2-cli/issues/47))
295 |
296 | ## [0.1.0] - 2024-11-05
297 |
298 | ### Features
299 |
300 | - Implement `AccountService` ([#1](https://github.com/s2-streamstore/s2-cli/issues/1))
301 | - Implement `BasinService` ([#2](https://github.com/s2-streamstore/s2-cli/issues/2))
302 | - Implement `StreamService` ([#3](https://github.com/s2-streamstore/s2-cli/issues/3))
303 |
304 | ### Bug Fixes
305 |
306 | - Try to fix release CI ([#9](https://github.com/s2-streamstore/s2-cli/issues/9))
307 | - Release CI ([#10](https://github.com/s2-streamstore/s2-cli/issues/10))
308 | - Release CI ([#11](https://github.com/s2-streamstore/s2-cli/issues/11))
309 | - Automatically add release notes ([#12](https://github.com/s2-streamstore/s2-cli/issues/12))
310 | - Changelog ([#13](https://github.com/s2-streamstore/s2-cli/issues/13))
311 | - Release CI ([#14](https://github.com/s2-streamstore/s2-cli/issues/14))
312 |
313 | ### Miscellaneous Tasks
314 |
315 | - Reflect renamed repo
316 | - Upgrade deps
317 | - Clippy, whitespace
318 | - Add CI action ([#6](https://github.com/s2-streamstore/s2-cli/issues/6))
319 | - CODEOWNERS ([#7](https://github.com/s2-streamstore/s2-cli/issues/7))
320 | - Add release CI action ([#8](https://github.com/s2-streamstore/s2-cli/issues/8))
321 | - *(release)* Release 0.1.0 ([#15](https://github.com/s2-streamstore/s2-cli/issues/15))
322 |
323 |
324 |
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @s2-streamstore/dev
2 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "streamstore-cli"
3 | description = "CLI for S2"
4 | version = "0.16.0"
5 | edition = "2024"
6 | license = "Apache-2.0"
7 | keywords = ["streamstore", "s2", "log", "stream", "s3"]
8 | repository = "https://github.com/s2-streamstore/streamstore-cli"
9 | homepage = "https://github.com/s2-streamstore/streamstore-cli"
10 |
11 | [[bin]]
12 | name = "s2"
13 | path = "src/main.rs"
14 |
15 | [dependencies]
16 | async-stream = "0.3.6"
17 | base64ct = { version = "1.7.3", features = ["alloc"] }
18 | bytes = "1.10.1"
19 | clap = { version = "4.5.38", features = ["derive"] }
20 | color-print = "0.3.7"
21 | colored = "3.0.0"
22 | config = "0.15.11"
23 | dirs = "6.0.0"
24 | futures = "0.3.31"
25 | humantime = "2.2.0"
26 | indicatif = "0.17.11"
27 | json_to_table = "0.11.0"
28 | miette = { version = "7.6.0", features = ["fancy"] }
29 | rand = "0.9.1"
30 | serde = { version = "1.0.219", features = ["derive"] }
31 | serde_json = "1.0.140"
32 | streamstore = "0.16.2"
33 | thiserror = "2.0.12"
34 | tokio = { version = "1.45.1", features = ["full"] }
35 | tokio-stream = { version = "0.1.17", features = ["io-util"] }
36 | toml = "0.8.22"
37 | tracing = "0.1.41"
38 | tracing-subscriber = { version = "0.3.19", features = ["env-filter"] }
39 |
40 | [dev-dependencies]
41 | rstest = "0.25.0"
42 |
43 | [profile.release]
44 | lto = true
45 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | <!-- centered logo (assets/s2-black.png, assets/s2-white.png) and badges -->
2 |
3 | # S2 CLI
4 |
27 | Command Line Tool to interact with the
28 | [S2 API](https://s2.dev/docs/interface/grpc).
29 |
30 | ## Getting started
31 |
32 | 1. [Install](#installation) the S2 CLI using your preferred method.
33 |
34 | 1. Generate an authentication token by logging into the web console at
35 |    [s2.dev](https://s2.dev/dashboard) and set the token in the CLI config:
36 |    ```bash
37 |    s2 config set --access-token <token>
38 |    ```
39 |
40 | 1. You're ready to run S2 commands!
41 |    ```bash
42 |    s2 list-basins
43 |    ```
44 |
45 | Head over to [S2 Docs](https://s2.dev/docs/quickstart) for a quick dive into
46 | using the CLI.
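 |
 | A few more commands to explore once the token is set (the basin and stream
 | names below are placeholders; check `s2 --help` for the exact flags):
 |
 | ```bash
 | # List basins (and streams within them) as fully qualified s2:// URIs
 | s2 ls
 | # Follow new records on a stream; reads default to starting at the tail
 | s2 tail s2://my-basin/my-stream
 | # Measure append/read round-trip latency
 | s2 ping s2://my-basin/my-stream
 | ```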
47 |
48 | ## Commands and reference
49 |
50 | You can add the `--help` flag to any command for CLI reference. Run `s2 --help`
51 | to view all the supported commands and options.
52 |
53 | > [!TIP]
54 | > The `--help` flag displays a verbose help message, whereas `-h` displays
55 | > a brief one.
56 |
57 | ## Installation
58 |
59 | ### Using Homebrew
60 |
61 | This method works on macOS and Linux distributions with
62 | [Homebrew](https://brew.sh) installed.
63 |
64 | ```bash
65 | brew install s2-streamstore/s2/s2
66 | ```
67 |
68 | ### Using Cargo
69 |
70 | This method works on any system with [Rust](https://www.rust-lang.org/)
71 | and [Cargo](https://doc.rust-lang.org/cargo/) installed.
72 |
73 | ```bash
74 | cargo install streamstore-cli
75 | ```
76 |
77 | ### From Release Binaries
78 |
79 | Check out the [S2 CLI Releases](https://github.com/s2-streamstore/s2-cli/releases)
80 | for prebuilt binaries for many different architectures and operating systems.
81 |
82 | Linux and macOS users can download the release binary using:
83 |
84 | ```bash
85 | curl -fsSL s2.dev/install.sh | bash
86 | ```
87 |
88 | To install a specific version, you can set the `VERSION` environment variable.
89 |
90 | ```bash
91 | export VERSION=0.5.2
92 | curl -fsSL s2.dev/install.sh | bash
93 | ```
94 |
95 | ## Feedback
96 |
97 | We use [GitHub Issues](https://github.com/s2-streamstore/s2-cli/issues) to
98 | track feature requests and bug reports for the CLI. If you wish to provide
99 | feedback, report a bug, or request a feature, feel free to open a GitHub issue.
100 |
101 | ### Contributing
102 |
103 | Developers are welcome to submit Pull Requests on the repository. If there is
104 | no tracking issue for the bug or feature request corresponding to the PR, we
105 | encourage you to open one for discussion before submitting the PR.
106 |
107 | ## Reach out to us
108 |
109 | Join our [Discord](https://discord.gg/vTCs7kMkAf) server. We would love to hear
110 | from you.
111 |
112 | You can also email us at [hi@s2.dev](mailto:hi@s2.dev).
113 |
114 | ## License
115 |
116 | This project is licensed under the [Apache-2.0 License](./LICENSE).
117 |
--------------------------------------------------------------------------------
/assets/s2-black.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/s2-streamstore/s2-cli/d34de5a440fdf90907c3366aef5b6fc814f6bf55/assets/s2-black.png
--------------------------------------------------------------------------------
/assets/s2-white.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/s2-streamstore/s2-cli/d34de5a440fdf90907c3366aef5b6fc814f6bf55/assets/s2-white.png
--------------------------------------------------------------------------------
/cliff.toml:
--------------------------------------------------------------------------------
1 | # git-cliff ~ default configuration file
2 | # https://git-cliff.org/docs/configuration
3 | #
4 | # Lines starting with "#" are comments.
5 | # Configuration options are organized into tables and keys.
6 | # See documentation for more information on available options.
7 |
8 | [changelog]
9 | # template for the changelog header
10 | header = """
11 | # Changelog\n
12 | All notable changes to this project will be documented in this file.\n
13 | """
14 | # template for the changelog body
15 | # https://keats.github.io/tera/docs/#introduction
16 | body = """
17 | {% if version %}\
18 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
19 | {% else %}\
20 | ## [unreleased]
21 | {% endif %}\
22 | {% for group, commits in commits | group_by(attribute="group") %}
23 | ### {{ group | striptags | trim | upper_first }}
24 | {% for commit in commits %}
25 | - {% if commit.scope %}*({{ commit.scope }})* {% endif %}\
26 | {% if commit.breaking %}[**breaking**] {% endif %}\
27 | {{ commit.message | upper_first }}\
28 | {% endfor %}
29 | {% endfor %}\n
30 | """
31 | # template for the changelog footer
32 | footer = """
33 |
34 | """
35 | # remove the leading and trailing whitespace from the template
36 | trim = true
37 | # postprocessors
38 | postprocessors = [
39 |   { pattern = '<REPO>', replace = "https://github.com/s2-streamstore/s2-cli" }, # replace repository URL
40 | ]
41 | # render body even when there are no releases to process
42 | # render_always = true
43 | # output file path
44 | # output = "test.md"
45 |
46 | [git]
47 | # parse the commits based on https://www.conventionalcommits.org
48 | conventional_commits = true
49 | # filter out the commits that are not conventional
50 | filter_unconventional = true
51 | # process each line of a commit as an individual commit
52 | split_commits = false
53 | # regex for preprocessing the commit messages
54 | commit_preprocessors = [
55 |   # Replace issue numbers
56 |   { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](<REPO>/issues/${2}))"},
57 |   # Check spelling of the commit with https://github.com/crate-ci/typos
58 |   # If the spelling is incorrect, it will be automatically fixed.
59 |   #{ pattern = '.*', replace_command = 'typos --write-changes -' },
60 | ]
61 | # regex for parsing and grouping commits
62 | commit_parsers = [
63 |   { message = "^feat", group = "<!-- 0 --> Features" },
64 |   { message = "^fix", group = "<!-- 1 --> Bug Fixes" },
65 |   { message = "^doc", group = "<!-- 2 --> Documentation" },
66 |   { message = "^perf", group = "<!-- 3 --> Performance" },
67 |   { message = "^refactor", group = "<!-- 4 --> Refactor" },
68 |   { message = "^style", group = "<!-- 5 --> Styling" },
69 |   { message = "^test", group = "<!-- 6 --> Testing" },
70 |   { message = "^chore\\(release\\): prepare for", skip = true },
71 |   { message = "^chore\\(deps.*\\)", skip = true },
72 |   { message = "^chore\\(pr\\)", skip = true },
73 |   { message = "^chore\\(pull\\)", skip = true },
74 |   { message = "^chore|^ci", group = "<!-- 7 --> Miscellaneous Tasks" },
75 |   { body = ".*security", group = "<!-- 8 --> Security" },
76 |   { message = "^revert", group = "<!-- 9 --> Revert" },
77 | ]
78 | # filter out the commits that are not matched by commit parsers
79 | filter_commits = false
80 | # sort the tags topologically
81 | topo_order = false
82 | # sort the commits inside sections by oldest/newest order
83 | sort_commits = "oldest"
84 |
--------------------------------------------------------------------------------
/rust-toolchain.toml:
--------------------------------------------------------------------------------
1 | [toolchain]
2 | channel = "stable"
3 |
--------------------------------------------------------------------------------
/src/account.rs:
--------------------------------------------------------------------------------
1 | use crate::error::{ServiceError, ServiceErrorContext, ServiceStatus};
2 | use async_stream::stream;
3 | use futures::Stream;
4 | use s2::{
5 |     client::Client,
6 |     types::{
7 |         AccessTokenId, AccessTokenInfo, BasinConfig, BasinInfo, BasinName, CreateBasinRequest,
8 |         DeleteBasinRequest, ListAccessTokensRequest, ListAccessTokensResponse, ListBasinsRequest,
9 |         ListBasinsResponse, Operation, PermittedOperationGroups, ReconfigureBasinRequest,
10 |         ResourceSet, RetentionPolicy, StorageClass, StreamConfig,
11 |     },
12 | };
13 |
14 | pub struct AccountService {
15 |     client: Client,
16 | }
17 |
18 | impl AccountService {
19 |     pub fn new(client: Client) -> Self {
20 |         Self { client }
21 |     }
22 |
23 |     pub fn list_basins(
24 |         &self,
25 |         prefix: String,
26 |         mut start_after: String,
27 |         mut limit: Option<usize>,
28 |         no_auto_paginate: bool,
29 |     ) -> impl Stream<Item = Result<ListBasinsResponse, ServiceError>> + '_ {
30 |         stream! {
31 |             loop {
32 |                 let resp = self
33 |                     .list_basins_internal(prefix.to_owned(), start_after.to_string(), limit.map(|l| l.min(1000)))
34 |                     .await;
35 |
36 |                 match resp.as_ref() {
37 |                     Ok(ListBasinsResponse { basins, has_more }) if *has_more && !no_auto_paginate => {
38 |                         start_after = basins
39 |                             .last()
40 |                             .map(|s| s.name.clone())
41 |                             .ok_or(ServiceError::new(ServiceErrorContext::ListBasins, ServiceStatus::default()))?;
42 |                         if let Some(l) = limit {
43 |                             if l > basins.len() {
44 |                                 limit = Some(l - basins.len());
45 |                             } else {
46 |                                 // Limit has been exhausted.
47 |                                 return yield resp;
48 |                             }
49 |                         }
50 |                         yield resp;
51 |                     },
52 |                     _ => {
53 |                         return yield resp;
54 |                     }
55 |
56 |                 }
57 |             }
58 |         }
59 |     }
60 |     async fn list_basins_internal(
61 |         &self,
62 |         prefix: String,
63 |         start_after: String,
64 |         limit: Option<usize>,
65 |     ) -> Result<ListBasinsResponse, ServiceError> {
66 |         let list_basins_req = ListBasinsRequest::new()
67 |             .with_prefix(prefix)
68 |             .with_start_after(start_after)
69 |             .with_limit(limit);
70 |
71 |         self.client
72 |             .list_basins(list_basins_req)
73 |             .await
74 |             .map_err(|e| ServiceError::new(ServiceErrorContext::ListBasins, e))
75 |     }
76 |
77 |     pub async fn create_basin(
78 |         &self,
79 |         basin: BasinName,
80 |         storage_class: Option<StorageClass>,
81 |         retention_policy: Option<RetentionPolicy>,
82 |         create_stream_on_append: bool,
83 |         create_stream_on_read: bool,
84 |     ) -> Result<BasinInfo, ServiceError> {
85 |         let mut stream_config = StreamConfig::new();
86 |
87 |         if let Some(storage_class) = storage_class {
88 |             stream_config = stream_config.with_storage_class(storage_class.into());
89 |         }
90 |
91 |         if let Some(retention_policy) = retention_policy {
92 |             stream_config = stream_config.with_retention_policy(retention_policy.into());
93 |         }
94 |
95 |         let basin_config = BasinConfig {
96 |             default_stream_config: Some(stream_config),
97 |             create_stream_on_append,
98 |             create_stream_on_read,
99 |         };
100 |
101 |         let create_basin_req = CreateBasinRequest::new(basin).with_config(basin_config);
102 |
103 |         self.client
104 |             .create_basin(create_basin_req)
105 |             .await
106 |             .map_err(|e| ServiceError::new(ServiceErrorContext::CreateBasin, e))
107 |     }
108 |
109 |     pub async fn delete_basin(&self, basin: BasinName) -> Result<(), ServiceError> {
110 |         let delete_basin_req = DeleteBasinRequest::new(basin);
111 |         self.client
112 |             .delete_basin(delete_basin_req)
113 |             .await
114 |             .map_err(|e| ServiceError::new(ServiceErrorContext::DeleteBasin, e))
115 |     }
116 |
117 |     pub async fn get_basin_config(&self, basin: BasinName) -> Result<BasinConfig, ServiceError> {
118 |         self.client
119 |             .get_basin_config(basin)
120 |             .await
121 |             .map_err(|e| ServiceError::new(ServiceErrorContext::GetBasinConfig, e))
122 |     }
123 |
124 |     pub async fn reconfigure_basin(
125 |         &self,
126 |         basin: BasinName,
127 |         basin_config: BasinConfig,
128 |         mask: Vec<String>,
129 |     ) -> Result<BasinConfig, ServiceError> {
130 |         let reconfigure_basin_req = ReconfigureBasinRequest::new(basin)
131 |             .with_config(basin_config)
132 |             .with_mask(mask);
133 |         self.client
134 |             .reconfigure_basin(reconfigure_basin_req)
135 |             .await
136 |             .map_err(|e| ServiceError::new(ServiceErrorContext::ReconfigureBasin, e))
137 |     }
138 |
139 |     #[allow(clippy::too_many_arguments)]
140 |     pub async fn issue_access_token(
141 |         &self,
142 |         id: AccessTokenId,
143 |         expires_at: Option<u32>,
144 |         auto_prefix_streams: bool,
145 |         basins: Option<ResourceSet>,
146 |         streams: Option<ResourceSet>,
147 |         access_tokens: Option<ResourceSet>,
148 |         op_groups: Option<PermittedOperationGroups>,
149 |         ops: Vec<Operation>,
150 |     ) -> Result<String, ServiceError> {
151 |         let mut access_token_scope = s2::types::AccessTokenScope::new().with_ops(ops);
152 |         if let Some(basins) = basins {
153 |             access_token_scope = access_token_scope.with_basins(basins);
154 |         }
155 |         if let Some(streams) = streams {
156 |             access_token_scope = access_token_scope.with_streams(streams);
157 |         }
158 |         if let Some(access_tokens) = access_tokens {
159 |             access_token_scope = access_token_scope.with_tokens(access_tokens);
160 |         }
161 |         if let Some(op_groups) = op_groups {
162 |             access_token_scope = access_token_scope.with_op_groups(op_groups);
163 |         }
164 |         let mut access_token_info = s2::types::AccessTokenInfo::new(id)
165 |             .with_auto_prefix_streams(auto_prefix_streams)
166 |             .with_scope(access_token_scope);
167 |
168 |         if let Some(expires_at) = expires_at {
169 |             access_token_info = access_token_info.with_expires_at(expires_at);
170 |         }
171 |
172 |         self.client
173 |             .issue_access_token(access_token_info)
174 |             .await
175 |             .map_err(|e| ServiceError::new(ServiceErrorContext::IssueAccessToken, e))
176 |     }
177 |
178 |     pub async fn revoke_access_token(
179 |         &self,
180 |         id: AccessTokenId,
181 |     ) -> Result<AccessTokenInfo, ServiceError> {
182 |         self.client
183 |             .revoke_access_token(id)
184 |             .await
185 |             .map_err(|e| ServiceError::new(ServiceErrorContext::RevokeAccessToken, e))
186 |     }
187 |
188 |     pub fn list_access_tokens(
189 |         &self,
190 |         prefix: String,
191 |         mut start_after: String,
192 |         mut limit: Option<usize>,
193 |         no_auto_paginate: bool,
194 |     ) -> impl Stream<Item = Result<ListAccessTokensResponse, ServiceError>> + '_ {
195 |         stream! {
196 |             loop {
197 |                 let resp = self
198 |                     .list_access_tokens_internal(prefix.to_owned(), start_after.to_string(), limit.map(|l| l.min(1000)))
199 |                     .await;
200 |
201 |                 match resp.as_ref() {
202 |                     Ok(ListAccessTokensResponse { access_tokens, has_more }) if *has_more && !no_auto_paginate => {
203 |                         start_after = access_tokens
204 |                             .last()
205 |                             .map(|s| s.id.clone().into())
206 |                             .ok_or(ServiceError::new(ServiceErrorContext::ListAccessTokens, ServiceStatus::default()))?;
207 |                         if let Some(l) = limit {
208 |                             if l > access_tokens.len() {
209 |                                 limit = Some(l - access_tokens.len());
210 |                             } else {
211 |                                 return yield resp;
212 |                             }
213 |                         }
214 |                         yield resp;
215 |                     },
216 |                     _ => {
217 |                         return yield resp;
218 |                     }
219 |                 }
220 |             }
221 |         }
222 |     }
223 |
224 |     async fn list_access_tokens_internal(
225 |         &self,
226 |         prefix: String,
227 |         start_after: String,
228 |         limit: Option<usize>,
229 |     ) -> Result<ListAccessTokensResponse, ServiceError> {
230 |         let list_access_tokens_req = ListAccessTokensRequest::new()
231 |             .with_prefix(prefix)
232 |             .with_start_after(start_after)
233 |             .with_limit(limit);
234 |
235 |         self.client
236 |             .list_access_tokens(list_access_tokens_req)
237 |             .await
238 |             .map_err(|e| ServiceError::new(ServiceErrorContext::ListAccessTokens, e))
239 |     }
240 | }
241 |
--------------------------------------------------------------------------------
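
A minimal sketch (not part of the repository) of how the auto-paginating `list_basins` stream above can be consumed, assuming `AccountService` and `ServiceError` are in scope and a configured `Client` is already at hand:

```rust
use futures::StreamExt;

async fn print_basins(svc: &AccountService) -> Result<(), ServiceError> {
    // Empty prefix and cursor, no limit, auto-pagination enabled.
    let basins = svc.list_basins(String::new(), String::new(), None, false);
    // The stream borrows `svc` and is not `Unpin`, so pin it before polling.
    let mut basins = std::pin::pin!(basins);
    while let Some(page) = basins.next().await {
        for basin in page?.basins {
            println!("s2://{}", basin.name);
        }
    }
    Ok(())
}
```
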
/src/basin.rs:
--------------------------------------------------------------------------------
1 | use async_stream::stream;
2 | use futures::Stream;
3 | use s2::{
4 |     client::BasinClient,
5 |     types::{
6 |         CreateStreamRequest, DeleteStreamRequest, ListStreamsRequest, ListStreamsResponse,
7 |         ReconfigureStreamRequest, StreamConfig, StreamInfo,
8 |     },
9 | };
10 |
11 | use crate::error::{ServiceError, ServiceErrorContext, ServiceStatus};
12 |
13 | pub struct BasinService {
14 |     client: BasinClient,
15 | }
16 |
17 | impl BasinService {
18 |     pub fn new(client: BasinClient) -> Self {
19 |         Self { client }
20 |     }
21 |
22 |     pub fn list_streams(
23 |         &self,
24 |         prefix: String,
25 |         mut start_after: String,
26 |         mut limit: Option<usize>,
27 |         no_auto_paginate: bool,
28 |     ) -> impl Stream<Item = Result<ListStreamsResponse, ServiceError>> + '_ {
29 |         stream! {
30 |             loop {
31 |                 let resp = self
32 |                     .list_streams_internal(prefix.to_owned(), start_after.to_string(), limit.map(|l| l.min(1000)))
33 |                     .await;
34 |
35 |                 match resp.as_ref() {
36 |                     Ok(ListStreamsResponse { streams, has_more }) if *has_more && !no_auto_paginate => {
37 |                         start_after = streams
38 |                             .last()
39 |                             .map(|s| s.name.clone())
40 |                             .ok_or(ServiceError::new(ServiceErrorContext::ListStreams, ServiceStatus::default()))?;
41 |                         if let Some(l) = limit {
42 |                             if l > streams.len() {
43 |                                 limit = Some(l - streams.len());
44 |                             } else {
45 |                                 // Limit has been exhausted.
46 |                                 return yield resp;
47 |                             }
48 |                         }
49 |                         yield resp;
50 |                     },
51 |                     _ => {
52 |                         return yield resp;
53 |                     }
54 |
55 |                 }
56 |             }
57 |         }
58 |     }
59 |
60 |     async fn list_streams_internal(
61 |         &self,
62 |         prefix: String,
63 |         start_after: String,
64 |         limit: Option<usize>,
65 |     ) -> Result<ListStreamsResponse, ServiceError> {
66 |         self.client
67 |             .list_streams(
68 |                 ListStreamsRequest::new()
69 |                     .with_prefix(prefix)
70 |                     .with_start_after(start_after)
71 |                     .with_limit(limit),
72 |             )
73 |             .await
74 |             .map_err(|e| ServiceError::new(ServiceErrorContext::ListStreams, e))
75 |     }
76 |
77 |     pub async fn create_stream(
78 |         &self,
79 |         stream: String,
80 |         config: StreamConfig,
81 |     ) -> Result<StreamInfo, ServiceError> {
82 |         self.client
83 |             .create_stream(CreateStreamRequest::new(stream).with_config(config))
84 |             .await
85 |             .map_err(|e| ServiceError::new(ServiceErrorContext::CreateStream, e))
86 |     }
87 |
88 |     pub async fn delete_stream(&self, stream: String) -> Result<(), ServiceError> {
89 |         self.client
90 |             .delete_stream(DeleteStreamRequest::new(stream))
91 |             .await
92 |             .map_err(|e| ServiceError::new(ServiceErrorContext::DeleteStream, e))
93 |     }
94 |
95 |     pub async fn get_stream_config(&self, stream: String) -> Result<StreamConfig, ServiceError> {
96 |         self.client
97 |             .get_stream_config(stream)
98 |             .await
99 |             .map_err(|e| ServiceError::new(ServiceErrorContext::GetStreamConfig, e))
100 |     }
101 |
102 |     pub async fn reconfigure_stream(
103 |         &self,
104 |         stream: String,
105 |         config: StreamConfig,
106 |         mask: Vec<String>,
107 |     ) -> Result<StreamConfig, ServiceError> {
108 |         let reconfigure_stream_req = ReconfigureStreamRequest::new(stream)
109 |             .with_config(config)
110 |             .with_mask(mask);
111 |
112 |         self.client
113 |             .reconfigure_stream(reconfigure_stream_req)
114 |             .await
115 |             .map_err(|e| ServiceError::new(ServiceErrorContext::ReconfigureStream, e))
116 |     }
117 | }
118 |
--------------------------------------------------------------------------------
/src/config.rs:
--------------------------------------------------------------------------------
1 | use std::path::{Path, PathBuf};
2 |
3 | use config::{Config, FileFormat};
4 | use miette::Diagnostic;
5 | use serde::{Deserialize, Serialize};
6 | use thiserror::Error;
7 |
8 | use crate::error::S2CliError;
9 |
10 | use serde::de;
11 |
12 | #[derive(Debug, Serialize)]
13 | pub struct S2Config {
14 |     pub access_token: String,
15 | }
16 |
17 | /// Note: Custom deserialization to support both old and new token formats.
18 | impl<'de> Deserialize<'de> for S2Config {
19 |     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
20 |     where
21 |         D: de::Deserializer<'de>,
22 |     {
23 |         #[derive(Deserialize)]
24 |         #[serde(untagged)]
25 |         enum TokenField {
26 |             New { access_token: String },
27 |             Old { auth_token: String },
28 |         }
29 |
30 |         let token = TokenField::deserialize(deserializer)?;
31 |
32 |         Ok(S2Config {
33 |             access_token: match token {
34 |                 TokenField::New { access_token } => access_token,
35 |                 TokenField::Old { auth_token } => auth_token,
36 |             },
37 |         })
38 |     }
39 | }
40 |
41 | #[cfg(target_os = "windows")]
42 | pub fn config_path() -> Result<PathBuf, S2ConfigError> {
43 |     let mut path = dirs::config_dir().ok_or(S2ConfigError::DirNotFound)?;
44 |     path.push("s2");
45 |     path.push("config.toml");
46 |     Ok(path)
47 | }
48 |
49 | #[cfg(not(target_os = "windows"))]
50 | pub fn config_path() -> Result<PathBuf, S2ConfigError> {
51 |     let mut path = dirs::home_dir().ok_or(S2ConfigError::DirNotFound)?;
52 |     path.push(".config");
53 |     path.push("s2");
54 |     path.push("config.toml");
55 |     Ok(path)
56 | }
57 |
58 | pub fn load_config(path: &Path) -> Result<S2Config, S2ConfigError> {
59 |     let mut builder = Config::builder();
60 |     if path.exists() {
61 |         builder = builder.add_source(config::File::new(
62 |             path.to_str().expect("config path is valid utf8"),
63 |             FileFormat::Toml,
64 |         ));
65 |     }
66 |     builder = builder.add_source(config::Environment::with_prefix("S2"));
67 |     Ok(builder.build()?.try_deserialize::<S2Config>()?)
68 | }
69 |
70 | pub fn create_config(config_path: &PathBuf, access_token: String) -> Result<(), S2ConfigError> {
71 |     let cfg = S2Config { access_token };
72 |
73 |     if let Some(parent) = config_path.parent() {
74 |         std::fs::create_dir_all(parent).map_err(S2ConfigError::Write)?;
75 |     }
76 |
77 |     let toml = toml::to_string(&cfg).unwrap();
78 |     std::fs::write(config_path, toml).map_err(S2ConfigError::Write)?;
79 |
80 |     Ok(())
81 | }
82 |
83 | #[derive(Error, Debug, Diagnostic)]
84 | pub enum S2ConfigError {
85 |     #[error("Failed to find a home for config directory")]
86 |     DirNotFound,
87 |
88 |     #[error("Failed to load config file")]
89 |     #[diagnostic(help(
90 |         "Did you run `s2 config set`? or use `S2_ACCESS_TOKEN` environment variable."
91 |     ))]
92 |     Load(#[from] config::ConfigError),
93 |
94 |     #[error("Failed to write config file")]
95 |     Write(#[source] std::io::Error),
96 | }
97 |
--------------------------------------------------------------------------------
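
The custom `Deserialize` impl in src/config.rs is what keeps older config files working: the untagged `TokenField` enum accepts either the current `access_token` key or the legacy `auth_token` key, and both land in `S2Config::access_token`. An illustrative test of that behavior (a sketch, not part of the file):

```rust
#[test]
fn old_and_new_token_keys_deserialize() {
    // Current key.
    let new: S2Config = toml::from_str(r#"access_token = "tok""#).unwrap();
    assert_eq!(new.access_token, "tok");

    // Legacy key still maps onto `access_token`.
    let old: S2Config = toml::from_str(r#"auth_token = "tok""#).unwrap();
    assert_eq!(old.access_token, "tok");
}
```
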
/src/error.rs:
--------------------------------------------------------------------------------
1 | use miette::Diagnostic;
2 | use s2::{client::ClientError, types::ConvertError};
3 | use thiserror::Error;
4 |
5 | use crate::config::S2ConfigError;
6 |
7 | const HELP: &str = color_print::cstr!(
8 | "\nNotice something wrong? \n\n\
9 | > Open an issue: \n\
10 | https://github.com/s2-streamstore/s2-cli/issues \n\n\
11 | > Reach out to us: \n\
12 | hi@s2.dev "
13 | );
14 |
15 | const BUG_HELP: &str = color_print::cstr!(
16 | "\nLooks like you may have encountered a bug! \n\n\
17 | > Report this issue here: \n\
18 | https://github.com/s2-cli/issues
19 | "
20 | );
21 |
22 | #[derive(Error, Debug, Diagnostic)]
23 | pub enum S2CliError {
24 | #[error(transparent)]
25 | #[diagnostic(transparent)]
26 | Config(#[from] S2ConfigError),
27 |
28 | #[error("Invalid CLI arguments: {0}")]
29 | #[diagnostic(transparent)]
30 | InvalidArgs(miette::Report),
31 |
32 | #[error("Unable to load S2 endpoints from environment")]
33 | #[diagnostic(help(
34 | "Are you overriding `S2_CLOUD`, `S2_ACCOUNT_ENDPOINT` or `S2_BASIN_ENDPOINT`?
35 | Make sure the values are in the expected format."
36 | ))]
37 | EndpointsFromEnv(String),
38 |
39 | #[error(transparent)]
40 | #[diagnostic(help("{}", BUG_HELP))]
41 | InvalidConfig(#[from] serde_json::Error),
42 |
43 | #[error("Failed to initialize a `Record Reader`! {0}")]
44 | RecordReaderInit(String),
45 |
46 | #[error("Stream mutated concurrently during ping")]
47 | PingStreamMutated,
48 |
49 | #[error("Failed to write records: {0}")]
50 | RecordWrite(String),
51 |
52 | #[error(transparent)]
53 | #[diagnostic(help("{}", HELP))]
54 | Service(#[from] ServiceError),
55 | }
56 |
57 | #[derive(Debug, Clone, Copy)]
58 | pub enum ServiceErrorContext {
59 | ListBasins,
60 | CreateBasin,
61 | DeleteBasin,
62 | GetBasinConfig,
63 | ReconfigureBasin,
64 | IssueAccessToken,
65 | RevokeAccessToken,
66 | ListAccessTokens,
67 | ListStreams,
68 | CreateStream,
69 | DeleteStream,
70 | GetStreamConfig,
71 | CheckTail,
72 | Trim,
73 | Fence,
74 | AppendSession,
75 | ReadSession,
76 | ReconfigureStream,
77 | }
78 |
79 | impl std::fmt::Display for ServiceErrorContext {
80 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
81 | match self {
82 | Self::ListBasins => write!(f, "Failed to list basins"),
83 | Self::CreateBasin => write!(f, "Failed to create basin"),
84 | Self::DeleteBasin => write!(f, "Failed to delete basin"),
85 | Self::GetBasinConfig => write!(f, "Failed to get basin config"),
86 | Self::ReconfigureBasin => write!(f, "Failed to reconfigure basin"),
87 | Self::IssueAccessToken => write!(f, "Failed to issue access token"),
88 | Self::RevokeAccessToken => write!(f, "Failed to revoke access token"),
89 | Self::ListAccessTokens => write!(f, "Failed to list access tokens"),
90 | Self::ListStreams => write!(f, "Failed to list streams"),
91 | Self::CreateStream => write!(f, "Failed to create stream"),
92 | Self::DeleteStream => write!(f, "Failed to delete stream"),
93 | Self::GetStreamConfig => write!(f, "Failed to get stream config"),
94 | Self::CheckTail => write!(f, "Failed to check tail"),
95 | Self::Trim => write!(f, "Failed to trim"),
96 | Self::Fence => write!(f, "Failed to set fencing token"),
97 | Self::AppendSession => write!(f, "Failed to append session"),
98 | Self::ReadSession => write!(f, "Failed to read session"),
99 | Self::ReconfigureStream => write!(f, "Failed to reconfigure stream"),
100 | }
101 | }
102 | }
103 |
104 | /// Error holding the relevant info extracted from a `tonic::Status`.
105 | #[derive(thiserror::Error, Debug, Default)]
106 | #[error("{status}:\n {message}")]
107 | pub struct ServiceStatus {
108 | pub message: String,
109 | pub status: String,
110 | }
111 |
112 | impl From<ClientError> for ServiceStatus {
113 | fn from(error: ClientError) -> Self {
114 | match error {
115 | ClientError::Service(status) => Self {
116 | message: status.message().to_string(),
117 | status: status.code().to_string(),
118 | },
119 | ClientError::Conversion(conv) => Self {
120 | message: conv.to_string(),
121 | status: "Failed to convert SDK type".to_string(),
122 | },
123 | }
124 | }
125 | }
126 |
127 | #[derive(Debug, thiserror::Error)]
128 | #[error("{context}:\n {status}")]
129 | pub struct ServiceError {
130 | context: ServiceErrorContext,
131 | status: ServiceStatus,
132 | }
133 |
134 | impl ServiceError {
135 | pub fn new(context: ServiceErrorContext, status: impl Into<ServiceStatus>) -> Self {
136 | Self {
137 | context,
138 | status: status.into(),
139 | }
140 | }
141 | }
142 |
143 | #[derive(Debug, Error)]
144 | pub enum S2UriParseError {
145 | #[error("S2 URI must begin with `s2://`")]
146 | MissingUriScheme,
147 | #[error("Invalid S2 URI scheme `{0}://`. Must be `s2://`")]
148 | InvalidUriScheme(String),
149 | #[error("{0}")]
150 | InvalidBasinName(ConvertError),
151 | #[error("Only basin name expected but found both basin and stream names")]
152 | UnexpectedStreamName,
153 | #[error("Missing stream name in S2 URI")]
154 | MissingStreamName,
155 | }
156 |
157 | #[cfg(test)]
158 | impl PartialEq for S2UriParseError {
159 | fn eq(&self, other: &Self) -> bool {
160 | match (self, other) {
161 | (Self::MissingUriScheme, Self::MissingUriScheme) => true,
162 | (Self::InvalidUriScheme(s), Self::InvalidUriScheme(o)) if s.eq(o) => true,
163 | (Self::InvalidBasinName(_), Self::InvalidBasinName(_)) => true,
164 | (Self::MissingStreamName, Self::MissingStreamName) => true,
165 | (Self::UnexpectedStreamName, Self::UnexpectedStreamName) => true,
166 | _ => false,
167 | }
168 | }
169 | }
170 |
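171 | // An illustrative test (no network involved): `ServiceError`'s `Display`
172 | // output should render the human-readable context before the underlying
173 | // status, matching the `#[error("{context}:\n {status}")]` attribute above.
174 | #[cfg(test)]
175 | mod tests {
176 | use super::*;
177 |
178 | #[test]
179 | fn service_error_displays_context_then_status() {
180 | let err = ServiceError::new(
181 | ServiceErrorContext::ListBasins,
182 | ServiceStatus {
183 | message: "permission denied".to_owned(),
184 | status: "PermissionDenied".to_owned(),
185 | },
186 | );
187 | assert!(err.to_string().starts_with("Failed to list basins"));
188 | }
189 | }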
--------------------------------------------------------------------------------
/src/formats.rs:
--------------------------------------------------------------------------------
1 | use s2::types::{AppendRecord, ConvertError, SequencedRecord};
2 | use std::io;
3 | use tokio::io::AsyncWrite;
4 |
5 | use futures::Stream;
6 |
7 | #[derive(Debug, thiserror::Error)]
8 | pub enum RecordParseError {
9 | #[error("Error reading: {0}")]
10 | Io(#[from] io::Error),
11 | #[error("Error parsing: {0}")]
12 | Convert(#[from] ConvertError),
13 | }
14 |
15 | pub trait RecordParser<I>
16 | where
17 | I: Stream<Item = io::Result<String>> + Send + Unpin,
18 | {
19 | type RecordStream: Stream<Item = Result<AppendRecord, RecordParseError>> + Send + Unpin;
20 |
21 | fn parse_records(lines: I) -> Self::RecordStream;
22 | }
23 |
24 | pub trait RecordWriter {
25 | async fn write_record(
26 | record: &SequencedRecord,
27 | writer: &mut (impl AsyncWrite + Unpin),
28 | ) -> io::Result<()>;
29 | }
30 |
31 | pub use body::RawFormatter as RawBodyFormatter;
32 | pub type RawJsonFormatter = json::Formatter<false>;
33 | pub type Base64JsonFormatter = json::Formatter<true>;
34 |
35 | mod body {
36 | use std::{
37 | io,
38 | pin::Pin,
39 | task::{Context, Poll},
40 | };
41 |
42 | use futures::{Stream, StreamExt};
43 | use s2::types::{AppendRecord, SequencedRecord};
44 | use tokio::io::{AsyncWrite, AsyncWriteExt};
45 |
46 | use super::{RecordParseError, RecordParser, RecordWriter};
47 |
48 | pub struct RawFormatter;
49 |
50 | impl RecordWriter for RawFormatter {
51 | async fn write_record(
52 | record: &SequencedRecord,
53 | writer: &mut (impl AsyncWrite + Unpin),
54 | ) -> io::Result<()> {
55 | let s = String::from_utf8_lossy(&record.body);
56 | writer.write_all(s.as_bytes()).await
57 | }
58 | }
59 |
60 | impl<I> RecordParser<I> for RawFormatter
61 | where
62 | I: Stream<Item = io::Result<String>> + Send + Unpin,
63 | {
64 | type RecordStream = RecordStream<I>;
65 |
66 | fn parse_records(lines: I) -> Self::RecordStream {
67 | RecordStream(lines)
68 | }
69 | }
70 |
71 | pub struct RecordStream<S>(S);
72 |
73 | impl<S> Stream for RecordStream<S>
74 | where
75 | S: Stream<Item = io::Result<String>> + Send + Unpin,
76 | {
77 | type Item = Result<AppendRecord, RecordParseError>;
78 |
79 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
80 | match self.0.poll_next_unpin(cx) {
81 | Poll::Pending => Poll::Pending,
82 | Poll::Ready(None) => Poll::Ready(None),
83 | Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e.into()))),
84 | Poll::Ready(Some(Ok(s))) => Poll::Ready(Some(Ok(AppendRecord::new(s)?))),
85 | }
86 | }
87 | }
88 | }
89 |
90 | mod json {
91 | use std::{
92 | borrow::Cow,
93 | io,
94 | pin::Pin,
95 | task::{Context, Poll},
96 | };
97 |
98 | use base64ct::{Base64, Encoding};
99 | use bytes::Bytes;
100 | use futures::{Stream, StreamExt};
101 | use s2::types::{AppendRecord, AppendRecordParts, ConvertError, Header, SequencedRecord};
102 | use serde::{Deserialize, Serialize};
103 | use tokio::io::{AsyncWrite, AsyncWriteExt};
104 |
105 | use super::{RecordParseError, RecordParser, RecordWriter};
106 |
107 | #[derive(Debug, Clone, Default)]
108 | struct CowStr<'a, const BIN_SAFE: bool>(Cow<'a, str>);
109 |
110 | impl<const BIN_SAFE: bool> CowStr<'_, BIN_SAFE> {
111 | fn is_empty(&self) -> bool {
112 | self.0.is_empty()
113 | }
114 | }
115 |
116 | type OwnedCowStr<const BIN_SAFE: bool> = CowStr<'static, BIN_SAFE>;
117 |
118 | impl<'a, const BIN_SAFE: bool> From<&'a [u8]> for CowStr<'a, BIN_SAFE> {
119 | fn from(value: &'a [u8]) -> Self {
120 | Self(if BIN_SAFE {
121 | Base64::encode_string(value).into()
122 | } else {
123 | String::from_utf8_lossy(value)
124 | })
125 | }
126 | }
127 |
128 | impl<const BIN_SAFE: bool> TryFrom<OwnedCowStr<BIN_SAFE>> for Bytes {
129 | type Error = ConvertError;
130 |
131 | fn try_from(value: OwnedCowStr<BIN_SAFE>) -> Result<Self, Self::Error> {
132 | let CowStr(s) = value;
133 |
134 | Ok(if BIN_SAFE {
135 | Base64::decode_vec(&s).map_err(|_| format!("invalid base64: {s}"))?
136 | } else {
137 | s.into_owned().into_bytes()
138 | }
139 | .into())
140 | }
141 | }
142 |
143 | impl<const BIN_SAFE: bool> Serialize for CowStr<'_, BIN_SAFE> {
144 | fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
145 | where
146 | S: serde::Serializer,
147 | {
148 | self.0.serialize(serializer)
149 | }
150 | }
151 |
152 | impl<'de, const BIN_SAFE: bool> Deserialize<'de> for OwnedCowStr<BIN_SAFE> {
153 | fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
154 | where
155 | D: serde::Deserializer<'de>,
156 | {
157 | String::deserialize(deserializer).map(|s| CowStr(s.into()))
158 | }
159 | }
160 |
161 | pub struct Formatter<const BIN_SAFE: bool>;
162 |
163 | #[derive(Debug, Clone, Serialize)]
164 | struct SerializableSequencedRecord<'a, const BIN_SAFE: bool> {
165 | seq_num: u64,
166 | timestamp: u64,
167 | #[serde(skip_serializing_if = "Vec::is_empty")]
168 | headers: Vec<(CowStr<'a, BIN_SAFE>, CowStr<'a, BIN_SAFE>)>,
169 | #[serde(skip_serializing_if = "CowStr::is_empty")]
170 | body: CowStr<'a, BIN_SAFE>,
171 | }
172 |
173 | impl<'a, const BIN_SAFE: bool> From<&'a SequencedRecord>
174 | for SerializableSequencedRecord<'a, BIN_SAFE>
175 | {
176 | fn from(value: &'a SequencedRecord) -> Self {
177 | let SequencedRecord {
178 | timestamp,
179 | seq_num,
180 | headers,
181 | body,
182 | } = value;
183 |
184 | let headers: Vec<(CowStr<BIN_SAFE>, CowStr<BIN_SAFE>)> = headers
185 | .iter()
186 | .map(|Header { name, value }| (name.as_ref().into(), value.as_ref().into()))
187 | .collect();
188 |
189 | let body: CowStr<BIN_SAFE> = body.as_ref().into();
190 |
191 | SerializableSequencedRecord {
192 | timestamp: *timestamp,
193 | seq_num: *seq_num,
194 | headers,
195 | body,
196 | }
197 | }
198 | }
199 |
200 | impl<const BIN_SAFE: bool> RecordWriter for Formatter<BIN_SAFE> {
201 | async fn write_record(
202 | record: &SequencedRecord,
203 | writer: &mut (impl AsyncWrite + Unpin),
204 | ) -> io::Result<()> {
205 | let record: SerializableSequencedRecord<BIN_SAFE> = record.into();
206 | let s = serde_json::to_string(&record).map_err(io::Error::other)?;
207 | writer.write_all(s.as_bytes()).await
208 | }
209 | }
210 |
211 | impl<I, const BIN_SAFE: bool> RecordParser<I> for Formatter<BIN_SAFE>
212 | where
213 | I: Stream<Item = io::Result<String>> + Send + Unpin,
214 | {
215 | type RecordStream = RecordStream<I, BIN_SAFE>;
216 |
217 | fn parse_records(lines: I) -> Self::RecordStream {
218 | RecordStream(lines)
219 | }
220 | }
221 |
222 | #[derive(Debug, Clone, Deserialize)]
223 | struct DeserializableAppendRecord<const BIN_SAFE: bool> {
224 | timestamp: Option<u64>,
225 | #[serde(default)]
226 | headers: Vec<(OwnedCowStr<BIN_SAFE>, OwnedCowStr<BIN_SAFE>)>,
227 | #[serde(default)]
228 | body: OwnedCowStr<BIN_SAFE>,
229 | }
230 |
231 | impl<const BIN_SAFE: bool> TryFrom<DeserializableAppendRecord<BIN_SAFE>> for AppendRecord {
232 | type Error = ConvertError;
233 |
234 | fn try_from(value: DeserializableAppendRecord<BIN_SAFE>) -> Result<Self, Self::Error> {
235 | let DeserializableAppendRecord {
236 | timestamp,
237 | headers,
238 | body,
239 | } = value;
240 |
241 | let parts = AppendRecordParts {
242 | timestamp,
243 | headers: headers
244 | .into_iter()
245 | .map(|(name, value)| {
246 | Ok(Header {
247 | name: name.try_into()?,
248 | value: value.try_into()?,
249 | })
250 | })
251 | .collect::<Result<Vec<Header>, ConvertError>>()?,
252 | body: body.try_into()?,
253 | };
254 |
255 | parts.try_into()
256 | }
257 | }
258 |
259 | pub struct RecordStream<S, const BIN_SAFE: bool>(S);
260 |
261 | impl<S, const BIN_SAFE: bool> Stream for RecordStream<S, BIN_SAFE>
262 | where
263 | S: Stream<Item = io::Result<String>> + Send + Unpin,
264 | {
265 | type Item = Result<AppendRecord, RecordParseError>;
266 |
267 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
268 | fn parse_record<const BIN_SAFE: bool>(
269 | s: String,
270 | ) -> Result<AppendRecord, RecordParseError> {
271 | let append_record: DeserializableAppendRecord<BIN_SAFE> = serde_json::from_str(&s)
272 | .map_err(|e| RecordParseError::Convert(e.to_string().into()))?;
273 |
274 | Ok(append_record.try_into()?)
275 | }
276 |
277 | match self.0.poll_next_unpin(cx) {
278 | Poll::Pending => Poll::Pending,
279 | Poll::Ready(None) => Poll::Ready(None),
280 | Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e.into()))),
281 | Poll::Ready(Some(Ok(s))) => Poll::Ready(Some(parse_record::<BIN_SAFE>(s))),
282 | }
283 | }
284 | }
285 | }
286 |
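287 | // A minimal end-to-end sketch of the parse path (assuming the `futures`
288 | // crate and a tokio test runtime, both already used by this crate): feed
289 | // one JSON line through the raw JSON formatter and get an `AppendRecord`.
290 | #[cfg(test)]
291 | mod tests {
292 | use super::*;
293 | use futures::StreamExt;
294 |
295 | #[tokio::test]
296 | async fn parses_json_line_into_append_record() {
297 | let lines = futures::stream::iter(vec![Ok::<_, io::Error>(
298 | r#"{"body":"hello"}"#.to_string(),
299 | )]);
300 | let mut records = RawJsonFormatter::parse_records(lines);
301 | let record = records.next().await.expect("one item").expect("valid record");
302 | assert_eq!(record.body().as_ref(), b"hello".as_slice());
303 | }
304 | }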
--------------------------------------------------------------------------------
/src/main.rs:
--------------------------------------------------------------------------------
1 | use json_to_table::json_to_table;
2 | use std::{
3 | io::BufRead,
4 | path::PathBuf,
5 | pin::Pin,
6 | time::{Duration, SystemTime, UNIX_EPOCH},
7 | };
8 |
9 | use account::AccountService;
10 | use basin::BasinService;
11 | use clap::{Parser, Subcommand, ValueEnum, builder::styling};
12 | use colored::Colorize;
13 | use config::{config_path, create_config};
14 | use error::{S2CliError, ServiceError, ServiceErrorContext};
15 | use formats::{Base64JsonFormatter, RawBodyFormatter, RawJsonFormatter, RecordWriter};
16 | use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
17 | use ping::{LatencyStats, PingResult, Pinger};
18 | use rand::Rng;
19 | use s2::{
20 | Streaming,
21 | batching::AppendRecordsBatchingOpts,
22 | client::{BasinClient, Client, ClientConfig, S2Endpoints, StreamClient},
23 | types::{
24 | AccessTokenId, AppendRecord, AppendRecordBatch, BasinInfo, Command, CommandRecord,
25 | FencingToken, MeteredBytes as _, ReadLimit, ReadOutput, ReadStart, StreamInfo,
26 | },
27 | };
28 | use stream::{RecordStream, StreamService};
29 | use tokio::{
30 | fs::{File, OpenOptions},
31 | io::{AsyncBufReadExt, AsyncWrite, AsyncWriteExt, BufWriter},
32 | select,
33 | };
34 | use tokio::{signal, sync::mpsc};
35 | use tokio_stream::{
36 | Stream, StreamExt,
37 | wrappers::{LinesStream, ReceiverStream},
38 | };
39 | use tracing::trace;
40 | use tracing_subscriber::{fmt::format::FmtSpan, layer::SubscriberExt, util::SubscriberInitExt};
41 | use types::{
42 | AccessTokenInfo, BasinConfig, Operation, PermittedOperationGroups, ResourceSet,
43 | S2BasinAndMaybeStreamUri, S2BasinAndStreamUri, S2BasinUri, StreamConfig,
44 | };
45 |
46 | mod account;
47 | mod basin;
48 | mod stream;
49 |
50 | mod config;
51 | mod error;
52 | mod formats;
53 | mod ping;
54 | mod types;
55 |
56 | const STYLES: styling::Styles = styling::Styles::styled()
57 | .header(styling::AnsiColor::Green.on_default().bold())
58 | .usage(styling::AnsiColor::Green.on_default().bold())
59 | .literal(styling::AnsiColor::Blue.on_default().bold())
60 | .placeholder(styling::AnsiColor::Cyan.on_default());
61 |
62 | const GENERAL_USAGE: &str = color_print::cstr!(
63 | r#"
64 | $ s2 config set --access-token ...
65 | $ s2 list-basins --prefix "foo" --limit 100
66 | "#
67 | );
68 |
69 | #[derive(Parser, Debug)]
70 | #[command(name = "s2", version, override_usage = GENERAL_USAGE, styles = STYLES)]
71 | struct Cli {
72 | #[command(subcommand)]
73 | command: Commands,
74 | }
75 |
76 | #[derive(Parser, Debug)]
77 | struct S2BasinAndStreamUriArgs {
78 | /// S2 URI of the format: s2://{basin}/{stream}
79 | #[arg(value_name = "S2_URI")]
80 | uri: S2BasinAndStreamUri,
81 | }
82 |
83 | #[derive(Subcommand, Debug)]
84 | enum Commands {
85 | /// Manage CLI configuration.
86 | Config {
87 | #[command(subcommand)]
88 | action: ConfigActions,
89 | },
90 |
91 | /// List basins or streams in a basin.
92 | ///
93 | /// Lists basins if a basin name is not provided; otherwise lists streams
94 | /// in the basin.
95 | Ls {
96 | /// Name of the basin to manage, or an S2 URI with the basin and an optional prefix.
97 | ///
98 | /// S2 URI is of the format: s2://{basin}/{prefix}
99 | #[arg(value_name = "BASIN|S2_URI")]
100 | uri: Option<S2BasinAndMaybeStreamUri>,
101 |
102 | /// Filter to names that begin with this prefix.
103 | #[arg(short = 'p', long)]
104 | prefix: Option<String>,
105 |
106 | /// Filter to names that lexicographically start after this name.
107 | #[arg(short = 's', long)]
108 | start_after: Option<String>,
109 |
110 | /// Number of results, up to a maximum of 1000.
111 | #[arg(short = 'n', long)]
112 | limit: Option<usize>,
113 |
114 | /// Disable automatic following of pagination responses, which can make multiple RPC calls.
115 | #[arg(long, default_value_t = false)]
116 | no_auto_paginate: bool,
117 | },
118 |
119 | /// List basins.
120 | ListBasins {
121 | /// Filter to basin names that begin with this prefix.
122 | #[arg(short = 'p', long, default_value = "")]
123 | prefix: Option<String>,
124 |
125 | /// Filter to basin names that lexicographically start after this name.
126 | #[arg(short = 's', long, default_value = "")]
127 | start_after: Option<String>,
128 |
129 | /// Number of results, up to a maximum of 1000.
130 | #[arg(short = 'n', long)]
131 | limit: Option<usize>,
132 |
133 | /// Disable automatic following of pagination responses, which can make multiple RPC calls.
134 | #[arg(long, default_value_t = false)]
135 | no_auto_paginate: bool,
136 | },
137 |
138 | /// Create a basin.
139 | CreateBasin {
140 | /// Name of the basin to create.
141 | basin: S2BasinUri,
142 |
143 | #[command(flatten)]
144 | config: BasinConfig,
145 | },
146 |
147 | /// Delete a basin.
148 | DeleteBasin {
149 | /// Name of the basin to delete.
150 | basin: S2BasinUri,
151 | },
152 |
153 | /// Get basin config.
154 | GetBasinConfig {
155 | /// Basin name to get config for.
156 | basin: S2BasinUri,
157 | },
158 |
159 | /// Reconfigure a basin.
160 | ReconfigureBasin {
161 | /// Name of the basin to reconfigure.
162 | basin: S2BasinUri,
163 |
164 | /// Configuration to apply.
165 | #[command(flatten)]
166 | config: BasinConfig,
167 | },
168 |
169 | /// Issue an access token.
170 | IssueAccessToken {
171 | /// Access token ID.
172 | #[arg(long)]
173 | id: AccessTokenId,
174 |
175 | /// Expiration time in seconds since Unix epoch.
176 | #[arg(long)]
177 | expires_at: Option<u32>,
178 |
179 | /// Namespace streams based on the configured stream-level scope, which must be a prefix.
180 | /// Stream name arguments will be automatically prefixed, and the prefix will be stripped
181 | /// when listing streams.
182 | #[arg(long, default_value_t = false)]
183 | auto_prefix_streams: bool,
184 |
185 | /// Basin names allowed.
186 | /// Matches exact value if it starts with `=`, otherwise treats it as a prefix.
187 | #[arg(long)]
188 | basins: Option<ResourceSet<8, 48>>,
189 |
190 | /// Stream names allowed.
191 | /// Matches exact value if it starts with `=`, otherwise treats it as a prefix.
192 | #[arg(long)]
193 | streams: Option<ResourceSet<1, 512>>,
194 |
195 | /// Token IDs allowed.
196 | /// Matches exact value if it starts with `=`, otherwise treats it as a prefix.
197 | #[arg(long)]
198 | access_tokens: Option<ResourceSet<1, 96>>,
199 |
200 | /// Access permissions at the group level.
201 | /// The format is: "account=rw,basin=r,stream=w"
202 | /// where 'r' indicates read permission and 'w' indicates write permission.
203 | #[arg(long)]
204 | op_groups: Option<PermittedOperationGroups>,
205 |
206 | /// Operations allowed for the token.
207 | /// A union of allowed operations and groups is used as an effective set of allowed operations.
208 | #[arg(long, value_delimiter = ',')]
209 | ops: Vec<Operation>,
210 | },
211 |
212 | /// Revoke an access token.
213 | RevokeAccessToken {
214 | /// ID of the access token to revoke.
215 | #[arg(long)]
216 | id: AccessTokenId,
217 | },
218 |
219 | /// List access tokens.
220 | ListAccessTokens {
221 | /// List access tokens that begin with this prefix.
222 | #[arg(short = 'p', long, default_value = "")]
223 | prefix: Option<String>,
224 |
225 | /// Only return access tokens that lexicographically start after this token ID.
226 | #[arg(short = 's', long, default_value = "")]
227 | start_after: Option<String>,
228 |
229 | /// Number of results, up to a maximum of 1000.
230 | #[arg(short = 'n', long)]
231 | limit: Option<usize>,
232 |
233 | /// Disable automatic following of pagination responses, which can make multiple RPC calls.
234 | #[arg(long, default_value_t = false)]
235 | no_auto_paginate: bool,
236 | },
237 |
238 | /// List streams.
239 | ListStreams {
241 | /// Name of the basin to manage, or an S2 URI with the basin and an optional prefix.
241 | ///
242 | /// S2 URI is of the format: s2://{basin}/{prefix}
243 | #[arg(value_name = "BASIN|S2_URI")]
244 | uri: S2BasinAndMaybeStreamUri,
245 |
246 | /// Filter to stream names that begin with this prefix.
247 | #[arg(short = 'p', long)]
248 | prefix: Option<String>,
249 |
250 | /// Filter to stream names that lexicographically start after this name.
251 | #[arg(short = 's', long)]
252 | start_after: Option<String>,
253 |
254 | /// Number of results, up to a maximum of 1000.
255 | #[arg(short = 'n', long)]
256 | limit: Option<usize>,
257 |
258 | /// Disable automatic following of pagination responses, which can make multiple RPC calls.
259 | #[arg(long, default_value_t = false)]
260 | no_auto_paginate: bool,
261 | },
262 |
263 | /// Create a stream.
264 | CreateStream {
265 | #[command(flatten)]
266 | uri: S2BasinAndStreamUriArgs,
267 |
268 | /// Configuration to apply.
269 | #[command(flatten)]
270 | config: StreamConfig,
271 | },
272 |
273 | /// Delete a stream.
274 | DeleteStream {
275 | #[command(flatten)]
276 | uri: S2BasinAndStreamUriArgs,
277 | },
278 |
279 | /// Get stream config.
280 | GetStreamConfig {
281 | #[command(flatten)]
282 | uri: S2BasinAndStreamUriArgs,
283 | },
284 |
285 | /// Reconfigure a stream.
286 | ReconfigureStream {
287 | #[command(flatten)]
288 | uri: S2BasinAndStreamUriArgs,
289 |
290 | /// Configuration to apply.
291 | #[command(flatten)]
292 | config: StreamConfig,
293 | },
294 |
295 | /// Get the next sequence number that will be assigned by a stream.
296 | CheckTail {
297 | #[command(flatten)]
298 | uri: S2BasinAndStreamUriArgs,
299 | },
300 |
301 | /// Set the trim point for the stream.
302 | ///
303 | /// Trimming is eventually consistent, and trimmed records may be visible
304 | /// for a brief period.
305 | Trim {
306 | #[command(flatten)]
307 | uri: S2BasinAndStreamUriArgs,
308 |
309 | /// Earliest sequence number that should be retained.
310 | /// This sequence number is only allowed to advance,
311 | /// and any regression will be ignored.
312 | trim_point: u64,
313 |
314 | /// Enforce fencing token.
315 | #[arg(short = 'f', long)]
316 | fencing_token: Option<FencingToken>,
317 |
318 | /// Enforce that the sequence number issued to the first record matches.
319 | #[arg(short = 'm', long)]
320 | match_seq_num: Option<u64>,
321 | },
322 |
323 | /// Set a fencing token for the stream.
324 | ///
325 | /// Fencing is strongly consistent, and subsequent appends that specify a
326 | /// token will be rejected if it does not match.
327 | ///
328 | /// Note that fencing is a cooperative mechanism,
329 | /// and it is only enforced when a token is provided.
330 | Fence {
331 | #[command(flatten)]
332 | uri: S2BasinAndStreamUriArgs,
333 |
334 | /// New fencing token.
335 | /// It may be up to 36 characters, and can be empty.
336 | #[arg()]
337 | new_fencing_token: FencingToken,
338 |
339 | /// Enforce existing fencing token.
340 | #[arg(short = 'f', long)]
341 | fencing_token: Option<FencingToken>,
342 |
343 | /// Enforce that the sequence number issued to this command matches.
344 | #[arg(short = 'm', long)]
345 | match_seq_num: Option<u64>,
346 | },
347 |
348 | /// Append records to a stream.
349 | Append {
350 | #[command(flatten)]
351 | uri: S2BasinAndStreamUriArgs,
352 |
353 | /// Enforce fencing token.
354 | #[arg(short = 'f', long)]
355 | fencing_token: Option<FencingToken>,
356 |
357 | /// Enforce that the sequence number issued to the first record matches.
358 | #[arg(short = 'm', long)]
359 | match_seq_num: Option<u64>,
360 |
361 | /// Input format.
362 | #[arg(long, value_enum, default_value_t)]
363 | format: Format,
364 |
366 | /// Newline-delimited records to append, read from a file or stdin.
366 | /// All records are treated as plain text.
367 | /// Use "-" to read from stdin.
368 | #[arg(short = 'i', long, value_parser = parse_records_input_source, default_value = "-")]
369 | input: RecordsIn,
370 |
371 | /// How long to wait while accumulating records before emitting a batch.
372 | #[arg(long, default_value = "5ms")]
373 | linger: humantime::Duration,
374 | },
375 |
376 | /// Read records from a stream.
377 | ///
378 | /// If a limit is specified, reading will stop when the limit is reached or there are no more records on the stream.
379 | /// If a limit is not specified, the reader will keep tailing and wait for new records.
380 | Read {
381 | #[command(flatten)]
382 | uri: S2BasinAndStreamUriArgs,
383 |
384 | /// Starting sequence number (inclusive).
385 | #[arg(short = 's', long, group = "start")]
386 | seq_num: Option<u64>,
387 |
388 | /// Starting timestamp (inclusive).
389 | #[arg(long, group = "start")]
390 | timestamp: Option<u64>,
391 |
392 | /// Starting timestamp as a human-friendly delta from current time e.g. "1h",
393 | /// which will be converted to milliseconds since Unix epoch.
394 | #[arg(long, group = "start")]
395 | ago: Option<humantime::Duration>,
396 |
397 | /// Start from N records before the tail of the stream.
398 | #[arg(long, group = "start")]
399 | tail_offset: Option<u64>,
400 |
401 | /// Output format.
402 | #[arg(long, value_enum, default_value_t)]
403 | format: Format,
404 |
405 | /// Limit the number of records returned.
406 | #[arg(short = 'n', long)]
407 | count: Option<u64>,
408 |
409 | /// Limit the number of bytes returned.
410 | #[arg(short = 'b', long)]
411 | bytes: Option<u64>,
412 |
413 | /// Output records to a file or stdout.
414 | /// Use "-" to write to stdout.
415 | #[arg(short = 'o', long, value_parser = parse_records_output_source, default_value = "-")]
416 | output: RecordsOut,
417 | },
418 |
419 | /// Tail a stream, showing the last N records.
420 | Tail {
421 | #[command(flatten)]
422 | uri: S2BasinAndStreamUriArgs,
423 |
424 | /// Output the last N records instead of the default (10).
425 | #[arg(short = 'n', long = "lines", default_value_t = 10)]
426 | lines: u64,
427 |
428 | /// Follow the stream, waiting for new records to be appended.
429 | #[arg(short = 'f', long, default_value_t = false)]
430 | follow: bool,
431 |
432 | /// Output format.
433 | #[arg(long, value_enum, default_value_t)]
434 | format: Format,
435 |
436 | /// Output records to a file or stdout.
437 | /// Use "-" to write to stdout.
438 | #[arg(short = 'o', long, value_parser = parse_records_output_source, default_value = "-")]
439 | output: RecordsOut,
440 | },
441 |
442 | /// Ping the stream to get append acknowledgement and end-to-end latencies.
443 | Ping {
444 | #[command(flatten)]
445 | uri: S2BasinAndStreamUriArgs,
446 |
447 | /// Send a batch after this interval.
448 | ///
449 | /// Will be set to a minimum of 100ms.
450 | #[arg(short = 'i', long, default_value = "500ms")]
451 | interval: humantime::Duration,
452 |
453 | /// Batch size in bytes. A jitter (+/- 25%) will be added.
454 | ///
455 | /// Truncated to a maximum of 128 KiB.
456 | #[arg(short = 'b', long, default_value_t = 32 * 1024)]
457 | batch_bytes: u64,
458 |
459 | /// Stop after sending this number of batches.
460 | #[arg(short = 'n', long)]
461 | num_batches: Option<usize>,
462 | },
463 | }
464 |
465 | #[derive(Subcommand, Debug)]
466 | enum ConfigActions {
467 | /// Set the authentication token to be reused in subsequent commands.
468 | /// Alternatively, use the S2_ACCESS_TOKEN environment variable.
469 | Set {
470 | #[arg(short = 'a', long, group = "auth")]
471 | access_token: Option<String>,
472 | #[arg(long = "auth-token", group = "auth", hide = true)]
473 | auth_token: Option<String>,
474 | },
475 | }
476 |
477 | #[derive(Debug, Clone, Copy, Default, ValueEnum)]
478 | pub enum Format {
479 | /// Plaintext record body as UTF-8.
480 | /// If the body is not valid UTF-8, this will be a lossy decoding.
481 | /// Headers cannot be represented, so command records are sent to stderr when reading.
482 | #[default]
483 | #[clap(name = "")]
484 | BodyRaw,
485 | /// JSON format with UTF-8 headers and body.
486 | /// If the data is not valid UTF-8, this will be a lossy decoding.
487 | #[clap(name = "raw", alias = "json")]
488 | JsonRaw,
489 | /// JSON format with headers and body encoded as Base64.
490 | #[clap(name = "base64", alias = "json-binsafe")]
491 | JsonBase64,
492 | }
493 |
494 | #[derive(Debug, Clone)]
495 | pub enum RecordsIn {
496 | File(PathBuf),
497 | Stdin,
498 | }
499 |
500 | /// Sink for records in a read session.
501 | #[derive(Debug, Clone)]
502 | pub enum RecordsOut {
503 | File(PathBuf),
504 | Stdout,
505 | }
506 |
507 | impl RecordsIn {
508 | pub async fn into_reader(
509 | &self,
510 | ) -> std::io::Result<Pin<Box<dyn Stream<Item = std::io::Result<String>> + Send>>> {
511 | match self {
512 | RecordsIn::File(path) => {
513 | let file = File::open(path).await?;
514 | Ok(Box::pin(LinesStream::new(
515 | tokio::io::BufReader::new(file).lines(),
516 | )))
517 | }
518 | RecordsIn::Stdin => Ok(Box::pin(stdio_lines_stream(std::io::stdin()))),
519 | }
520 | }
521 | }
522 |
523 | fn stdio_lines_stream<F>(f: F) -> ReceiverStream<std::io::Result<String>>
524 | where
525 | F: std::io::Read + Send + 'static,
526 | {
527 | let lines = std::io::BufReader::new(f).lines();
528 | let (tx, rx) = mpsc::channel(AppendRecordBatch::MAX_CAPACITY);
529 | let _handle = std::thread::spawn(move || {
530 | for line in lines {
531 | if tx.blocking_send(line).is_err() {
532 | return;
533 | }
534 | }
535 | });
536 | ReceiverStream::new(rx)
537 | }
538 |
539 | impl RecordsOut {
540 | pub async fn into_writer(&self) -> std::io::Result<Box<dyn AsyncWrite + Send + Unpin>> {
541 | match self {
542 | RecordsOut::File(path) => {
543 | trace!(?path, "opening file writer");
544 | let file = OpenOptions::new()
545 | .write(true)
546 | .create(true)
547 | .append(true)
548 | .open(path)
549 | .await?;
550 |
551 | Ok(Box::new(BufWriter::new(file)))
552 | }
553 | RecordsOut::Stdout => {
554 | trace!("stdout writer");
555 | Ok(Box::new(BufWriter::new(tokio::io::stdout())))
556 | }
557 | }
558 | }
559 | }
560 |
561 | fn parse_records_input_source(s: &str) -> Result<RecordsIn, std::io::Error> {
562 | match s {
563 | "" | "-" => Ok(RecordsIn::Stdin),
564 | _ => Ok(RecordsIn::File(PathBuf::from(s))),
565 | }
566 | }
567 |
568 | fn parse_records_output_source(s: &str) -> Result<RecordsOut, std::io::Error> {
569 | match s {
570 | "" | "-" => Ok(RecordsOut::Stdout),
571 | _ => Ok(RecordsOut::File(PathBuf::from(s))),
572 | }
573 | }
574 |
575 | fn client_config(access_token: String) -> Result<ClientConfig, S2CliError> {
576 | let endpoints = S2Endpoints::from_env().map_err(S2CliError::EndpointsFromEnv)?;
577 | let client_config = ClientConfig::new(access_token)
578 | .with_user_agent("s2-cli".parse().expect("valid user agent"))
579 | .with_endpoints(endpoints)
580 | .with_request_timeout(Duration::from_secs(30));
581 | Ok(client_config)
582 | }
583 |
584 | #[tokio::main]
585 | async fn main() -> miette::Result<()> {
586 | miette::set_panic_hook();
587 | run().await?;
588 | Ok(())
589 | }
590 |
591 | async fn run() -> Result<(), S2CliError> {
592 | let commands = Cli::parse();
593 | let config_path = config_path()?;
594 |
595 | tracing_subscriber::registry()
596 | .with(
597 | tracing_subscriber::fmt::layer()
598 | .pretty()
599 | .with_span_events(FmtSpan::NEW)
600 | .compact()
601 | .with_writer(std::io::stderr),
602 | )
603 | .with(tracing_subscriber::EnvFilter::from_default_env())
604 | .init();
605 |
606 | async fn list_basins(
607 | client_config: ClientConfig,
608 | prefix: Option<String>,
609 | start_after: Option<String>,
610 | limit: Option<usize>,
611 | no_auto_paginate: bool,
612 | ) -> Result<(), S2CliError> {
613 | let account_service = AccountService::new(Client::new(client_config));
614 | let basin_response_stream = account_service.list_basins(
615 | prefix.unwrap_or_default(),
616 | start_after.unwrap_or_default(),
617 | limit,
618 | no_auto_paginate,
619 | );
620 |
621 | tokio::pin!(basin_response_stream);
622 |
623 | while let Some(response) = basin_response_stream.next().await {
624 | for basin_info in response?.basins {
625 | let BasinInfo { name, state, .. } = basin_info;
626 | let state = if let Some(state) = state {
627 | match state {
628 | s2::types::BasinState::Active => state.to_string().green(),
629 | s2::types::BasinState::Creating => state.to_string().yellow(),
630 | s2::types::BasinState::Deleting => state.to_string().red(),
631 | }
632 | } else {
633 | "unknown".to_owned().blue()
634 | };
635 | println!("{} {}", name, state);
636 | }
637 | }
638 |
639 | Ok(())
640 | }
641 |
642 | async fn list_streams(
643 | client_config: ClientConfig,
644 | uri: S2BasinAndMaybeStreamUri,
645 | prefix: Option<String>,
646 | start_after: Option<String>,
647 | limit: Option<usize>,
648 | no_auto_paginate: bool,
649 | ) -> Result<(), S2CliError> {
650 | let S2BasinAndMaybeStreamUri {
651 | basin,
652 | stream: maybe_prefix,
653 | } = uri;
654 | let prefix = match (maybe_prefix, prefix) {
655 | (Some(_), Some(_)) => {
656 | return Err(S2CliError::InvalidArgs(miette::miette!(
657 | help = "Make sure to provide the prefix once either using '--prefix' opt or in URI like 's2://basin-name/prefix'",
658 | "Multiple prefixes provided"
659 | )));
660 | }
661 | (Some(s), None) | (None, Some(s)) => Some(s),
662 | (None, None) => None,
663 | };
664 |
665 | let basin_service = BasinService::new(BasinClient::new(client_config, basin.clone()));
666 | let streams = basin_service.list_streams(
667 | prefix.unwrap_or_default(),
668 | start_after.unwrap_or_default(),
669 | limit,
670 | no_auto_paginate,
671 | );
672 |
673 | tokio::pin!(streams);
674 |
675 | while let Some(stream) = streams.next().await {
676 | for StreamInfo {
677 | name,
678 | created_at,
679 | deleted_at,
680 | } in stream?.streams
681 | {
682 | let date_time = |time: u32| {
683 | humantime::format_rfc3339_seconds(UNIX_EPOCH + Duration::from_secs(time as u64))
684 | };
685 |
686 | println!(
687 | "s2://{}/{} {} {}",
688 | basin,
689 | name,
690 | date_time(created_at).to_string().green(),
691 | deleted_at
692 | .map(|d| date_time(d).to_string().red())
693 | .unwrap_or_default()
694 | );
695 | }
696 | }
697 |
698 | Ok(())
699 | }
700 |
701 | async fn list_tokens(
702 | client_config: ClientConfig,
703 | prefix: Option<String>,
704 | start_after: Option<String>,
705 | limit: Option<usize>,
706 | no_auto_paginate: bool,
707 | ) -> Result<(), S2CliError> {
708 | let account_service = AccountService::new(Client::new(client_config));
709 | let tokens = account_service.list_access_tokens(
710 | prefix.unwrap_or_default(),
711 | start_after.unwrap_or_default(),
712 | limit,
713 | no_auto_paginate,
714 | );
715 |
716 | tokio::pin!(tokens);
717 |
718 | while let Some(token) = tokens.next().await {
719 | for token_info in token?.access_tokens {
720 | let exp_date = token_info
721 | .expires_at
722 | .map(|exp| {
723 | humantime::format_rfc3339_seconds(
724 | UNIX_EPOCH + Duration::from_secs(exp as u64),
725 | )
726 | .to_string()
727 | .green()
728 | })
729 | .expect("expires_at");
730 |
731 | println!(
732 | "{} {}",
733 | token_info.id.parse::<String>().expect("id"),
734 | exp_date
735 | );
736 | }
737 | }
738 |
739 | Ok(())
740 | }
741 |
742 | match commands.command {
743 | Commands::Config { action } => match action {
744 | ConfigActions::Set {
745 | access_token,
746 | auth_token,
747 | } => {
748 | let access_token = access_token.or(auth_token).ok_or_else(|| {
749 | S2CliError::InvalidArgs(miette::miette!(
750 | "Access token not provided. Use --access-token to set it."
751 | ))
752 | })?;
753 | create_config(&config_path, access_token)?;
754 | eprintln!("{}", "✓ Token set".green().bold());
755 | eprintln!(
756 | " Configuration saved to: {}",
757 | config_path.display().to_string().cyan()
758 | );
759 | }
760 | },
761 | Commands::Ls {
762 | uri,
763 | prefix,
764 | start_after,
765 | limit,
766 | no_auto_paginate,
767 | } => {
768 | let cfg = config::load_config(&config_path)?;
769 | let client_config = client_config(cfg.access_token)?;
770 | if let Some(uri) = uri {
771 | list_streams(
772 | client_config,
773 | uri,
774 | prefix,
775 | start_after,
776 | limit,
777 | no_auto_paginate,
778 | )
779 | .await?;
780 | } else {
781 | list_basins(client_config, prefix, start_after, limit, no_auto_paginate).await?;
782 | }
783 | }
784 | Commands::ListBasins {
785 | prefix,
786 | start_after,
787 | limit,
788 | no_auto_paginate,
789 | } => {
790 | let cfg = config::load_config(&config_path)?;
791 | let client_config = client_config(cfg.access_token)?;
792 | list_basins(client_config, prefix, start_after, limit, no_auto_paginate).await?;
793 | }
794 | Commands::CreateBasin { basin, config } => {
795 | let cfg = config::load_config(&config_path)?;
796 | let client_config = client_config(cfg.access_token)?;
797 | let account_service = AccountService::new(Client::new(client_config));
798 | let (storage_class, retention_policy) = match &config.default_stream_config {
799 | Some(config) => {
800 | let storage_class = config.storage_class.clone();
801 | let retention_policy = config.retention_policy.clone();
802 | (storage_class, retention_policy)
803 | }
804 | None => (None, None),
805 | };
806 | let BasinInfo { state, .. } = account_service
807 | .create_basin(
808 | basin.into(),
809 | storage_class,
810 | retention_policy,
811 | config.create_stream_on_append.unwrap_or_default(),
812 | config.create_stream_on_read.unwrap_or_default(),
813 | )
814 | .await?;
815 |
816 | let message = match state {
817 | Some(s2::types::BasinState::Creating) => {
818 | "✓ Basin creation requested".yellow().bold()
819 | }
820 | Some(s2::types::BasinState::Active) => "✓ Basin created".green().bold(),
821 | s => format!("Unexpected state: {s:?}").red().bold(),
822 | };
823 | eprintln!("{message}");
824 | }
825 | Commands::DeleteBasin { basin } => {
826 | let cfg = config::load_config(&config_path)?;
827 | let client_config = client_config(cfg.access_token)?;
828 | let account_service = AccountService::new(Client::new(client_config));
829 | account_service.delete_basin(basin.into()).await?;
830 | eprintln!("{}", "✓ Basin deletion requested".green().bold());
831 | }
832 | Commands::GetBasinConfig { basin } => {
833 | let cfg = config::load_config(&config_path)?;
834 | let client_config = client_config(cfg.access_token)?;
835 | let account_service = AccountService::new(Client::new(client_config));
836 | let basin_config = account_service.get_basin_config(basin.into()).await?;
837 | let basin_config: BasinConfig = basin_config.into();
838 | println!("{}", json_to_table(&serde_json::to_value(&basin_config)?));
839 | }
840 | Commands::ReconfigureBasin { basin, config } => {
841 | let cfg = config::load_config(&config_path)?;
842 | let client_config = client_config(cfg.access_token)?;
843 | let account_service = AccountService::new(Client::new(client_config));
844 | let mut mask = Vec::new();
845 | if let Some(config) = &config.default_stream_config {
846 | if config.storage_class.is_some() {
847 | mask.push("default_stream_config.storage_class".to_owned());
848 | }
849 | if config.retention_policy.is_some() {
850 | mask.push("default_stream_config.retention_policy".to_owned());
851 | }
852 | }
853 | if config.create_stream_on_append.is_some() {
854 | mask.push("create_stream_on_append".to_owned());
855 | }
856 | if config.create_stream_on_read.is_some() {
857 | mask.push("create_stream_on_read".to_owned());
858 | }
859 | let config: BasinConfig = account_service
860 | .reconfigure_basin(basin.into(), config.into(), mask)
861 | .await?
862 | .into();
863 | eprintln!("{}", "✓ Basin reconfigured".green().bold());
864 | println!("{}", json_to_table(&serde_json::to_value(&config)?));
865 | }
866 | Commands::ListStreams {
867 | uri,
868 | prefix,
869 | start_after,
870 | limit,
871 | no_auto_paginate,
872 | } => {
873 | let cfg = config::load_config(&config_path)?;
874 | let client_config = client_config(cfg.access_token)?;
875 | list_streams(
876 | client_config,
877 | uri,
878 | prefix,
879 | start_after,
880 | limit,
881 | no_auto_paginate,
882 | )
883 | .await?;
884 | }
885 | Commands::CreateStream { uri, config } => {
886 | let S2BasinAndStreamUri { basin, stream } = uri.uri;
887 | let cfg = config::load_config(&config_path)?;
888 | let client_config = client_config(cfg.access_token)?;
889 | let basin_client = BasinClient::new(client_config, basin);
890 | BasinService::new(basin_client)
891 | .create_stream(stream, config.into())
892 | .await?;
893 | eprintln!("{}", "✓ Stream created".green().bold());
894 | }
895 | Commands::DeleteStream { uri } => {
896 | let S2BasinAndStreamUri { basin, stream } = uri.uri;
897 | let cfg = config::load_config(&config_path)?;
898 | let client_config = client_config(cfg.access_token)?;
899 | let basin_client = BasinClient::new(client_config, basin);
900 | BasinService::new(basin_client)
901 | .delete_stream(stream)
902 | .await?;
903 | eprintln!("{}", "✓ Stream deletion requested".green().bold());
904 | }
905 | Commands::GetStreamConfig { uri } => {
906 | let S2BasinAndStreamUri { basin, stream } = uri.uri;
907 | let cfg = config::load_config(&config_path)?;
908 | let client_config = client_config(cfg.access_token)?;
909 | let basin_client = BasinClient::new(client_config, basin);
910 | let config: StreamConfig = BasinService::new(basin_client)
911 | .get_stream_config(stream)
912 | .await?
913 | .into();
914 | println!("{}", json_to_table(&serde_json::to_value(&config)?));
915 | }
916 | Commands::ReconfigureStream { uri, config } => {
917 | let S2BasinAndStreamUri { basin, stream } = uri.uri;
918 | let cfg = config::load_config(&config_path)?;
919 | let client_config = client_config(cfg.access_token)?;
920 | let basin_client = BasinClient::new(client_config, basin);
921 | let mut mask = Vec::new();
922 |
923 | if config.storage_class.is_some() {
924 | mask.push("storage_class".to_string());
925 | }
926 |
927 | if config.retention_policy.is_some() {
928 | mask.push("retention_policy".to_string());
929 | }
930 |
931 | let config: StreamConfig = BasinService::new(basin_client)
932 | .reconfigure_stream(stream, config.into(), mask)
933 | .await?
934 | .into();
935 |
936 | eprintln!("{}", "✓ Stream reconfigured".green().bold());
937 | println!("{}", json_to_table(&serde_json::to_value(&config)?));
938 | }
939 | Commands::CheckTail { uri } => {
940 | let S2BasinAndStreamUri { basin, stream } = uri.uri;
941 | let cfg = config::load_config(&config_path)?;
942 | let client_config = client_config(cfg.access_token)?;
943 | let stream_client = StreamClient::new(client_config, basin, stream);
944 | let tail = StreamService::new(stream_client).check_tail().await?;
945 | println!("{}\t{}", tail.seq_num, tail.timestamp);
946 | }
947 | Commands::Trim {
948 | uri,
949 | trim_point,
950 | fencing_token,
951 | match_seq_num,
952 | } => {
953 | let S2BasinAndStreamUri { basin, stream } = uri.uri;
954 | let cfg = config::load_config(&config_path)?;
955 | let client_config = client_config(cfg.access_token)?;
956 | let stream_client = StreamClient::new(client_config, basin, stream);
957 | let out = StreamService::new(stream_client)
958 | .append_command_record(
959 | CommandRecord::trim(trim_point),
960 | fencing_token,
961 | match_seq_num,
962 | )
963 | .await?;
964 | eprintln!(
965 | "{}",
966 | format!(
967 | "✓ Trim request for trim point {trim_point} appended at: {:?}",
968 | out.start
969 | )
970 | .green()
971 | .bold()
972 | );
973 | }
974 | Commands::Fence {
975 | uri,
976 | new_fencing_token,
977 | fencing_token,
978 | match_seq_num,
979 | } => {
980 | let S2BasinAndStreamUri { basin, stream } = uri.uri;
981 | let cfg = config::load_config(&config_path)?;
982 | let client_config = client_config(cfg.access_token)?;
983 | let stream_client = StreamClient::new(client_config, basin, stream);
984 | let out = StreamService::new(stream_client)
985 | .append_command_record(
986 | CommandRecord::fence(new_fencing_token),
987 | fencing_token,
988 | match_seq_num,
989 | )
990 | .await?;
991 | eprintln!(
992 | "{}",
993 | format!("✓ Fencing token appended at seq_num: {:?}", out.start)
994 | .green()
995 | .bold()
996 | );
997 | }
998 | Commands::Append {
999 | uri,
1000 | input,
1001 | fencing_token,
1002 | match_seq_num,
1003 | format,
1004 | linger,
1005 | } => {
1006 | let S2BasinAndStreamUri { basin, stream } = uri.uri;
1007 | let cfg = config::load_config(&config_path)?;
1008 | let client_config = client_config(cfg.access_token)?;
1009 | let stream_client = StreamClient::new(client_config, basin, stream);
1010 |
1011 | let records_in = input
1012 | .into_reader()
1013 | .await
1014 | .map_err(|e| S2CliError::RecordReaderInit(e.to_string()))?;
1015 |
1016 | let append_input_stream: Box<dyn Stream<Item = AppendRecord> + Send + Unpin> =
1017 | match format {
1018 | Format::BodyRaw => {
1019 | Box::new(RecordStream::<_, RawBodyFormatter>::new(records_in))
1020 | }
1021 | Format::JsonRaw => {
1022 | Box::new(RecordStream::<_, RawJsonFormatter>::new(records_in))
1023 | }
1024 | Format::JsonBase64 => {
1025 | Box::new(RecordStream::<_, Base64JsonFormatter>::new(records_in))
1026 | }
1027 | };
1028 |
1029 | let mut append_output_stream = StreamService::new(stream_client)
1030 | .append_session(
1031 | append_input_stream,
1032 | AppendRecordsBatchingOpts::new()
1033 | .with_fencing_token(fencing_token)
1034 | .with_match_seq_num(match_seq_num)
1035 | .with_linger(linger),
1036 | )
1037 | .await?;
1038 |
1039 | loop {
1040 | select! {
1041 | maybe_append_result = append_output_stream.next() => {
1042 | match maybe_append_result {
1043 | Some(append_result) => {
1044 | match append_result {
1045 | Ok(append_result) => {
1046 | eprintln!(
1047 | "{}",
1048 | format!(
1049 | "✓ [APPENDED] {}..{} // tail: {} @ {}",
1050 | append_result.start.seq_num,
1051 | append_result.end.seq_num,
1052 | append_result.tail.seq_num,
1053 | append_result.tail.timestamp
1054 | )
1055 | .green()
1056 | .bold()
1057 | );
1058 | },
1059 | Err(e) => {
1060 | return Err(ServiceError::new(ServiceErrorContext::AppendSession, e).into());
1061 | }
1062 | }
1063 | }
1064 | None => break,
1065 | }
1066 | }
1067 |
1068 | _ = signal::ctrl_c() => {
1069 | drop(append_output_stream);
1070 | eprintln!("{}", "■ [ABORTED]".red().bold());
1071 | break;
1072 | }
1073 | }
1074 | }
1075 | }
1076 | Commands::Tail {
1077 | uri,
1078 | lines,
1079 | follow,
1080 | output,
1081 | format,
1082 | } => {
1083 | let read_start = ReadStart::TailOffset(lines);
1084 | let read_limit = if follow {
1085 | ReadLimit::default()
1086 | } else {
1087 | ReadLimit::new().with_count(lines)
1088 | };
1089 | let S2BasinAndStreamUri { basin, stream } = uri.uri;
1090 | let cfg = config::load_config(&config_path)?;
1091 | let client_config = client_config(cfg.access_token)?;
1092 | let stream_client = StreamClient::new(client_config, basin, stream);
1093 | let mut read_outputs = StreamService::new(stream_client)
1094 | .read_session(read_start, read_limit)
1095 | .await?;
1096 | let mut writer = output.into_writer().await.unwrap();
1097 | while handle_read_outputs(&mut read_outputs, &mut writer, format).await? {}
1098 | }
1099 | Commands::Read {
1100 | uri,
1101 | seq_num,
1102 | timestamp,
1103 | tail_offset,
1104 | ago,
1105 | output,
1106 | count,
1107 | bytes,
1108 | format,
1109 | } => {
1110 | let S2BasinAndStreamUri { basin, stream } = uri.uri;
1111 | let cfg = config::load_config(&config_path)?;
1112 | let client_config = client_config(cfg.access_token)?;
1113 | let stream_client = StreamClient::new(client_config, basin, stream);
1114 | let read_start = match (seq_num, timestamp, tail_offset, ago) {
1115 | (Some(seq_num), None, None, None) => ReadStart::SeqNum(seq_num),
1116 | (None, Some(timestamp), None, None) => ReadStart::Timestamp(timestamp),
1117 | (None, None, Some(offset), None) => ReadStart::TailOffset(offset),
1118 | (None, None, None, Some(ago)) => {
1119 | let timestamp = SystemTime::now()
1120 | .duration_since(SystemTime::UNIX_EPOCH)
1121 | .unwrap()
1122 | .as_millis()
1123 | .saturating_sub(ago.as_millis()) as u64;
1124 | ReadStart::Timestamp(timestamp)
1125 | }
1126 | (None, None, None, None) => s2::types::ReadStart::TailOffset(0),
1127 | _ => unreachable!("clap ensures only one start option is provided"),
1128 | };
1129 | let read_limit = ReadLimit { count, bytes };
1130 | let mut read_outputs = StreamService::new(stream_client)
1131 | .read_session(read_start, read_limit)
1132 | .await?;
1133 | let mut writer = output.into_writer().await.unwrap();
1134 | while handle_read_outputs(&mut read_outputs, &mut writer, format).await? {}
1135 | }
1136 | Commands::Ping {
1137 | uri,
1138 | interval,
1139 | batch_bytes,
1140 | num_batches,
1141 | } => {
1142 | let S2BasinAndStreamUri { basin, stream } = uri.uri;
1143 | let cfg = config::load_config(&config_path)?;
1144 | let client_config = client_config(cfg.access_token)?;
1145 | let stream_client = StreamService::new(StreamClient::new(client_config, basin, stream));
1146 |
1147 | let interval = Duration::from(interval).max(Duration::from_millis(100));
1148 | let batch_bytes = batch_bytes.min(128 * 1024);
1149 |
1150 | let prepare_loader = ProgressBar::new_spinner()
1151 | .with_prefix("Preparing...")
1152 | .with_style(
1153 | ProgressStyle::default_spinner()
1154 | .template("{spinner} {prefix}")
1155 | .expect("valid template"),
1156 | );
1157 | prepare_loader.enable_steady_tick(Duration::from_millis(50));
1158 |
1159 | let mut pinger = Pinger::init(&stream_client).await?;
1160 |
1161 | prepare_loader.finish_and_clear();
1162 |
1163 | let mut pings = Vec::new();
1164 |
1165 | let stat_bars = MultiProgress::new();
1166 |
1167 | let bytes_bar = ProgressBar::no_length().with_prefix("bytes").with_style(
1168 | ProgressStyle::default_bar()
1169 | .template("{pos:.bold} {prefix:.bold}")
1170 | .expect("valid template"),
1171 | );
1172 |
1173 | let mut max_ack = 500;
1174 | let ack_bar = ProgressBar::new(max_ack).with_prefix("ack").with_style(
1175 | ProgressStyle::default_bar()
1176 | .template("{prefix:.bold} [{bar:40.blue/blue}] {pos:>4}/{len:<4} ms")
1177 | .expect("valid template"),
1178 | );
1179 |
1180 | let mut max_e2e = 500;
1181 | let e2e_bar = ProgressBar::new(max_e2e).with_prefix("e2e").with_style(
1182 | ProgressStyle::default_bar()
1183 | .template("{prefix:.bold} [{bar:40.red/red}] {pos:>4}/{len:<4} ms")
1184 | .expect("valid template"),
1185 | );
1186 |
1187 | // HACK: This bar has no real purpose. It only clears the other bars,
1188 | // since the very first bar in the set doesn't clear when a `^C`
1189 | // signal is received.
1190 | let empty_line_bar = {
1191 | let bar = stat_bars.add(
1192 | ProgressBar::no_length().with_style(
1193 | ProgressStyle::default_bar()
1194 | .template("\n")
1195 | .expect("valid template"),
1196 | ),
1197 | );
1198 | // Force render the bar.
1199 | bar.inc(1);
1200 | bar
1201 | };
1202 | let bytes_bar = stat_bars.add(bytes_bar);
1203 | let ack_bar = stat_bars.add(ack_bar);
1204 | let e2e_bar = stat_bars.add(e2e_bar);
1205 |
1206 | async fn ping_next(
1207 | pinger: &mut Pinger,
1208 | pings: &mut Vec<PingResult>,
1209 | interval: Duration,
1210 | batch_bytes: u64,
1211 | bytes_bar: &ProgressBar,
1212 | ack_meter: (&ProgressBar, /* max_ack */ &mut u64),
1213 | e2e_meter: (&ProgressBar, /* max_e2e */ &mut u64),
1214 | ) -> Result<(), S2CliError> {
1215 | let jitter_op = if rand::random() {
1216 | u64::saturating_add
1217 | } else {
1218 | u64::saturating_sub
1219 | };
1220 |
1221 | let max_jitter = batch_bytes / 4;
1222 |
1223 | let record_bytes = jitter_op(batch_bytes, rand::rng().random_range(0..=max_jitter));
1224 |
1225 | let Some(res) = pinger.ping(record_bytes).await? else {
1226 | return Ok(());
1227 | };
1228 |
1229 | bytes_bar.set_position(record_bytes);
1230 |
1231 | let (ack_bar, max_ack) = ack_meter;
1232 |
1233 | let ack = res.ack.as_millis() as u64;
1234 | *max_ack = std::cmp::max(*max_ack, ack);
1235 | ack_bar.set_length(*max_ack);
1236 | ack_bar.set_position(ack);
1237 |
1238 | let (e2e_bar, max_e2e) = e2e_meter;
1239 |
1240 | let e2e = res.e2e.as_millis() as u64;
1241 | *max_e2e = std::cmp::max(*max_e2e, e2e);
1242 | e2e_bar.set_length(*max_e2e);
1243 | e2e_bar.set_position(e2e);
1244 |
1245 | pings.push(res);
1246 |
1247 | tokio::time::sleep(interval).await;
1248 | Ok(())
1249 | }
1250 |
1251 | while Some(pings.len()) != num_batches {
1252 | select! {
1253 | res = ping_next(
1254 | &mut pinger,
1255 | &mut pings,
1256 | interval,
1257 | batch_bytes,
1258 | &bytes_bar,
1259 | (&ack_bar, &mut max_ack),
1260 | (&e2e_bar, &mut max_e2e),
1261 | ) => res?,
1262 | _ = signal::ctrl_c() => break,
1263 | }
1264 | }
1265 |
1266 | // Close the pinger.
1267 | std::mem::drop(pinger);
1268 |
1269 | bytes_bar.finish_and_clear();
1270 | ack_bar.finish_and_clear();
1271 | e2e_bar.finish_and_clear();
1272 | empty_line_bar.finish_and_clear();
1273 |
1274 | let total_batches = pings.len();
1275 | let (bytes, (acks, e2es)): (Vec<_>, (Vec<_>, Vec<_>)) = pings
1276 | .into_iter()
1277 | .map(|PingResult { bytes, ack, e2e }| (bytes, (ack, e2e)))
1278 | .unzip();
1279 | let total_bytes = bytes.into_iter().sum::<u64>();
1280 |
1281 | eprintln!("Round-tripped {total_bytes} bytes in {total_batches} batches");
1282 |
1283 | pub fn print_stats(stats: LatencyStats, name: &str) {
1284 | eprintln!("{}", format!("{name} Latency Statistics ").yellow().bold());
1285 |
1286 | fn stat_duration(key: &str, val: Duration, scale: f64) {
1287 | let bar = "⠸".repeat((val.as_millis() as f64 * scale).round() as usize);
1288 | eprintln!(
1289 | "{:7}: {:>7} │ {}",
1290 | key,
1291 | format!("{} ms", val.as_millis()).green().bold(),
1292 | bar
1293 | );
1294 | }
1295 |
1296 | let stats = stats.into_vec();
1297 | let max_val = stats
1298 | .iter()
1299 | .map(|(_, val)| val)
1300 | .max()
1301 | .unwrap_or(&Duration::ZERO);
1302 |
1303 | let max_bar_len = 50;
1304 | let scale = if max_val.as_millis() > max_bar_len {
1305 | max_bar_len as f64 / max_val.as_millis() as f64
1306 | } else {
1307 | 1.0
1308 | };
1309 |
1310 | for (name, val) in stats {
1311 | stat_duration(&name, val, scale);
1312 | }
1313 | }
1314 |
1315 | eprintln!(/* Empty line */);
1316 | print_stats(LatencyStats::generate(acks), "Append Acknowledgement");
1317 | eprintln!(/* Empty line */);
1318 | print_stats(LatencyStats::generate(e2es), "End-to-End");
1319 | }
1320 | Commands::IssueAccessToken {
1321 | id,
1322 | expires_at,
1323 | auto_prefix_streams,
1324 | basins,
1325 | streams,
1326 | access_tokens,
1327 | op_groups,
1328 | ops,
1329 | } => {
1330 | let cfg = config::load_config(&config_path)?;
1331 | let client_config = client_config(cfg.access_token)?;
1332 | let account_service = AccountService::new(Client::new(client_config));
1333 | let access_token = account_service
1334 | .issue_access_token(
1335 | id,
1336 | expires_at,
1337 | auto_prefix_streams,
1338 | basins.map(Into::into),
1339 | streams.map(Into::into),
1340 | access_tokens.map(Into::into),
1341 | op_groups.map(Into::into),
1342 | ops.into_iter().map(Into::into).collect(),
1343 | )
1344 | .await?;
1345 | println!("{access_token}");
1346 | }
1347 | Commands::RevokeAccessToken { id } => {
1348 | let cfg = config::load_config(&config_path)?;
1349 | let client_config = client_config(cfg.access_token)?;
1350 | let account_service = AccountService::new(Client::new(client_config));
1351 | let info = account_service.revoke_access_token(id).await?;
1352 | let info: AccessTokenInfo = info.into();
1353 | eprintln!("{}", "✓ Access token revoked".green().bold());
1354 | println!("{}", json_to_table(&serde_json::to_value(&info)?));
1355 | }
1356 | Commands::ListAccessTokens {
1357 | prefix,
1358 | start_after,
1359 | limit,
1360 | no_auto_paginate,
1361 | } => {
1362 | let cfg = config::load_config(&config_path)?;
1363 | let client_config = client_config(cfg.access_token)?;
1364 | list_tokens(client_config, prefix, start_after, limit, no_auto_paginate).await?;
1365 | }
1366 | }
1367 |
1368 | Ok(())
1369 | }
1370 |
1371 | async fn handle_read_outputs(
1372 | read_outputs: &mut Streaming<ReadOutput>,
1373 | writer: &mut Box<dyn AsyncWrite + Send + Unpin>,
1374 | format: Format,
1375 | ) -> Result<bool, S2CliError> {
1376 | select! {
1377 | maybe_read_result = read_outputs.next() => {
1378 | match maybe_read_result {
1379 | Some(read_result) => {
1380 | match read_result {
1381 | Ok(ReadOutput::Batch(sequenced_record_batch)) => {
1382 | let num_records = sequenced_record_batch.records.len();
1383 | let mut batch_len = 0;
1384 |
1385 | let seq_range = match (
1386 | sequenced_record_batch.records.first(),
1387 | sequenced_record_batch.records.last(),
1388 | ) {
1389 | (Some(first), Some(last)) => first.seq_num..=last.seq_num,
1390 | _ => panic!("empty batch"),
1391 | };
1392 | for sequenced_record in sequenced_record_batch.records {
1393 | batch_len += sequenced_record.metered_bytes();
1394 | match format {
1395 | Format::BodyRaw => {
1396 | if let Some(command_record) = sequenced_record.as_command_record() {
1397 | let (cmd, description) = match command_record.command {
1398 | Command::Fence { fencing_token } => (
1399 | "fence",
1400 | format!("FencingToken({fencing_token})")
1401 | ),
1402 | Command::Trim { seq_num } => (
1403 | "trim",
1404 | format!("TrimPoint({seq_num})"),
1405 | ),
1406 | };
1407 | eprintln!("{} with {} // {} @ {}",
1408 | cmd.bold(),
1409 | description.green().bold(),
1410 | sequenced_record.seq_num,
1411 | sequenced_record.timestamp
1412 | );
1413 | Ok(())
1414 | } else {
1415 | RawBodyFormatter::write_record(
1416 | &sequenced_record,
1417 | writer,
1418 | ).await
1419 | }
1420 | },
1421 | Format::JsonRaw => {
1422 | RawJsonFormatter::write_record(
1423 | &sequenced_record,
1424 | writer,
1425 | ).await
1426 | },
1427 | Format::JsonBase64 => {
1428 | Base64JsonFormatter::write_record(
1429 | &sequenced_record,
1430 | writer,
1431 | ).await
1432 | },
1433 | }
1434 | .map_err(|e| S2CliError::RecordWrite(e.to_string()))?;
1435 | writer
1436 | .write_all(b"\n")
1437 | .await
1438 | .map_err(|e| S2CliError::RecordWrite(e.to_string()))?;
1439 | }
1440 |
1441 | eprintln!(
1442 | "{}",
1443 | format!("⦿ {batch_len} bytes ({num_records} records in range {seq_range:?})")
1444 | .blue()
1445 | .bold()
1446 | );
1447 | }
1448 |
1449 | Ok(ReadOutput::NextSeqNum(seq_num)) => {
1450 | eprintln!("{}", format!("next_seq_num: {seq_num}").blue().bold());
1451 | return Ok(false);
1452 | }
1453 |
1454 | Err(e) => {
1455 | return Err(ServiceError::new(ServiceErrorContext::ReadSession, e).into());
1456 | }
1457 | }
1458 | }
1459 | None => return Ok(false),
1460 | }
1461 | },
1462 | _ = signal::ctrl_c() => {
1463 | eprintln!("{}", "■ [ABORTED]".red().bold());
1464 | return Ok(false);
1465 | }
1466 | }
1467 | writer
1468 | .flush()
1469 | .await
1470 | .map_err(|e| S2CliError::RecordWrite(e.to_string()))?;
1471 | Ok(true)
1472 | }
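// Usage sketch (hypothetical caller): Ok(true) means "poll again", Ok(false)
// means the session ended (next_seq_num reported, server closed the stream,
// or Ctrl-C), so a read loop reduces to:
//
// while handle_read_outputs(&mut read_outputs, &mut writer, format).await? {}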
1473 |
--------------------------------------------------------------------------------
/src/ping.rs:
--------------------------------------------------------------------------------
1 | use std::time::Duration;
2 |
3 | use rand::Rng;
4 | use s2::{
5 | batching::AppendRecordsBatchingOpts,
6 | types::{
7 | AppendAck, AppendRecord, ReadLimit, ReadOutput, ReadStart, SequencedRecord,
8 | SequencedRecordBatch,
9 | },
10 | };
11 | use tokio::{join, select, signal, sync::mpsc, task::JoinHandle, time::Instant};
12 | use tokio_stream::StreamExt;
13 |
14 | use crate::{
15 | error::{S2CliError, ServiceError, ServiceErrorContext},
16 | stream::StreamService,
17 | };
18 |
19 | pub struct PingResult {
20 | pub bytes: u64,
21 | pub ack: Duration,
22 | pub e2e: Duration,
23 | }
24 |
25 | pub struct Pinger {
26 | records_tx: mpsc::UnboundedSender<AppendRecord>,
27 | appends_handle: JoinHandle<()>,
28 | reads_handle: JoinHandle<()>,
29 | appends_rx: mpsc::UnboundedReceiver<Result<Instant, S2CliError>>,
30 | reads_rx: mpsc::UnboundedReceiver<Result<(Instant, SequencedRecord), S2CliError>>,
31 | }
32 |
33 | impl Pinger {
34 | pub async fn init(stream_client: &StreamService) -> Result<Self, S2CliError> {
35 | let tail = stream_client.check_tail().await?;
36 |
37 | let mut read_stream = stream_client
38 | .read_session(ReadStart::SeqNum(tail.seq_num), ReadLimit::default())
39 | .await?;
40 |
41 | let (records_tx, records_rx) = mpsc::unbounded_channel();
42 | let mut append_stream = stream_client
43 | .append_session(
44 | tokio_stream::wrappers::UnboundedReceiverStream::new(records_rx),
45 | AppendRecordsBatchingOpts::new()
46 | .with_max_batch_records(1)
47 | .with_match_seq_num(Some(tail.seq_num)),
48 | )
49 | .await?;
50 |
51 | let warmup_record = AppendRecord::new("warmup").expect("valid record");
52 | records_tx
53 | .send(warmup_record.clone())
54 | .expect("stream channel open");
55 |
56 | match append_stream.next().await.expect("warmup batch ack") {
57 | Ok(AppendAck { start, .. }) if start.seq_num == tail.seq_num => (),
58 | Ok(_) => return Err(S2CliError::PingStreamMutated),
59 | Err(e) => return Err(ServiceError::new(ServiceErrorContext::AppendSession, e).into()),
60 | };
61 |
62 | match read_stream.next().await.expect("warmup batch e2e") {
63 | Ok(ReadOutput::Batch(SequencedRecordBatch { records }))
64 | if records.len() == 1
65 | && records[0].headers.is_empty()
66 | && records[0].body.as_ref() == warmup_record.body() => {}
67 | Ok(_) => return Err(S2CliError::PingStreamMutated),
68 | Err(e) => return Err(ServiceError::new(ServiceErrorContext::ReadSession, e).into()),
69 | };
70 |
71 | let (reads_tx, reads_rx) = mpsc::unbounded_channel();
72 | let reads_handle = tokio::spawn(async move {
73 | loop {
74 | select! {
75 | next = read_stream.next() => match next {
76 | Some(Err(e)) => {
77 | reads_tx.send(Err(
78 | ServiceError::new(ServiceErrorContext::ReadSession, e).into()
79 | )).expect("open reads channel");
80 | return;
81 | }
82 | Some(Ok(output)) => {
83 | if let ReadOutput::Batch(SequencedRecordBatch { mut records }) = output {
84 | let read = Instant::now();
85 | if records.len() != 1 {
86 | reads_tx.send(Err(
87 | S2CliError::PingStreamMutated
88 | )).expect("reads channel open");
89 | return;
90 | }
91 | let record = records.pop().expect("pre validated length");
92 | reads_tx.send(Ok((read, record))).expect("reads channel open");
93 | } else {
94 | reads_tx.send(Err(
95 | S2CliError::PingStreamMutated
96 | )).expect("reads channel open");
97 | return;
98 | }
99 | }
100 | None => break,
101 | },
102 | _ = signal::ctrl_c() => break,
103 | };
104 | }
105 | });
106 |
107 | let (appends_tx, appends_rx) = mpsc::unbounded_channel();
108 | let appends_handle = tokio::spawn(async move {
109 | while let Some(next) = append_stream.next().await {
110 | match next {
111 | Ok(AppendAck { start, end, .. }) => {
112 | let append = Instant::now();
113 | let records = end.seq_num - start.seq_num;
114 | if records != 1 {
115 | appends_tx
116 | .send(Err(S2CliError::PingStreamMutated))
117 | .expect("appends channel open");
118 | return;
119 | }
120 | appends_tx.send(Ok(append)).expect("appends channel open");
121 | }
122 | Err(e) => {
123 | appends_tx
124 | .send(Err(S2CliError::from(ServiceError::new(
125 | ServiceErrorContext::AppendSession,
126 | e,
127 | ))))
128 | .expect("appends channel open");
129 | }
130 | }
131 | }
132 | });
133 |
134 | Ok(Self {
135 | records_tx,
136 | appends_handle,
137 | reads_handle,
138 | appends_rx,
139 | reads_rx,
140 | })
141 | }
142 |
143 | pub async fn ping(&mut self, bytes: u64) -> Result<Option<PingResult>, S2CliError> {
144 | let body = rand::rng()
145 | .sample_iter(
146 | rand::distr::Uniform::new_inclusive(0, u8::MAX).expect("valid distribution"),
147 | )
148 | .take(bytes as usize)
149 | .collect::<Vec<u8>>();
150 |
151 | let record = AppendRecord::new(body.clone()).expect("pre validated append record bytes");
152 |
153 | self.records_tx.send(record).expect("stream channel open");
154 |
155 | let send = Instant::now();
156 |
157 | let (append, read, record) = match join!(self.appends_rx.recv(), self.reads_rx.recv()) {
158 | (None, _) | (_, None) => return Ok(None),
159 | (Some(Err(e)), _) | (_, Some(Err(e))) => return Err(e),
160 | (Some(Ok(append)), Some(Ok((read, record)))) => (append, read, record),
161 | };
162 |
163 | // Validate the received record
164 | if body != record.body || !record.headers.is_empty() {
165 | return Err(S2CliError::PingStreamMutated);
166 | }
167 |
168 | Ok(Some(PingResult {
169 | bytes,
170 | ack: append - send,
171 | e2e: read - send,
172 | }))
173 | }
174 | }
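// Driver sketch (hypothetical; `stream_service`, the payload size, and the
// stop condition are assumptions — the real loop lives in main.rs):
//
// let mut pinger = Pinger::init(&stream_service).await?;
// let (mut acks, mut e2es) = (Vec::new(), Vec::new());
// while let Some(PingResult { ack, e2e, .. }) = pinger.ping(1024).await? {
//     acks.push(ack);
//     e2es.push(e2e);
// }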
175 |
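// Abort the background read/append tasks on drop so a finished or
// interrupted ping run does not leak them.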
176 | impl Drop for Pinger {
177 | fn drop(&mut self) {
178 | self.appends_handle.abort();
179 | self.reads_handle.abort();
180 | }
181 | }
182 |
183 | pub struct LatencyStats {
184 | pub min: Duration,
185 | pub median: Duration,
186 | pub p90: Duration,
187 | pub p99: Duration,
188 | pub max: Duration,
189 | }
190 |
191 | impl LatencyStats {
192 | pub fn generate(mut data: Vec<Duration>) -> Self {
193 | data.sort_unstable();
194 |
195 | let n = data.len();
196 |
197 | if n == 0 {
198 | return Self {
199 | min: Duration::ZERO,
200 | median: Duration::ZERO,
201 | p90: Duration::ZERO,
202 | p99: Duration::ZERO,
203 | max: Duration::ZERO,
204 | };
205 | }
206 |
207 | let median = if n % 2 == 0 {
208 | (data[n / 2 - 1] + data[n / 2]) / 2
209 | } else {
210 | data[n / 2]
211 | };
212 |
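// Nearest-rank percentile: with n sorted samples, the p-quantile is the
// ceil(n * p)-th smallest value (1-based). E.g. n = 10, p = 0.90 gives
// ceil(9.0) - 1 = index 8, the 9th smallest sample.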
213 | let p_idx = |p: f64| ((n as f64) * p).ceil() as usize - 1;
214 |
215 | Self {
216 | min: data[0],
217 | median,
218 | p90: data[p_idx(0.90)],
219 | p99: data[p_idx(0.99)],
220 | max: data[n - 1],
221 | }
222 | }
223 |
224 | pub fn into_vec(self) -> Vec<(String, Duration)> {
225 | vec![
226 | ("min".to_owned(), self.min),
227 | ("median".to_owned(), self.median),
228 | ("p90".to_owned(), self.p90),
229 | ("p99".to_owned(), self.p99),
230 | ("max".to_owned(), self.max),
231 | ]
232 | }
233 | }
234 |
--------------------------------------------------------------------------------
/src/stream.rs:
--------------------------------------------------------------------------------
1 | use colored::Colorize;
2 | use s2::{
3 | Streaming,
4 | batching::{AppendRecordsBatchingOpts, AppendRecordsBatchingStream},
5 | client::StreamClient,
6 | types::{
7 | AppendAck, AppendInput, AppendRecordBatch, Command, CommandRecord, FencingToken, ReadLimit,
8 | ReadOutput, ReadSessionRequest, ReadStart, StreamPosition,
9 | },
10 | };
11 |
12 | use futures::{Stream, StreamExt};
13 | use s2::types::AppendRecord;
14 | use std::pin::Pin;
15 | use std::task::{Context, Poll};
16 |
17 | use crate::{
18 | error::{ServiceError, ServiceErrorContext},
19 | formats::RecordParser,
20 | };
21 |
22 | #[derive(Debug)]
23 | pub struct RecordStream<S, P>(P::RecordStream)
24 | where
25 | S: Stream<Item = std::io::Result<String>> + Send + Unpin,
26 | P: RecordParser<S>;
27 |
28 | impl<S, P> RecordStream<S, P>
29 | where
30 | S: Stream<Item = std::io::Result<String>> + Send + Unpin,
31 | P: RecordParser<S>,
32 | {
33 | pub fn new(s: S) -> Self {
34 | Self(P::parse_records(s))
35 | }
36 | }
37 |
38 | impl<S, P> Stream for RecordStream<S, P>
39 | where
40 | S: Stream<Item = std::io::Result<String>> + Send + Unpin,
41 | P: RecordParser<S>,
42 | {
43 | type Item = AppendRecord;
44 |
45 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
46 | match self.0.poll_next_unpin(cx) {
47 | Poll::Ready(Some(Ok(record))) => Poll::Ready(Some(record)),
48 | Poll::Ready(Some(Err(e))) => {
49 | eprintln!("{}", e.to_string().red());
50 | Poll::Ready(None)
51 | }
52 | Poll::Ready(None) => Poll::Ready(None),
53 | Poll::Pending => Poll::Pending,
54 | }
55 | }
56 | }
57 |
58 | pub struct StreamService {
59 | client: StreamClient,
60 | }
61 |
62 | impl StreamService {
63 | pub fn new(client: StreamClient) -> Self {
64 | Self { client }
65 | }
66 |
67 | pub async fn check_tail(&self) -> Result<StreamPosition, ServiceError> {
68 | self.client
69 | .check_tail()
70 | .await
71 | .map_err(|e| ServiceError::new(ServiceErrorContext::CheckTail, e))
72 | }
73 |
74 | pub async fn append_command_record(
75 | &self,
76 | cmd: CommandRecord,
77 | fencing_token: Option<FencingToken>,
78 | match_seq_num: Option<u64>,
79 | ) -> Result<AppendAck, ServiceError> {
80 | let context = match &cmd.command {
81 | Command::Fence { .. } => ServiceErrorContext::Fence,
82 | Command::Trim { .. } => ServiceErrorContext::Trim,
83 | };
84 | let records = AppendRecordBatch::try_from_iter([cmd]).expect("single valid append record");
85 | let append_input = AppendInput {
86 | records,
87 | fencing_token,
88 | match_seq_num,
89 | };
90 | self.client
91 | .append(append_input)
92 | .await
93 | .map_err(|e| ServiceError::new(context, e))
94 | }
95 |
96 | pub async fn append_session(
97 | &self,
98 | stream: impl 'static + Send + Stream<Item = AppendRecord> + Unpin,
99 | opts: AppendRecordsBatchingOpts,
100 | ) -> Result<Streaming<AppendAck>, ServiceError> {
101 | let append_record_stream = AppendRecordsBatchingStream::new(stream, opts);
102 |
103 | self.client
104 | .append_session(append_record_stream)
105 | .await
106 | .map_err(|e| ServiceError::new(ServiceErrorContext::AppendSession, e))
107 | }
108 |
109 | pub async fn read_session(
110 | &self,
111 | start: ReadStart,
112 | limit: ReadLimit,
113 | ) -> Result<Streaming<ReadOutput>, ServiceError> {
114 | self.client
115 | .read_session(ReadSessionRequest { start, limit })
116 | .await
117 | .map_err(|e| ServiceError::new(ServiceErrorContext::ReadSession, e))
118 | }
119 | }
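// Wiring sketch (assumes an already-configured s2::client::StreamClient named
// `stream_client`; not part of this module):
//
// let svc = StreamService::new(stream_client);
// let tail = svc.check_tail().await?;
// let mut outputs = svc
//     .read_session(ReadStart::SeqNum(0), ReadLimit::default())
//     .await?;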
120 |
--------------------------------------------------------------------------------
/src/types.rs:
--------------------------------------------------------------------------------
1 | //! Types for Basin configuration that directly map to s2::types.
2 |
3 | use clap::{Parser, ValueEnum};
4 | use s2::types::BasinName;
5 | use serde::Serialize;
6 | use std::{str::FromStr, time::Duration};
7 | use thiserror::Error;
8 |
9 | use crate::error::S2UriParseError;
10 |
11 | #[derive(Debug, Clone)]
12 | struct S2Uri {
13 | basin: BasinName,
14 | stream: Option<String>,
15 | }
16 |
17 | #[cfg(test)]
18 | impl PartialEq for S2Uri {
19 | fn eq(&self, other: &Self) -> bool {
20 | *self.basin == *other.basin && self.stream == other.stream
21 | }
22 | }
23 |
24 | impl FromStr for S2Uri {
25 | type Err = S2UriParseError;
26 |
27 | fn from_str(s: &str) -> Result<Self, Self::Err> {
28 | let (scheme, s) = s
29 | .split_once("://")
30 | .ok_or(S2UriParseError::MissingUriScheme)?;
31 | if scheme != "s2" {
32 | return Err(S2UriParseError::InvalidUriScheme(scheme.to_owned()));
33 | }
34 |
35 | let (basin, stream) = if let Some((basin, stream)) = s.split_once("/") {
36 | let stream = if stream.is_empty() {
37 | None
38 | } else {
39 | Some(stream.to_owned())
40 | };
41 | (basin, stream)
42 | } else {
43 | (s, None)
44 | };
45 |
46 | Ok(S2Uri {
47 | basin: basin.parse().map_err(S2UriParseError::InvalidBasinName)?,
48 | stream,
49 | })
50 | }
51 | }
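// Accepted shapes, mirroring the tests at the bottom of this file:
//   "s2://my-basin"          -> basin "my-basin", stream None
//   "s2://my-basin/"         -> basin "my-basin", stream None
//   "s2://my-basin/logs/app" -> basin "my-basin", stream Some("logs/app")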
52 |
53 | #[derive(Debug, Clone)]
54 | pub struct S2BasinUri(pub BasinName);
55 |
56 | impl From<S2BasinUri> for BasinName {
57 | fn from(value: S2BasinUri) -> Self {
58 | value.0
59 | }
60 | }
61 |
62 | #[cfg(test)]
63 | impl PartialEq for S2BasinUri {
64 | fn eq(&self, other: &Self) -> bool {
65 | *self.0 == *other.0
66 | }
67 | }
68 |
69 | impl FromStr for S2BasinUri {
70 | type Err = S2UriParseError;
71 |
72 | fn from_str(s: &str) -> Result<Self, Self::Err> {
73 | match S2Uri::from_str(s) {
74 | Ok(S2Uri {
75 | basin,
76 | stream: None,
77 | }) => Ok(Self(
78 | basin.parse().map_err(S2UriParseError::InvalidBasinName)?,
79 | )),
80 | Ok(S2Uri {
81 | basin: _,
82 | stream: Some(_),
83 | }) => Err(S2UriParseError::UnexpectedStreamName),
84 | Err(S2UriParseError::MissingUriScheme) => {
85 | Ok(Self(s.parse().map_err(S2UriParseError::InvalidBasinName)?))
86 | }
87 | Err(other) => Err(other),
88 | }
89 | }
90 | }
91 |
92 | #[derive(Debug, Clone)]
93 | pub struct S2BasinAndMaybeStreamUri {
94 | pub basin: BasinName,
95 | pub stream: Option<String>,
96 | }
97 |
98 | #[cfg(test)]
99 | impl PartialEq for S2BasinAndMaybeStreamUri {
100 | fn eq(&self, other: &Self) -> bool {
101 | *self.basin == *other.basin && self.stream == other.stream
102 | }
103 | }
104 |
105 | impl FromStr for S2BasinAndMaybeStreamUri {
106 | type Err = S2UriParseError;
107 |
108 | fn from_str(s: &str) -> Result<Self, Self::Err> {
109 | match S2Uri::from_str(s) {
110 | Ok(S2Uri { basin, stream }) => Ok(Self { basin, stream }),
111 | Err(S2UriParseError::MissingUriScheme) => Ok(Self {
112 | basin: s.parse().map_err(S2UriParseError::InvalidBasinName)?,
113 | stream: None,
114 | }),
115 | Err(other) => Err(other),
116 | }
117 | }
118 | }
119 |
120 | /// String Format: s2://{basin}/{stream}
121 | #[derive(Debug, Clone)]
122 | pub struct S2BasinAndStreamUri {
123 | pub basin: BasinName,
124 | pub stream: String,
125 | }
126 |
127 | #[cfg(test)]
128 | impl PartialEq for S2BasinAndStreamUri {
129 | fn eq(&self, other: &Self) -> bool {
130 | *self.basin == *other.basin && self.stream == other.stream
131 | }
132 | }
133 |
134 | impl FromStr for S2BasinAndStreamUri {
135 | type Err = S2UriParseError;
136 |
137 | fn from_str(s: &str) -> Result<Self, Self::Err> {
138 | let S2Uri { basin, stream } = s.parse()?;
139 | let stream = stream.ok_or(S2UriParseError::MissingStreamName)?;
140 | Ok(Self { basin, stream })
141 | }
142 | }
143 |
144 | #[derive(Parser, Debug, Clone, Serialize)]
145 | pub struct BasinConfig {
146 | #[clap(flatten)]
147 | pub default_stream_config: Option<StreamConfig>,
148 | /// Create stream on append with basin defaults if it doesn't exist.
149 | #[arg(long)]
150 | pub create_stream_on_append: Option<bool>,
151 | /// Create stream on read with basin defaults if it doesn't exist.
152 | #[arg(long)]
153 | pub create_stream_on_read: Option<bool>,
154 | }
155 |
156 | #[derive(Parser, Debug, Clone, Serialize)]
157 | pub struct StreamConfig {
158 | #[arg(long)]
159 | /// Storage class for a stream.
160 | pub storage_class: Option<StorageClass>,
161 | #[arg(long, help("Example: 1d, 1w, 1y"))]
162 | /// Retention policy for a stream.
163 | pub retention_policy: Option<RetentionPolicy>,
164 | #[clap(flatten)]
165 | /// Timestamping configuration.
166 | pub timestamping: Option<TimestampingConfig>,
167 | }
168 |
169 | #[derive(ValueEnum, Debug, Clone, Serialize)]
170 | #[serde(rename_all = "kebab-case")]
171 | pub enum StorageClass {
172 | Standard,
173 | Express,
174 | }
175 |
176 | #[derive(ValueEnum, Debug, Clone, Serialize)]
177 | #[serde(rename_all = "kebab-case")]
178 | pub enum TimestampingMode {
179 | ClientPrefer,
180 | ClientRequire,
181 | Arrival,
182 | }
183 |
184 | #[derive(Parser, Debug, Clone, Serialize)]
185 | pub struct TimestampingConfig {
186 | #[arg(long)]
187 | /// Timestamping mode.
188 | pub timestamping_mode: Option<TimestampingMode>,
189 |
190 | #[arg(long)]
191 | /// Uncapped timestamps.
192 | pub timestamping_uncapped: Option<bool>,
193 | }
194 |
195 | #[derive(Clone, Debug, Serialize)]
196 | pub enum RetentionPolicy {
197 | #[allow(dead_code)]
198 | Age(Duration),
199 | }
200 |
201 | impl From<&str> for RetentionPolicy {
202 | fn from(s: &str) -> Self {
203 | match humantime::parse_duration(s) {
204 | Ok(d) => RetentionPolicy::Age(d),
205 | Err(_) => RetentionPolicy::Age(Duration::from_secs(0)),
206 | }
207 | }
208 | }
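// Note: humantime accepts the forms shown in the CLI help ("1d", "1w", "1y");
// a value it cannot parse currently falls back silently to a zero-second
// retention age instead of surfacing an error.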
209 |
210 | impl From<BasinConfig> for s2::types::BasinConfig {
211 | fn from(config: BasinConfig) -> Self {
212 | let BasinConfig {
213 | default_stream_config,
214 | create_stream_on_append,
215 | create_stream_on_read,
216 | } = config;
217 | s2::types::BasinConfig {
218 | default_stream_config: default_stream_config.map(Into::into),
219 | create_stream_on_append: create_stream_on_append.unwrap_or_default(),
220 | create_stream_on_read: create_stream_on_read.unwrap_or_default(),
221 | }
222 | }
223 | }
224 |
225 | impl From<StreamConfig> for s2::types::StreamConfig {
226 | fn from(config: StreamConfig) -> Self {
227 | let storage_class = config.storage_class.map(s2::types::StorageClass::from);
228 |
229 | let retention_policy = config
230 | .retention_policy
231 | .map(s2::types::RetentionPolicy::from);
232 |
233 | let timestamping_config = config.timestamping.map(s2::types::TimestampingConfig::from);
234 |
235 | let mut stream_config = s2::types::StreamConfig::new();
236 | if let Some(storage_class) = storage_class {
237 | stream_config = stream_config.with_storage_class(storage_class);
238 | }
239 | if let Some(retention_policy) = retention_policy {
240 | stream_config = stream_config.with_retention_policy(retention_policy);
241 | }
242 | if let Some(timestamping) = timestamping_config {
243 | stream_config = stream_config.with_timestamping(timestamping);
244 | }
245 | stream_config
246 | }
247 | }
248 |
249 | impl From<StorageClass> for s2::types::StorageClass {
250 | fn from(class: StorageClass) -> Self {
251 | match class {
252 | StorageClass::Standard => s2::types::StorageClass::Standard,
253 | StorageClass::Express => s2::types::StorageClass::Express,
254 | }
255 | }
256 | }
257 |
258 | impl From<s2::types::StorageClass> for StorageClass {
259 | fn from(class: s2::types::StorageClass) -> Self {
260 | match class {
261 | s2::types::StorageClass::Standard => StorageClass::Standard,
262 | s2::types::StorageClass::Express => StorageClass::Express,
263 | }
264 | }
265 | }
266 |
267 | impl From<TimestampingMode> for s2::types::TimestampingMode {
268 | fn from(mode: TimestampingMode) -> Self {
269 | match mode {
270 | TimestampingMode::ClientPrefer => s2::types::TimestampingMode::ClientPrefer,
271 | TimestampingMode::ClientRequire => s2::types::TimestampingMode::ClientRequire,
272 | TimestampingMode::Arrival => s2::types::TimestampingMode::Arrival,
273 | }
274 | }
275 | }
276 |
277 | impl From<s2::types::TimestampingMode> for TimestampingMode {
278 | fn from(mode: s2::types::TimestampingMode) -> Self {
279 | match mode {
280 | s2::types::TimestampingMode::ClientPrefer => TimestampingMode::ClientPrefer,
281 | s2::types::TimestampingMode::ClientRequire => TimestampingMode::ClientRequire,
282 | s2::types::TimestampingMode::Arrival => TimestampingMode::Arrival,
283 | }
284 | }
285 | }
286 |
287 | impl From<TimestampingConfig> for s2::types::TimestampingConfig {
288 | fn from(config: TimestampingConfig) -> Self {
289 | s2::types::TimestampingConfig {
290 | mode: config.timestamping_mode.map(Into::into),
291 | uncapped: config.timestamping_uncapped,
292 | }
293 | }
294 | }
295 |
296 | impl From<s2::types::TimestampingConfig> for TimestampingConfig {
297 | fn from(config: s2::types::TimestampingConfig) -> Self {
298 | TimestampingConfig {
299 | timestamping_mode: config.mode.map(Into::into),
300 | timestamping_uncapped: config.uncapped,
301 | }
302 | }
303 | }
304 |
305 | impl From<RetentionPolicy> for s2::types::RetentionPolicy {
306 | fn from(policy: RetentionPolicy) -> Self {
307 | match policy {
308 | RetentionPolicy::Age(d) => s2::types::RetentionPolicy::Age(d),
309 | }
310 | }
311 | }
312 |
313 | impl From<s2::types::RetentionPolicy> for RetentionPolicy {
314 | fn from(policy: s2::types::RetentionPolicy) -> Self {
315 | match policy {
316 | s2::types::RetentionPolicy::Age(d) => RetentionPolicy::Age(d),
317 | }
318 | }
319 | }
320 |
321 | impl From<s2::types::BasinConfig> for BasinConfig {
322 | fn from(config: s2::types::BasinConfig) -> Self {
323 | BasinConfig {
324 | default_stream_config: config.default_stream_config.map(Into::into),
325 | create_stream_on_append: Some(config.create_stream_on_append),
326 | create_stream_on_read: Some(config.create_stream_on_read),
327 | }
328 | }
329 | }
330 |
331 | impl From<s2::types::StreamConfig> for StreamConfig {
332 | fn from(config: s2::types::StreamConfig) -> Self {
333 | StreamConfig {
334 | storage_class: config.storage_class.map(Into::into),
335 | retention_policy: config.retention_policy.map(Into::into),
336 | timestamping: config.timestamping.map(Into::into),
337 | }
338 | }
339 | }
340 |
341 | #[derive(Debug, Clone, Serialize, PartialEq, Eq)]
342 | pub enum ResourceSet<const MIN: usize, const MAX: usize> {
343 | Exact(String),
344 | Prefix(String),
345 | }
346 |
347 | impl<const MIN: usize, const MAX: usize> From<ResourceSet<MIN, MAX>> for s2::types::ResourceSet {
348 | fn from(value: ResourceSet) -> Self {
349 | match value {
350 | ResourceSet::Exact(s) => s2::types::ResourceSet::Exact(s),
351 | ResourceSet::Prefix(s) => s2::types::ResourceSet::Prefix(s),
352 | }
353 | }
354 | }
355 |
356 | impl<const MIN: usize, const MAX: usize> From<s2::types::ResourceSet> for ResourceSet<MIN, MAX> {
357 | fn from(value: s2::types::ResourceSet) -> Self {
358 | match value {
359 | s2::types::ResourceSet::Exact(s) => ResourceSet::Exact(s),
360 | s2::types::ResourceSet::Prefix(s) => ResourceSet::Prefix(s),
361 | }
362 | }
363 | }
364 |
365 | #[derive(Debug, Error, Clone, PartialEq, Eq)]
366 | pub enum ResourceSetParseError {
367 | #[error("Exact value '{value}' length {length} must be between {min} and {max}")]
368 | ExactValueLengthInvalid {
369 | value: String,
370 | length: usize,
371 | min: usize,
372 | max: usize,
373 | },
374 |
375 | #[error("Prefix '{value}' length {length} exceeds maximum {max}")]
376 | PrefixTooLong {
377 | value: String,
378 | length: usize,
379 | max: usize,
380 | },
381 | }
382 |
383 | impl<const MIN: usize, const MAX: usize> FromStr for ResourceSet<MIN, MAX> {
384 | type Err = ResourceSetParseError;
385 |
386 | fn from_str(s: &str) -> Result<Self, Self::Err> {
387 | if s.is_empty() {
388 | return Ok(ResourceSet::Prefix(String::new()));
389 | }
390 |
391 | if let Some(value) = s.strip_prefix('=') {
392 | if value.is_empty() {
393 | return Ok(ResourceSet::Exact(String::new()));
394 | }
395 | let len = value.len();
396 | if len > MAX || len < MIN {
397 | return Err(ResourceSetParseError::ExactValueLengthInvalid {
398 | value: value.to_owned(),
399 | length: len,
400 | min: MIN,
401 | max: MAX,
402 | });
403 | }
404 | Ok(ResourceSet::Exact(value.to_owned()))
405 | } else {
406 | let len = s.len();
407 | if len > MAX {
408 | return Err(ResourceSetParseError::PrefixTooLong {
409 | value: s.to_owned(),
410 | length: len,
411 | max: MAX,
412 | });
413 | }
414 | Ok(ResourceSet::Prefix(s.to_owned()))
415 | }
416 | }
417 | }
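// Parsing convention, e.g. for ResourceSet::<8, 48> (the bounds used in the
// tests below): a leading '=' pins an exact name, anything else is a prefix,
// and "" is the empty prefix that matches every resource.
//   "=my-basin" -> Exact("my-basin")
//   "my-"       -> Prefix("my-")
//   ""          -> Prefix("")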
418 |
419 | #[derive(Debug, Clone, Serialize, PartialEq)]
420 | pub struct PermittedOperationGroups {
421 | pub account: Option<ReadWritePermissions>,
422 | pub basin: Option<ReadWritePermissions>,
423 | pub stream: Option<ReadWritePermissions>,
424 | }
425 |
426 | impl From<PermittedOperationGroups> for s2::types::PermittedOperationGroups {
427 | fn from(groups: PermittedOperationGroups) -> Self {
428 | s2::types::PermittedOperationGroups {
429 | account: groups.account.map(Into::into),
430 | basin: groups.basin.map(Into::into),
431 | stream: groups.stream.map(Into::into),
432 | }
433 | }
434 | }
435 |
436 | impl From<s2::types::PermittedOperationGroups> for PermittedOperationGroups {
437 | fn from(groups: s2::types::PermittedOperationGroups) -> Self {
438 | PermittedOperationGroups {
439 | account: groups.account.map(Into::into),
440 | basin: groups.basin.map(Into::into),
441 | stream: groups.stream.map(Into::into),
442 | }
443 | }
444 | }
445 |
446 | impl FromStr for PermittedOperationGroups {
447 | type Err = OpGroupsParseError;
448 |
449 | fn from_str(s: &str) -> Result<Self, Self::Err> {
450 | let mut account = None;
451 | let mut basin = None;
452 | let mut stream = None;
453 |
454 | if s.is_empty() {
455 | return Ok(PermittedOperationGroups {
456 | account,
457 | basin,
458 | stream,
459 | });
460 | }
461 |
462 | for part in s.split(',') {
463 | let part = part.trim();
464 | if part.is_empty() {
465 | continue;
466 | }
467 | let (key, value) =
468 | part.split_once('=')
469 | .ok_or_else(|| OpGroupsParseError::InvalidFormat {
470 | value: part.to_owned(),
471 | })?;
472 | let perms = value.parse::<ReadWritePermissions>()?;
473 | match key {
474 | "account" => account = Some(perms),
475 | "basin" => basin = Some(perms),
476 | "stream" => stream = Some(perms),
477 | _ => {
478 | return Err(OpGroupsParseError::InvalidKey {
479 | key: key.to_owned(),
480 | });
481 | }
482 | }
483 | }
484 |
485 | Ok(PermittedOperationGroups {
486 | account,
487 | basin,
488 | stream,
489 | })
490 | }
491 | }
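// Example, matching the rstest cases below: "basin=r,stream=w" grants read
// on basin-level operations and write on stream-level operations, leaving
// account-level operations unset.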
492 |
493 | #[derive(Debug, Clone, Serialize, PartialEq)]
494 | pub struct ReadWritePermissions {
495 | pub read: bool,
496 | pub write: bool,
497 | }
498 |
499 | impl FromStr for ReadWritePermissions {
500 | type Err = OpGroupsParseError;
501 |
502 | fn from_str(s: &str) -> Result<Self, Self::Err> {
503 | let mut read = false;
504 | let mut write = false;
505 | for c in s.chars() {
506 | match c {
507 | 'r' => read = true,
508 | 'w' => write = true,
509 | _ => return Err(OpGroupsParseError::InvalidPermissionChar(c)),
510 | }
511 | }
512 | if !read && !write {
513 | return Err(OpGroupsParseError::MissingPermission);
514 | }
515 | Ok(ReadWritePermissions { read, write })
516 | }
517 | }
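// Examples: "r" -> read only, "w" -> write only, "rw" (or "wr") -> both;
// repeated characters are tolerated, and anything outside {r, w} is rejected.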
518 |
519 | impl From<ReadWritePermissions> for s2::types::ReadWritePermissions {
520 | fn from(permissions: ReadWritePermissions) -> Self {
521 | s2::types::ReadWritePermissions {
522 | read: permissions.read,
523 | write: permissions.write,
524 | }
525 | }
526 | }
527 |
528 | impl From<s2::types::ReadWritePermissions> for ReadWritePermissions {
529 | fn from(permissions: s2::types::ReadWritePermissions) -> Self {
530 | ReadWritePermissions {
531 | read: permissions.read,
532 | write: permissions.write,
533 | }
534 | }
535 | }
536 |
537 | #[derive(Debug, Error, Clone, PartialEq, Eq)]
538 | pub enum OpGroupsParseError {
539 | #[error("Invalid op_group format: '{value}'. Expected 'key=value'")]
540 | InvalidFormat { value: String },
541 |
542 | #[error("Invalid op_group key: '{key}'. Expected 'account', 'basin', or 'stream'")]
543 | InvalidKey { key: String },
544 |
545 | #[error("At least one permission ('r' or 'w') must be specified")]
546 | MissingPermission,
547 |
548 | #[error("Invalid permission character: {0}")]
549 | InvalidPermissionChar(char),
550 | }
551 |
552 | #[derive(Debug, Serialize)]
553 | pub struct AccessTokenInfo {
554 | pub id: String,
555 | pub expires_at: Option<u32>,
556 | pub auto_prefix_streams: bool,
557 | pub scope: Option<AccessTokenScope>,
558 | }
559 |
560 | impl From<s2::types::AccessTokenInfo> for AccessTokenInfo {
561 | fn from(info: s2::types::AccessTokenInfo) -> Self {
562 | AccessTokenInfo {
563 | id: info.id.to_string(),
564 | expires_at: info.expires_at,
565 | auto_prefix_streams: info.auto_prefix_streams,
566 | scope: info.scope.map(Into::into),
567 | }
568 | }
569 | }
570 |
571 | #[derive(Debug, Serialize)]
572 | pub struct AccessTokenScope {
573 | pub basins: Option<ResourceSet<8, 48>>,
574 | pub streams: Option<ResourceSet<1, 512>>,
575 | pub access_tokens: Option<ResourceSet<1, 96>>,
576 | pub op_groups: Option<PermittedOperationGroups>,
577 | pub ops: Vec<Operation>,
578 | }
579 |
580 | impl From<s2::types::AccessTokenScope> for AccessTokenScope {
581 | fn from(scope: s2::types::AccessTokenScope) -> Self {
582 | AccessTokenScope {
583 | basins: scope.basins.map(Into::into),
584 | streams: scope.streams.map(Into::into),
585 | access_tokens: scope.access_tokens.map(Into::into),
586 | op_groups: scope.op_groups.map(Into::into),
587 | ops: scope.ops.into_iter().map(Operation::from).collect(),
588 | }
589 | }
590 | }
591 |
592 | #[derive(Debug, Clone, Serialize)]
593 | pub enum Operation {
594 | ListBasins,
595 | CreateBasin,
596 | DeleteBasin,
597 | ReconfigureBasin,
598 | GetBasinConfig,
599 | IssueAccessToken,
600 | RevokeAccessToken,
601 | ListAccessTokens,
602 | ListStreams,
603 | CreateStream,
604 | DeleteStream,
605 | GetStreamConfig,
606 | ReconfigureStream,
607 | CheckTail,
608 | Append,
609 | Read,
610 | Trim,
611 | Fence,
612 | }
613 |
614 | impl From<Operation> for s2::types::Operation {
615 | fn from(op: Operation) -> Self {
616 | match op {
617 | Operation::ListBasins => s2::types::Operation::ListBasins,
618 | Operation::CreateBasin => s2::types::Operation::CreateBasin,
619 | Operation::DeleteBasin => s2::types::Operation::DeleteBasin,
620 | Operation::ReconfigureBasin => s2::types::Operation::ReconfigureBasin,
621 | Operation::GetBasinConfig => s2::types::Operation::GetBasinConfig,
622 | Operation::IssueAccessToken => s2::types::Operation::IssueAccessToken,
623 | Operation::RevokeAccessToken => s2::types::Operation::RevokeAccessToken,
624 | Operation::ListAccessTokens => s2::types::Operation::ListAccessTokens,
625 | Operation::ListStreams => s2::types::Operation::ListStreams,
626 | Operation::CreateStream => s2::types::Operation::CreateStream,
627 | Operation::DeleteStream => s2::types::Operation::DeleteStream,
628 | Operation::GetStreamConfig => s2::types::Operation::GetStreamConfig,
629 | Operation::ReconfigureStream => s2::types::Operation::ReconfigureStream,
630 | Operation::CheckTail => s2::types::Operation::CheckTail,
631 | Operation::Append => s2::types::Operation::Append,
632 | Operation::Read => s2::types::Operation::Read,
633 | Operation::Trim => s2::types::Operation::Trim,
634 | Operation::Fence => s2::types::Operation::Fence,
635 | }
636 | }
637 | }
638 |
639 | impl From<s2::types::Operation> for Operation {
640 | fn from(op: s2::types::Operation) -> Self {
641 | match op {
642 | s2::types::Operation::ListBasins => Operation::ListBasins,
643 | s2::types::Operation::CreateBasin => Operation::CreateBasin,
644 | s2::types::Operation::DeleteBasin => Operation::DeleteBasin,
645 | s2::types::Operation::ReconfigureBasin => Operation::ReconfigureBasin,
646 | s2::types::Operation::GetBasinConfig => Operation::GetBasinConfig,
647 | s2::types::Operation::IssueAccessToken => Operation::IssueAccessToken,
648 | s2::types::Operation::RevokeAccessToken => Operation::RevokeAccessToken,
649 | s2::types::Operation::ListAccessTokens => Operation::ListAccessTokens,
650 | s2::types::Operation::ListStreams => Operation::ListStreams,
651 | s2::types::Operation::CreateStream => Operation::CreateStream,
652 | s2::types::Operation::DeleteStream => Operation::DeleteStream,
653 | s2::types::Operation::GetStreamConfig => Operation::GetStreamConfig,
654 | s2::types::Operation::ReconfigureStream => Operation::ReconfigureStream,
655 | s2::types::Operation::CheckTail => Operation::CheckTail,
656 | s2::types::Operation::Append => Operation::Append,
657 | s2::types::Operation::Read => Operation::Read,
658 | s2::types::Operation::Trim => Operation::Trim,
659 | s2::types::Operation::Fence => Operation::Fence,
660 | }
661 | }
662 | }
663 |
664 | #[derive(Debug, Error, Clone, PartialEq, Eq)]
665 | pub enum OperationParseError {
666 | #[error("Invalid operation: '{0}'")]
667 | InvalidOperation(String),
668 | }
669 |
670 | impl FromStr for Operation {
671 | type Err = OperationParseError;
672 |
673 | fn from_str(s: &str) -> Result<Self, Self::Err> {
674 | match s.to_lowercase().as_str() {
675 | "list-basins" => Ok(Self::ListBasins),
676 | "create-basin" => Ok(Self::CreateBasin),
677 | "delete-basin" => Ok(Self::DeleteBasin),
678 | "reconfigure-basin" => Ok(Self::ReconfigureBasin),
679 | "get-basin-config" => Ok(Self::GetBasinConfig),
680 | "issue-access-token" => Ok(Self::IssueAccessToken),
681 | "revoke-access-token" => Ok(Self::RevokeAccessToken),
682 | "list-access-tokens" => Ok(Self::ListAccessTokens),
683 | "list-streams" => Ok(Self::ListStreams),
684 | "create-stream" => Ok(Self::CreateStream),
685 | "delete-stream" => Ok(Self::DeleteStream),
686 | "get-stream-config" => Ok(Self::GetStreamConfig),
687 | "reconfigure-stream" => Ok(Self::ReconfigureStream),
688 | "check-tail" => Ok(Self::CheckTail),
689 | "append" => Ok(Self::Append),
690 | "read" => Ok(Self::Read),
691 | "trim" => Ok(Self::Trim),
692 | "fence" => Ok(Self::Fence),
693 | _ => Err(OperationParseError::InvalidOperation(s.to_owned())),
694 | }
695 | }
696 | }
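// The kebab-case spellings accepted here map one-to-one onto the Operation
// variants, e.g. "issue-access-token".parse::<Operation>() yields
// Operation::IssueAccessToken; matching is case-insensitive via to_lowercase.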
697 |
698 | #[cfg(test)]
699 | mod tests {
700 | use crate::{error::S2UriParseError, types::S2BasinAndStreamUri};
701 | use rstest::rstest;
702 |
703 | use super::{
704 | OpGroupsParseError, PermittedOperationGroups, ReadWritePermissions, ResourceSet,
705 | ResourceSetParseError, S2BasinAndMaybeStreamUri, S2BasinUri, S2Uri,
706 | };
707 |
708 | #[rstest]
709 | #[case("", Ok(PermittedOperationGroups {
710 | account: None,
711 | basin: None,
712 | stream: None,
713 | }))]
714 | #[case("account=r", Ok(PermittedOperationGroups {
715 | account: Some(ReadWritePermissions {
716 | read: true,
717 | write: false,
718 | }),
719 | basin: None,
720 | stream: None,
721 | }))]
722 | #[case("account=w", Ok(PermittedOperationGroups {
723 | account: Some(ReadWritePermissions {
724 | read: false,
725 | write: true,
726 | }),
727 | basin: None,
728 | stream: None,
729 | }))]
730 | #[case("account=rw", Ok(PermittedOperationGroups {
731 | account: Some(ReadWritePermissions {
732 | read: true,
733 | write: true,
734 | }),
735 | basin: None,
736 | stream: None,
737 | }))]
738 | #[case("basin=r,stream=w", Ok(PermittedOperationGroups {
739 | account: None,
740 | basin: Some(ReadWritePermissions {
741 | read: true,
742 | write: false,
743 | }),
744 | stream: Some(ReadWritePermissions {
745 | read: false,
746 | write: true,
747 | }),
748 | }))]
749 | #[case("account=rw,basin=rw,stream=rw", Ok(PermittedOperationGroups {
750 | account: Some(ReadWritePermissions {
751 | read: true,
752 | write: true,
753 | }),
754 | basin: Some(ReadWritePermissions {
755 | read: true,
756 | write: true,
757 | }),
758 | stream: Some(ReadWritePermissions {
759 | read: true,
760 | write: true,
761 | }),
762 | }))]
763 | #[case("invalid", Err(OpGroupsParseError::InvalidFormat { value: "invalid".to_owned() }))]
764 | #[case("unknown=rw", Err(OpGroupsParseError::InvalidKey { key: "unknown".to_owned() }))]
765 | #[case("account=", Err(OpGroupsParseError::MissingPermission))]
766 | #[case("account=x", Err(OpGroupsParseError::InvalidPermissionChar('x')))]
767 | fn test_parse_op_groups(
768 | #[case] input: &str,
769 | #[case] expected: Result<PermittedOperationGroups, OpGroupsParseError>,
770 | ) {
771 | assert_eq!(
772 | input.parse::<PermittedOperationGroups>(),
773 | expected,
774 | "Testing input: {}",
775 | input
776 | );
777 | }
778 |
779 | #[rstest]
780 | // Valid empty string case
781 | #[case("", Ok(ResourceSet::<8, 48>::Prefix(String::new())))]
782 | // Valid exact values
783 | #[case("=exact-value", Ok(ResourceSet::<8, 48>::Exact("exact-value".to_string())))]
784 | #[case("=mybasintestingvalue", Ok(ResourceSet::<8, 48>::Exact("mybasintestingvalue".to_string())))]
785 | // Valid prefix values
786 | #[case("prefix", Ok(ResourceSet::<8, 48>::Prefix("prefix".to_string())))]
787 | #[case("my-prefix", Ok(ResourceSet::<8, 48>::Prefix("my-prefix".to_string())))]
788 | // Error cases for exact values - too short or too long
789 | #[case("=short", Err(ResourceSetParseError::ExactValueLengthInvalid {
790 | value: "short".to_owned(), length: 5, min: 8, max: 48
791 | }))]
792 | #[case("=waytoolongvaluethatshouldexceedthemaximumlengthallowed",
793 | Err(ResourceSetParseError::ExactValueLengthInvalid {
794 | value: "waytoolongvaluethatshouldexceedthemaximumlengthallowed".to_owned(),
795 | length: 54, min: 8, max: 48
796 | }))]
797 | // Error case for prefix - too long
798 | #[case("waytoolongvaluethatshouldexceedthemaximumlengthallowed",
799 | Err(ResourceSetParseError::PrefixTooLong {
800 | value: "waytoolongvaluethatshouldexceedthemaximumlengthallowed".to_owned(),
801 | length: 54, max: 48
802 | }))]
803 | fn test_resource_set_parsing(
804 | #[case] input: &str,
805 | #[case] expected: Result<ResourceSet<8, 48>, ResourceSetParseError>,
806 | ) {
807 | assert_eq!(
808 | input.parse::<ResourceSet<8, 48>>(),
809 | expected,
810 | "Testing input: {}",
811 | input
812 | );
813 | }
814 |
815 | #[test]
816 | fn test_s2_uri_parse() {
817 | let test_cases = vec![
818 | (
819 | "valid-basin",
820 | Err(S2UriParseError::MissingUriScheme),
821 | Ok(S2BasinUri("valid-basin".parse().unwrap())),
822 | Err(S2UriParseError::MissingUriScheme),
823 | Ok(S2BasinAndMaybeStreamUri {
824 | basin: "valid-basin".parse().unwrap(),
825 | stream: None,
826 | }),
827 | ),
828 | (
829 | "s2://valid-basin",
830 | Ok(S2Uri {
831 | basin: "valid-basin".parse().unwrap(),
832 | stream: None,
833 | }),
834 | Ok(S2BasinUri("valid-basin".parse().unwrap())),
835 | Err(S2UriParseError::MissingStreamName),
836 | Ok(S2BasinAndMaybeStreamUri {
837 | basin: "valid-basin".parse().unwrap(),
838 | stream: None,
839 | }),
840 | ),
841 | (
842 | "s2://valid-basin/",
843 | Ok(S2Uri {
844 | basin: "valid-basin".parse().unwrap(),
845 | stream: None,
846 | }),
847 | Ok(S2BasinUri("valid-basin".parse().unwrap())),
848 | Err(S2UriParseError::MissingStreamName),
849 | Ok(S2BasinAndMaybeStreamUri {
850 | basin: "valid-basin".parse().unwrap(),
851 | stream: None,
852 | }),
853 | ),
854 | (
855 | "s2://valid-basin/stream/name",
856 | Ok(S2Uri {
857 | basin: "valid-basin".parse().unwrap(),
858 | stream: Some("stream/name".to_owned()),
859 | }),
860 | Err(S2UriParseError::UnexpectedStreamName),
861 | Ok(S2BasinAndStreamUri {
862 | basin: "valid-basin".parse().unwrap(),
863 | stream: "stream/name".to_owned(),
864 | }),
865 | Ok(S2BasinAndMaybeStreamUri {
866 | basin: "valid-basin".parse().unwrap(),
867 | stream: Some("stream/name".to_owned()),
868 | }),
869 | ),
870 | (
871 | "-invalid-basin",
872 | Err(S2UriParseError::MissingUriScheme),
873 | Err(S2UriParseError::InvalidBasinName("".into())),
874 | Err(S2UriParseError::MissingUriScheme),
875 | Err(S2UriParseError::InvalidBasinName("".into())),
876 | ),
877 | (
878 | "http://valid-basin",
879 | Err(S2UriParseError::InvalidUriScheme("http".to_owned())),
880 | Err(S2UriParseError::InvalidUriScheme("http".to_owned())),
881 | Err(S2UriParseError::InvalidUriScheme("http".to_owned())),
882 | Err(S2UriParseError::InvalidUriScheme("http".to_owned())),
883 | ),
884 | (
885 | "s2://-invalid-basin",
886 | Err(S2UriParseError::InvalidBasinName("".into())),
887 | Err(S2UriParseError::InvalidBasinName("".into())),
888 | Err(S2UriParseError::InvalidBasinName("".into())),
889 | Err(S2UriParseError::InvalidBasinName("".into())),
890 | ),
891 | (
892 | "s2:///stream/name",
893 | Err(S2UriParseError::InvalidBasinName("".into())),
894 | Err(S2UriParseError::InvalidBasinName("".into())),
895 | Err(S2UriParseError::InvalidBasinName("".into())),
896 | Err(S2UriParseError::InvalidBasinName("".into())),
897 | ),
898 | (
899 | "random:::string",
900 | Err(S2UriParseError::MissingUriScheme),
901 | Err(S2UriParseError::InvalidBasinName("".into())),
902 | Err(S2UriParseError::MissingUriScheme),
903 | Err(S2UriParseError::InvalidBasinName("".into())),
904 | ),
905 | ];
906 |
907 | for (
908 | s,
909 | expected_uri,
910 | expected_basin_uri,
911 | expected_basin_and_stream_uri,
912 | expected_basin_and_maybe_stream_uri,
913 | ) in test_cases
914 | {
915 | assert_eq!(s.parse(), expected_uri, "S2Uri: {s}");
916 | assert_eq!(s.parse(), expected_basin_uri, "S2BasinUri: {s}");
917 | assert_eq!(
918 | s.parse(),
919 | expected_basin_and_stream_uri,
920 | "S2BasinAndStreamUri: {s}"
921 | );
922 | assert_eq!(
923 | s.parse(),
924 | expected_basin_and_maybe_stream_uri,
925 | "S2BasinAndMaybeStreamUri: {s}"
926 | );
927 | }
928 | }
929 | }
930 |
--------------------------------------------------------------------------------