├── .github
└── workflows
│ └── release.yml
├── .gitignore
├── LICENSE
├── Makefile
├── build.zig
├── build.zig.zon
├── readme.md
├── src
├── app.zig
├── config.zig
├── dproxy.zig
├── env.zig
├── init.zig
├── main.zig
├── parameter.zig
├── t.zig
├── version.txt
└── web
│ ├── sql
│ ├── _sql.zig
│ └── exec.zig
│ └── web.zig
└── test_runner.zig
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
# Builds and publishes release artifacts whenever a v*.*.* tag is pushed.
# Each platform step downloads the matching prebuilt libduckdb from duckdb's
# GitHub releases, cross-compiles the proxy against it, and bundles the
# binary plus the shared library into one archive per target.
name: Release

permissions:
  # required by softprops/action-gh-release to create the GitHub release
  contents: write

env:
  # version of the prebuilt libduckdb binaries to download below
  DUCKDB_VERSION: 1.0.0

on:
  push:
    tags:
      - "v*.*.*"

jobs:
  release:
    runs-on: ubuntu-latest
    steps:
      - name: checkout
        uses: actions/checkout@v4

      - name: install zig
        run: |
          # NOTE(review): --edge installs a nightly zig, so release builds are
          # not reproducible across runs; consider pinning a zig version.
          sudo snap install zig --classic --edge

      - name: pre-build
        run: |
          # Record commit hash and compiler version; src/version.txt is
          # @embedFile'd into the binary (see src/dproxy.zig).
          echo "commit: $(git rev-parse HEAD | tr -d '\n')" > src/version.txt
          echo "zig: $(zig version)" >> src/version.txt

      - name: x86_64-linux-gnu
        run: |
          wget -q "https://github.com/duckdb/duckdb/releases/download/v${DUCKDB_VERSION}/libduckdb-linux-amd64.zip"
          unzip -n *.zip && rm *.zip

          mkdir -p release/duckdb-proxy-x86_64-linux-gnu/
          zig build -Dtarget=x86_64-linux-gnu -Dcpu=x86_64_v2 -Doptimize=ReleaseFast
          mv libduckdb.so zig-out/bin/duckdb-proxy release/duckdb-proxy-x86_64-linux-gnu/

          cd release
          tar -cJf duckdb-proxy-x86_64-linux-gnu.tar.xz duckdb-proxy-x86_64-linux-gnu

      - name: aarch64-macos
        run: |
          wget -q "https://github.com/duckdb/duckdb/releases/download/v${DUCKDB_VERSION}/libduckdb-osx-universal.zip"
          unzip -n *.zip && rm *.zip

          mkdir -p release/duckdb-proxy-aarch64-macos/
          zig build -Dtarget=aarch64-macos -Doptimize=ReleaseFast
          # cp (not mv): the universal dylib is reused by the x86_64-macos step
          cp libduckdb.dylib zig-out/bin/duckdb-proxy release/duckdb-proxy-aarch64-macos/

          cd release
          tar -cJf duckdb-proxy-aarch64-macos.tar.xz duckdb-proxy-aarch64-macos

      - name: x86_64-macos
        run: |
          # no download needed: reuses the universal libduckdb.dylib kept by
          # the aarch64-macos step above (hence mv here is safe)
          mkdir -p release/duckdb-proxy-x86_64-macos/
          zig build -Dtarget=x86_64-macos -Doptimize=ReleaseFast
          mv libduckdb.dylib zig-out/bin/duckdb-proxy release/duckdb-proxy-x86_64-macos/

          cd release
          tar -cJf duckdb-proxy-x86_64-macos.tar.xz duckdb-proxy-x86_64-macos

      - name: x86_64-windows
        run: |
          wget -q "https://github.com/duckdb/duckdb/releases/download/v${DUCKDB_VERSION}/libduckdb-windows-amd64.zip"
          unzip -n *.zip && rm *.zip

          mkdir -p release/duckdb-proxy-x86_64-windows-gnu/
          zig build -Dtarget=x86_64-windows -Doptimize=ReleaseFast
          mv duckdb.dll zig-out/bin/duckdb-proxy.exe release/duckdb-proxy-x86_64-windows-gnu/

          cd release
          # windows convention: ship a .zip rather than a .tar.xz
          zip -r duckdb-proxy-x86_64-windows-gnu.zip duckdb-proxy-x86_64-windows-gnu

      - name: release
        uses: softprops/action-gh-release@v1
        with:
          files: |
            /home/runner/work/duckdb-proxy/duckdb-proxy/release/duckdb-proxy-x86_64-linux-gnu.tar.xz
            /home/runner/work/duckdb-proxy/duckdb-proxy/release/duckdb-proxy-aarch64-macos.tar.xz
            /home/runner/work/duckdb-proxy/duckdb-proxy/release/duckdb-proxy-x86_64-macos.tar.xz
            /home/runner/work/duckdb-proxy/duckdb-proxy/release/duckdb-proxy-x86_64-windows-gnu.zip
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | zig-out/
3 | .zig-cache/
4 | db.duckdb*
5 | tests/db.duckdb*
6 | duckdb.h
7 | libduckdb.dylib
8 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Mozilla Public License Version 2.0
2 | ==================================
3 |
4 | 1. Definitions
5 | --------------
6 |
7 | 1.1. "Contributor"
8 | means each individual or legal entity that creates, contributes to
9 | the creation of, or owns Covered Software.
10 |
11 | 1.2. "Contributor Version"
12 | means the combination of the Contributions of others (if any) used
13 | by a Contributor and that particular Contributor's Contribution.
14 |
15 | 1.3. "Contribution"
16 | means Covered Software of a particular Contributor.
17 |
18 | 1.4. "Covered Software"
19 | means Source Code Form to which the initial Contributor has attached
20 | the notice in Exhibit A, the Executable Form of such Source Code
21 | Form, and Modifications of such Source Code Form, in each case
22 | including portions thereof.
23 |
24 | 1.5. "Incompatible With Secondary Licenses"
25 | means
26 |
27 | (a) that the initial Contributor has attached the notice described
28 | in Exhibit B to the Covered Software; or
29 |
30 | (b) that the Covered Software was made available under the terms of
31 | version 1.1 or earlier of the License, but not also under the
32 | terms of a Secondary License.
33 |
34 | 1.6. "Executable Form"
35 | means any form of the work other than Source Code Form.
36 |
37 | 1.7. "Larger Work"
38 | means a work that combines Covered Software with other material, in
39 | a separate file or files, that is not Covered Software.
40 |
41 | 1.8. "License"
42 | means this document.
43 |
44 | 1.9. "Licensable"
45 | means having the right to grant, to the maximum extent possible,
46 | whether at the time of the initial grant or subsequently, any and
47 | all of the rights conveyed by this License.
48 |
49 | 1.10. "Modifications"
50 | means any of the following:
51 |
52 | (a) any file in Source Code Form that results from an addition to,
53 | deletion from, or modification of the contents of Covered
54 | Software; or
55 |
56 | (b) any new file in Source Code Form that contains any Covered
57 | Software.
58 |
59 | 1.11. "Patent Claims" of a Contributor
60 | means any patent claim(s), including without limitation, method,
61 | process, and apparatus claims, in any patent Licensable by such
62 | Contributor that would be infringed, but for the grant of the
63 | License, by the making, using, selling, offering for sale, having
64 | made, import, or transfer of either its Contributions or its
65 | Contributor Version.
66 |
67 | 1.12. "Secondary License"
68 | means either the GNU General Public License, Version 2.0, the GNU
69 | Lesser General Public License, Version 2.1, the GNU Affero General
70 | Public License, Version 3.0, or any later versions of those
71 | licenses.
72 |
73 | 1.13. "Source Code Form"
74 | means the form of the work preferred for making modifications.
75 |
76 | 1.14. "You" (or "Your")
77 | means an individual or a legal entity exercising rights under this
78 | License. For legal entities, "You" includes any entity that
79 | controls, is controlled by, or is under common control with You. For
80 | purposes of this definition, "control" means (a) the power, direct
81 | or indirect, to cause the direction or management of such entity,
82 | whether by contract or otherwise, or (b) ownership of more than
83 | fifty percent (50%) of the outstanding shares or beneficial
84 | ownership of such entity.
85 |
86 | 2. License Grants and Conditions
87 | --------------------------------
88 |
89 | 2.1. Grants
90 |
91 | Each Contributor hereby grants You a world-wide, royalty-free,
92 | non-exclusive license:
93 |
94 | (a) under intellectual property rights (other than patent or trademark)
95 | Licensable by such Contributor to use, reproduce, make available,
96 | modify, display, perform, distribute, and otherwise exploit its
97 | Contributions, either on an unmodified basis, with Modifications, or
98 | as part of a Larger Work; and
99 |
100 | (b) under Patent Claims of such Contributor to make, use, sell, offer
101 | for sale, have made, import, and otherwise transfer either its
102 | Contributions or its Contributor Version.
103 |
104 | 2.2. Effective Date
105 |
106 | The licenses granted in Section 2.1 with respect to any Contribution
107 | become effective for each Contribution on the date the Contributor first
108 | distributes such Contribution.
109 |
110 | 2.3. Limitations on Grant Scope
111 |
112 | The licenses granted in this Section 2 are the only rights granted under
113 | this License. No additional rights or licenses will be implied from the
114 | distribution or licensing of Covered Software under this License.
115 | Notwithstanding Section 2.1(b) above, no patent license is granted by a
116 | Contributor:
117 |
118 | (a) for any code that a Contributor has removed from Covered Software;
119 | or
120 |
121 | (b) for infringements caused by: (i) Your and any other third party's
122 | modifications of Covered Software, or (ii) the combination of its
123 | Contributions with other software (except as part of its Contributor
124 | Version); or
125 |
126 | (c) under Patent Claims infringed by Covered Software in the absence of
127 | its Contributions.
128 |
129 | This License does not grant any rights in the trademarks, service marks,
130 | or logos of any Contributor (except as may be necessary to comply with
131 | the notice requirements in Section 3.4).
132 |
133 | 2.4. Subsequent Licenses
134 |
135 | No Contributor makes additional grants as a result of Your choice to
136 | distribute the Covered Software under a subsequent version of this
137 | License (see Section 10.2) or under the terms of a Secondary License (if
138 | permitted under the terms of Section 3.3).
139 |
140 | 2.5. Representation
141 |
142 | Each Contributor represents that the Contributor believes its
143 | Contributions are its original creation(s) or it has sufficient rights
144 | to grant the rights to its Contributions conveyed by this License.
145 |
146 | 2.6. Fair Use
147 |
148 | This License is not intended to limit any rights You have under
149 | applicable copyright doctrines of fair use, fair dealing, or other
150 | equivalents.
151 |
152 | 2.7. Conditions
153 |
154 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
155 | in Section 2.1.
156 |
157 | 3. Responsibilities
158 | -------------------
159 |
160 | 3.1. Distribution of Source Form
161 |
162 | All distribution of Covered Software in Source Code Form, including any
163 | Modifications that You create or to which You contribute, must be under
164 | the terms of this License. You must inform recipients that the Source
165 | Code Form of the Covered Software is governed by the terms of this
166 | License, and how they can obtain a copy of this License. You may not
167 | attempt to alter or restrict the recipients' rights in the Source Code
168 | Form.
169 |
170 | 3.2. Distribution of Executable Form
171 |
172 | If You distribute Covered Software in Executable Form then:
173 |
174 | (a) such Covered Software must also be made available in Source Code
175 | Form, as described in Section 3.1, and You must inform recipients of
176 | the Executable Form how they can obtain a copy of such Source Code
177 | Form by reasonable means in a timely manner, at a charge no more
178 | than the cost of distribution to the recipient; and
179 |
180 | (b) You may distribute such Executable Form under the terms of this
181 | License, or sublicense it under different terms, provided that the
182 | license for the Executable Form does not attempt to limit or alter
183 | the recipients' rights in the Source Code Form under this License.
184 |
185 | 3.3. Distribution of a Larger Work
186 |
187 | You may create and distribute a Larger Work under terms of Your choice,
188 | provided that You also comply with the requirements of this License for
189 | the Covered Software. If the Larger Work is a combination of Covered
190 | Software with a work governed by one or more Secondary Licenses, and the
191 | Covered Software is not Incompatible With Secondary Licenses, this
192 | License permits You to additionally distribute such Covered Software
193 | under the terms of such Secondary License(s), so that the recipient of
194 | the Larger Work may, at their option, further distribute the Covered
195 | Software under the terms of either this License or such Secondary
196 | License(s).
197 |
198 | 3.4. Notices
199 |
200 | You may not remove or alter the substance of any license notices
201 | (including copyright notices, patent notices, disclaimers of warranty,
202 | or limitations of liability) contained within the Source Code Form of
203 | the Covered Software, except that You may alter any license notices to
204 | the extent required to remedy known factual inaccuracies.
205 |
206 | 3.5. Application of Additional Terms
207 |
208 | You may choose to offer, and to charge a fee for, warranty, support,
209 | indemnity or liability obligations to one or more recipients of Covered
210 | Software. However, You may do so only on Your own behalf, and not on
211 | behalf of any Contributor. You must make it absolutely clear that any
212 | such warranty, support, indemnity, or liability obligation is offered by
213 | You alone, and You hereby agree to indemnify every Contributor for any
214 | liability incurred by such Contributor as a result of warranty, support,
215 | indemnity or liability terms You offer. You may include additional
216 | disclaimers of warranty and limitations of liability specific to any
217 | jurisdiction.
218 |
219 | 4. Inability to Comply Due to Statute or Regulation
220 | ---------------------------------------------------
221 |
222 | If it is impossible for You to comply with any of the terms of this
223 | License with respect to some or all of the Covered Software due to
224 | statute, judicial order, or regulation then You must: (a) comply with
225 | the terms of this License to the maximum extent possible; and (b)
226 | describe the limitations and the code they affect. Such description must
227 | be placed in a text file included with all distributions of the Covered
228 | Software under this License. Except to the extent prohibited by statute
229 | or regulation, such description must be sufficiently detailed for a
230 | recipient of ordinary skill to be able to understand it.
231 |
232 | 5. Termination
233 | --------------
234 |
235 | 5.1. The rights granted under this License will terminate automatically
236 | if You fail to comply with any of its terms. However, if You become
237 | compliant, then the rights granted under this License from a particular
238 | Contributor are reinstated (a) provisionally, unless and until such
239 | Contributor explicitly and finally terminates Your grants, and (b) on an
240 | ongoing basis, if such Contributor fails to notify You of the
241 | non-compliance by some reasonable means prior to 60 days after You have
242 | come back into compliance. Moreover, Your grants from a particular
243 | Contributor are reinstated on an ongoing basis if such Contributor
244 | notifies You of the non-compliance by some reasonable means, this is the
245 | first time You have received notice of non-compliance with this License
246 | from such Contributor, and You become compliant prior to 30 days after
247 | Your receipt of the notice.
248 |
249 | 5.2. If You initiate litigation against any entity by asserting a patent
250 | infringement claim (excluding declaratory judgment actions,
251 | counter-claims, and cross-claims) alleging that a Contributor Version
252 | directly or indirectly infringes any patent, then the rights granted to
253 | You by any and all Contributors for the Covered Software under Section
254 | 2.1 of this License shall terminate.
255 |
256 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all
257 | end user license agreements (excluding distributors and resellers) which
258 | have been validly granted by You or Your distributors under this License
259 | prior to termination shall survive termination.
260 |
261 | ************************************************************************
262 | * *
263 | * 6. Disclaimer of Warranty *
264 | * ------------------------- *
265 | * *
266 | * Covered Software is provided under this License on an "as is" *
267 | * basis, without warranty of any kind, either expressed, implied, or *
268 | * statutory, including, without limitation, warranties that the *
269 | * Covered Software is free of defects, merchantable, fit for a *
270 | * particular purpose or non-infringing. The entire risk as to the *
271 | * quality and performance of the Covered Software is with You. *
272 | * Should any Covered Software prove defective in any respect, You *
273 | * (not any Contributor) assume the cost of any necessary servicing, *
274 | * repair, or correction. This disclaimer of warranty constitutes an *
275 | * essential part of this License. No use of any Covered Software is *
276 | * authorized under this License except under this disclaimer. *
277 | * *
278 | ************************************************************************
279 |
280 | ************************************************************************
281 | * *
282 | * 7. Limitation of Liability *
283 | * -------------------------- *
284 | * *
285 | * Under no circumstances and under no legal theory, whether tort *
286 | * (including negligence), contract, or otherwise, shall any *
287 | * Contributor, or anyone who distributes Covered Software as *
288 | * permitted above, be liable to You for any direct, indirect, *
289 | * special, incidental, or consequential damages of any character *
290 | * including, without limitation, damages for lost profits, loss of *
291 | * goodwill, work stoppage, computer failure or malfunction, or any *
292 | * and all other commercial damages or losses, even if such party *
293 | * shall have been informed of the possibility of such damages. This *
294 | * limitation of liability shall not apply to liability for death or *
295 | * personal injury resulting from such party's negligence to the *
296 | * extent applicable law prohibits such limitation. Some *
297 | * jurisdictions do not allow the exclusion or limitation of *
298 | * incidental or consequential damages, so this exclusion and *
299 | * limitation may not apply to You. *
300 | * *
301 | ************************************************************************
302 |
303 | 8. Litigation
304 | -------------
305 |
306 | Any litigation relating to this License may be brought only in the
307 | courts of a jurisdiction where the defendant maintains its principal
308 | place of business and such litigation shall be governed by laws of that
309 | jurisdiction, without reference to its conflict-of-law provisions.
310 | Nothing in this Section shall prevent a party's ability to bring
311 | cross-claims or counter-claims.
312 |
313 | 9. Miscellaneous
314 | ----------------
315 |
316 | This License represents the complete agreement concerning the subject
317 | matter hereof. If any provision of this License is held to be
318 | unenforceable, such provision shall be reformed only to the extent
319 | necessary to make it enforceable. Any law or regulation which provides
320 | that the language of a contract shall be construed against the drafter
321 | shall not be used to construe this License against a Contributor.
322 |
323 | 10. Versions of the License
324 | ---------------------------
325 |
326 | 10.1. New Versions
327 |
328 | Mozilla Foundation is the license steward. Except as provided in Section
329 | 10.3, no one other than the license steward has the right to modify or
330 | publish new versions of this License. Each version will be given a
331 | distinguishing version number.
332 |
333 | 10.2. Effect of New Versions
334 |
335 | You may distribute the Covered Software under the terms of the version
336 | of the License under which You originally received the Covered Software,
337 | or under the terms of any subsequent version published by the license
338 | steward.
339 |
340 | 10.3. Modified Versions
341 |
342 | If you create software not governed by this License, and you want to
343 | create a new license for such software, you may create and use a
344 | modified version of this License if you rename the license and remove
345 | any references to the name of the license steward (except to note that
346 | such modified license differs from this License).
347 |
348 | 10.4. Distributing Source Code Form that is Incompatible With Secondary
349 | Licenses
350 |
351 | If You choose to distribute Source Code Form that is Incompatible With
352 | Secondary Licenses under the terms of this version of the License, the
353 | notice described in Exhibit B of this License must be attached.
354 |
355 | Exhibit A - Source Code Form License Notice
356 | -------------------------------------------
357 |
358 | This Source Code Form is subject to the terms of the Mozilla Public
359 | License, v. 2.0. If a copy of the MPL was not distributed with this
360 | file, You can obtain one at http://mozilla.org/MPL/2.0/.
361 |
362 | If it is not possible or desirable to put the notice in a particular
363 | file, then You may include the notice in a location (such as a LICENSE
364 | file in a relevant directory) where a recipient would be likely to look
365 | for such a notice.
366 |
367 | You may add additional accurate notices of copyright ownership.
368 |
369 | Exhibit B - "Incompatible With Secondary Licenses" Notice
370 | ---------------------------------------------------------
371 |
372 | This Source Code Form is "Incompatible With Secondary Licenses", as
373 | defined by the Mozilla Public License, v. 2.0.
374 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
# Optional test filter, e.g. `make t F=sql`
F=

# t: run the test suite (TEST_FILTER is read by test_runner.zig)
.PHONY: t
t:
	TEST_FILTER="${F}" zig build test --summary all -freference-trace

# s: build and run the server locally
.PHONY: s
s:
	zig build run
9 |
--------------------------------------------------------------------------------
/build.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 |
3 | const ModuleMap = std.StringArrayHashMap(*std.Build.Module);
4 |
// Build entry point: registers dependency modules, the `duckdb-proxy`
// executable, a `run` step, and a `test` step that uses the custom runner.
pub fn build(b: *std.Build) !void {
    // b.allocator is an arena owned by the build runner and freed when the
    // build exits; using it avoids hand-rolling (and leaking from) a separate
    // GeneralPurposeAllocator that was never deinited.
    const allocator = b.allocator;

    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    var modules = ModuleMap.init(allocator);
    defer modules.deinit();

    const dep_opts = .{ .target = target, .optimize = optimize };

    // Third-party modules shared by both the executable and the tests.
    try modules.put("zul", b.dependency("zul", dep_opts).module("zul"));
    try modules.put("logz", b.dependency("logz", dep_opts).module("logz"));
    try modules.put("httpz", b.dependency("httpz", dep_opts).module("httpz"));
    try modules.put("typed", b.dependency("typed", dep_opts).module("typed"));
    try modules.put("validate", b.dependency("validate", dep_opts).module("validate"));
    const zuckdb = b.dependency("zuckdb", dep_opts).module("zuckdb");
    try modules.put("zuckdb", zuckdb);

    // zuckdb links against libduckdb; duckdb.h and the shared library are
    // expected in the project root (see the release workflow / .gitignore).
    zuckdb.addRPathSpecial(".");
    zuckdb.addIncludePath(b.path("."));
    zuckdb.addLibraryPath(b.path("."));

    // setup executable
    const exe = b.addExecutable(.{
        .name = "duckdb-proxy",
        .root_source_file = b.path("src/main.zig"),
        .target = target,
        .optimize = optimize,
    });
    try addLibs(exe, modules);
    b.installArtifact(exe);

    const run_cmd = b.addRunArtifact(exe);
    run_cmd.step.dependOn(b.getInstallStep());
    if (b.args) |args| {
        run_cmd.addArgs(args);
    }

    // setup run + test steps
    const run_step = b.step("run", "Run the app");
    run_step.dependOn(&run_cmd.step);

    const tests = b.addTest(.{
        .root_source_file = b.path("src/main.zig"),
        .target = target,
        .optimize = optimize,
        .test_runner = b.path("test_runner.zig"),
    });

    try addLibs(tests, modules);
    const run_test = b.addRunArtifact(tests);
    // tests touch the on-disk db, so never cache their results
    run_test.has_side_effects = true;

    const test_step = b.step("test", "Run tests");
    test_step.dependOn(&run_test.step);
}
63 |
// Wires every shared dependency module into a compile step's root module and
// links libc plus the system libduckdb (required by zuckdb).
fn addLibs(step: *std.Build.Step.Compile, modules: ModuleMap) !void {
    // StringArrayHashMap preserves insertion order, and keys()/values() are
    // parallel slices, so this visits the same entries as an iterator would.
    for (modules.keys(), modules.values()) |name, module| {
        step.root_module.addImport(name, module);
    }
    step.linkLibC();
    step.linkSystemLibrary("duckdb");
}
72 |
--------------------------------------------------------------------------------
/build.zig.zon:
--------------------------------------------------------------------------------
1 | .{
2 | .name = "duckdb-proxy",
3 | .paths = .{""},
4 | .version = "0.0.0",
5 | .dependencies = .{
6 | .httpz = .{
7 | .url = "https://github.com/karlseguin/http.zig/archive/12764925eb6a7929004c1be9032b04f97f4e43e2.tar.gz",
8 | .hash = "1220ffb589c6cd1a040bfd4446c74c38b5e873ba82e737cb33c98711c95787b92c81"
9 | },
10 | .logz = .{
11 | .url = "https://github.com/karlseguin/log.zig/archive/adc6910d2e8f50e0ec5a4792d6a7136f46778061.tar.gz",
12 | .hash = "1220a5687ab17f0691a64d19dfd7be1299df64d2e030bf7d56a7e0be041fbb289845"
13 | },
14 | .typed = .{
15 | .url = "https://github.com/karlseguin/typed.zig/archive/dad3c4295008fbbbfdf7d2d0403124993949875d.tar.gz",
16 | .hash = "1220470ae86a8ebe1247000b266f8d1d3c0b29acc35de35d1851831bd6fe210e6c20"
17 | },
18 | .validate = .{
19 | .url = "https://github.com/karlseguin/validate.zig/archive/bc54d48bab18d480ea28d885364d2b63b6d02fb3.tar.gz",
20 | .hash = "122068496a99e0bcd8750844d302d430e840e0ca4a35967db36fb19a846df9755e74"
21 | },
22 | .zuckdb = .{
23 | .url = "https://github.com/karlseguin/zuckdb.zig/archive/15447c4f31829175511a1e71ee7c64ef38de8c5e.tar.gz",
24 | .hash = "12207fcc9646d0fee6b2103bffb41856a0ba372dfea03a24c857a72629f44a3cb01c"
25 | },
26 | .zul = .{
27 | .url = "https://github.com/karlseguin/zul/archive/7779642127b560a04075da96d9d07703847923cb.tar.gz",
28 | .hash = "12203d26bd4179e9072949037a2a99a12c88909c27f2465f230beec7b1a6b1db8688"
29 | },
30 | },
31 | }
32 |
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | # duckdb-proxy
2 | Full usage documentation at .
3 |
4 | Simple demo at .
5 |
6 | ```bash
7 | $ ./duckdb-proxy
8 | @ts=1687579487928 @l=INFO @ctx=Log.setup level=Info note="alter via --log_level=LEVEL flag"
9 | @ts=1687579487937 @l=INFO @ctx=http.listener address=http://127.0.0.1:8012 db_path=db.duckdb
10 | ```
11 |
12 | Then POST your `sql` and `params` to the `/api/1/exec` route
13 |
14 | ```bash
15 | $ curl "http://localhost:8012/api/1/exec" -d '{
16 | "sql": "select $1::int as over",
17 | "params": [9000]
18 | }'
19 |
20 | {
21 | "cols": ["over"],
22 | "rows": [
23 | [9000]
24 | ]
}
```
26 |
--------------------------------------------------------------------------------
/src/app.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const logz = @import("logz");
3 | const zuckdb = @import("zuckdb");
4 | const validate = @import("validate");
5 | const dproxy = @import("dproxy.zig");
6 | const BufferPool = @import("zul").StringBuilder.Pool;
7 |
8 | const Config = dproxy.Config;
9 | const Allocator = std.mem.Allocator;
10 |
pub const App = struct {
    config: Config,
    log_http: bool,
    with_wrap: bool,
    // Pre-rendered " limit {n}" suffix, or null when no max_limit is configured.
    max_limit: ?[]const u8,
    dbs: *zuckdb.Pool,
    allocator: Allocator,
    buffer_pool: *BufferPool,
    validators: validate.Pool(void),

    // Opens the DuckDB database and builds the connection pool, validator
    // pool and buffer pool. Caller owns the returned App and must deinit it.
    pub fn init(allocator: Allocator, config: Config) !App {
        const db_config = config.db;
        const zuckdb_config = zuckdb.DB.Config{
            .access_mode = if (db_config.readonly) .read_only else .read_write,
            .enable_external_access = db_config.external_access,
        };

        var open_err: ?[]u8 = null;
        const db = zuckdb.DB.initWithErr(allocator, db_config.path, zuckdb_config, &open_err) catch |err| {
            if (err == error.OpenDB) {
                // open_err is only populated on error.OpenDB; we own it
                defer allocator.free(open_err.?);
                return dproxy.duckdbError("db.init", open_err.?, logz.err().string("path", db_config.path));
            }
            return err;
        };
        errdefer db.deinit();

        var dbs = try zuckdb.Pool.init(db, .{.size = db_config.pool_size});
        errdefer dbs.deinit();

        var max_limit: ?[]const u8 = null;
        if (config.max_limit) |l| {
            // no reason to do this more than once!
            max_limit = try std.fmt.allocPrint(allocator, " limit {d}", .{l});
        }
        // don't leak the formatted suffix if a later init step fails
        errdefer if (max_limit) |l| allocator.free(l);

        var validators = try validate.Pool(void).init(allocator, .{});
        errdefer validators.deinit();

        return .{
            .dbs = dbs,
            .config = config,
            .allocator = allocator,
            .max_limit = max_limit,
            .log_http = config.log_http,
            .with_wrap = config.with_wrap,
            .validators = validators,
            .buffer_pool = try BufferPool.init(allocator, db_config.pool_size, 2048),
        };
    }

    pub fn deinit(self: *App) void {
        self.dbs.deinit();
        self.validators.deinit();
        self.buffer_pool.deinit();
        if (self.max_limit) |l| {
            self.allocator.free(l);
        }
    }
};
68 |
--------------------------------------------------------------------------------
/src/config.zig:
--------------------------------------------------------------------------------
1 | const logz = @import("logz");
2 | const httpz = @import("httpz");
3 | const builtin = @import("builtin");
4 |
// Runtime configuration, typically populated from CLI flags / config file.
pub const Config = struct {
    const DB = struct {
        // path to the db (null-terminated for the DuckDB C API)
        path: [:0]const u8 = "db.duckdb",

        // number of connections to the db to keep
        pool_size: u16 = 50,

        // sets the enable_external_access duckdb flag
        external_access: bool = true,

        // sets the duckdb access_mode flag
        readonly: bool = false,
    };

    // Put a limit on the number of allowed parameters per query
    max_parameters: ?u32 = null,

    // whether to wrap the SQL in a "with _ as ($SQL) select * from _", this restricts
    // the types of SQL statements that can be executed.
    with_wrap: bool = false,

    // forces a limit on the number of returned rows; when set, implies with_wrap.
    // Must default to null (unset): a default of 0 would append " limit 0" to
    // every query and make all results empty (see App.init / max_limit).
    max_limit: ?u32 = null,

    // For improving the uniqueness of request_id in a multi-server setup
    // The instance_id is part of the request_id, thus N instances will generate
    // distinct request_ids from each other
    instance_id: u8 = 0,

    logger: logz.Config = logz.Config{},

    // https://github.com/ziglang/zig/issues/15091
    log_http: bool = if (builtin.is_test) false else true,

    http: httpz.Config = .{},

    db: DB = .{},
};
44 |
--------------------------------------------------------------------------------
/src/dproxy.zig:
--------------------------------------------------------------------------------
1 | // Serves two similar purposes. The first is to put misc global functions. Not
2 | // great, but we don't have many, so not bad either.
3 | // The other is to act as a single source of truth for types within the project.
4 | // In most cases, if you need to reference something _within_ the project, you
5 | // should just import dproxy and that should expose anything you might need.
6 |
7 | const std = @import("std");
8 | pub const testing = @import("t.zig");
9 | pub const App = @import("app.zig").App;
10 | pub const Env = @import("env.zig").Env;
11 | pub const Config = @import("config.zig").Config;
12 | pub const Parameter = @import("parameter.zig").Parameter;
13 |
14 | pub const version = @embedFile("version.txt");
15 |
// Log DuckDB error.
const logz = @import("logz");

// Maps a DuckDB error string to a project error, logging it unless it is a
// specially-recognized case. DuckDB only exposes error strings, so detection
// is by string matching.
pub fn duckdbError(ctx: []const u8, err: []const u8, logger: logz.Logger) error{DuckDBError, ReadOnly} {
    // Writes against a read-only database produce this suffix; surface it as
    // a distinct error so callers can report it without logging it as a bug.
    if (std.mem.endsWith(u8, err, "read-only mode!")) {
        return error.ReadOnly;
    }

    // "desc" previously duplicated "err" byte-for-byte; log the string once.
    logger.level(.Error).ctx(ctx).boolean("duckdb", true).string("err", err).log();
    return error.DuckDBError;
}
27 |
28 | // Different places can return different error. I like having them all in one place.
// Different places can return different error. I like having them all in one place.
// These integer codes are returned to API clients alongside the error message,
// giving them something stable to program against.
pub const codes = struct {
    // an error we didn't anticipate bubbled up to the dispatcher
    pub const INTERNAL_SERVER_ERROR_UNCAUGHT = 0;
    // an error we anticipated, but have no more specific code for
    pub const INTERNAL_SERVER_ERROR_CAUGHT = 1;
    pub const NOT_FOUND = 2;
    pub const INVALID_JSON = 10;
    pub const VALIDATION_ERROR = 11;
    // write attempted while the database is opened with --readonly
    pub const READONLY = 12;
};
37 |
38 |
// Validation-specific error codes (returned inside a VALIDATION_ERROR
// response). Kept in a separate namespace/range (100+) from `codes`.
// NOTE(review): 101 is unused - possibly a retired code; confirm before reusing.
pub const val = struct {
    pub const INVALID_SQL = 100;
    pub const UNSUPPORTED_PARAMETER_TYPE = 102;
    pub const WRONG_PARAMETER_COUNT = 103;
    pub const INVALID_BITSTRING = 104;
};
45 |
--------------------------------------------------------------------------------
/src/env.zig:
--------------------------------------------------------------------------------
1 | const logz = @import("logz");
2 | const validate = @import("validate");
3 | const dproxy = @import("dproxy.zig");
4 |
// Per-request environment handed to web handlers (see t.zig's context() for
// how one is assembled in tests).
pub const Env = struct {
    app: *dproxy.App,

    // This logger has the "$rid=REQUEST_ID" attribute automatically added to any
    // generated log.
    logger: logz.Logger,

    // Most requests will do some validation, so we load a validation context with
    // every request, so that its lifecycle can be managed by the dispatcher.
    validator: *validate.Context(void)
};
16 |
--------------------------------------------------------------------------------
/src/init.zig:
--------------------------------------------------------------------------------
1 | const validate = @import("validate");
2 | const Config = @import("config.zig").Config;
3 |
4 | // There's no facility to do initialization on startup (like Go's init), so
5 | // we'll just hard-code this ourselves. The reason we extract this out is
6 | // largely so that our tests can call this (done when a test context is created)
// Registers every validator the app uses against the shared builder:
// the exec endpoint's body validator (which enforces config.max_parameters)
// and the per-type parameter validators.
pub fn init(builder: *validate.Builder(void), config: Config) !void {
    try @import("web/sql/_sql.zig").init(builder, config.max_parameters);
    try @import("parameter.zig").init(builder);
}
11 |
--------------------------------------------------------------------------------
/src/main.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const zul = @import("zul");
3 | const logz = @import("logz");
4 | const validate = @import("validate");
5 |
6 | const init = @import("init.zig");
7 | const web = @import("web/web.zig");
8 | const dproxy = @import("dproxy.zig");
9 |
10 | const Allocator = std.mem.Allocator;
11 |
// Entry point: parse flags, configure logging, build the App and its
// validators, then serve HTTP until the process is stopped.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    const allocator = gpa.allocator();

    // parseArgs prints its own message on invalid input and returns null.
    const config = (try parseArgs(allocator)) orelse std.process.exit(1);

    // Logging must be set up before anything below that logs.
    try logz.setup(allocator, config.logger);
    logz.info().ctx("Log.setup").
        stringSafe("level", @tagName(logz.level())).
        string("note", "alter via --log_level=LEVEL flag").
        log();

    var app = try dproxy.App.init(allocator, config);
    // Registers the global validators (see init.zig); the builder itself is
    // not needed afterwards.
    var validation_builder = try validate.Builder(void).init(allocator);
    try init.init(&validation_builder, app.config);

    // Blocks for the lifetime of the process.
    try web.start(&app);
}
30 |
// Parses command line flags into a Config. Returns null (after printing a
// message to stdout) when a flag value is invalid; the caller treats that as
// a fatal startup error. --help/-h and --version/-v print and exit(0).
fn parseArgs(allocator: Allocator) !?dproxy.Config {
    const httpz = @import("httpz");

    var args = try zul.CommandLineArgs.parse(allocator);
    // args owns the parsed strings; anything kept past this function
    // (address, cors origin, db path) is duped below.
    defer args.deinit();

    const stdout = std.io.getStdOut().writer();

    if (args.contains("version") or args.contains("v")) {
        // version.txt is data, not a format string. writeAll (rather than
        // print(version, .{})) keeps a stray '{' or '}' in the embedded file
        // from breaking the build.
        try stdout.writeAll(dproxy.version);
        std.process.exit(0);
    }

    if (args.contains("help") or args.contains("h")) {
        try stdout.print("./duckdb-proxy [OPTS] DB_PATH\n\n", .{});
        try stdout.print("OPTS:\n", .{});
        try stdout.print(" --port \n\tPort to listen on (default: 8012)\n\n", .{});
        try stdout.print(" --address \n\tAddress to bind to (default: 127.0.0.1)\n\n", .{});
        try stdout.print(" --readonly\n\tOpens the database in readonly mode\n\n", .{});
        try stdout.print(" --with_wrap\n\tExecutes the provided SQL as \"with _ as ($SQL) select * from _\",\n\tsignificantly limiting the type of queries that can be run\n\n", .{});
        try stdout.print(" --max_limit\n\tForce a \"limit N\" on all SQL, this automatically enables --with_wrap\n\n", .{});
        try stdout.print(" --external_access\n\tEnables the duckdb enable_external_access configuration\n\n", .{});
        try stdout.print(" --pool_size \n\tNumber of connections to keep open (default: 50)\n\n", .{});
        try stdout.print(" --max_params \n\tMaximum number of parameters allowed per request (default: no limit)\n\n", .{});
        try stdout.print(" --max_request_size \n\tMaximum size of the request body (default: 65536)\n\n", .{});
        try stdout.print(" --log_level \n\tLog level to use (default: INFO).\n\tValid values are: info, warn, error, fatal, none. See also log_http)\n\n", .{});
        try stdout.print(" --log_http\n\tLog http request lines, works independently of log_level\n\n", .{});
        try stdout.print(" --cors_origin \n\tEnables CORS response headers using the specified origin\n\n", .{});
        try stdout.print(" -v, --version\n\tPrint the version and exit\n\n", .{});
        std.process.exit(0);
    }

    // Defaults, overridden by the flags parsed below.
    var pool_size: u16 = 50;
    var log_level = logz.Level.Info;
    var max_limit: ?u32 = null;
    var max_parameters: ?u32 = null;
    var cors: ?httpz.Config.CORS = null;
    var port: u16 = 8012;
    var address: []const u8 = "127.0.0.1";
    var max_request_size: u32 = 65536;

    if (args.get("pool_size")) |value| {
        pool_size = std.fmt.parseInt(u16, value, 10) catch {
            try stdout.print("pool_size must be a positive integer\n", .{});
            return null;
        };
        if (pool_size == 0) {
            try stdout.print("pool_size must be greater than 0\n", .{});
            return null;
        }
    }

    if (args.get("max_limit")) |value| {
        max_limit = std.fmt.parseInt(u32, value, 10) catch {
            try stdout.print("max_limit must be a positive integer\n", .{});
            return null;
        };
    }

    if (args.get("max_params")) |value| {
        max_parameters = std.fmt.parseInt(u32, value, 10) catch {
            try stdout.print("max_params must be a positive integer\n", .{});
            return null;
        };
    }

    if (args.get("max_request_size")) |value| {
        max_request_size = std.fmt.parseInt(u32, value, 10) catch {
            try stdout.print("max_request_size must be a positive integer\n", .{});
            return null;
        };
    }

    if (args.get("cors_origin")) |value| {
        cors = .{
            .origin = try allocator.dupe(u8, value),
            .headers = "content-type",
            .max_age = "7200",
        };
    }

    if (args.get("port")) |value| {
        port = std.fmt.parseInt(u16, value, 10) catch {
            try stdout.print("port must be a positive integer\n", .{});
            return null;
        };
    }

    if (args.get("address")) |value| {
        address = try allocator.dupe(u8, value);
    }

    if (args.get("log_level")) |value| {
        log_level = logz.Level.parse(value) orelse {
            try stdout.print("invalid log_level value\n", .{});
            return null;
        };
    }

    return .{
        .db = .{
            // a single positional argument is the database path
            .path = if (args.tail.len == 1) try allocator.dupeZ(u8, args.tail[0]) else "db.duckdb",
            .pool_size = pool_size,
            .readonly = args.contains("readonly"),
            .external_access = args.contains("external_access")
        },
        .http = .{
            .port = port,
            .address = address,
            .cors = cors,
            .response = .{
                // we use chunked responses, so don't need a large response buffer
                .body_buffer_size = 2048
            },
            .request = .{
                .max_body_size = max_request_size,
            },
        },
        .max_limit = max_limit,
        .max_parameters = max_parameters,
        // a max_limit only makes sense when the SQL is wrapped
        .with_wrap = args.contains("with_wrap") or max_limit != null,
        .logger = .{.level = log_level},
        .log_http = args.contains("log_http"),
    };
}
156 |
// Nameless test block: sets up the shared test state (logging, validators,
// the tests/db.duckdb fixture) and references all declarations so that
// `test` blocks in imported files are discovered and run.
test {
    dproxy.testing.setup();
    std.testing.refAllDecls(@This());
}
161 |
--------------------------------------------------------------------------------
/src/parameter.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const typed = @import("typed");
3 | const zuckdb = @import("zuckdb");
4 | const validate = @import("validate");
5 | const dproxy = @import("dproxy.zig");
6 |
7 | const Allocator = std.mem.Allocator;
8 |
pub const Parameter = struct {
    // This does a lot, but having validation + binding in a single place does
    // streamline a lot of code.
    //
    // Validates `value` against the DuckDB-declared type of the parameter at
    // `index` (0-based) and binds it to `stmt`. On validation failure the
    // error is recorded on `validator` and null is bound in the value's
    // place; callers check validator.isValid() after binding everything, so
    // all invalid parameters are reported together instead of stopping at
    // the first. `aa` is a per-request arena; nothing allocated here is
    // individually freed.
    pub fn validateAndBind(aa: Allocator, index: usize, stmt: zuckdb.Stmt, value: typed.Value, validator: *validate.Context(void)) !void {
        // a JSON null binds as SQL NULL regardless of the declared type
        if (std.meta.activeTag(value) == typed.Value.null) {
            return stmt.bindValue(null, index);
        }

        // for the "field" of the error message
        validator.field = null;
        validator.force_prefix = try fieldName(aa, index);
        // Every arm follows the same pattern: run the type-specific
        // validator, bind the coerced value on success, else bind null (the
        // failure was already recorded on the validator).
        switch (stmt.dataType(index)) {
            .boolean => {
                switch (try bool_validator.validateValue(value, validator)) {
                    .bool => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            .uuid => {
                switch (try uuid_validator.validateValue(value, validator)) {
                    .string => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            .tinyint => {
                switch (try i8_validator.validateValue(value, validator)) {
                    .i8 => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            .smallint => {
                switch (try i16_validator.validateValue(value, validator)) {
                    .i16 => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            .integer => {
                switch (try i32_validator.validateValue(value, validator)) {
                    .i32 => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            .bigint => {
                switch (try i64_validator.validateValue(value, validator)) {
                    .i64 => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            .hugeint => {
                switch (try i128_validator.validateValue(value, validator)) {
                    .i128 => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            .utinyint => {
                switch (try u8_validator.validateValue(value, validator)) {
                    .u8 => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            .usmallint => {
                switch (try u16_validator.validateValue(value, validator)) {
                    .u16 => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            .uinteger => {
                switch (try u32_validator.validateValue(value, validator)) {
                    .u32 => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            .ubigint => {
                switch (try u64_validator.validateValue(value, validator)) {
                    .u64 => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            .real => {
                switch (try f32_validator.validateValue(value, validator)) {
                    .f32 => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            .double => {
                switch (try f64_validator.validateValue(value, validator)) {
                    .f64 => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            // decimals are bound as doubles
            .decimal => {
                switch (try f64_validator.validateValue(value, validator)) {
                    .f64 => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            // timestamps are bound as i64 values (epoch-based integers)
            .timestamp, .timestamptz => {
                switch (try i64_validator.validateValue(value, validator)) {
                    .i64 => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            .varchar => {
                switch (try string_validator.validateValue(value, validator)) {
                    .string => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            // blob input is base64 (see the blob_validator setup in init)
            .blob => {
                switch (try blob_validator.validateValue(value, validator)) {
                    .string => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            // a string of '0'/'1' characters (see validateBitstring)
            .bit => {
                switch (try bitstring_validator.validateValue(value, validator)) {
                    .string => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            .date => {
                switch (try date_validator.validateValue(value, validator)) {
                    .date => |v| {
                        return stmt.bindValue(zuckdb.Date{
                            .year = v.year,
                            .month = @intCast(v.month),
                            .day = @intCast(v.day),
                        }, index);
                    },
                    else => return stmt.bindValue(null, index),
                }
            },
            .time => {
                switch (try time_validator.validateValue(value, validator)) {
                    .time => |v| {
                        return stmt.bindValue(zuckdb.Time{
                            .hour = @intCast(v.hour),
                            .min = @intCast(v.min),
                            .sec = @intCast(v.sec),
                            .micros = @intCast(v.micros),
                        }, index);
                    },
                    else => return stmt.bindValue(null, index),
                }
            },
            // enum values are bound by their string label
            .@"enum" => {
                switch (try string_validator.validateValue(value, validator)) {
                    .string => |v| return stmt.bindValue(v, index),
                    else => return stmt.bindValue(null, index),
                }
            },
            .interval => {
                switch (value) {
                    .string => { // can either be a string, e.g. "4 hours"
                        switch (try string_validator.validateValue(value, validator)) {
                            .string => |v| return stmt.bindValue(v, index),
                            else => return stmt.bindValue(null, index),
                        }
                    },
                    else => { // or an object {months: X, days: Y, micros: Z}
                        switch (try interval_validator.validateValue(value, validator)) {
                            // the .? is safe: interval_validator defaults each
                            // field to 0 (see init below)
                            .map => |v| return stmt.bindValue(zuckdb.Interval{
                                .months = v.get("months").?.i32,
                                .days = v.get("days").?.i32,
                                .micros = v.get("micros").?.i64,
                            }, index),
                            else => return stmt.bindValue(null, index),
                        }
                    }
                }
            },
            else => |tpe| {
                const type_name = @tagName(tpe);
                // The "${d}" renders a $1-style (1-based) placeholder.
                // NOTE(review): "${s}" also renders a literal '$' before the
                // type name - that looks unintended; confirm.
                // Note also the data's "index" is 0-based while the message
                // is 1-based.
                return validator.add(.{
                    .code = dproxy.val.UNSUPPORTED_PARAMETER_TYPE,
                    .err = try std.fmt.allocPrint(aa, "Unsupported parameter type: ${d} - ${s}", .{index+1, type_name}),
                    .data = try validator.dataBuilder().put("index", index).put("type", type_name).done(),
                });
            }
        }
    }

    // A few places need to generate this error, having it here makes sure that it's consistent
    // Records a WRONG_PARAMETER_COUNT error on `validator` (with both counts
    // attached as data) and always returns error.Validation.
    pub fn invalidParameterCount(aa: Allocator, stmt_count: usize, input_count: usize, validator: *validate.Context(void)) !void {
        const err_format = "SQL statement requires {d} parameter{s}, {d} {s} given";
        const err = try std.fmt.allocPrint(aa, err_format, .{
            stmt_count,
            if (stmt_count == 1) "" else "s",
            input_count,
            if (input_count == 1) "was" else "were",
        });

        validator.addInvalidField(.{
            .err = err,
            .field = "params",
            .code = dproxy.val.WRONG_PARAMETER_COUNT,
            .data = try validator.dataBuilder().put("stmt", stmt_count).put("input", input_count).done(),
        });
        return error.Validation;
    }
};
210 |
211 | // We want to give nice error messages that reference the exact parameter
212 | // which is invalid. This is an unfortunate detail of our validation framework.
213 | // Normally, it could take care of it internally, but we're manually validating
214 | // the params, because it's dynamic, based on the specific query.
// Returns "params.N" for parameter index `i`. Indexes 0-50 return
// comptime-generated static strings (no allocation, nothing to free - the
// common case); anything larger falls back to a runtime allocation owned by
// the caller (callers pass a per-request arena, so it is never freed
// individually). This replaces a 51-prong hand-written switch with a
// comptime-built lookup table - identical behavior, far less code.
fn fieldName(allocator: std.mem.Allocator, i: usize) ![]const u8 {
    const max_static = 50;
    const static_names = comptime blk: {
        @setEvalBranchQuota(10_000); // 51 comptime format calls
        var names: [max_static + 1][]const u8 = undefined;
        for (&names, 0..) |*name, n| {
            name.* = std.fmt.comptimePrint("params.{d}", .{n});
        }
        break :blk names;
    };

    if (i <= max_static) {
        return static_names[i];
    }
    return std.fmt.allocPrint(allocator, "params.{d}", .{i});
}
271 |
// Global validator singletons, one per parameter type family. They are
// created once in init() below and then only read (by validateAndBind) for
// every request.
var i8_validator: *validate.Int(i8, void) = undefined;
var i16_validator: *validate.Int(i16, void) = undefined;
var i32_validator: *validate.Int(i32, void) = undefined;
var i64_validator: *validate.Int(i64, void) = undefined;
var i128_validator: *validate.Int(i128, void) = undefined;
var u8_validator: *validate.Int(u8, void) = undefined;
var u16_validator: *validate.Int(u16, void) = undefined;
var u32_validator: *validate.Int(u32, void) = undefined;
var u64_validator: *validate.Int(u64, void) = undefined;
var f32_validator: *validate.Float(f32, void) = undefined;
var f64_validator: *validate.Float(f64, void) = undefined;
var bool_validator: *validate.Bool(void) = undefined;
var uuid_validator: *validate.UUID(void) = undefined;
var date_validator: *validate.Date(void) = undefined;
var time_validator: *validate.Time(void) = undefined;
var string_validator: *validate.String(void) = undefined;
var blob_validator: *validate.String(void) = undefined;
var bitstring_validator: *validate.String(void) = undefined;
var interval_validator: *validate.Object(void) = undefined;
291 |
// Called in init.zig (and from the test setup), once at startup; builds the
// global validators declared above.
pub fn init(builder: *validate.Builder(void)) !void {
    // All of these validators are very simple. They largely just assert type
    // correctness.

    // JSON can deliver large integers as strings, so the wider integer types
    // enable string parsing (.parse = true); dates and times also arrive as
    // strings and are parsed.
    i8_validator = builder.int(i8, .{});
    i16_validator = builder.int(i16, .{});
    i32_validator = builder.int(i32, .{});
    i64_validator = builder.int(i64, .{.parse = true});
    i128_validator = builder.int(i128, .{.parse = true});
    u8_validator = builder.int(u8, .{});
    u16_validator = builder.int(u16, .{});
    u32_validator = builder.int(u32, .{});
    u64_validator = builder.int(u64, .{.parse = true});
    f32_validator = builder.float(f32, .{});
    f64_validator = builder.float(f64, .{});
    bool_validator = builder.boolean(.{});
    uuid_validator = builder.uuid(.{});
    date_validator = builder.date(.{.parse = true});
    time_validator = builder.time(.{.parse = true});
    string_validator = builder.string(.{});
    // blobs are sent base64-encoded
    blob_validator = builder.string(.{.decode = .base64});
    bitstring_validator = builder.string(.{.function = validateBitstring});
    // each interval field defaults to 0, which is what makes the `.?` in
    // validateAndBind's interval arm safe
    interval_validator = builder.object(&.{
        builder.field("months", builder.int(i32, .{.default = 0})),
        builder.field("days", builder.int(i32, .{.default = 0})),
        builder.field("micros", builder.int(i64, .{.default = 0})),
    }, .{});
}
323 |
// Custom rule for bit-type parameters: the string may contain only '0' and
// '1' characters. On any other character, an INVALID_BITSTRING error is
// recorded on the context and null is returned; otherwise the value is
// passed through unchanged. A null input passes through as null.
fn validateBitstring(optional_value: ?[]const u8, context: *validate.Context(void)) !?[]const u8 {
    const bits = optional_value orelse return null;

    if (std.mem.indexOfNone(u8, bits, "01") != null) {
        try context.add(.{
            .code = dproxy.val.INVALID_BITSTRING,
            .err = "bitstring must contain only 0s and 1s",
        });
        return null;
    }

    return bits;
}
338 |
--------------------------------------------------------------------------------
/src/t.zig:
--------------------------------------------------------------------------------
1 | // Test helpers.
2 | const std = @import("std");
3 | const logz = @import("logz");
4 | const typed = @import("typed");
5 | const zuckdb = @import("zuckdb");
6 | const validate = @import("validate");
7 | pub const web = @import("httpz").testing;
8 | const dproxy = @import("dproxy.zig");
9 |
10 | pub usingnamespace @import("zul").testing;
11 | pub const allocator = std.testing.allocator;
12 |
13 | // We will _very_ rarely use this. Zig test doesn't have test lifecycle hooks. We
14 | // can setup globals on startup, but we can't clean this up properly. If we use
15 | // std.testing.allocator for these, it'll report a leak. So, we create a gpa
16 | // without any leak reporting, and use that for the few globals that we have.
17 | var gpa = std.heap.GeneralPurposeAllocator(.{}){};
18 | const leaking_allocator = gpa.allocator();
19 |
// Silences all logging (level .None). Used by tests that deliberately
// trigger errors and don't want the noise; pair with restoreLogs().
pub fn noLogs() void {
    logz.setup(leaking_allocator, .{.pool_size = 1, .level = .None, .output = .stderr}) catch unreachable;
}
23 |
// Restores the default test logging configuration (errors only, to stderr).
// Counterpart to noLogs(); also the initial configuration set by setup().
pub fn restoreLogs() void {
    logz.setup(leaking_allocator, .{.pool_size = 2, .level = .Error, .output = .stderr}) catch unreachable;
}
27 |
28 | // Run once, in main.zig's nameless test {...} block
// Global test setup: configures logging, registers the validators (mirroring
// main.zig's startup) and recreates the tests/db.duckdb fixture database.
pub fn setup() void {
    restoreLogs();

    var builder = validate.Builder(void).init(leaking_allocator) catch unreachable;
    @import("init.zig").init(&builder, .{}) catch unreachable;

    {
        // start from a clean slate; a missing file is fine
        std.fs.cwd().deleteFile("tests/db.duckdb") catch |err| switch (err) {
            error.FileNotFound => {},
            else => {
                std.debug.print("Failed to delete 'tests/db.duckdb' - {any}\n", .{err});
                unreachable;
            }
        };

        // create some dummy data
        const db = zuckdb.DB.init(allocator, "tests/db.duckdb", .{}) catch unreachable;
        defer db.deinit();

        var conn = db.conn() catch unreachable;
        defer conn.deinit();

        _ = conn.exec("create type everything_type as enum ('type_a', 'type_b')", .{}) catch unreachable;

        // a table with (nearly) every DuckDB column type, used by the
        // binding/serialization tests
        _ = conn.exec(
            \\ create table everythings (
            \\   col_tinyint tinyint,
            \\   col_smallint smallint,
            \\   col_integer integer,
            \\   col_bigint bigint,
            \\   col_hugeint hugeint,
            \\   col_utinyint utinyint,
            \\   col_usmallint usmallint,
            \\   col_uinteger uinteger,
            \\   col_ubigint ubigint,
            \\   col_real real,
            \\   col_double double,
            \\   col_decimal decimal(5, 2),
            \\   col_bool bool,
            \\   col_date date,
            \\   col_time time,
            \\   col_timestamp timestamp,
            \\   col_blob blob,
            \\   col_varchar varchar,
            \\   col_uuid uuid,
            \\   col_json json,
            \\   col_enum everything_type,
            \\   col_list_integer integer[],
            \\   col_list_varchar varchar[],
            \\   col_interval interval,
            \\   col_bitstring bit
            \\ )
        , .{}) catch unreachable;
    }
}
84 |
85 | // The test context contains an *App and *Env that we can use in our tests.
86 | // It also includes a httpz.testing instance, so that we can easily test http
87 | // handlers. It uses and exposes an arena allocator so that, any memory we need
88 | // to allocate within the test itself, doesn't have to be micro-managed.
// Builds a test Context. The arena owns the App and Env structs (a single
// free tears them down); the Context itself is allocated separately since
// deinit() destroys the arena before destroying the Context.
pub fn context(config: Context.Config) *Context {
    var arena = allocator.create(std.heap.ArenaAllocator) catch unreachable;
    arena.* = std.heap.ArenaAllocator.init(allocator);

    var aa = arena.allocator();
    const app = aa.create(dproxy.App) catch unreachable;
    app.* = dproxy.App.init(allocator, .{
        .db = .{
            .pool_size = 2,
            .path = "tests/db.duckdb",
        },
        .max_limit = config.max_limit,
        // mirrors parseArgs: a max_limit implies with_wrap
        .with_wrap = config.with_wrap or config.max_limit != null,
    }) catch unreachable;

    const env = aa.create(dproxy.Env) catch unreachable;
    env.* = dproxy.Env{
        .app = app,
        .logger = logz.logger().multiuse(),
        // checked back in by Context.deinit
        .validator = app.validators.acquire({}) catch unreachable,
    };

    const ctx = allocator.create(Context) catch unreachable;
    ctx.* = .{
        ._arena = arena,
        .app = app,
        .env = env,
        .arena = aa,
        .web = web.init(.{}),
    };
    return ctx;
}
121 |
pub const Context = struct {
    // owns the App/Env allocations; destroyed last in deinit()
    _arena: *std.heap.ArenaAllocator,
    app: *dproxy.App,
    env: *dproxy.Env,
    // httpz testing harness for building fake requests/responses
    web: web.Testing,
    // arena-backed allocator for ad-hoc allocations within a test
    arena: std.mem.Allocator,

    const Config = struct {
        with_wrap: bool = false,
        max_limit: ?u32 = null,
    };

    // Tear-down order matters: release the logger and validator back to
    // their pools before deinit-ing the app that owns those pools, and
    // destroy the arena (which owns app/env) only after app.deinit().
    pub fn deinit(self: *Context) void {
        self.env.logger.release();
        self.app.validators.release(self.env.validator);

        self.web.deinit();
        self.app.deinit();

        self._arena.deinit();
        allocator.destroy(self._arena);
        allocator.destroy(self);
    }

    // Asserts the env's validator recorded errors matching `expectation`.
    pub fn expectInvalid(self: Context, expectation: anytype) !void {
        return validate.testing.expectInvalid(expectation, self.env.validator);
    }

    // Clears per-request state (validation errors, fake response) so the
    // same context can be used for another request.
    pub fn reset(self: *Context) void {
        self.env.validator.reset();
        self.web.deinit();
        self.web = web.init(.{});
    }

    // Runs `sql` against the test database and returns the first row, or
    // null. Any error aborts the test with the underlying DuckDB message.
    pub fn getRow(self: *Context, sql: [:0]const u8, values: anytype) ?zuckdb.OwningRow {
        var conn = self.app.dbs.acquire() catch unreachable;
        defer self.app.dbs.release(conn);

        return conn.row(sql, values) catch |err| {
            std.log.err("GetRow: {s}\nErr: {s}", .{sql, conn.err orelse @errorName(err)});
            unreachable;
        } orelse return null;
    }

    // For tests that don't expect a handler error: dumps the error (and,
    // for error.Validation, the accumulated validation errors) then fails.
    pub fn handlerError(self: *Context, err: anyerror) void {
        switch (err) {
            error.Validation => self.env.validator.dump() catch unreachable,
            else => std.debug.print("Unexpected handler error: {any}:\n", .{err}),
        }
        unreachable;
    }
};
174 |
--------------------------------------------------------------------------------
/src/version.txt:
--------------------------------------------------------------------------------
1 | local-dev
2 |
--------------------------------------------------------------------------------
/src/web/sql/_sql.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const validate = @import("validate");
3 |
4 | pub const web = @import("../web.zig");
5 | pub const dproxy = web.dproxy;
6 |
7 | // expose handlers
8 | const _exec = @import("exec.zig");
9 | pub const exec = _exec.handler;
10 |
// Registers the validators for the sql endpoints. Called from src/init.zig
// at startup (and from the test setup).
pub fn init(builder: *validate.Builder(void), max_parameters: ?u32) !void {
    try _exec.init(builder, max_parameters);
}
14 |
--------------------------------------------------------------------------------
/src/web/sql/exec.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const zul = @import("zul");
3 | const logz = @import("logz");
4 | const httpz = @import("httpz");
5 | const typed = @import("typed");
6 | const zuckdb = @import("zuckdb");
7 | const validate = @import("validate");
8 | const Buffer = @import("buffer").Buffer;
9 |
10 | const base = @import("_sql.zig");
11 |
12 | const dproxy = base.dproxy;
13 | const Env = dproxy.Env;
14 | const Parameter = dproxy.Parameter;
15 | const Allocator = std.mem.Allocator;
16 |
// Validator for the exec endpoint's request body; built once at startup in
// init() below and then only read by handler().
var exec_validator: *validate.Object(void) = undefined;
// Called (via _sql.zig) from src/init.zig at startup.
pub fn init(builder: *validate.Builder(void), max_parameters: ?u32) !void {
    exec_validator = builder.object(&.{
        // "sql" is required, 1 to 10K characters
        builder.field("sql", builder.string(.{
            .min = 1,
            .max = 10_000,
            .required = true,
        })),
        // "params" is optional; its length is only capped when --max_params
        // was configured
        builder.field("params", builder.array(null, .{.max = if (max_parameters) |max| @intCast(max) else null})),
    }, .{});
}
28 |
// The exec endpoint: validates the {sql, params} body, optionally wraps the
// SQL, prepares and binds it, then streams the result as chunked JSON of the
// shape {"cols": [...], "types": [...], "rows": [[...], ...]}.
pub fn handler(env: *Env, req: *httpz.Request, res: *httpz.Response) !void {
    const input = try base.web.validateBody(env, req, exec_validator);

    const aa = res.arena;
    const sql = input.get("sql").?.string;
    const params = if (input.get("params")) |p| p.array.items else &[_]typed.Value{};

    var validator = env.validator;

    const app = env.app;

    // The zuckdb library is going to dupeZ the SQL to get a null-terminated string
    // We might as well do this with our arena allocator.
    var buf = try app.buffer_pool.acquire();
    defer buf.release();

    // With --with_wrap, the user's SQL is embedded as a CTE so only plain
    // selects survive, and the configured limit (if any) is appended.
    const sql_string = switch (app.with_wrap) {
        false => sql,
        true => blk: {
            // the wrapper text below is 40 bytes
            // NOTE(review): writeAssumeCapacity(l) implies app.max_limit is a
            // pre-rendered " limit N"-style string (Config.max_limit is a
            // ?u32) - confirm against app.zig, and that the 50-byte slack
            // also covers the limit text.
            try buf.ensureTotalCapacity(sql.len + 50);
            buf.writeAssumeCapacity("with _dproxy as (");
            // if we're wrapping, we need to strip any trailing ; to keep it a valid SQL
            buf.writeAssumeCapacity(stripTrailingSemicolon(sql));
            buf.writeAssumeCapacity(") select * from _dproxy");
            if (app.max_limit) |l| {
                buf.writeAssumeCapacity(l);
            }
            break :blk buf.string();
        },
    };

    var conn = try app.dbs.acquire();
    defer app.dbs.release(conn);

    // A prepare failure surfaces DuckDB's own message as a validation error
    // on the "sql" field (duped since conn.err's lifetime is the conn's).
    var stmt = conn.prepare(sql_string, .{}) catch |err| switch (err) {
        error.DuckDBError => {
            validator.addInvalidField(.{
                .field = "sql",
                .err = if (conn.err) |ce| try aa.dupe(u8, ce) else "invalid sql",
                .code = dproxy.val.INVALID_SQL,
            });
            return error.Validation;
        },
        else => return err,
    };
    defer stmt.deinit();

    const parameter_count = stmt.numberOfParameters();
    if (parameter_count != params.len) {
        return Parameter.invalidParameterCount(aa, parameter_count, params.len, validator);
    }

    // bind everything before checking validity, so a response reports every
    // invalid parameter at once
    for (params, 0..) |param, i| {
        try Parameter.validateAndBind(aa, i, stmt, param, validator);
    }
    if (!validator.isValid()) {
        return error.Validation;
    }

    var rows = stmt.query(null) catch |err| switch (err) {
        error.DuckDBError => {
            validator.addInvalidField(.{
                .field = "sql",
                .err = if (conn.err) |ce| try aa.dupe(u8, ce) else "invalid sql",
                .code = dproxy.val.INVALID_SQL,
            });
            return error.Validation;
        },
        else => return err,
    };
    defer rows.deinit();

    res.content_type = .JSON;

    // AFAIC, DuckDB's API is broken when trying to get the changed rows. There's
    // a duckdb_rows_changed, but it's really broken. You see, internally, an insert
    // or update or delete stores the # of affected rows in the returned result.
    // But this, of course, doesn't work when the statement includes a
    // "returning...". So on insert/delete/update we can get a row back without
    // any way to tell whether it's the result of a "returning ..." or if it's
    // the internal changed row (internally called CHANGED_ROWS result).
    // What's worse is that if we call `duckdb_rows_changed`, it'll consume the
    // first row of the result, whether it's a `CHANGED_ROWS` or a real row from
    // returning ....

    // Despite the above (or maybe because of it), we're going to special-case
    // a result with a single column, of type i64 (DUCKDB_TYPE_BIGINT) where the
    // column name is "Count". This is a common case: it's the result from a
    // insert/update/delete without a "returning". It's still ambiguous: maybe
    // the statement had "returning Count"; - we can't tell. But it doesn't matter
    // even if it IS a returning, it'll be handled the same way
    if (isSingleI64Result(&rows) == true) {
        const column_name = rows.columnName(0);
        // column_name is a [*c]const u8, hence this unlooped comparison
        if (column_name[0] == 'C' and column_name[1] == 'o' and column_name[2] == 'u' and column_name[3] == 'n' and column_name[4] == 't' and column_name[5] == 0) {
            var optional_count: ?i64 = 0;
            if (try rows.next()) |row| {
                optional_count = row.get(?i64, 0);
            }
            const count = optional_count orelse {
                res.body = "{\"cols\":[\"Count\"],\"rows\":[[null]]}";
                return;
            };

            if (count == 0) {
                // further special case count == 0 (very common)
                res.body = "{\"cols\":[\"Count\"],\"rows\":[[0]]}";
            } else if (count == 1) {
                // further special case count == 1 (very common)
                res.body = "{\"cols\":[\"Count\"],\"rows\":[[1]]}";
            } else {
                res.body = try std.fmt.allocPrint(aa, "{{\"cols\":[\"Count\"],\"rows\":[[{d}]]}}", .{count});
            }
            return;
        }
    }

    const logger = env.logger;

    // (content_type was already set above; harmless duplicate)
    res.content_type = .JSON;
    buf.clearRetainingCapacity();
    const writer = buf.writer();
    const vectors = rows.vectors;
    try buf.write("{\n \"cols\": [");
    for (0..vectors.len) |i| {
        try std.json.encodeJsonString(std.mem.span(rows.columnName(i)), .{}, writer);
        try buf.writeByte(',');
    }
    // strip out the last comma
    buf.truncate(1);
    try buf.write("],\n \"types\": [");

    for (vectors) |*vector| {
        try buf.writeByte('"');
        try vector.writeType(writer);
        try buf.write("\",");
    }
    buf.truncate(1);

    const arena = res.arena;
    try buf.write("],\n \"rows\": [");
    if (try rows.next()) |first_row| {
        try buf.write("\n [");
        try writeRow(arena, &first_row, buf, vectors, logger);

        // flush a chunk to the client every 50 rows
        var row_count: usize = 1;
        while (try rows.next()) |row| {
            try buf.write("],\n [");
            try writeRow(arena, &row, buf, vectors, logger);
            if (@mod(row_count, 50) == 0) {
                try res.chunk(buf.string());
                buf.clearRetainingCapacity();
            }
            row_count += 1;
        }
        try buf.writeByte(']');
    }
    try buf.write("\n]\n}");
    try res.chunk(buf.string());
}
189 |
// Serializes one result row as a JSON array fragment into buf: values are
// comma-separated, lists become nested JSON arrays, and the final trailing
// comma is stripped. The caller supplies the surrounding '[' / ']'.
fn writeRow(allocator: Allocator, row: *const zuckdb.Row, buf: *zul.StringBuilder, vectors: []zuckdb.Vector, logger: logz.Logger) !void {
	const writer = buf.writer();

	for (vectors, 0..) |*vector, column| {
		switch (vector.type) {
			.scalar => |scalar_type| {
				try translateScalar(allocator, row, scalar_type, column, writer, logger);
				try buf.writeByte(',');
			},
			.list => |list_type| blk: {
				const maybe_list = row.lazyList(column);
				if (maybe_list == null) {
					try buf.write("null,");
					break :blk;
				}
				const list = maybe_list.?;
				if (list.len == 0) {
					try buf.write("[],");
					break :blk;
				}

				try buf.writeByte('[');
				var item: usize = 0;
				while (item < list.len) : (item += 1) {
					try translateScalar(allocator, &list, list_type.child, item, writer, logger);
					try buf.writeByte(',');
				}
				// drop the trailing comma inside the list
				buf.truncate(1);
				try buf.write("],");
			},
		}
	}
	// drop the trailing comma after the last column
	buf.truncate(1);
}
224 |
// src can either be a zuckdb.Row or a zuckdb.LazyList
// Writes the single scalar value at column/index `i` as JSON to `writer`.
// Unknown DuckDB types are serialized as "???" and logged (warn), rather than
// failing the whole request.
fn translateScalar(allocator: Allocator, src: anytype, column_type: zuckdb.Vector.Type.Scalar, i: usize, writer: anytype, logger: logz.Logger) !void {
	if (src.isNull(i)) {
		return writer.writeAll("null");
	}

	switch (column_type) {
		.decimal => try std.fmt.format(writer, "{d}", .{src.get(f64, i)}),
		.@"enum" => try std.json.encodeJsonString(try src.get(zuckdb.Enum, i).rowCache(), .{}, writer),
		.simple => |s| switch (s) {
			zuckdb.c.DUCKDB_TYPE_VARCHAR => try std.json.encodeJsonString(src.get([]const u8, i), .{}, writer),
			zuckdb.c.DUCKDB_TYPE_BOOLEAN => try writer.writeAll(if (src.get(bool, i)) "true" else "false"),
			zuckdb.c.DUCKDB_TYPE_TINYINT => try std.fmt.formatInt(src.get(i8, i), 10, .lower, .{}, writer),
			zuckdb.c.DUCKDB_TYPE_SMALLINT => try std.fmt.formatInt(src.get(i16, i), 10, .lower, .{}, writer),
			zuckdb.c.DUCKDB_TYPE_INTEGER => try std.fmt.formatInt(src.get(i32, i), 10, .lower, .{}, writer),
			zuckdb.c.DUCKDB_TYPE_BIGINT => try std.fmt.formatInt(src.get(i64, i), 10, .lower, .{}, writer),
			zuckdb.c.DUCKDB_TYPE_HUGEINT => try std.fmt.formatInt(src.get(i128, i), 10, .lower, .{}, writer),
			zuckdb.c.DUCKDB_TYPE_UTINYINT => try std.fmt.formatInt(src.get(u8, i), 10, .lower, .{}, writer),
			zuckdb.c.DUCKDB_TYPE_USMALLINT => try std.fmt.formatInt(src.get(u16, i), 10, .lower, .{}, writer),
			zuckdb.c.DUCKDB_TYPE_UINTEGER => try std.fmt.formatInt(src.get(u32, i), 10, .lower, .{}, writer),
			zuckdb.c.DUCKDB_TYPE_UBIGINT => try std.fmt.formatInt(src.get(u64, i), 10, .lower, .{}, writer),
			zuckdb.c.DUCKDB_TYPE_UHUGEINT => try std.fmt.formatInt(src.get(u128, i), 10, .lower, .{}, writer),
			zuckdb.c.DUCKDB_TYPE_FLOAT => try std.fmt.format(writer, "{d}", .{src.get(f32, i)}),
			zuckdb.c.DUCKDB_TYPE_DOUBLE => try std.fmt.format(writer, "{d}", .{src.get(f64, i)}),
			zuckdb.c.DUCKDB_TYPE_UUID => try std.json.encodeJsonString(&src.get(zuckdb.UUID, i), .{}, writer),
			zuckdb.c.DUCKDB_TYPE_DATE => {
				// std.fmt's integer formatting is broken when dealing with signed integers
				// we use our own formatter
				// https://github.com/ziglang/zig/issues/19488
				const date = src.get(zuckdb.Date, i);
				try std.fmt.format(writer, "\"{d}-{s}-{s}\"", .{date.year, paddingTwoDigits(date.month), paddingTwoDigits(date.day)});
			},
			zuckdb.c.DUCKDB_TYPE_TIME => {
				// std.fmt's integer formatting is broken when dealing with signed integers
				// we use our own formatter. But for micros, I'm lazy and cast it to unsigned,
				// which std.fmt handles better.
				const time = src.get(zuckdb.Time, i);
				// {d:0>6} zero-pads micros to 6 digits. The previous spec,
				// {d:6>0}, parsed as fill='6'/align='>'/width=0 and applied no
				// padding at all (e.g. micros=1000 printed "1000", not "001000").
				try std.fmt.format(writer, "\"{s}:{s}:{s}.{d:0>6}\"", .{paddingTwoDigits(time.hour), paddingTwoDigits(time.min), paddingTwoDigits(time.sec), @as(u32, @intCast(time.micros))});
			},
			zuckdb.c.DUCKDB_TYPE_TIMESTAMP, zuckdb.c.DUCKDB_TYPE_TIMESTAMP_TZ => try std.fmt.formatInt(src.get(i64, i), 10, .lower, .{}, writer),
			zuckdb.c.DUCKDB_TYPE_INTERVAL => {
				const interval = src.get(zuckdb.Interval, i);
				try std.fmt.format(writer, "{{\"months\":{d},\"days\":{d},\"micros\":{d}}}", .{interval.months, interval.days, interval.micros});
			},
			zuckdb.c.DUCKDB_TYPE_BIT => try std.json.encodeJsonString(try zuckdb.bitToString(allocator, src.get([]u8, i)), .{}, writer),
			zuckdb.c.DUCKDB_TYPE_BLOB => {
				// blobs are emitted as base64 strings
				const v = src.get([]const u8, i);
				const encoder = std.base64.standard.Encoder;
				const out = try allocator.alloc(u8, encoder.calcSize(v.len));
				try std.json.encodeJsonString(encoder.encode(out, v), .{}, writer);
			},
			else => |duckdb_type| {
				try writer.writeAll("\"???\"");
				logger.level(.Warn).ctx("serialize.unknown_type").int("duckdb_type", duckdb_type).log();
			}
		},
	}
}
283 |
// Writes the logical type name of a varchar-ish column to buf: the type's
// alias when DuckDB provides one (e.g. "json"), otherwise plain "varchar".
fn writeVarcharType(result: *zuckdb.c.duckdb_result, column_index: usize, buf: *zul.StringBuilder) !void {
	var logical_type = zuckdb.c.duckdb_column_logical_type(result, column_index);
	defer zuckdb.c.duckdb_destroy_logical_type(&logical_type);

	const alias = zuckdb.c.duckdb_logical_type_get_alias(logical_type);
	if (alias != null) {
		// alias is allocated by duckdb and must be freed through duckdb_free
		defer zuckdb.c.duckdb_free(alias);
		return buf.write(std.mem.span(alias));
	}
	return buf.write("varchar");
}
294 |
// Formats a value in [0, 60] as a two-digit, zero-padded decimal string via a
// comptime lookup table (avoids std.fmt signed-int padding issues, see
// https://github.com/ziglang/zig/issues/19488).
// Used for date/time components: hour/min/sec may legitimately be 0
// (midnight), so 0 must be accepted — the previous assert (value > 0)
// panicked on it in safe builds.
fn paddingTwoDigits(value: i8) [2]u8 {
	std.debug.assert(value >= 0 and value < 61);
	const digits = "0001020304050607080910111213141516171819" ++
		"2021222324252627282930313233343536373839" ++
		"4041424344454647484950515253545556575859" ++
		"60";
	const index: usize = @intCast(value);
	return digits[index * 2 ..][0..2].*;
}
304 |
// True when the result is exactly one row and one column of type BIGINT —
// the shape produced by insert/update/delete without a "returning" clause
// (DuckDB's internal CHANGED_ROWS result).
fn isSingleI64Result(rows: *const zuckdb.Rows) bool {
	if (rows.column_count != 1 or rows.count() != 1) {
		return false;
	}

	return switch (rows.vectors[0].type) {
		.scalar => |scalar| switch (scalar) {
			.simple => |duckdb_type| duckdb_type == zuckdb.c.DUCKDB_TYPE_BIGINT,
			else => false,
		},
		else => false,
	};
}
321 |
// Resets sb, writes prefix into it, then streams the row as a JSON array.
// Returns the accumulated buffer contents (owned by sb, valid until the next
// reset/write).
// NOTE(review): this assumes `writer` is sb's own writer so the JSON lands
// after the prefix in the same buffer — confirm at call sites.
fn serializeRow(row: []typed.Value, prefix: []const u8, sb: *zul.StringBuilder, writer: anytype) ![]const u8 {
	sb.clearRetainingCapacity();
	try sb.write(prefix);
	try std.json.stringify(row, .{}, writer);
	return sb.string();
}
328 |
// Strips trailing whitespace, then trailing semicolons, from sql. A single
// pass each — whitespace *between* the statement and the semicolons is kept,
// e.g. "select 1 ; \n" -> "select 1 " (matches prior behavior).
// The previous implementation started at sql.len-1 and decremented a usize
// under an always-true `i >= 0` guard: it underflowed (panic in safe builds)
// on an empty string, an all-whitespace string, or an all-semicolon string.
fn stripTrailingSemicolon(sql: []const u8) []const u8 {
	var end: usize = sql.len;
	while (end > 0 and std.ascii.isWhitespace(sql[end - 1])) {
		end -= 1;
	}
	while (end > 0 and sql[end - 1] == ';') {
		end -= 1;
	}
	return sql[0..end];
}
340 |
341 | const t = dproxy.testing;
// A malformed JSON body surfaces error.InvalidJson (the dispatcher turns
// this into a 400 response).
test "exec: invalid json body" {
	var tc = t.context(.{});
	defer tc.deinit();

	tc.web.body("{hi");
	try t.expectError(error.InvalidJson, handler(tc.env, tc.web.req, tc.web.res));
}
349 |
// Input validation: sql must be a string and params must be an array;
// both violations are reported in a single validation error.
test "exec: invalid input" {
	var tc = t.context(.{});
	defer tc.deinit();

	tc.web.json(.{.sql = 32, .params = true});
	try t.expectError(error.Validation, handler(tc.env, tc.web.req, tc.web.res));
	try tc.expectInvalid(.{.code = validate.codes.TYPE_STRING, .field = "sql"});
	try tc.expectInvalid(.{.code = validate.codes.TYPE_ARRAY, .field = "params"});
}
359 |
// Unparsable SQL is reported as a validation error on the "sql" field,
// carrying DuckDB's own parser error message.
test "exec: invalid sql" {
	var tc = t.context(.{});
	defer tc.deinit();

	tc.web.json(.{.sql = "update x", });
	try t.expectError(error.Validation, handler(tc.env, tc.web.req, tc.web.res));
	try tc.expectInvalid(.{.code = dproxy.val.INVALID_SQL, .field = "sql", .err = "Parser Error: syntax error at end of input"});
}
368 |
// A mismatch between the statement's placeholders and the supplied params
// yields WRONG_PARAMETER_COUNT with a correctly pluralized message.
test "exec: wrong parameters" {
	var tc = t.context(.{});
	defer tc.deinit();

	{
		tc.web.json(.{.sql = "select $1"});
		try t.expectError(error.Validation, handler(tc.env, tc.web.req, tc.web.res));
		try tc.expectInvalid(.{.code = dproxy.val.WRONG_PARAMETER_COUNT, .field = "params", .err = "SQL statement requires 1 parameter, 0 were given"});
	}

	{
		// test different plural form
		tc.reset();
		tc.web.json(.{.sql = "select $1, $2", .params = .{1}});
		try t.expectError(error.Validation, handler(tc.env, tc.web.req, tc.web.res));
		try tc.expectInvalid(.{.code = dproxy.val.WRONG_PARAMETER_COUNT, .field = "params", .err = "SQL statement requires 2 parameters, 1 was given"});
	}
}
387 |
// A param that cannot be coerced to the placeholder's type is reported
// against its positional field name ("params.0").
test "exec: invalid parameter value" {
	var tc = t.context(.{});
	defer tc.deinit();

	tc.web.json(.{.sql = "select $1::bool", .params = .{"abc"}});
	try t.expectError(error.Validation, handler(tc.env, tc.web.req, tc.web.res));
	try tc.expectInvalid(.{.code = validate.codes.TYPE_BOOL, .field = "params.0"});
}
396 |
// Blob parameters must be base64-encoded strings; anything else fails
// validation with STRING_BASE64. (Test name fixed: "blog" -> "blob".)
test "exec: invalid base64 for blob" {
	var tc = t.context(.{});
	defer tc.deinit();

	tc.web.json(.{.sql = "select $1::blob", .params = .{"not a blob"}});
	try t.expectError(error.Validation, handler(tc.env, tc.web.req, tc.web.res));
	try tc.expectInvalid(.{.code = validate.codes.STRING_BASE64, .field = "params.0"});
}
405 |
// An update matching no rows serializes through the special-cased
// single-BIGINT "Count" path with a count of 0.
test "exec: no changes" {
	var tc = t.context(.{});
	defer tc.deinit();

	tc.web.json(.{.sql = "update everythings set col_integer = 1 where col_varchar = $1", .params = .{"does not exist"}});
	try handler(tc.env, tc.web.req, tc.web.res);
	try tc.web.expectJson(.{.cols = .{"Count"}, .rows = .{.{0}}});
}
414 |
// An insert without "returning" reports Count == 1, and the row is
// verified to actually exist afterwards.
test "exec: change with no result" {
	var tc = t.context(.{});
	defer tc.deinit();

	tc.web.json(.{.sql = "insert into everythings (col_varchar) values ($1)", .params = .{"insert no results"}});
	try handler(tc.env, tc.web.req, tc.web.res);
	try tc.web.expectJson(.{.cols = .{"Count"}, .rows = .{.{1}}});

	const row = tc.getRow("select count(*) as count from everythings where col_varchar = 'insert no results'", .{}).?;
	defer row.deinit();
	try t.expectEqual(1, row.get(i64, 0));
}
427 |
// Round-trips every supported column type through insert + "returning *",
// verifying JSON serialization of each (ints, floats, decimal, bool,
// date/time, blob->base64, uuid, json, enum, lists, interval, bitstring).
test "exec: every type" {
	var tc = t.context(.{});
	defer tc.deinit();

	tc.web.json(.{
		.sql =
		\\ insert into everythings (
		\\ col_tinyint,
		\\ col_smallint,
		\\ col_integer,
		\\ col_bigint,
		\\ col_hugeint,
		\\ col_utinyint,
		\\ col_usmallint,
		\\ col_uinteger,
		\\ col_ubigint,
		\\ col_real,
		\\ col_double,
		\\ col_decimal,
		\\ col_bool,
		\\ col_date,
		\\ col_time,
		\\ col_timestamp,
		\\ col_blob,
		\\ col_varchar,
		\\ col_uuid,
		\\ col_json,
		\\ col_enum,
		\\ col_list_integer,
		\\ col_list_varchar,
		\\ col_interval,
		\\ col_bitstring
		\\ ) values (
		\\ $1, $2, $3, $4, $5, $6, $7, $8, $9, $10,
		\\ $11, $12, $13, $14, $15, $16, $17, $18, $19, $20,
		\\ $21, [1, null, 2], ['over', '9000', '!', '!1'],
		\\ $22, $23
		\\ )
		\\ returning *
		,
		.params = .{
			-32, -991, 3828, -7461123, 383821882392838192832928193,
			255, 65535, 4294967295, 18446744073709551615,
			-1.75, 3.1400009, 901.22,
			true, "2023-06-20", "13:35:29.332", 1687246572940921,
			"dGhpcyBpcyBhIGJsb2I=", "over 9000", "804b6dd4-d23b-4ea0-af2a-e3bf39bca496",
			"{\"over\":9000}", "type_b", "45 days", "001010011101"
		}
	});
	handler(tc.env, tc.web.req, tc.web.res) catch |err| tc.handlerError(err);
	try tc.web.expectJson(.{
		.cols = .{
			"col_tinyint",
			"col_smallint",
			"col_integer",
			"col_bigint",
			"col_hugeint",

			"col_utinyint",
			"col_usmallint",
			"col_uinteger",
			"col_ubigint",

			"col_real",
			"col_double",
			"col_decimal",

			"col_bool",
			"col_date",
			"col_time",
			"col_timestamp",

			"col_blob",
			"col_varchar",
			"col_uuid",
			"col_json",
			"col_enum",
			"col_list_integer",
			"col_list_varchar",
			"col_interval",
			"col_bitstring"
		},
		.rows = .{.{
			-32,
			-991,
			3828,
			-7461123,
			383821882392838192832928193,

			255,
			65535,
			4294967295,
			18446744073709551615,

			-1.75,
			3.1400009,
			901.22,

			true,
			"2023-06-20",
			"13:35:29.332000",
			1687246572940921,

			"dGhpcyBpcyBhIGJsb2I=",
			"over 9000",
			"804b6dd4-d23b-4ea0-af2a-e3bf39bca496",
			"{\"over\":9000}",
			"type_b",

			&.{1, null, 2},
			&.{"over", "9000", "!", "!1"},

			.{.months = 0, .days = 45, .micros = 0},
			"001010011101"
		}}
	});
}
545 |
// above tested interval as a string, but we also accept it as an object
// (any subset of months/days/micros; omitted fields default to 0)
test "exec: interval as object" {
	var tc = t.context(.{});
	defer tc.deinit();

	tc.web.json(.{
		.sql = "insert into everythings (col_interval) values ($1), ($2), ($3) returning col_interval",
		.params = .{.{.months = 0}, .{.months = 33, .days = 91, .micros = 3232958}, .{.days = 5}},
	});
	handler(tc.env, tc.web.req, tc.web.res) catch |err| tc.handlerError(err);
	try tc.web.expectJson(.{
		.cols = .{"col_interval"},
		.rows = .{
			.{.{.months = 0, .days = 0, .micros = 0}},
			.{.{.months = 33, .days = 91, .micros = 3232958}},
			.{.{.months = 0, .days = 5, .micros = 0}},
		}
	});
}
565 |
// A multi-row "returning" result streams all rows (exercises the row loop,
// not the special single-Count path).
test "exec: returning multiple rows" {
	var tc = t.context(.{});
	defer tc.deinit();

	tc.web.json(.{
		.sql = "insert into everythings (col_tinyint) values (1), (2), (3) returning col_tinyint",
		.params = .{}
	});
	handler(tc.env, tc.web.req, tc.web.res) catch |err| tc.handlerError(err);
	try tc.web.expectJson(.{
		.cols = .{"col_tinyint"},
		.rows = .{.{1}, .{2}, .{3}}
	});
}
580 |
// we have special case handling for a single row returned as an i64 "Count"
// covers the three serialization branches: static 0, static 1, and the
// allocPrint fallback for any other count, plus a null Count
test "exec: special count case" {
	var tc = t.context(.{});
	defer tc.deinit();

	{
		tc.web.json(.{.sql = "insert into everythings (col_bigint) values (0) returning col_bigint as Count"});
		handler(tc.env, tc.web.req, tc.web.res) catch |err| tc.handlerError(err);
		try tc.web.expectJson(.{.cols = .{"Count"}, .rows = .{.{0}}});
	}

	{
		tc.reset();
		tc.web.json(.{.sql = "insert into everythings (col_bigint) values (1) returning col_bigint as \"Count\"",});
		handler(tc.env, tc.web.req, tc.web.res) catch |err| tc.handlerError(err);
		try tc.web.expectJson(.{.cols = .{"Count"}, .rows = .{.{1}}});
	}

	{
		// count > 1 exercises the allocPrint branch (0 and 1 return static
		// bodies); this sub-test previously duplicated the count == 1 case
		tc.reset();
		tc.web.json(.{.sql = "insert into everythings (col_bigint) values (9001) returning col_bigint as \"Count\"",});
		handler(tc.env, tc.web.req, tc.web.res) catch |err| tc.handlerError(err);
		try tc.web.expectJson(.{.cols = .{"Count"}, .rows = .{.{9001}}});
	}

	{
		tc.reset();
		tc.web.json(.{.sql = "insert into everythings (col_bigint) values (null) returning col_bigint as \"Count\"",});
		handler(tc.env, tc.web.req, tc.web.res) catch |err| tc.handlerError(err);
		try tc.web.expectJson(.{.cols = .{"Count"}, .rows = .{.{null}}});
	}
}
613 |
// with_wrap mode only permits statements that can be wrapped as a select
// (select / CTE); trailing semicolons and whitespace are tolerated, but
// describe/delete/begin are rejected as invalid sql.
test "exec: with_wrap" {
	var tc = t.context(.{.with_wrap = true});
	defer tc.deinit();

	{
		// a select statement can be executed, no problem
		tc.web.json(.{.sql = "select 1 as x",});
		handler(tc.env, tc.web.req, tc.web.res) catch |err| tc.handlerError(err);
		try tc.web.expectJson(.{.cols = .{"x"}, .rows = .{.{1}}});
	}

	{
		// semicolon ok
		tc.reset();
		tc.web.json(.{.sql = "select 1 as x;",});
		handler(tc.env, tc.web.req, tc.web.res) catch |err| tc.handlerError(err);
		try tc.web.expectJson(.{.cols = .{"x"}, .rows = .{.{1}}});
	}

	{
		// semicolon with spacing
		tc.reset();
		tc.web.json(.{.sql = "select 1 as x ; \t\n ",});
		handler(tc.env, tc.web.req, tc.web.res) catch |err| tc.handlerError(err);
		try tc.web.expectJson(.{.cols = .{"x"}, .rows = .{.{1}}});
	}

	{
		// nested CTE, can lah!
		tc.reset();
		tc.web.json(.{.sql = "with x as (select 3 as y) select * from x union all select 4",});
		handler(tc.env, tc.web.req, tc.web.res) catch |err| tc.handlerError(err);
		try tc.web.expectJson(.{.cols = .{"y"}, .rows = .{.{3}, .{4}}});
	}

	{
		// other statements cannot
		tc.reset();
		tc.web.json(.{.sql = " \n  DEscribe select 1 as x"});
		try t.expectError(error.Validation, handler(tc.env, tc.web.req, tc.web.res));
		try tc.expectInvalid(.{.code = dproxy.val.INVALID_SQL, .field = "sql"});
	}

	{
		// other statements cannot
		tc.reset();
		tc.web.json(.{.sql = "delete from everythings"});
		try t.expectError(error.Validation, handler(tc.env, tc.web.req, tc.web.res));
		try tc.expectInvalid(.{.code = dproxy.val.INVALID_SQL, .field = "sql"});
	}

	{
		// non describable
		tc.reset();
		tc.web.json(.{.sql = "begin"});
		try t.expectError(error.Validation, handler(tc.env, tc.web.req, tc.web.res));
		try tc.expectInvalid(.{.code = dproxy.val.INVALID_SQL, .field = "sql"});
	}
}
673 |
// max_limit = 2 caps the number of returned rows: a 2-row result is
// unchanged, a 3-row result is truncated to 2.
test "exec: max_limit" {
	var tc = t.context(.{.max_limit = 2});
	defer tc.deinit();

	{
		tc.web.json(.{.sql = "select 1 as x union all select 2",});
		handler(tc.env, tc.web.req, tc.web.res) catch |err| tc.handlerError(err);
		try tc.web.expectJson(.{.cols = .{"x"}, .rows = .{.{1}, .{2}}});
	}

	{
		tc.reset();
		tc.web.json(.{.sql = "select 1 as x union all select 2 union all select 3",});
		handler(tc.env, tc.web.req, tc.web.res) catch |err| tc.handlerError(err);
		try tc.web.expectJson(.{.cols = .{"x"}, .rows = .{.{1}, .{2}}});
	}
}
691 |
--------------------------------------------------------------------------------
/src/web/web.zig:
--------------------------------------------------------------------------------
1 | const std = @import("std");
2 | const zul = @import("zul");
3 | const logz = @import("logz");
4 | const httpz = @import("httpz");
5 | const typed = @import("typed");
6 | const validate = @import("validate");
7 |
8 | const sql = @import("sql/_sql.zig");
9 | pub const dproxy = @import("../dproxy.zig");
10 |
11 | const App = dproxy.App;
12 | const Env = dproxy.Env;
13 | const Allocator = std.mem.Allocator;
14 |
15 | var _request_id: u32 = 0;
16 |
// Seeds the request-id counter, configures routes and starts the HTTP
// listener. listen() blocks for the lifetime of the server.
pub fn start(app: *App) !void {
	{
		var seed: u64 = undefined;
		try std.posix.getrandom(std.mem.asBytes(&seed));
		var r = std.rand.DefaultPrng.init(seed);
		// request_id is allowed to have duplicates, but we'd like to minimize
		// that especially around deploys/restarts.
		_request_id = r.random().uintAtMost(u32, 10_000_000);
	}

	var server = try httpz.ServerCtx(*App, *Env).init(app.allocator, app.config.http, app);
	server.dispatcher(dispatcher);
	server.notFound(notFound);
	server.errorHandler(errorHandler);

	var router = server.router();
	router.post("/api/1/exec", sql.exec);

	// Allow this to be called with an :id parameter, the only purpose of this is
	// for improving the URL attribute of the log message
	router.post("/api/1/exec/:id", sql.exec);

	// freed immediately after logging rather than deferred, since listen()
	// below never returns while the server is running
	const http_address = try std.fmt.allocPrint(app.allocator, "http://{s}:{d}", .{server.config.address.?, server.config.port.?});
	logz.info().ctx("http.listener").string("address", http_address).string("db_path", app.config.db.path).log();
	app.allocator.free(http_address);

	try server.listen();
}
45 |
// Per-request wrapper around every action: assigns a request id (echoed in
// the "request-id" header), builds the per-request Env, translates known
// action errors into JSON responses, and optionally logs the request.
fn dispatcher(app: *App, action: httpz.Action(*Env), req: *httpz.Request, res: *httpz.Response) !void {
	const start_time = std.time.microTimestamp();

	// validators are pooled; must be released after the response is built
	const validator = try app.validators.acquire({});
	defer app.validators.release(validator);

	// atomic increment so concurrent requests get distinct ids
	const encoded_request_id = try encodeRequestId(res.arena, app.config.instance_id, @atomicRmw(u32, &_request_id, .Add, 1, .monotonic));
	res.header("request-id", encoded_request_id);

	// multiuse: the same logger instance is written to by the action and by
	// the request log below
	var logger = logz.logger().string("$rid", encoded_request_id).multiuse();
	defer logger.release();

	var env = Env{
		.app = app,
		.logger = logger,
		.validator = validator,
	};

	action(&env, req, res) catch |err| switch (err) {
		// validation details were accumulated on the pooled validator
		error.Validation => {
			res.status = 400;
			const code = dproxy.codes.VALIDATION_ERROR;
			try res.json(.{
				.err = "validation error",
				.code = code,
				.validation = validator.errors(),
			}, .{.emit_null_optional_fields = false});
			_ = logger.int("code", code);
		},
		error.InvalidJson => {
			_ = logger.int("code", errors.InvalidJson.code);
			errors.InvalidJson.write(res);
		},
		error.ReadOnly => {
			_ = logger.int("code", errors.ReadOnly.code);
			errors.ReadOnly.write(res);
		},
		// anything else is unexpected: log it with a unique error id the
		// client can quote back to us
		else => {
			const error_id = zul.UUID.v4().toHex(.lower);
			logger.level(.Error).
				ctx("http.err").
				err(err).
				stringSafe("eid", &error_id).
				stringSafe("method", @tagName(req.method)).
				string("path", req.url.path).
				log();

			res.status = 500;
			try res.json(.{
				.err = "internal server error",
				.code = dproxy.codes.INTERNAL_SERVER_ERROR_CAUGHT,
				.error_id = &error_id,
			}, .{});
		}
	};

	if (app.log_http) {
		logger.
			stringSafe("@l", "REQ").
			int("status", res.status).
			stringSafe("method", @tagName(req.method)).
			string("path", req.url.path).
			int("us", std.time.microTimestamp() - start_time).
			log();
	}
}
112 |
// Parses and validates the request's JSON body against v. Returns the typed
// map on success; error.InvalidJson when there is no body, error.Validation
// when the body fails validation (details accumulate on env.validator).
pub fn validateBody(env: *Env, req: *httpz.Request, v: *validate.Object(void)) !typed.Map {
	const body = req.body() orelse return error.InvalidJson;

	const validator = env.validator;
	const input = try v.validateJsonS(body, validator);
	return if (validator.isValid()) input else error.Validation;
}
124 |
// A static error response whose JSON body is rendered once, at comptime.
pub const Error = struct {
	code: i32,
	status: u16,
	body: []const u8,

	fn init(status: u16, comptime code: i32, comptime message: []const u8) Error {
		return .{
			.status = status,
			.code = code,
			.body = std.fmt.comptimePrint("{{\"code\": {d}, \"err\": \"{s}\"}}", .{code, message}),
		};
	}

	// Copies the pre-rendered status/body onto the response.
	pub fn write(self: Error, res: *httpz.Response) void {
		res.status = self.status;
		res.content_type = httpz.ContentType.JSON;
		res.body = self.body;
	}
};
145 |
// bunch of static errors that we can serialize at comptime
pub const errors = struct {
	pub const ServerError = Error.init(500, dproxy.codes.INTERNAL_SERVER_ERROR_UNCAUGHT, "internal server error");
	pub const NotFound = Error.init(404, dproxy.codes.NOT_FOUND, "not found");
	pub const InvalidJson = Error.init(400, dproxy.codes.INVALID_JSON, "invalid JSON");
	// 409: the server is up, but the database is opened read-only
	pub const ReadOnly = Error.init(409, dproxy.codes.READONLY, "database is configured for read-only operations");
};
153 |
// Fallback for unrouted requests: writes the static 404 body.
fn notFound(_: *const App, _: *httpz.Request, res: *httpz.Response) !void {
	errors.NotFound.write(res);
}
157 |
// Last-resort handler for errors that escape the dispatcher: logs the error
// with the raw URL and writes the static 500 body.
fn errorHandler(_: *const App, req: *httpz.Request, res: *httpz.Response, err: anyerror) void {
	logz.err().err(err).ctx("errorHandler").string("path", req.url.raw).log();
	errors.ServerError.write(res);
}
162 |
// Encodes instance_id + request_id as an 8-character base32 string.
// The 8-bit instance_id occupies the low bits and the native-endian bytes of
// request_id the next 32, forming a 40-bit value that is emitted as eight
// 5-bit groups, most significant first. Caller owns the returned slice.
fn encodeRequestId(allocator: Allocator, instance_id: u8, request_id: u32) ![]u8 {
	const REQUEST_ID_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567";

	const id_bytes = std.mem.asBytes(&request_id);
	var packed_bits: u64 = instance_id;
	for (id_bytes, 0..) |b, n| {
		packed_bits |= @as(u64, b) << @as(u6, @intCast(8 * (n + 1)));
	}

	const encoded = try allocator.alloc(u8, 8);
	var remaining = packed_bits;
	var pos: usize = 8;
	while (pos > 0) {
		pos -= 1;
		encoded[pos] = REQUEST_ID_ALPHABET[@intCast(remaining & 0x1F)];
		remaining >>= 5;
	}
	return encoded;
}
178 |
179 | const t = dproxy.testing;
180 |
// Pins the base32 encoding: low character varies with instance_id, the
// request_id bits land in the upper characters.
test "dispatcher: encodeRequestId" {
	var tc = t.context(.{});
	defer tc.deinit();

	try t.expectEqual("AAAAAAYA", try encodeRequestId(tc.arena, 0, 3));
	try t.expectEqual("AAAAABAA", try encodeRequestId(tc.arena, 0, 4));
	try t.expectEqual("AAAAAAYC", try encodeRequestId(tc.arena, 2, 3));
	try t.expectEqual("AAAAABAC", try encodeRequestId(tc.arena, 2, 4));
}
190 |
// error.InvalidJson from an action becomes the static 400 invalid-JSON body.
test "web.dispatch: invalid json" {
	var tc = t.context(.{});
	defer tc.deinit();

	try dispatcher(tc.app, testInvalidJsonAction, tc.web.req, tc.web.res);
	try tc.web.expectStatus(400);
	try tc.web.expectJson(.{.code = 10, .err = "invalid JSON"});
}
199 |
// error.Validation becomes a 400 whose body carries the validator's
// accumulated field errors.
test "web.dispatch: failed validation" {
	var tc = t.context(.{});
	defer tc.deinit();

	try dispatcher(tc.app, testValidationFailAction, tc.web.req, tc.web.res);
	try tc.web.expectStatus(400);
	try tc.web.expectJson(.{.code = 11, .validation = &.{.{.code = 322, .err = "it cannot be done"}}});
}
208 |
// Any other action error becomes a logged 500 (logging muted for the test).
test "web.dispatch: generic action error" {
	t.noLogs();
	defer t.restoreLogs();

	var tc = t.context(.{});
	defer tc.deinit();

	try dispatcher(tc.app, testErrorAction, tc.web.req, tc.web.res);
	try tc.web.expectStatus(500);
	try tc.web.expectJson(.{.code = 1, .err = "internal server error"});
}
220 |
// A successful action's response passes through the dispatcher untouched.
test "web.dispatch: success" {
	var tc = t.context(.{});
	defer tc.deinit();
	try dispatcher(tc.app, testSuccessAction, tc.web.req, tc.web.res);
	try tc.web.expectStatus(200);
	try tc.web.expectJson(.{.success = true, .over = 9000});
}
228 |
// Test stub: always fails with error.InvalidJson.
fn testInvalidJsonAction(_: *Env, _: *httpz.Request, _: *httpz.Response) !void {
	return error.InvalidJson;
}
232 |
// Test stub: records one validation error, then fails with error.Validation.
fn testValidationFailAction(env: *Env, _: *httpz.Request, _: *httpz.Response) !void {
	try env.validator.add(.{.code = 322, .err = "it cannot be done"});
	return error.Validation;
}
237 |
// Test stub: fails with an error the dispatcher has no special handling for.
fn testErrorAction(_: *Env, _: *httpz.Request, _: *httpz.Response) !void {
	return error.ErrorAction;
}
241 |
// Test stub: succeeds with a fixed JSON payload.
fn testSuccessAction(_: *Env, _: *httpz.Request, res: *httpz.Response) !void {
	return res.json(.{.success = true, .over = 9000}, .{});
}
245 |
--------------------------------------------------------------------------------
/test_runner.zig:
--------------------------------------------------------------------------------
1 | // in your build.zig, you can specify a custom test runner:
2 | // const tests = b.addTest(.{
3 | // .target = target,
4 | // .optimize = optimize,
5 | // .test_runner = "test_runner.zig", // add this line
6 | // .root_source_file = .{ .path = "src/main.zig" },
7 | // });
8 |
9 | const std = @import("std");
10 | const builtin = @import("builtin");
11 |
12 | const Allocator = std.mem.Allocator;
13 |
14 | const BORDER = "=" ** 80;
15 |
16 | // use in custom panic handler
17 | var current_test: ?[]const u8 = null;
18 |
// Custom test runner: executes every registered test, checking for leaks via
// std.testing.allocator, tracking the slowest tests, and printing a colored
// summary. Exits non-zero when any test fails.
pub fn main() !void {
	var mem: [4096]u8 = undefined;
	var fba = std.heap.FixedBufferAllocator.init(&mem);

	const allocator = fba.allocator();

	const env = Env.init(allocator);
	defer env.deinit(allocator);

	var slowest = SlowTracker.init(allocator, 5);
	defer slowest.deinit();

	var pass: usize = 0;
	var fail: usize = 0;
	var skip: usize = 0;
	var leak: usize = 0;

	const printer = Printer.init();
	printer.fmt("\r\x1b[0K", .{}); // beginning of line and clear to end of line

	for (builtin.test_functions) |t| {
		// fresh leak-checking allocator per test
		std.testing.allocator_instance = .{};
		var status = Status.pass;
		slowest.startTiming();

		// unnamed tests (container-level `test {}`) always run; named tests
		// can be filtered
		const is_unnamed_test = std.mem.endsWith(u8, t.name, ".test_0");
		if (env.filter) |f| {
			if (!is_unnamed_test and std.mem.indexOf(u8, t.name, f) == null) {
				continue;
			}
		}

		// strip the namespace up to and including the "test" component
		const friendly_name = blk: {
			const name = t.name;
			var it = std.mem.splitScalar(u8, name, '.');
			while (it.next()) |value| {
				if (std.mem.eql(u8, value, "test")) {
					const rest = it.rest();
					break :blk if (rest.len > 0) rest else name;
				}
			}
			break :blk name;
		};

		current_test = friendly_name;
		const result = t.func();
		current_test = null;

		if (is_unnamed_test) {
			continue;
		}

		const ns_taken = slowest.endTiming(friendly_name);

		if (std.testing.allocator_instance.deinit() == .leak) {
			leak += 1;
			printer.status(.fail, "\n{s}\n\"{s}\" - Memory Leak\n{s}\n", .{BORDER, friendly_name, BORDER});
		}

		if (result) |_| {
			pass += 1;
		} else |err| switch (err) {
			error.SkipZigTest => {
				skip += 1;
				status = .skip;
			},
			else => {
				status = .fail;
				fail += 1;
				printer.status(.fail, "\n{s}\n\"{s}\" - {s}\n{s}\n", .{BORDER, friendly_name, @errorName(err), BORDER});
				if (@errorReturnTrace()) |trace| {
					std.debug.dumpStackTrace(trace.*);
				}
				if (env.fail_first) {
					break;
				}
			}
		}

		if (env.verbose) {
			// 1ms = 1_000_000ns; the previous divisor (100_000.0) reported
			// every duration 10x too large
			const ms = @as(f64, @floatFromInt(ns_taken)) / 1_000_000.0;
			printer.status(status, "{s} ({d:.2}ms)\n", .{friendly_name, ms});
		} else {
			printer.status(status, ".", .{});
		}
	}

	const total_tests = pass + fail;
	const status = if (fail == 0) Status.pass else Status.fail;
	printer.status(status, "\n{d} of {d} test{s} passed\n", .{pass, total_tests, if (total_tests != 1) "s" else ""});
	if (skip > 0) {
		printer.status(.skip, "{d} test{s} skipped\n", .{skip, if (skip != 1) "s" else ""});
	}
	if (leak > 0) {
		printer.status(.fail, "{d} test{s} leaked\n", .{leak, if (leak != 1) "s" else ""});
	}
	printer.fmt("\n", .{});
	try slowest.display(printer);
	printer.fmt("\n", .{});
	std.posix.exit(if (fail == 0) 0 else 1);
}
120 |
// Thin wrapper around stderr used for all runner output.
// stderr is used (rather than stdout) so test output survives piping.
const Printer = struct {
    out: std.fs.File.Writer,

    fn init() Printer {
        return .{ .out = std.io.getStdErr().writer() };
    }

    // Write a formatted message with no coloring.
    fn fmt(self: Printer, comptime format: []const u8, args: anytype) void {
        std.fmt.format(self.out, format, args) catch unreachable;
    }

    // Write a formatted message wrapped in the ANSI color escape for `s`,
    // then reset the terminal color.
    fn status(self: Printer, s: Status, comptime format: []const u8, args: anytype) void {
        const escape = switch (s) {
            .pass => "\x1b[32m",
            .fail => "\x1b[31m",
            .skip => "\x1b[33m",
            .text => "",
        };
        self.out.writeAll(escape) catch @panic("writeAll failed?!");
        std.fmt.format(self.out, format, args) catch @panic("std.fmt.format failed?!");
        self.fmt("\x1b[0m", .{});
    }
};
147 |
// Output category for a line of runner output; Printer.status maps each
// variant to an ANSI color (text means "no color").
const Status = enum {
    pass,
    fail,
    skip,
    text,
};
154 |
// Tracks the N slowest tests so they can be reported at the end of the run.
// Uses a min-max heap so the fastest of the tracked "slow" tests can be
// cheaply inspected/evicted when a slower test comes along.
const SlowTracker = struct {
    const SlowestQueue = std.PriorityDequeue(TestInfo, void, compareTiming);

    // Maximum number of slow tests to keep.
    max: usize,
    // The slowest tests seen so far, at most `max` entries.
    slowest: SlowestQueue,
    // Per-test stopwatch; reset by startTiming, read by endTiming.
    timer: std.time.Timer,

    fn init(allocator: Allocator, count: u32) SlowTracker {
        const timer = std.time.Timer.start() catch @panic("failed to start timer");
        var slowest = SlowestQueue.init(allocator, {});
        // Capacity is fixed up-front so endTiming's add never needs to grow.
        slowest.ensureTotalCapacity(count) catch @panic("OOM");
        return .{
            .max = count,
            .timer = timer,
            .slowest = slowest,
        };
    }

    const TestInfo = struct {
        ns: u64,
        name: []const u8,
    };

    fn deinit(self: SlowTracker) void {
        self.slowest.deinit();
    }

    // Begin timing a single test.
    fn startTiming(self: *SlowTracker) void {
        self.timer.reset();
    }

    // Record the elapsed time (since startTiming) for `test_name` and
    // return it in nanoseconds. `test_name` is not copied; it must outlive
    // the tracker (test names are static here).
    fn endTiming(self: *SlowTracker, test_name: []const u8) u64 {
        // lap() both reads and resets; startTiming resets again before the
        // next measurement, so this is safe either way.
        const ns = self.timer.lap();

        var slowest = &self.slowest;

        if (slowest.count() < self.max) {
            // Capacity is fixed to the # of slow tests we want to track
            // If we've tracked fewer tests than this capacity, than always add
            slowest.add(TestInfo{ .ns = ns, .name = test_name }) catch @panic("failed to track test timing");
            return ns;
        }

        {
            // Optimization to avoid shifting the dequeue for the common case
            // where the test isn't one of our slowest.
            const fastest_of_the_slow = slowest.peekMin() orelse unreachable;
            if (fastest_of_the_slow.ns > ns) {
                // the test was faster than our fastest slow test, don't add
                return ns;
            }
        }

        // the previous fastest of our slow tests, has been pushed off.
        _ = slowest.removeMin();
        slowest.add(TestInfo{ .ns = ns, .name = test_name }) catch @panic("failed to track test timing");
        return ns;
    }

    // Print the tracked slow tests, fastest first. Consumes the queue's
    // entries, so this should only be called once, at the end of the run.
    fn display(self: *SlowTracker, printer: Printer) !void {
        const count = self.slowest.count();
        printer.fmt("Slowest {d} test{s}: \n", .{count, if (count != 1) "s" else ""});
        while (self.slowest.removeMinOrNull()) |info| {
            // 1 ms == 1_000_000 ns. (The old divisor of 100_000.0 reported
            // durations 10x too large.)
            const ms = @as(f64, @floatFromInt(info.ns)) / std.time.ns_per_ms;
            printer.fmt("  {d:.2}ms\t{s}\n", .{ms, info.name});
        }
    }

    fn compareTiming(context: void, a: TestInfo, b: TestInfo) std.math.Order {
        _ = context;
        return std.math.order(a.ns, b.ns);
    }
};
229 |
// Runner configuration read from environment variables:
//   TEST_VERBOSE    - per-test timing output (defaults to true)
//   TEST_FAIL_FIRST - stop on the first failure (defaults to false)
//   TEST_FILTER     - only run tests whose name contains this substring
const Env = struct {
    verbose: bool,
    fail_first: bool,
    filter: ?[]const u8,

    fn init(allocator: Allocator) Env {
        return .{
            .verbose = readEnvBool(allocator, "TEST_VERBOSE", true),
            .fail_first = readEnvBool(allocator, "TEST_FAIL_FIRST", false),
            .filter = readEnv(allocator, "TEST_FILTER"),
        };
    }

    // Frees the owned filter string, if one was read.
    fn deinit(self: Env, allocator: Allocator) void {
        if (self.filter) |value| {
            allocator.free(value);
        }
    }

    // Returns an owned copy of the variable's value, or null when it is
    // unset (or unreadable, which is logged). Caller frees.
    fn readEnv(allocator: Allocator, key: []const u8) ?[]const u8 {
        return std.process.getEnvVarOwned(allocator, key) catch |err| switch (err) {
            error.EnvironmentVariableNotFound => null,
            else => blk: {
                std.log.warn("failed to get env var {s} due to err {}", .{key, err});
                break :blk null;
            },
        };
    }

    // True only when the variable is set to "true" (case-insensitive);
    // `deflt` is returned when the variable is unset.
    fn readEnvBool(allocator: Allocator, key: []const u8, deflt: bool) bool {
        const raw = readEnv(allocator, key) orelse return deflt;
        defer allocator.free(raw);
        return std.ascii.eqlIgnoreCase(raw, "true");
    }
};
266 |
// Root-level panic override: when a panic fires while a test is executing
// (tracked via the file-level `current_test`, set in the run loop), print
// the test's name in red between BORDER lines so the panic can be
// attributed to it, then delegate to Zig's default panic handler.
pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace, ret_addr: ?usize) noreturn {
    if (current_test) |ct| {
        std.debug.print("\x1b[31m{s}\npanic running \"{s}\"\n{s}\x1b[0m\n", .{BORDER, ct, BORDER});
    }
    std.builtin.default_panic(msg, error_return_trace, ret_addr);
}
273 |
--------------------------------------------------------------------------------