├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.yml │ └── config.yml ├── build-release.sh ├── workflows │ ├── CI.yml │ ├── binary.yml.disable │ ├── docs.yml │ ├── mirror.yml │ ├── release.yml │ └── simargs.yml └── zigfetch.sh ├── .gitignore ├── .tool-versions ├── .woodpecker └── ci.yml ├── LICENSE ├── Makefile ├── README.org ├── build.zig ├── build.zig.zon ├── docs ├── archetypes │ └── default.md ├── content │ ├── _index.org │ ├── install.org │ ├── packages │ │ ├── _index.org │ │ ├── pretty-table.org │ │ └── simargs.org │ ├── programs │ │ ├── _index.org │ │ ├── dark-mode.org │ │ ├── loc.org │ │ ├── night-shift.org │ │ ├── pidof.org │ │ ├── repeat.org │ │ ├── tcp-proxy.org │ │ ├── timeout.org │ │ ├── tree.org │ │ └── zigfetch.org │ └── roadmap.org ├── go.mod ├── go.sum └── hugo.toml ├── examples ├── pretty-table-demo.zig └── simargs-demo.zig ├── src ├── bin │ ├── dark-mode.zig │ ├── loc.zig │ ├── night-shift.zig │ ├── pidof.zig │ ├── pkg │ │ ├── Manifest.zig │ │ └── package.zig │ ├── repeat.zig │ ├── tcp-proxy.zig │ ├── timeout.zig │ ├── tree.zig │ ├── util.zig │ ├── yes.zig │ └── zigfetch.zig └── mod │ ├── pretty-table.zig │ └── simargs.zig └── tests ├── test.c ├── test.py ├── test.rb └── test.zig /.gitattributes: -------------------------------------------------------------------------------- 1 | *.zig text eol=lf 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.yml: -------------------------------------------------------------------------------- 1 | name: Bug Report 2 | labels: ["bug"] 3 | description: Report to help us improve 4 | body: 5 | - type: markdown 6 | attributes: 7 | value: | 8 | Thanks for taking the time to fill out this bug report! 9 | - type: textarea 10 | id: bug-description 11 | attributes: 12 | label: Describe this problem 13 | description: What this problem is and what happened. 14 | validations: 15 | required: true 16 | - type: textarea 17 | id: version 18 | attributes: 19 | label: Version 20 | description: Which version are you using when this issue arises. 21 | validations: 22 | required: true 23 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: true 2 | contact_links: 3 | - name: Suggest idea 4 | url: https://github.com/jiacai2050/zigcli/discussions/categories/ideas 5 | about: Your valuable suggestions can help us improve and provide the best possible experience, please feel free to share your thoughts. 6 | -------------------------------------------------------------------------------- /.github/build-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | OUT_DIR=${OUT_DIR:-/tmp/zigcli} 4 | VERSION=${RELEASE_VERSION:-unknown} 5 | 6 | echo "Building zigcli ${VERSION} to ${OUT_DIR}..." 7 | 8 | set -Eeuo pipefail 9 | trap cleanup SIGINT SIGTERM ERR EXIT 10 | cleanup() { 11 | trap - SIGINT SIGTERM ERR EXIT 12 | ls -ltrh "${OUT_DIR}" 13 | } 14 | 15 | mkdir -p "${OUT_DIR}" 16 | script_dir=$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd -P) 17 | 18 | cd "${script_dir}/.." 19 | 20 | targets=( 21 | "aarch64-linux" 22 | "x86_64-linux" 23 | "x86-linux" 24 | # This target is built on CI directly. 
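# (release.yml builds the aarch64-macos artifact natively on the macos-latest runner.)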
25 | # "aarch64-macos" 26 | "x86_64-macos" 27 | "x86_64-windows" 28 | "aarch64-windows" 29 | ) 30 | 31 | export BUILD_DATE=$(date +'%Y-%m-%dT%H:%M:%S%z') 32 | export GIT_COMMIT=$(git rev-parse --short HEAD) 33 | 34 | for target in "${targets[@]}"; do 35 | echo "Building for ${target}..." 36 | filename=zigcli-${VERSION}-${target} 37 | dst_dir=zig-out/${filename} 38 | 39 | # 1. Build 40 | # The '-Dcpu=baseline' flag ensures compatibility with a baseline CPU architecture, 41 | # which is necessary for certain build targets. For more details, see: 42 | # https://github.com/jiacai2050/zigcli/issues/43 43 | zig build -Doptimize=ReleaseSafe -Dtarget="${target}" -p ${dst_dir} \ 44 | -Dcpu=baseline -Dgit_commit=${GIT_COMMIT} -Dbuild_date=${BUILD_DATE} 45 | 46 | # 2. Prepare files 47 | rm -f ${dst_dir}/bin/*demo 48 | cp LICENSE README.org ${dst_dir} 49 | 50 | find zig-out 51 | 52 | # 3. Zip final file 53 | pushd zig-out 54 | zip -r ${OUT_DIR}/${filename}.zip "${filename}" 55 | popd 56 | done 57 | -------------------------------------------------------------------------------- /.github/workflows/CI.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | schedule: 5 | - cron: '10 20 * * *' 6 | workflow_dispatch: 7 | pull_request: 8 | paths: 9 | - '**.zig' 10 | - ".github/build-release.sh" 11 | - '.github/workflows/CI.yml' 12 | push: 13 | branches: 14 | - main 15 | paths: 16 | - '**.zig' 17 | - ".github/build-release.sh" 18 | - '.github/workflows/CI.yml' 19 | 20 | concurrency: 21 | group: ${{ github.workflow }}-${{ github.ref }} 22 | cancel-in-progress: true 23 | 24 | jobs: 25 | test: 26 | timeout-minutes: 10 27 | runs-on: ${{ matrix.os }} 28 | strategy: 29 | fail-fast: false 30 | matrix: 31 | os: [ubuntu-latest, macos-latest, windows-latest] 32 | zig-version: [0.14.0] 33 | steps: 34 | - uses: actions/checkout@v4 35 | with: 36 | submodules: true 37 | - uses: mlugg/setup-zig@v1 38 | with: 39 | version: ${{ matrix.zig-version }} 40 | - name: fmt and test(windows) 41 | if: matrix.os == 'windows-latest' 42 | run: | 43 | zig.exe fmt --check . 44 | zig.exe build test 45 | zig.exe build 46 | - name: fmt and test(unix) 47 | if: matrix.os != 'windows-latest' 48 | run: | 49 | zig fmt --check . 
50 | zig build test 51 | zig build 52 | find zig-out 53 | - name: zigfetch compare 54 | if: matrix.os != 'windows-latest' 55 | run: | 56 | bash .github/zigfetch.sh 57 | 58 | cross-compile: 59 | timeout-minutes: 10 60 | runs-on: ${{ matrix.os }} 61 | strategy: 62 | fail-fast: false 63 | matrix: 64 | os: [ubuntu-latest] 65 | targets: [x86_64-windows, x86_64-linux, x86_64-macos, aarch64-macos] 66 | steps: 67 | - uses: actions/checkout@v4 68 | - uses: mlugg/setup-zig@v1 69 | with: 70 | version: 0.14.0 71 | - name: Build 72 | run: | 73 | zig build -Dtarget=${{ matrix.targets }} 74 | -------------------------------------------------------------------------------- /.github/workflows/binary.yml.disable: -------------------------------------------------------------------------------- 1 | name: Build binary 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | paths: 7 | - "**.zig" 8 | - ".github/workflows/CI.yml" 9 | - ".github/workflows/binary.yml" 10 | push: 11 | branches: 12 | - main 13 | paths: 14 | - "**.zig" 15 | - ".github/workflows/binary.yml" 16 | 17 | env: 18 | ZIG_VERSION: 0.13.0 19 | 20 | jobs: 21 | build: 22 | timeout-minutes: 10 23 | runs-on: ubuntu-latest 24 | strategy: 25 | fail-fast: false 26 | matrix: 27 | targets: 28 | - "x86-windows" 29 | - "x86_64-windows" 30 | - "aarch64-windows" 31 | - "x86-linux" 32 | - "x86_64-linux" 33 | - "arm-linux-musleabi" 34 | - "aarch64-linux" 35 | - "x86_64-macos" 36 | - "aarch64-macos" 37 | steps: 38 | - uses: actions/checkout@v4 39 | with: 40 | submodules: true 41 | - uses: mlugg/setup-zig@v1 42 | with: 43 | version: ${{ env.ZIG_VERSION }} 44 | - name: Set Environment Variables 45 | run: | 46 | echo "BUILD_DATE=$(date +'%Y-%m-%dT%H:%M:%S%z')" >> $GITHUB_ENV 47 | - name: Build 48 | run: | 49 | zig build -Dtarget=${{ matrix.targets }} -Doptimize=ReleaseSafe \ 50 | -Dgit_commit=${{ github.head_ref }}-${{ github.sha }} \ 51 | -Dbuild_date=${{ env.BUILD_DATE }} 52 | tar -cvf zigcli.tar zig-out/bin/ 53 | - name: Upload 54 | uses: actions/upload-artifact@v4 55 | with: 56 | name: zigcli-${{ matrix.targets }} 57 | path: zigcli.tar 58 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | name: Deploy docs 2 | 3 | on: 4 | push: 5 | branches: ["main"] 6 | paths: 7 | - "**.org" 8 | - ".github/workflows/docs.yml" 9 | 10 | workflow_dispatch: 11 | 12 | permissions: 13 | contents: read 14 | pages: write 15 | id-token: write 16 | 17 | concurrency: 18 | group: "pages" 19 | cancel-in-progress: false 20 | 21 | defaults: 22 | run: 23 | shell: bash 24 | 25 | jobs: 26 | build: 27 | runs-on: ubuntu-latest 28 | steps: 29 | - name: Checkout 30 | uses: actions/checkout@v4 31 | with: 32 | submodules: recursive 33 | - name: Setup Pages 34 | id: pages 35 | uses: actions/configure-pages@v3 36 | - name: Setup Hugo 37 | uses: peaceiris/actions-hugo@v3 38 | with: 39 | hugo-version: 'latest' 40 | extended: true 41 | - name: Build with Hugo 42 | working-directory: docs 43 | env: 44 | # For maximum backward compatibility with Hugo modules 45 | HUGO_ENVIRONMENT: production 46 | HUGO_ENV: production 47 | run: | 48 | npm i -D postcss postcss-cli autoprefixer 49 | hugo mod get 50 | hugo \ 51 | --minify \ 52 | --baseURL "${{ steps.pages.outputs.base_url }}/" 53 | - name: Upload artifact 54 | uses: actions/upload-pages-artifact@v3 55 | with: 56 | path: ./docs/public 57 | 58 | # Deployment job 59 | deploy: 60 | environment: 61 | name: github-pages 
62 | url: ${{ steps.deployment.outputs.page_url }} 63 | runs-on: ubuntu-latest 64 | defaults: 65 | run: 66 | working-directory: docs 67 | needs: build 68 | steps: 69 | - name: Deploy to GitHub Pages 70 | id: deployment 71 | uses: actions/deploy-pages@v4 72 | -------------------------------------------------------------------------------- /.github/workflows/mirror.yml: -------------------------------------------------------------------------------- 1 | name: Mirror 2 | 3 | on: 4 | push: 5 | branches: [main, master] 6 | workflow_dispatch: 7 | 8 | jobs: 9 | codeberg: 10 | if: github.repository_owner == 'jiacai2050' 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v3 14 | with: 15 | fetch-depth: 0 16 | - uses: pixta-dev/repository-mirroring-action@v1 17 | with: 18 | target_repo_url: https://${{ secrets.CBTOKEN }}@codeberg.org/${{ github.repository }}.git 19 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | paths: 7 | - "**.zig" 8 | - ".github/build-release.sh" 9 | - ".github/workflows/CI.yml" 10 | - ".github/workflows/release.yml" 11 | push: 12 | branches: 13 | - main 14 | paths: 15 | - "**.zig" 16 | - ".github/workflows/release.yml" 17 | tags: 18 | - "v*" 19 | 20 | 21 | concurrency: 22 | group: ${{ github.workflow }}-${{ github.ref }} 23 | cancel-in-progress: true 24 | 25 | permissions: 26 | contents: write 27 | 28 | jobs: 29 | upload-assets: 30 | strategy: 31 | fail-fast: false 32 | matrix: 33 | os: [ubuntu-latest, macos-latest] 34 | runs-on: ${{ matrix.os }} 35 | steps: 36 | - name: Checkout 37 | uses: actions/checkout@v4 38 | - uses: mlugg/setup-zig@v1 39 | with: 40 | version: 0.14.0 41 | - name: Set env(release) 42 | if: startsWith(github.ref, 'refs/tags/') 43 | run: | 44 | echo "RELEASE_VERSION=${{ github.ref_name }}" >> $GITHUB_ENV 45 | echo "OUT_DIR=/tmp/zigcli" >> $GITHUB_ENV 46 | - name: Set env(dev) 47 | if: "!startsWith(github.ref, 'refs/tags/')" 48 | run: | 49 | echo "RELEASE_VERSION=unknown" >> $GITHUB_ENV 50 | echo "OUT_DIR=/tmp/zigcli" >> $GITHUB_ENV 51 | - name: Build(Ubuntu) 52 | if: matrix.os == 'ubuntu-latest' 53 | run: | 54 | bash .github/build-release.sh 55 | - name: Build(MacOS) 56 | if: matrix.os == 'macos-latest' 57 | run: | 58 | mkdir -p "${OUT_DIR}" 59 | zig build -Doptimize=ReleaseSafe \ 60 | -Dgit_commit=${GIT_COMMIT} -Dbuild_date=${BUILD_DATE} -Dversion=${RELEASE_VERSION} 61 | rm -f zig-out/bin/*demo 62 | cp LICENSE README.org zig-out 63 | find zig-out 64 | pushd zig-out 65 | zip -r ${OUT_DIR}/zigcli-${RELEASE_VERSION}-aarch64-macos.zip . 
66 | popd 67 | - name: Release 68 | uses: softprops/action-gh-release@v2 69 | if: startsWith(github.ref, 'refs/tags/') 70 | with: 71 | files: ${{ env.OUT_DIR }}/* 72 | - name: Upload 73 | if: "!startsWith(github.ref, 'refs/tags/')" 74 | uses: actions/upload-artifact@v4 75 | with: 76 | name: zigcli-${{ matrix.os }} 77 | path: ${{ env.OUT_DIR }} 78 | -------------------------------------------------------------------------------- /.github/workflows/simargs.yml: -------------------------------------------------------------------------------- 1 | name: Simargs CI 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | paths: 7 | - 'src/mod/simargs.zig' 8 | - ".github/workflows/simargs.yml" 9 | push: 10 | branches: 11 | - main 12 | paths: 13 | - 'src/mod/simargs.zig' 14 | - ".github/workflows/simargs.yml" 15 | 16 | 17 | jobs: 18 | memory-leak: 19 | timeout-minutes: 10 20 | runs-on: ${{ matrix.os }} 21 | strategy: 22 | fail-fast: false 23 | matrix: 24 | os: [ubuntu-latest] 25 | zig-version: [0.14.0] 26 | steps: 27 | - uses: actions/checkout@v4 28 | - uses: mlugg/setup-zig@v1 29 | with: 30 | version: ${{ matrix.zig-version }} 31 | - name: Memory leak detect 32 | run: | 33 | sudo apt update && sudo apt install -y valgrind libcurl4-openssl-dev 34 | zig build -Dcpu=baseline --verbose 35 | TEST_BINARY=./zig-out/bin/simargs-demo 36 | valgrind --leak-check=full --tool=memcheck \ 37 | --show-leak-kinds=all --error-exitcode=1 ${TEST_BINARY} --output a.out \ 38 | sub1 --a 123 hello world 39 | -------------------------------------------------------------------------------- /.github/zigfetch.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | script_dir=$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd -P) 4 | 5 | check_hash() { 6 | local pkg="$1" 7 | local expected="$2" 8 | 9 | # zig fetch --debug-hash "${pkg}" 10 | "${script_dir}/../zig-out/bin/zigfetch" "${pkg}" 11 | local actual=$("${script_dir}/../zig-out/bin/zigfetch" "${pkg}" 2>&1 | tail -1) 12 | 13 | if [ "${actual}" != "${expected}" ]; then 14 | echo "Wrong case: ${pkg}.\nExpected: ${expected}, actual: ${actual}" 15 | return 1 16 | fi 17 | 18 | return 0 19 | } 20 | 21 | check_hash "https://github.com/karlseguin/websocket.zig/archive/7c3f1149bffcde1dec98dea88a442e2b580d750a.tar.gz" \ 22 | "websocket-0.1.0-ZPISdXNIAwCXG7oHBj4zc1CfmZcDeyR6hfTEOo8_YI4r" 23 | 24 | check_hash "https://github.com/jiacai2050/zig-curl/archive/refs/tags/v0.1.1.tar.gz" \ 25 | "curl-0.1.1-P4tT4WzAAAD0MGbSfsyGV1hPdooNwZ5odcQYUB9iYlHe" 26 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /zig-out 2 | /zig-cache 3 | /.zig-cache 4 | *lock 5 | docs/resources 6 | docs/public 7 | README.md -------------------------------------------------------------------------------- /.tool-versions: -------------------------------------------------------------------------------- 1 | zig 0.14.0 2 | # zig master 3 | -------------------------------------------------------------------------------- /.woodpecker/ci.yml: -------------------------------------------------------------------------------- 1 | steps: 2 | - name: zig 3 | image: ziglings/ziglang:latest 4 | pull: true 5 | commands: 6 | - zig version 7 | - zig fmt --check . 
8 | - zig build test --summary all 9 | 10 | when: 11 | - event: [push, cron] 12 | cron: ci* 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Jiacai Liu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: init-docs serve test fmt build 2 | 3 | build: 4 | zig build -Doptimize=ReleaseFast \ 5 | -Dbuild_date=$(shell date +"%Y-%m-%dT%H:%M:%S%z") \ 6 | -Dgit_commit=$(shell git rev-parse --short HEAD) \ 7 | -Dversion=$(shell git tag --points-at HEAD) \ 8 | --summary all 9 | 10 | fmt: 11 | zig fmt --check . 
12 | 13 | clean: 14 | rm -rf zig-out .zig-cache 15 | 16 | test: 17 | zig build test --summary all 18 | 19 | ci: fmt test 20 | 21 | init-docs: 22 | cd docs && hugo mod get -u 23 | 24 | serve: 25 | cd docs && hugo serve -D 26 | 27 | 28 | zf: 29 | zig build run-zigfetch -- \ 30 | http://localhost:8000/c0c48df7567ea02458e9fc1f35c4088271b8d4a6.tar.gz 31 | -------------------------------------------------------------------------------- /README.org: -------------------------------------------------------------------------------- 1 | docs/content/_index.org -------------------------------------------------------------------------------- /build.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const Build = std.Build; 3 | 4 | const macos_private_framework = "/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/PrivateFrameworks/"; 5 | 6 | pub fn build(b: *Build) !void { 7 | const optimize = b.standardOptimizeOption(.{}); 8 | const target = b.standardTargetOptions(.{}); 9 | var all_tests = std.ArrayList(*Build.Step).init(b.allocator); 10 | 11 | try addModules(b, target, optimize, &all_tests); 12 | try buildBinaries(b, optimize, target, &all_tests); 13 | try buildExamples(b, optimize, target, &all_tests); 14 | 15 | const test_all_step = b.step("test", "Run all tests"); 16 | for (all_tests.items) |step| { 17 | test_all_step.dependOn(step); 18 | } 19 | } 20 | 21 | const Source = union(enum) { 22 | bin: []const u8, 23 | mod: []const u8, 24 | ex: []const u8, 25 | 26 | const Self = @This(); 27 | 28 | fn name(self: Self) []const u8 { 29 | return switch (self) { 30 | .bin, .mod, .ex => |v| v, 31 | }; 32 | } 33 | 34 | fn path(self: Self) []const u8 { 35 | return switch (self) { 36 | .bin => |_| "src/bin", 37 | .mod => |_| "src/mod", 38 | .ex => |_| "examples", 39 | }; 40 | } 41 | 42 | fn need_test(self: Self) bool { 43 | return switch (self) { 44 | .bin, .mod => true, 45 | .ex => false, 46 | }; 47 | } 48 | }; 49 | 50 | fn addModules( 51 | b: *std.Build, 52 | target: std.Build.ResolvedTarget, 53 | optimize: std.builtin.Mode, 54 | all_tests: *std.ArrayList(*Build.Step), 55 | ) !void { 56 | inline for (.{ "pretty-table", "simargs" }) |name| { 57 | _ = b.addModule(name, .{ 58 | .root_source_file = b.path("src/mod/" ++ name ++ ".zig"), 59 | }); 60 | 61 | try all_tests.append(buildTestStep(b, .{ .mod = name }, target)); 62 | } 63 | 64 | const opt = b.addOptions(); 65 | opt.addOption( 66 | []const u8, 67 | "build_date", 68 | b.option([]const u8, "build_date", "Build date") orelse 69 | b.fmt("{d}", .{std.time.milliTimestamp()}), 70 | ); 71 | 72 | opt.addOption( 73 | []const u8, 74 | "version", 75 | b.option([]const u8, "version", "Version to release") orelse 76 | "Unknown", 77 | ); 78 | opt.addOption( 79 | []const u8, 80 | "git_commit", 81 | b.option([]const u8, "git_commit", "Git commit") orelse 82 | "Unknown", 83 | ); 84 | opt.addOption([]const u8, "build_mode", switch (optimize) { 85 | .Debug => "Dev", 86 | .ReleaseFast => "ReleaseFast", 87 | .ReleaseSmall => "ReleaseSmall", 88 | .ReleaseSafe => "ReleaseSafe", 89 | }); 90 | try b.modules.put("build_info", opt.createModule()); 91 | } 92 | 93 | fn buildExamples( 94 | b: *std.Build, 95 | optimize: std.builtin.Mode, 96 | target: std.Build.ResolvedTarget, 97 | all_tests: *std.ArrayList(*Build.Step), 98 | ) !void { 99 | inline for (.{ 100 | "simargs-demo", 101 | "pretty-table-demo", 102 | }) |name| { 103 | try buildBinary(b, .{ .ex = name }, 
optimize, target, all_tests); 104 | } 105 | } 106 | 107 | fn buildBinaries( 108 | b: *std.Build, 109 | optimize: std.builtin.Mode, 110 | target: std.Build.ResolvedTarget, 111 | all_tests: *std.ArrayList(*Build.Step), 112 | ) !void { 113 | inline for (.{ 114 | "zigfetch", 115 | "tree", 116 | "loc", 117 | "pidof", 118 | "yes", 119 | "night-shift", 120 | "dark-mode", 121 | "repeat", 122 | "tcp-proxy", 123 | "timeout", 124 | }) |name| { 125 | try buildBinary( 126 | b, 127 | .{ .bin = name }, 128 | optimize, 129 | target, 130 | all_tests, 131 | ); 132 | } 133 | 134 | // TODO: move util out of `bin` 135 | try all_tests.append(buildTestStep(b, .{ .bin = "util" }, target)); 136 | } 137 | 138 | fn buildBinary( 139 | b: *std.Build, 140 | comptime source: Source, 141 | optimize: std.builtin.Mode, 142 | target: std.Build.ResolvedTarget, 143 | all_tests: *std.ArrayList(*Build.Step), 144 | ) !void { 145 | if (makeCompileStep( 146 | b, 147 | source, 148 | optimize, 149 | target, 150 | )) |exe| { 151 | var deps = b.modules.iterator(); 152 | while (deps.next()) |dep| { 153 | exe.root_module.addImport(dep.key_ptr.*, dep.value_ptr.*); 154 | } 155 | 156 | b.installArtifact(exe); 157 | const run_cmd = b.addRunArtifact(exe); 158 | if (b.args) |args| { 159 | run_cmd.addArgs(args); 160 | } 161 | const prog_name = comptime source.name(); 162 | b.step("run-" ++ prog_name, "Run " ++ prog_name) 163 | .dependOn(&run_cmd.step); 164 | 165 | if (source.need_test()) { 166 | try all_tests.append(buildTestStep(b, source, target)); 167 | } 168 | } 169 | } 170 | 171 | fn buildTestStep( 172 | b: *std.Build, 173 | comptime source: Source, 174 | target: std.Build.ResolvedTarget, 175 | ) *Build.Step { 176 | const name = comptime source.name(); 177 | const path = comptime source.path(); 178 | const exe_tests = b.addTest(.{ 179 | .root_source_file = b.path(path ++ "/" ++ name ++ ".zig"), 180 | .target = target, 181 | }); 182 | const test_step = b.step("test-" ++ name, "Run " ++ name ++ " tests"); 183 | // https://github.com/ziglang/zig/issues/15009#issuecomment-1475350701 184 | test_step.dependOn(&b.addRunArtifact(exe_tests).step); 185 | return test_step; 186 | } 187 | 188 | fn makeCompileStep( 189 | b: *std.Build, 190 | comptime source: Source, 191 | optimize: std.builtin.Mode, 192 | target: std.Build.ResolvedTarget, 193 | ) ?*Build.Step.Compile { 194 | const name = comptime source.name(); 195 | const path = comptime source.path(); 196 | // We can't use `target.result.isDarwin()` alone here, 197 | // Since when cross compile to darwin on linux, there is no framework in the host! 
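// In other words, the Xcode PrivateFrameworks path above only exists when the build
// host itself is macOS, so "darwin" here requires both host and target to be macOS.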
198 | const is_darwin = @import("builtin").os.tag == .macos and target.result.os.tag == .macos; 199 | const is_win = target.result.os.tag == .windows; 200 | if (!is_darwin) { 201 | if (std.mem.eql(u8, name, "night-shift") or std.mem.eql(u8, name, "dark-mode")) { 202 | return null; 203 | } 204 | } 205 | const exe = b.addExecutable(.{ 206 | .name = name, 207 | .root_source_file = b.path(path ++ "/" ++ name ++ ".zig"), 208 | .target = target, 209 | .optimize = optimize, 210 | }); 211 | 212 | if (std.mem.eql(u8, name, "night-shift")) { 213 | exe.linkSystemLibrary("objc"); 214 | exe.addFrameworkPath(.{ .cwd_relative = macos_private_framework }); 215 | exe.linkFramework("CoreBrightness"); 216 | } else if (std.mem.eql(u8, name, "dark-mode")) { 217 | exe.addFrameworkPath(.{ .cwd_relative = macos_private_framework }); 218 | exe.linkFramework("SkyLight"); 219 | } else if (std.mem.eql(u8, name, "tcp-proxy")) { 220 | exe.linkLibC(); 221 | } else if (std.mem.eql(u8, name, "timeout")) { 222 | if (is_win) { // error: TODO windows Sigaction definition 223 | return null; 224 | } 225 | exe.linkLibC(); 226 | } else if (std.mem.eql(u8, name, "zigfetch")) { 227 | const dep_curl = b.dependency("curl", .{ 228 | .link_vendor = true, 229 | .target = target, 230 | .optimize = optimize, 231 | }); 232 | exe.root_module.addImport("curl", dep_curl.module("curl")); 233 | exe.linkLibC(); 234 | } else if (std.mem.eql(u8, name, "pidof")) { 235 | // only build for macOS 236 | if (is_darwin) { 237 | exe.linkLibC(); 238 | } else { 239 | return null; 240 | } 241 | } 242 | 243 | const install_step = b.step("install-" ++ name, "Install " ++ name); 244 | install_step.dependOn(&b.addInstallArtifact(exe, .{}).step); 245 | return exe; 246 | } 247 | -------------------------------------------------------------------------------- /build.zig.zon: -------------------------------------------------------------------------------- 1 | .{ 2 | .name = .zigcli, 3 | .fingerprint = 0x9e1fddac8cbb1039, 4 | .version = "0.2.3", 5 | .paths = .{ 6 | "src", 7 | "build.zig", 8 | "build.zig.zon", 9 | "README.org", 10 | }, 11 | .dependencies = .{ 12 | .curl = .{ 13 | .url = "https://github.com/jiacai2050/zig-curl/archive/refs/tags/v0.1.1.tar.gz", 14 | .hash = "curl-0.1.1-P4tT4WzAAAD0MGbSfsyGV1hPdooNwZ5odcQYUB9iYlHe", 15 | }, 16 | }, 17 | } 18 | -------------------------------------------------------------------------------- /docs/archetypes/default.md: -------------------------------------------------------------------------------- 1 | +++ 2 | title = '{{ replace .File.ContentBaseName "-" " " | title }}' 3 | date = {{ .Date }} 4 | draft = true 5 | +++ 6 | -------------------------------------------------------------------------------- /docs/content/_index.org: -------------------------------------------------------------------------------- 1 | #+TITLE: Introduction 2 | #+DATE: 2023-10-21T12:09:48+0800 3 | #+LASTMOD: 2025-03-09T16:59:05+0800 4 | #+TYPE: docs 5 | #+author: Jiacai Liu 6 | 7 | [[https://github.com/jiacai2050/zigcli][https://img.shields.io/github/stars/jiacai2050/zigcli.svg]] 8 | [[https://github.com/jiacai2050/loc/actions/workflows/CI.yml][https://github.com/jiacai2050/loc/actions/workflows/CI.yml/badge.svg]] 9 | [[https://github.com/jiacai2050/loc/actions/workflows/release.yml][https://github.com/jiacai2050/loc/actions/workflows/release.yml/badge.svg]] 10 | [[https://img.shields.io/badge/zig%20version-0.14.0-blue.svg]] 11 | 12 | #+begin_quote 13 | [[https://zigcli.liujiacai.net/][Zigcli]] is a toolkit for building command line programs in 
Zig. 14 | #+end_quote 15 | 16 | Official website: https://zigcli.liujiacai.net/ 17 | 18 | It can be imported as [[https://zigcli.liujiacai.net/packages/][Zig packages]] or used directly as [[https://zigcli.liujiacai.net/programs/][command line programs]]. 19 | 20 | * Install 21 | See [[https://zigcli.liujiacai.net/install][INSTALL]] page. 22 | * Who's Using 23 | If you're using =zigcli=, and would like to be added here, welcome to [[https://github.com/jiacai2050/zigcli/pulls][open a PR]]. 24 | 25 | * License 26 | MIT, see [[https://github.com/jiacai2050/zigcli/blob/main/LICENSE][LICENSE]] for details. 27 | -------------------------------------------------------------------------------- /docs/content/install.org: -------------------------------------------------------------------------------- 1 | #+TITLE: Install 2 | #+DATE: 2025-01-02T23:20:23+0800 3 | #+LASTMOD: 2025-03-09T16:56:22+0800 4 | #+TYPE: docs 5 | #+WEIGHT: 10 6 | #+AUTHOR: Jiacai Liu 7 | 8 | ** Packages 9 | #+begin_src bash 10 | zig fetch --save=zigcli https://github.com/jiacai2050/zigcli/archive/refs/tags/${TAG}.zip 11 | #+end_src 12 | 13 | The latest tag can be found on [[https://github.com/jiacai2050/zigcli/releases][release page]]. 14 | 15 | Then in your =build.zig=, import the module like this: 16 | 17 | #+begin_src zig 18 | const zigcli = b.dependency("zigcli", .{}); 19 | 20 | // Currently zigcli provide two packages. 21 | exe.root_module.addImport("simargs", zigcli.module("simargs")); 22 | exe.root_module.addImport("pretty-table", zigcli.module("pretty-table")); 23 | #+end_src 24 | ** CLI Programs 25 | The latest pre-built binaries are available on the [[https://github.com/jiacai2050/zigcli/releases][release page]] or you can build it from source. 26 | 27 | #+begin_src bash 28 | git clone https://github.com/jiacai2050/zigcli.git 29 | #+end_src 30 | Then build with zig 0.14.0 31 | #+begin_src bash 32 | make build 33 | #+end_src 34 | -------------------------------------------------------------------------------- /docs/content/packages/_index.org: -------------------------------------------------------------------------------- 1 | #+TITLE: Packages 2 | #+DATE: 2024-08-17T17:58:01+0800 3 | #+LASTMOD: 2025-01-01T19:26:04+0800 4 | #+WEIGHT: 10 5 | #+TYPE: docs 6 | 7 | * Available packages 8 | -------------------------------------------------------------------------------- /docs/content/packages/pretty-table.org: -------------------------------------------------------------------------------- 1 | #+TITLE: pretty-table 2 | #+DATE: 2024-08-17T17:48:29+0800 3 | #+LASTMOD: 2024-08-17T19:04:48+0800 4 | #+TYPE: docs 5 | #+WEIGHT: 20 6 | #+DESCRIPTION: Print aligned and formatted tables 7 | 8 | * Features 9 | - Many box-drawing character to choose(=ascii=, =box=, =dos=). 
10 | 11 | * Usage 12 | See [[https://github.com/jiacai2050/zigcli/blob/main/examples/pretty-table-demo.zig][pretty-table-demo.zig]] 13 | 14 | #+begin_src zig 15 | const t = Table(2){ 16 | .header = [_]String{ "Language", "Files" }, 17 | .rows = &[_][2]String{ 18 | .{ "Zig", "3" }, 19 | .{ "Python", "2" }, 20 | }, 21 | .footer = [2]String{ "Total", "5" }, 22 | .mode = .box, // or .ascii, .dos 23 | }; 24 | 25 | const out = std.io.getStdOut(); 26 | try out.writer().print("{}", .{t}); 27 | #+end_src 28 | 29 | #+begin_src plaintext 30 | ┌────────┬─────┐ 31 | │Language│Files│ 32 | ├────────┼─────┤ 33 | │Zig │3 │ 34 | │Python │2 │ 35 | │C │12 │ 36 | │Ruby │5 │ 37 | ├────────┼─────┤ 38 | │Total │22 │ 39 | └────────┴─────┘ 40 | #+end_src 41 | -------------------------------------------------------------------------------- /docs/content/packages/simargs.org: -------------------------------------------------------------------------------- 1 | #+TITLE: simargs 2 | #+DATE: 2023-10-21T12:04:40+0800 3 | #+LASTMOD: 2024-08-17T19:03:45+0800 4 | #+TYPE: docs 5 | #+WEIGHT: 10 6 | #+DESCRIPTION: A simple, opinionated, struct-based argument parser in Zig, taking full advantage of [[https://kristoff.it/blog/what-is-zig-comptime/][comptime]]. 7 | 8 | * Features 9 | - Supported data type: 10 | - All [[https://ziglang.org/documentation/master/#Primitive-Types][primitive types]], such as =i8=, =f32=, =bool= 11 | - =[]const u8= 12 | - =Enum= 13 | - Optional fields and fields with default value mean they are optional arguments 14 | - Use =comptime= as possible as I can 15 | - Provide =printHelp()= out of the box 16 | - Support sub commands 17 | * Usage 18 | See [[https://github.com/jiacai2050/zigcli/blob/main/examples/simargs-demo.zig][simargs-demo.zig]]. 19 | 20 | #+begin_src bash :results verbatim :exports both 21 | # Run demo 22 | zig build run-simargs-demo -- -o /tmp/a.out --user-agent Firefox sub1 --a 123 hello world 2>&1 23 | #+end_src 24 | 25 | #+RESULTS: 26 | #+begin_example 27 | ------------------------------Program------------------------------ 28 | /Users/jiacai/gh/zigcli/.zig-cache/o/bd8a4fb104779110e787d579f1d9c6f0/simargs-demo 29 | 30 | ------------------------------Arguments------------------------------ 31 | verbose: null 32 | user-agent: simargs-demo.main__struct_1700.main__struct_1700__enum_1707.Firefox 33 | timeout: 30 34 | output: /tmp/a.out 35 | help: false 36 | __commands__: simargs-demo.main__struct_1700.main__struct_1700__union_1708{ .sub1 = simargs-demo.main__struct_1700.main__struct_1700__union_1708.main__struct_1700__union_1708__struct_1710{ .a = 123, .help = false } } 37 | 38 | ------------------------------Positionals------------------------------ 39 | 1: hello 40 | 2: world 41 | 42 | ------------------------------print_help------------------------------ 43 | USAGE: 44 | /Users/jiacai/gh/zigcli/.zig-cache/o/bd8a4fb104779110e787d579f1d9c6f0/simargs-demo [OPTIONS] [COMMANDS] 45 | 46 | COMMANDS: 47 | sub1 Subcommand 1 48 | sub2 Subcommand 2 49 | 50 | OPTIONS: 51 | -v, --verbose Make the operation more talkative 52 | -A, --user-agent STRING (valid: Chrome|Firefox|Safari)(default: Firefox) 53 | --timeout INTEGER Max time this request can cost(default: 30) 54 | -o, --output STRING Write to file instead of stdout(required) 55 | -h, --help 56 | #+end_example 57 | 58 | * Acknowledgment 59 | Blog post explaining how =simargs= is implemented: [[https://en.liujiacai.net/2022/12/14/argparser-in-zig/][What I learn by implementing argparser in Zig]]. 
60 | 61 | When implement =simargs=, I refer following projects to learn how to write 62 | idiomatic Zig code. Big Thanks! 63 | - [[https://github.com/MasterQ32/zig-args/][MasterQ32/zig-args]] 64 | - [[https://github.com/Hejsil/zig-clap][Hejsil/zig-clap]] 65 | - [[https://github.com/evangrayk/zig-argparse][evangrayk/zig-argparse]] 66 | -------------------------------------------------------------------------------- /docs/content/programs/_index.org: -------------------------------------------------------------------------------- 1 | #+TITLE: Programs 2 | #+DATE: 2023-10-21T12:26:45+0800 3 | #+LASTMOD: 2025-01-04T09:42:27+0800 4 | #+TYPE: docs 5 | #+WEIGHT: 20 6 | #+DESCRIPTION: CLI programs which can be used directly 7 | 8 | * Available Programs 9 | -------------------------------------------------------------------------------- /docs/content/programs/dark-mode.org: -------------------------------------------------------------------------------- 1 | #+TITLE: dark-mode 2 | #+DATE: 2024-08-17T17:52:00+0800 3 | #+LASTMOD: 2024-09-01T11:59:08+0800 4 | #+TYPE: docs 5 | #+AUTHOR: Jiacai Liu 6 | #+DESCRIPTION: Configuring "Dark mode" for macOS. 7 | 8 | #+begin_src bash :results verbatim :exports results :wrap example :dir ../../.. 9 | ./zig-out/bin/dark-mode -h 10 | #+end_src 11 | 12 | #+RESULTS: 13 | #+begin_example 14 | USAGE: 15 | ./zig-out/bin/dark-mode [OPTIONS] [--] 16 | 17 | Available commands: 18 | status View dark mode status 19 | on Turn dark mode on 20 | off Turn dark mode off 21 | toggle Toggle dark mode 22 | 23 | OPTIONS: 24 | -v, --version Print version 25 | -h, --help Print help information 26 | #+end_example 27 | -------------------------------------------------------------------------------- /docs/content/programs/loc.org: -------------------------------------------------------------------------------- 1 | #+TITLE: loc 2 | #+DATE: 2024-08-17T17:52:51+0800 3 | #+LASTMOD: 2024-09-01T11:49:49+0800 4 | #+TYPE: docs 5 | #+DESCRIPTION: Lines of code 6 | 7 | #+begin_src bash :results verbatim :exports result :dir ../../.. 8 | ./zig-out/bin/loc 9 | #+end_src 10 | 11 | #+RESULTS: 12 | #+begin_example 13 | ┌───────────┬───────┬────────┬───────┬──────────┬────────┬──────────┐ 14 | │Language │File │Line │Code │Comment │Blank │Size │ 15 | ├───────────┼───────┼────────┼───────┼──────────┼────────┼──────────┤ 16 | │Zig │363 │10808 │9369 │1050 │389 │632.19K │ 17 | │YAML │8 │317 │292 │4 │21 │7.84K │ 18 | │TOML │1 │32 │27 │0 │5 │698.00B │ 19 | │Makefile │1 │23 │16 │0 │7 │365.00B │ 20 | │Python │1 │10 │7 │2 │1 │166.00B │ 21 | │C │1 │9 │2 │4 │3 │34.00B │ 22 | │Ruby │1 │8 │5 │2 │1 │201.00B │ 23 | │Markdown │1 │5 │5 │0 │0 │102.00B │ 24 | │CHeader │1 │2 │2 │0 │0 │44.00B │ 25 | │JSON │2 │2 │2 │0 │0 │247.00B │ 26 | ├───────────┼───────┼────────┼───────┼──────────┼────────┼──────────┤ 27 | │Total │380 │11216 │9727 │1062 │427 │641.84K │ 28 | └───────────┴───────┴────────┴───────┴──────────┴────────┴──────────┘ 29 | 30 | #+end_example 31 | -------------------------------------------------------------------------------- /docs/content/programs/night-shift.org: -------------------------------------------------------------------------------- 1 | #+TITLE: night-shift 2 | #+DATE: 2024-08-17T17:52:12+0800 3 | #+LASTMOD: 2024-09-01T11:59:23+0800 4 | #+TYPE: docs 5 | #+DESCRIPTION: Configuring "Night Shift" for macOS. 🌕🌖🌗🌘🌑 6 | 7 | #+begin_src bash :results verbatim :exports results :wrap example :dir ../../.. 
8 | ./zig-out/bin/night-shift -h 9 | #+end_src 10 | 11 | #+RESULTS: 12 | #+begin_example 13 | USAGE: 14 | ./zig-out/bin/night-shift [OPTIONS] [--] 15 | 16 | Available commands by category: 17 | Manual on/off control: 18 | status View current Night Shift status 19 | on Turn Night Shift on 20 | off Turn Night Shift off 21 | toggle Toggle Night Shift 22 | 23 | Color temperature: 24 | temp View temperature preference 25 | temp <0-100> Set temperature preference 26 | 27 | Schedule: 28 | schedule View current schedule 29 | schedule sun Start schedule from sunset to sunrise 30 | schedule off Stop the current schedule 31 | schedule Start a custom schedule(HH:mm, 24-hour format) 32 | 33 | OPTIONS: 34 | -v, --version Print version 35 | -h, --help Print help information 36 | #+end_example 37 | 38 | * Acknowledgment 39 | - https://github.com/smudge/nightlight 40 | -------------------------------------------------------------------------------- /docs/content/programs/pidof.org: -------------------------------------------------------------------------------- 1 | #+TITLE: pidof 2 | #+DATE: 2024-08-17T17:52:44+0800 3 | #+LASTMOD: 2024-10-30T21:53:30+0800 4 | #+TYPE: docs 5 | #+DESCRIPTION: Linux has this command, but not in macOS, so I write it for you. 6 | 7 | Program name is case insensitive by default, pass =-S= option if you want sensitive match. 8 | 9 | #+begin_src bash :results verbatim :exports results :wrap example :dir ../../.. 10 | ./zig-out/bin/pidof -h 11 | #+end_src 12 | 13 | #+RESULTS: 14 | #+begin_example 15 | USAGE: 16 | ./zig-out/bin/pidof [OPTIONS] [--] [program] 17 | 18 | OPTIONS: 19 | -s, --single Only return the first matching pid. 20 | -d, --delimiter STRING Delimiter used if more than one PID is shown.(default: ) 21 | -S, --strict Case sensitive when matching program name. 22 | -u, --user_only Only show process belonging to current user. 23 | -v, --version Print version. 24 | -h, --help Print help message. 25 | #+end_example 26 | -------------------------------------------------------------------------------- /docs/content/programs/repeat.org: -------------------------------------------------------------------------------- 1 | #+TITLE: repeat 2 | #+DATE: 2024-08-17T17:52:32+0800 3 | #+LASTMOD: 2024-09-01T11:57:51+0800 4 | #+TYPE: docs 5 | #+DESCRIPTION: Execute a command repeatly until it succeeds. 6 | 7 | #+begin_src bash :results verbatim :exports results :wrap example :dir ../../.. 8 | ./zig-out/bin/repeat -h 9 | #+end_src 10 | 11 | #+RESULTS: 12 | #+begin_example 13 | USAGE: 14 | ./zig-out/bin/repeat [OPTIONS] [--] command 15 | 16 | OPTIONS: 17 | -m, --max INTEGER Max times to repeat 18 | -i, --interval INTEGER Pause interval(in seconds) between repeats 19 | -v, --version Print version 20 | -h, --help Print help information 21 | #+end_example 22 | -------------------------------------------------------------------------------- /docs/content/programs/tcp-proxy.org: -------------------------------------------------------------------------------- 1 | #+TITLE: tcp-proxy 2 | #+DATE: 2024-09-01T00:02:43+0800 3 | #+LASTMOD: 2024-09-02T22:06:56+0800 4 | #+TYPE: docs 5 | #+DESCRIPTION: Forward TCP requests hitting a specified port on the localhost to a different port on another host 6 | 7 | Both IPv4 and IPv6 are supported. On Linux [[https://man7.org/linux/man-pages/man2/splice.2.html][splice]] is used to improve perf, details can be found [[https://blog.cloudflare.com/sockmap-tcp-splicing-of-the-future/][here]]. 
8 | 9 | #+begin_src bash :results verbatim :exports results :wrap example :dir ../../.. 10 | ./zig-out/bin/tcp-proxy -h 11 | #+end_src 12 | 13 | #+RESULTS: 14 | #+begin_example 15 | USAGE: 16 | ./zig-out/bin/tcp-proxy [OPTIONS] 17 | 18 | OPTIONS: 19 | -b, --bind_host STRING Local bind host(default: 0.0.0.0) 20 | -p, --local_port INTEGER Local bind port(default: 8081) 21 | -H, --remote_host STRING Remote host(required) 22 | -P, --remote_port INTEGER Remote port(required) 23 | --buf_size INTEGER Buffer size for tcp read/write(default: 16384) 24 | --server_threads INTEGER Server worker threads num(default: 24) 25 | -h, --help 26 | -v, --version 27 | --verbose 28 | #+end_example 29 | 30 | #+begin_src bash 31 | tcp-proxy -b 0.0.0.0 -p 8082 -H 192.168.0.2 -P 8082 32 | #+end_src 33 | This will forward tcp requests from =localhost:8082= to =192.168.0.2:8082= 34 | 35 | * Benchmark 36 | [[https://iperf.fr/][iPerf3]] is used to benchmark performance between zigcli and [[https://github.com/kklis/proxy][this]], a proxy written in C. 37 | ** All in one 38 | - server/client/proxy :: =192.168.31.142=, debian 12 39 | | | sender | receiver | 40 | |------------+--------+----------| 41 | | zigcli | 57.2 | 56.9 | 42 | | proxy in C | 56.1 | 55.9 | 43 | 44 | - Unit: Gbits/sec 45 | ** Server/client separated 46 | - server :: 192.168.31.94, macOS 47 | - proxy/client :: 192.168.31.142, debian 12 48 | 49 | | | sender | receiver | 50 | |------------+--------+----------| 51 | | zigcli | 191 | 180 | 52 | | proxy in C | 210 | 198 | 53 | 54 | - Unit: Mbits/sec 55 | 56 | ** Commands 57 | #+begin_src bash 58 | # start C proxy in foreground 59 | ./proxy -b 0.0.0.0 -l 8081 -h 192.168.31.142 -p 5201 -f 60 | # start tcp-proxy 61 | ./zig-out/bin/tcp-proxy -b 0.0.0.0 -p 8080 -H 192.168.31.142 -P 5201 62 | 63 | # server 64 | iperf3 -s 65 | # client 66 | iperf3 -c 192.168.31.142 -p 8080/8081 67 | #+end_src 68 | -------------------------------------------------------------------------------- /docs/content/programs/timeout.org: -------------------------------------------------------------------------------- 1 | #+TITLE: timeout 2 | #+DATE: 2025-01-23T22:28:53+0800 3 | #+LASTMOD: 2025-01-23T22:35:32+0800 4 | #+TYPE: docs 5 | #+DESCRIPTION: Run a command with bounded time 6 | 7 | #+begin_example 8 | timeout SECONDS COMMAND [ARG]... 9 | #+end_example 10 | 11 | Start a command, and kill it if the specified timeout expires. 12 | 13 | The =timeout= command is crucial for: 14 | 15 | - Process Control 16 | - Limits execution time of commands 17 | - Prevents resource-consuming tasks from running indefinitely 18 | - Provides automatic process termination 19 | -------------------------------------------------------------------------------- /docs/content/programs/tree.org: -------------------------------------------------------------------------------- 1 | #+TITLE: tree 2 | #+DATE: 2024-08-17T17:52:22+0800 3 | #+LASTMOD: 2024-09-01T11:56:26+0800 4 | #+TYPE: docs 5 | #+DESCRIPTION: Display the directory structure of a path in a tree-like format 6 | 7 | #+begin_src bash :results verbatim :exports results :wrap example :dir ../../.. 8 | ./zig-out/bin/tree -h 9 | #+end_src 10 | 11 | #+RESULTS: 12 | #+begin_example 13 | USAGE: 14 | ./zig-out/bin/tree [OPTIONS] [--] [directory] 15 | 16 | OPTIONS: 17 | -m, --mode STRING Line drawing characters. (valid: ascii|box|dos)(default: box) 18 | -a, --all All files are printed. 19 | -s, --size Print the size of each file in bytes along with the name. 20 | -d, --directory List directories only. 
21 | -L, --level INTEGER Max display depth of the directory tree. 22 | -v, --version Print version. 23 | -h, --help Print help information. 24 | #+end_example 25 | 26 | ** Demo 27 | #+begin_src bash 28 | $ ./zig-out/bin/tree src 29 | src 30 | ├──bin 31 | │ ├──loc.zig 32 | │ ├──night-shift.zig 33 | │ ├──pidof.zig 34 | │ ├──repeat.zig 35 | │ ├──tree.zig 36 | │ ├──util.zig 37 | │ └──yes.zig 38 | └──mod 39 | ├──pretty-table.zig 40 | └──simargs.zig 41 | 42 | 1 directories, 4 files 43 | #+end_src 44 | -------------------------------------------------------------------------------- /docs/content/programs/zigfetch.org: -------------------------------------------------------------------------------- 1 | #+TITLE: zigfetch 2 | #+DATE: 2025-01-01T18:01:47+0800 3 | #+LASTMOD: 2025-02-25T22:44:10+0800 4 | #+TYPE: docs 5 | #+DESCRIPTION: Fetch zig packages, utilizing libcurl. 6 | 7 | =zigfetch= behaves similarly to =zig fetch=, but utilizing the capabilities of libcurl for its functionality. 8 | 9 | HTTP support within Zig's standard library isn't currently stable, [[https://github.com/ziglang/zig/issues/21792][this proxy issue]] make it even harder, resulting in multiple errors occurring during dependency downloads when building Zig projects. This poses a significant challenge for Chinese developers owing to [[https://en.wikipedia.org/wiki/Great_Firewall][the Great Firewall]]. 10 | 11 | {{< figure src="https://fs.liujiacai.net/cdn-img/zigcli/zig-fetch-errors.webp" >}} 12 | 13 | As a consequence, =zigfetch= was developed. It operates via libcurl to ensure that both the =http_proxy= and =https_proxy= [[https://curl.se/libcurl/c/libcurl-env.html][environment variables]] function correctly. 14 | 15 | ** Usage 16 | #+begin_src bash :results verbatim :exports result :dir ../../.. 17 | ./zig-out/bin/zigfetch --help 18 | #+end_src 19 | 20 | #+RESULTS: 21 | #+begin_example 22 | USAGE: 23 | ./zig-out/bin/zigfetch [OPTIONS] [--] [package-dir or url] 24 | 25 | OPTIONS: 26 | -h, --help Show help 27 | -V, --version Show version 28 | -v, --verbose Show verbose log 29 | -t, --timeout INTEGER Libcurl http timeout in seconds(default: 60) 30 | -n, --no-dep Disable fetch dependencies 31 | -d, --debug-hash Print hash for each file 32 | #+end_example 33 | 34 | If the argument is a local directory, =zigfetch= will attempt to open =build.zig.zon=, download dependencies specified in the =.dependencies= fields, and then calculate hashes for each package. If these hashes match those in the =.hash= fields, =zigfetch= will move them to =~/.cache/zig/p/{hash}= after completion. 35 | 36 | If =zigfetch= succeeds, =zig build= will build the project directly, assuming the dependencies already exist. 37 | 38 | ** Proxy config 39 | This is a demo for socks5 proxy setup: 40 | #+begin_src bash 41 | export http_proxy="socks5://127.0.0.1:1080" 42 | export https_proxy=$http_proxy 43 | export all_proxy=$http_proxy 44 | export GIT_SSH_COMMAND='ssh -o ProxyCommand="nc -X 5 -x 127.0.0.1:1080 %h %p"' 45 | #+end_src 46 | 47 | =GIT_SSH_COMMAND= is used for fetch =git+http(s)= dependencies. 
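** Examples
A couple of typical invocations (a minimal sketch; it assumes the binary was built into =zig-out/bin= as in the snippet above):

#+begin_src bash
# Fetch every dependency declared in ./build.zig.zon of the current project
# and install the verified packages under ~/.cache/zig/p/
./zig-out/bin/zigfetch .

# Fetch a single package by URL; the computed package hash is printed as the last line
./zig-out/bin/zigfetch https://github.com/jiacai2050/zig-curl/archive/refs/tags/v0.1.1.tar.gz
#+end_src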
48 | -------------------------------------------------------------------------------- /docs/content/roadmap.org: -------------------------------------------------------------------------------- 1 | #+TITLE: Roadmap 2 | #+DATE: 2024-08-17T18:07:20+0800 3 | #+LASTMOD: 2024-08-17T18:19:47+0800 4 | #+WEIGHT: 30 5 | #+TYPE: docs 6 | 7 | * Loc 8 | - Performance, at least comparable with [[https://github.com/cgag/loc][cgag/loc]] [[https://github.com/jiacai2050/loc/issues/1][#1]] 9 | - More options, such as =--exclude = 10 | - Support multiline comment 11 | -------------------------------------------------------------------------------- /docs/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/jiacai2050/zigcli/docs 2 | 3 | go 1.21 4 | 5 | require github.com/google/docsy v0.11.0 // indirect 6 | -------------------------------------------------------------------------------- /docs/go.sum: -------------------------------------------------------------------------------- 1 | github.com/FortAwesome/Font-Awesome v0.0.0-20240402185447-c0f460dca7f7/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo= 2 | github.com/FortAwesome/Font-Awesome v0.0.0-20240716171331-37eff7fa00de/go.mod h1:IUgezN/MFpCDIlFezw3L8j83oeiIuYoj28Miwr/KUYo= 3 | github.com/google/docsy v0.10.0 h1:6tMDacPwAyRWNCfvsn/9qGOZDQ8b0aRzjRZvnZPY5dg= 4 | github.com/google/docsy v0.10.0/go.mod h1:c0nIAqmRTOuJ01F85U/wJPQtc3Zj9N58Kea9bOT2AJc= 5 | github.com/google/docsy v0.11.0 h1:QnV40cc28QwS++kP9qINtrIv4hlASruhC/K3FqkHAmM= 6 | github.com/google/docsy v0.11.0/go.mod h1:hGGW0OjNuG5ZbH5JRtALY3yvN8ybbEP/v2iaK4bwOUI= 7 | github.com/twbs/bootstrap v5.3.3+incompatible/go.mod h1:fZTSrkpSf0/HkL0IIJzvVspTt1r9zuf7XlZau8kpcY0= 8 | -------------------------------------------------------------------------------- /docs/hugo.toml: -------------------------------------------------------------------------------- 1 | baseURL = 'https://zigcli.liujiacai.net/' 2 | languageCode = 'en-us' 3 | title = 'Zigcli' 4 | enableGitInfo = true 5 | 6 | [params] 7 | github_repo = 'https://github.com/jiacai2050/zigcli' 8 | github_subdir = 'docs' 9 | offlineSearch = true 10 | 11 | [module] 12 | proxy = "https://goproxy.cn,direct" 13 | [[module.imports]] 14 | path = "github.com/google/docsy" 15 | 16 | [params.copyright] 17 | authors = "Jiacai Liu | [CC BY 4.0](https://creativecommons.org/licenses/by/4.0) | " 18 | from_year = 2023 19 | 20 | [params.ui] 21 | navbar_logo = false 22 | showLightDarkModeMenu = true 23 | 24 | [markup] 25 | [markup.goldmark] 26 | [markup.goldmark.parser.attribute] 27 | block = true 28 | [markup.goldmark.renderer] 29 | unsafe = true 30 | [markup.highlight] 31 | style = "pygments" 32 | linenos = true 33 | -------------------------------------------------------------------------------- /examples/pretty-table-demo.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | const Table = @import("pretty-table").Table; 4 | const Separator = @import("pretty-table").Separator; 5 | const String = @import("pretty-table").String; 6 | 7 | pub fn main() !void { 8 | const t = Table(2){ 9 | .header = [_]String{ "Language", "Files" }, 10 | .rows = &[_][2]String{ 11 | .{ "Zig", "3" }, 12 | .{ "Python", "2" }, 13 | .{ "C", "12" }, 14 | .{ "Ruby", "5" }, 15 | }, 16 | .footer = [2]String{ "Total", "22" }, 17 | .mode = .box, 18 | }; 19 | 20 | const out = std.io.getStdOut(); 21 | try out.writer().print("{}", .{t}); 22 | } 23 | 
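// Expected output (mode = .box): the box-drawn Language/Files table shown in
// docs/content/packages/pretty-table.org, with four data rows and a "Total | 22" footer.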
-------------------------------------------------------------------------------- /examples/simargs-demo.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const simargs = @import("simargs"); 3 | 4 | pub const std_options: std.Options = .{ 5 | .log_level = .info, 6 | }; 7 | 8 | pub fn main() !void { 9 | var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); 10 | defer arena.deinit(); 11 | const allocator = arena.allocator(); 12 | 13 | var opt = try simargs.parse(allocator, struct { 14 | // Those fields declare arguments options 15 | // only `output` is required, others are all optional 16 | verbose: ?bool, 17 | @"user-agent": enum { Chrome, Firefox, Safari } = .Firefox, 18 | timeout: ?u16 = 30, // default value 19 | output: []const u8, 20 | help: bool = false, 21 | version: bool = false, 22 | 23 | // This special field define sub_commands, 24 | // Each union item is a config struct, which is similar with top-level config struct. 25 | __commands__: union(enum) { 26 | sub1: struct { 27 | a: u64, 28 | help: bool = false, 29 | }, 30 | sub2: struct { name: []const u8 }, 31 | 32 | // Define help message for sub commands. 33 | pub const __messages__ = .{ 34 | .sub1 = "Subcommand 1", 35 | .sub2 = "Subcommand 2", 36 | }; 37 | }, 38 | 39 | // This declares option's short name 40 | pub const __shorts__ = .{ 41 | .verbose = .v, 42 | .output = .o, 43 | .@"user-agent" = .A, 44 | .help = .h, 45 | }; 46 | 47 | // This declares option's help message 48 | pub const __messages__ = .{ 49 | .verbose = "Make the operation more talkative", 50 | .output = "Write to file instead of stdout", 51 | .timeout = "Max time this request can cost", 52 | }; 53 | }, "[file]", "0.1.0"); 54 | defer opt.deinit(); 55 | 56 | const sep = "-" ** 30; 57 | std.debug.print("{s}Program{s}\n{s}\n\n", .{ sep, sep, opt.program }); 58 | std.debug.print("{s}Arguments{s}\n", .{ sep, sep }); 59 | inline for (std.meta.fields(@TypeOf(opt.args))) |fld| { 60 | const format = "{s:>10}: " ++ switch (fld.type) { 61 | []const u8 => "{s}", 62 | ?[]const u8 => "{?s}", 63 | else => "{any}", 64 | } ++ "\n"; 65 | std.debug.print(format, .{ fld.name, @field(opt.args, fld.name) }); 66 | } 67 | 68 | std.debug.print("\n{s}Positionals{s}\n", .{ sep, sep }); 69 | for (opt.positional_args, 0..) |arg, idx| { 70 | std.debug.print("{d}: {s}\n", .{ idx + 1, arg }); 71 | } 72 | 73 | // Provide a print_help util method 74 | std.debug.print("\n{s}print_help{s}\n", .{ sep, sep }); 75 | const stdout = std.io.getStdOut(); 76 | try opt.printHelp(stdout.writer()); 77 | } 78 | -------------------------------------------------------------------------------- /src/bin/dark-mode.zig: -------------------------------------------------------------------------------- 1 | //! Dark mode status, built for macOS. 2 | //! 
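//! It calls into the private SkyLight framework (SLSGetAppearanceThemeLegacy /
//! SLSSetAppearanceThemeLegacy), which build.zig links for this binary.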
3 | const std = @import("std"); 4 | const simargs = @import("simargs"); 5 | const util = @import("util.zig"); 6 | 7 | // https://saagarjha.com/blog/2018/12/01/scheduling-dark-mode/ 8 | extern "c" fn SLSSetAppearanceThemeLegacy(bool) void; 9 | extern "c" fn SLSGetAppearanceThemeLegacy() bool; 10 | 11 | pub fn main() !void { 12 | var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); 13 | defer arena.deinit(); 14 | const allocator = arena.allocator(); 15 | 16 | const opt = try simargs.parse(allocator, struct { 17 | version: bool = false, 18 | help: bool = false, 19 | 20 | __commands__: union(enum) { 21 | on: struct {}, 22 | off: struct {}, 23 | toggle: struct {}, 24 | status: struct {}, 25 | 26 | pub const __messages__ = .{ 27 | .on = "Turn dark mode on", 28 | .off = "Turn dark mode off", 29 | .toggle = "Toggle dark mode", 30 | .status = "View dark mode status (default)", 31 | }; 32 | } = .{ .status = .{} }, 33 | 34 | pub const __shorts__ = .{ 35 | .version = .v, 36 | .help = .h, 37 | }; 38 | pub const __messages__ = .{ 39 | .help = "Print help information", 40 | .version = "Print version", 41 | }; 42 | }, null, util.get_build_info()); 43 | defer opt.deinit(); 44 | 45 | switch (opt.args.__commands__) { 46 | .status => { 47 | const is_dark = SLSGetAppearanceThemeLegacy(); 48 | if (is_dark) { 49 | std.debug.print("on", .{}); 50 | } else { 51 | std.debug.print("off", .{}); 52 | } 53 | }, 54 | .on => { 55 | SLSSetAppearanceThemeLegacy(true); 56 | }, 57 | .off => { 58 | SLSSetAppearanceThemeLegacy(false); 59 | }, 60 | .toggle => { 61 | const is_dark = SLSGetAppearanceThemeLegacy(); 62 | SLSSetAppearanceThemeLegacy(!is_dark); 63 | }, 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/bin/loc.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const Table = @import("pretty-table").Table; 3 | const Separator = @import("pretty-table").Separator; 4 | const simargs = @import("simargs"); 5 | const util = @import("util.zig"); 6 | const StringUtil = util.StringUtil; 7 | const fs = std.fs; 8 | 9 | pub const std_options: std.Options = .{ 10 | .log_level = .info, 11 | }; 12 | 13 | const IGNORE_DIRS = [_][]const u8{ ".git", "zig-cache", "zig-out", "target", "vendor", "node_modules", "out" }; 14 | 15 | const Language = enum { 16 | Zig, 17 | C, 18 | CPP, 19 | CHeader, 20 | Go, 21 | Rust, 22 | Bash, 23 | Python, 24 | Ruby, 25 | JavaScript, 26 | Java, 27 | Makefile, 28 | Markdown, 29 | HTML, 30 | YAML, 31 | TOML, 32 | JSON, 33 | TypeScript, 34 | Swift, 35 | Other, 36 | // Used in footer 37 | Total, 38 | 39 | const Self = @This(); 40 | 41 | fn multiLineCommentBeginChars(self: Self) ?[]const u8 { 42 | return switch (self) { 43 | .Markdown, .HTML => "", 52 | .C, .CPP, .CHeader, .Java, .JavaScript => "*/", 53 | else => unreachable, 54 | }; 55 | } 56 | 57 | fn commentChars(self: Self) ?[]const u8 { 58 | return switch (self) { 59 | .Bash, .Python, .Ruby, .Makefile, .YAML, .TOML => "#", 60 | .Markdown, .HTML => null, 61 | else => "//", 62 | }; 63 | } 64 | 65 | const ExtLangMap = std.StaticStringMap(Self).initComptime(.{ 66 | .{ ".zig", .Zig }, 67 | .{ ".c", .C }, 68 | .{ ".cpp", .CPP }, 69 | .{ ".cxx", .CPP }, 70 | .{ ".cc", .CPP }, 71 | .{ ".h", .CHeader }, 72 | .{ ".go", .Go }, 73 | .{ ".rs", .Rust }, 74 | .{ ".sh", .Bash }, 75 | .{ ".py", .Python }, 76 | .{ ".rb", .Ruby }, 77 | .{ ".js", .JavaScript }, 78 | .{ ".java", .Java }, 79 | .{ ".md", .Markdown }, 80 | .{ ".markdown", 
.Markdown }, 81 | .{ ".html", .HTML }, 82 | .{ ".yml", .YAML }, 83 | .{ ".yaml", .YAML }, 84 | .{ ".toml", .TOML }, 85 | .{ ".json", .JSON }, 86 | .{ ".ts", .TypeScript }, 87 | .{ ".swift", .Swift }, 88 | }); 89 | const FilenameLangMap = std.StaticStringMap(Self).initComptime(.{ 90 | .{ "Makefile", .Makefile }, 91 | }); 92 | 93 | fn parse(basename: []const u8) Self { 94 | const ext = fs.path.extension(basename); 95 | if (std.mem.eql(u8, ext, "")) { 96 | return FilenameLangMap.get(basename) orelse .Other; 97 | } 98 | 99 | return ExtLangMap.get(ext) orelse .Other; 100 | } 101 | 102 | fn toString(self: Self) []const u8 { 103 | return @tagName(self); 104 | } 105 | }; 106 | 107 | const Column = enum { 108 | language, 109 | file, 110 | line, 111 | code, 112 | comment, 113 | blank, 114 | size, 115 | }; 116 | 117 | const LinesOfCode = struct { 118 | lang: Language, 119 | files: usize, 120 | codes: usize, 121 | comments: usize, 122 | blanks: usize, 123 | size: usize, 124 | 125 | const Self = @This(); 126 | 127 | const header = b: { 128 | const fieldInfos = std.meta.fields(Column); 129 | var names: [fieldInfos.len][]const u8 = undefined; 130 | for (fieldInfos, 0..) |field, i| { 131 | names[i] = [_]u8{std.ascii.toUpper(field.name[0])} ++ field.name[1..]; 132 | } 133 | break :b names; 134 | }; 135 | const LOCTable = Table(Self.header.len); 136 | const LOCTableData = [Self.header.len][]const u8; 137 | 138 | fn merge(self: *Self, other: Self) void { 139 | self.files += other.files; 140 | self.codes += other.codes; 141 | self.comments += other.comments; 142 | self.blanks += other.blanks; 143 | self.size += other.size; 144 | } 145 | 146 | fn lines(self: Self) usize { 147 | return self.blanks + self.codes + self.comments; 148 | } 149 | 150 | fn cmp(sort_col: Column, a: *Self, b: *Self) bool { 151 | return switch (sort_col) { 152 | .language => std.mem.lessThan(u8, @tagName(a.lang), @tagName(b.lang)), 153 | .file => a.files > b.files, 154 | .code => a.codes > b.codes, 155 | .comment => a.comments > b.comments, 156 | .blank => a.comments > b.comments, 157 | .size => a.size > b.size, 158 | .line => a.lines() > b.lines(), 159 | }; 160 | } 161 | 162 | fn numToString(n: usize, allocator: std.mem.Allocator) []const u8 { 163 | return std.fmt.allocPrint(allocator, "{d}", .{n}) catch unreachable; 164 | } 165 | 166 | fn toTableData(self: Self, allocator: std.mem.Allocator) Self.LOCTableData { 167 | return [_][]const u8{ 168 | self.lang.toString(), 169 | Self.numToString(self.files, allocator), 170 | Self.numToString(self.codes + self.blanks + self.comments, allocator), 171 | Self.numToString(self.codes, allocator), 172 | Self.numToString(self.comments, allocator), 173 | Self.numToString(self.blanks, allocator), 174 | StringUtil.humanSize(allocator, self.size) catch unreachable, 175 | }; 176 | } 177 | }; 178 | 179 | const LocMap = std.enums.EnumMap(Language, LinesOfCode); 180 | 181 | pub fn main() !void { 182 | var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); 183 | defer arena.deinit(); 184 | const allocator = arena.allocator(); 185 | 186 | const opt = try simargs.parse(allocator, struct { 187 | sort: Column = .line, 188 | mode: Separator.Mode = .box, 189 | padding: usize = 3, 190 | version: bool = false, 191 | help: bool = false, 192 | 193 | pub const __shorts__ = .{ 194 | .sort = .s, 195 | .mode = .m, 196 | .padding = .p, 197 | .version = .v, 198 | .help = .h, 199 | }; 200 | 201 | pub const __messages__ = .{ 202 | .help = "Print help information", 203 | .mode = "Line drawing characters", 204 | 
.padding = "Column padding", 205 | .version = "Print version", 206 | .sort = "Column to sort by", 207 | }; 208 | }, "[file or directory]", util.get_build_info()); 209 | defer opt.deinit(); 210 | 211 | const file_or_dir = if (opt.positional_args.len == 0) 212 | "." 213 | else 214 | opt.positional_args[0]; 215 | 216 | var loc_map = LocMap{}; 217 | var dir = fs.cwd().openDir(file_or_dir, .{ .iterate = true }) catch |err| switch (err) { 218 | error.NotDir => { 219 | try populateLoc(allocator, &loc_map, fs.cwd(), file_or_dir); 220 | return printLocMap( 221 | allocator, 222 | &loc_map, 223 | opt.args.sort, 224 | opt.args.mode, 225 | opt.args.padding, 226 | ); 227 | }, 228 | else => return err, 229 | }; 230 | defer dir.close(); 231 | try walk(allocator, &loc_map, dir); 232 | try printLocMap( 233 | allocator, 234 | &loc_map, 235 | opt.args.sort, 236 | opt.args.mode, 237 | opt.args.padding, 238 | ); 239 | } 240 | 241 | fn printLocMap( 242 | allocator: std.mem.Allocator, 243 | loc_map: *LocMap, 244 | sort_col: Column, 245 | mode: Separator.Mode, 246 | padding: usize, 247 | ) !void { 248 | var iter = loc_map.iterator(); 249 | var list = std.ArrayList(*LinesOfCode).init(allocator); 250 | var total_entry = LinesOfCode{ 251 | .lang = .Total, 252 | .codes = 0, 253 | .comments = 0, 254 | .blanks = 0, 255 | .files = 0, 256 | .size = 0, 257 | }; 258 | 259 | while (iter.next()) |entry| { 260 | try list.append(entry.value); 261 | total_entry.merge(entry.value.*); 262 | } 263 | std.sort.heap(*LinesOfCode, list.items, sort_col, LinesOfCode.cmp); 264 | 265 | var table_data = std.ArrayList(LinesOfCode.LOCTableData).init(allocator); 266 | for (list.items) |entry| { 267 | try table_data.append(entry.toTableData(allocator)); 268 | } 269 | const table = LinesOfCode.LOCTable{ 270 | .header = LinesOfCode.header, 271 | .footer = total_entry.toTableData(allocator), 272 | .rows = table_data.items, 273 | .mode = mode, 274 | .padding = padding, 275 | }; 276 | try std.io.getStdOut().writer().print("{}\n", .{table}); 277 | } 278 | 279 | fn walk(allocator: std.mem.Allocator, loc_map: *LocMap, dir: fs.Dir) anyerror!void { 280 | var it = dir.iterate(); 281 | while (try it.next()) |e| { 282 | switch (e.kind) { 283 | .file => { 284 | try populateLoc(allocator, loc_map, dir, e.name); 285 | }, 286 | .directory => { 287 | var should_ignore = false; 288 | for (IGNORE_DIRS) |ignore| { 289 | if (std.mem.eql(u8, ignore, e.name)) { 290 | should_ignore = true; 291 | break; 292 | } 293 | } 294 | if (!should_ignore) { 295 | var sub_dir = try dir.openDir(e.name, .{ .iterate = true }); 296 | defer sub_dir.close(); 297 | try walk(allocator, loc_map, sub_dir); 298 | } 299 | }, 300 | else => {}, 301 | } 302 | } 303 | } 304 | 305 | // State used when decide if this line is code,comment or blank 306 | // Two possible transitions: 307 | // 1. Normal: Unknown -> Unknown 308 | // 2. MultipleLineComment: Unknown -> [InMultipleLineComment]? -> Unknown 309 | const State = enum { 310 | Unknown, 311 | InMultipleLineComment, 312 | }; 313 | 314 | fn populateLoc(allocator: std.mem.Allocator, loc_map: *LocMap, dir: fs.Dir, basename: []const u8) anyerror!void { 315 | _ = allocator; 316 | const lang = Language.parse(basename); 317 | if (lang == Language.Other) { 318 | return; 319 | } 320 | 321 | // Why no `getOrPutValue` in EnumMap? 
322 | var loc_entry = loc_map.getPtr(lang) orelse blk: { 323 | loc_map.put(lang, .{ 324 | .codes = 0, 325 | .comments = 0, 326 | .blanks = 0, 327 | .lang = lang, 328 | .files = 0, 329 | .size = 0, 330 | }); 331 | break :blk loc_map.getPtr(lang).?; 332 | }; 333 | var file = try dir.openFile(basename, .{}); 334 | defer file.close(); 335 | loc_entry.files += 1; 336 | 337 | const metadata = try file.metadata(); 338 | const file_size: usize = @truncate(metadata.size()); 339 | if (file_size == 0) { 340 | return; 341 | } 342 | loc_entry.size += file_size; 343 | 344 | var state = State.Unknown; 345 | switch (@import("builtin").os.tag) { 346 | .windows => { 347 | const rdr = file.reader(); 348 | var buf: [1024]u8 = undefined; 349 | while (rdr.readUntilDelimiterOrEof(&buf, '\n') catch |e| { 350 | std.log.err("File contains too long lines, name:{s}, err:{any}", .{ basename, e }); 351 | return; 352 | }) |line| { 353 | state = updateLineType(state, line, lang, loc_entry); 354 | } 355 | }, 356 | else => { 357 | var ptr = try std.posix.mmap( 358 | null, 359 | file_size, 360 | std.posix.PROT.READ, 361 | .{ .TYPE = .PRIVATE }, 362 | 363 | file.handle, 364 | 0, 365 | ); 366 | defer std.posix.munmap(ptr); 367 | 368 | var offset_so_far: usize = 0; 369 | while (offset_so_far < ptr.len) { 370 | var line_end = offset_so_far; 371 | while (line_end < ptr.len and ptr[line_end] != '\n') { 372 | line_end += 1; 373 | } 374 | const line = ptr[offset_so_far..line_end]; 375 | offset_so_far = line_end + 1; 376 | 377 | state = updateLineType(state, line, lang, loc_entry); 378 | } 379 | }, 380 | } 381 | } 382 | 383 | fn updateLineType( 384 | state: State, 385 | raw_line: []const u8, 386 | lang: Language, 387 | loc_entry: *LinesOfCode, 388 | ) State { 389 | const line = trimWhitespace(raw_line); 390 | if (line == null) { 391 | loc_entry.blanks += 1; 392 | // state not change 393 | return state; 394 | } 395 | 396 | return switch (state) { 397 | .Unknown => blk: { 398 | if (lang.commentChars()) |chars| { 399 | if (std.mem.startsWith(u8, line.?, chars)) { 400 | loc_entry.comments += 1; 401 | break :blk .Unknown; 402 | } 403 | } 404 | 405 | if (lang.multiLineCommentBeginChars()) |chars| { 406 | if (std.mem.startsWith(u8, line.?, chars)) { 407 | loc_entry.comments += 1; 408 | const end_chars = lang.multiLineCommentEndChars(); 409 | if (std.mem.endsWith(u8, line.?, end_chars)) { 410 | break :blk .Unknown; 411 | } 412 | 413 | break :blk .InMultipleLineComment; 414 | } 415 | } 416 | 417 | loc_entry.codes += 1; 418 | break :blk .Unknown; 419 | }, 420 | .InMultipleLineComment => blk: { 421 | loc_entry.comments += 1; 422 | const end_chars = lang.multiLineCommentEndChars(); 423 | if (std.mem.endsWith(u8, line.?, end_chars)) { 424 | break :blk .Unknown; 425 | } 426 | break :blk .InMultipleLineComment; 427 | }, 428 | }; 429 | } 430 | 431 | fn isWhitespace(c: u8) bool { 432 | for (std.ascii.whitespace) |space| { 433 | if (space == c) { 434 | return true; 435 | } 436 | } 437 | return false; 438 | } 439 | 440 | fn trimWhitespace(line: []const u8) ?[]const u8 { 441 | if (line.len == 0) { 442 | return null; 443 | } 444 | 445 | var start_idx: usize = 0; 446 | var end_idx: usize = line.len - 1; 447 | while (start_idx <= end_idx) { 448 | if (!isWhitespace(line[start_idx])) { 449 | break; 450 | } 451 | start_idx += 1; 452 | } 453 | while (end_idx >= start_idx) { 454 | if (!isWhitespace(line[end_idx])) { 455 | break; 456 | } 457 | end_idx -= 1; 458 | } 459 | 460 | return if (start_idx > end_idx) 461 | null 462 | else 463 | return line[start_idx .. 
end_idx + 1]; 464 | } 465 | 466 | test "trimWhitespace" { 467 | try std.testing.expect(null == trimWhitespace("")); 468 | try std.testing.expect(null == trimWhitespace(" ")); 469 | try std.testing.expect(null == trimWhitespace(" ")); 470 | try std.testing.expectEqualStrings("a", trimWhitespace("a").?); 471 | try std.testing.expectEqualStrings("a", trimWhitespace("a ").?); 472 | try std.testing.expectEqualStrings("a", trimWhitespace(" a").?); 473 | try std.testing.expectEqualStrings("a", trimWhitespace(" a ").?); 474 | } 475 | 476 | test "LOC Zig/Python/Ruby" { 477 | const allocator = std.testing.allocator; 478 | var loc_map = LocMap{}; 479 | const dir = fs.cwd(); 480 | 481 | const testcases = .{ 482 | .{ 483 | "tests/test.zig", .{ 484 | .lang = Language.Zig, 485 | .files = 1, 486 | .codes = 34, 487 | .comments = 2, 488 | .blanks = 8, 489 | .size = 1203, 490 | }, 491 | }, 492 | .{ 493 | "tests/test.py", .{ 494 | .lang = Language.Python, 495 | .files = 1, 496 | .codes = 7, 497 | .comments = 2, 498 | .blanks = 1, 499 | .size = 166, 500 | }, 501 | }, 502 | .{ 503 | "tests/test.rb", .{ 504 | .lang = Language.Ruby, 505 | .files = 1, 506 | .codes = 5, 507 | .comments = 2, 508 | .blanks = 1, 509 | .size = 201, 510 | }, 511 | }, 512 | .{ 513 | "tests/test.c", .{ 514 | .lang = Language.C, 515 | .files = 1, 516 | .codes = 2, 517 | .comments = 4, 518 | .blanks = 3, 519 | .size = 34, 520 | }, 521 | }, 522 | }; 523 | 524 | inline for (testcases) |case| { 525 | const basename = case.@"0"; 526 | const expected = case.@"1"; 527 | const lang = expected.lang; 528 | 529 | try std.testing.expectEqual(Language.parse(basename), lang); 530 | 531 | try populateLoc(allocator, &loc_map, dir, basename); 532 | var loc = loc_map.get(lang).?; 533 | // On windows, newline will be \r\n, so size is different 534 | // Zig file stays the same since it's special taken care of in .gitattributes 535 | if (.windows == @import("builtin").os.tag) { 536 | if (lang != .Zig) { 537 | loc.size = expected.size; 538 | } 539 | } 540 | inline for (std.meta.fields(@TypeOf(expected))) |field| { 541 | try std.testing.expectEqual( 542 | @field(loc, field.name), 543 | @field(expected, field.name), 544 | ); 545 | } 546 | } 547 | } 548 | -------------------------------------------------------------------------------- /src/bin/night-shift.zig: -------------------------------------------------------------------------------- 1 | //! Control Night shift in cli, build for macOS. 2 | //! 
3 | const std = @import("std"); 4 | const simargs = @import("simargs"); 5 | const util = @import("util.zig"); 6 | const c = @cImport({ 7 | @cInclude("objc/objc.h"); 8 | @cInclude("objc/message.h"); 9 | }); 10 | 11 | const Time = extern struct { 12 | hour: c_int, 13 | minute: c_int, 14 | 15 | fn fromString(hhmm: []const u8) !@This() { 16 | var iter = std.mem.splitSequence(u8, hhmm, ":"); 17 | const hour = iter.next() orelse return error.MissingHour; 18 | const minute = iter.next() orelse return error.MissingMinute; 19 | 20 | return .{ 21 | .hour = std.fmt.parseInt(c_int, hour, 10) catch return error.InvalidHour, 22 | .minute = std.fmt.parseInt(c_int, minute, 10) catch return error.InvalidMinute, 23 | }; 24 | } 25 | }; 26 | 27 | const CustomSchedule = extern struct { 28 | from_time: Time, 29 | to_time: Time, 30 | }; 31 | 32 | const Schedule = union(enum) { 33 | // false means schedule is off 34 | SunSetToSunRise: bool, 35 | Custom: CustomSchedule, 36 | 37 | fn toMode(self: @This()) c_int { 38 | return switch (self) { 39 | .SunSetToSunRise => |v| if (v) 1 else 0, 40 | .Custom => 2, 41 | }; 42 | } 43 | }; 44 | 45 | // Refer https://github.com/smudge/nightlight/blob/03595a642f0876388db11b9f5a3bd8261ab178d5/src/macos/status.rs#L21 46 | const Status = extern struct { 47 | active: bool, 48 | enabled: bool, 49 | sun_schedule_permitted: bool, 50 | mode: c_int, 51 | custom_schedule: CustomSchedule, 52 | disable_flags: c_ulonglong, 53 | available: bool, 54 | 55 | const Self = @This(); 56 | 57 | fn formatSchedule(self: Self, buf: []u8) ![]const u8 { 58 | return switch (self.mode) { 59 | 0 => "Off", 60 | 1 => "SunsetToSunrise", 61 | 2 => try std.fmt.bufPrint(buf, "Custom({d}:{d}-{d}:{d})", .{ 62 | self.custom_schedule.from_time.hour, 63 | self.custom_schedule.from_time.minute, 64 | self.custom_schedule.to_time.hour, 65 | self.custom_schedule.to_time.minute, 66 | }), 67 | else => "Unknown", 68 | }; 69 | } 70 | 71 | fn display(self: Self, wtr: anytype) !void { 72 | if (!self.enabled) { 73 | try wtr.writeAll("Enabled: off"); 74 | return; 75 | } 76 | 77 | var buf = std.mem.zeroes([32]u8); 78 | try wtr.print( 79 | \\Enabled: on 80 | \\Schedule: {s} 81 | , .{try self.formatSchedule(&buf)}); 82 | } 83 | }; 84 | 85 | const Client = struct { 86 | inner: c.id, 87 | allocator: std.mem.Allocator, 88 | 89 | const Self = @This(); 90 | 91 | fn init(allocator: std.mem.Allocator) Self { 92 | // https://developer.limneos.net/?ios=14.4&framework=CoreBrightness.framework&header=CBBlueLightClient.h 93 | const clazz = c.objc_getClass("CBBlueLightClient"); 94 | const call: *fn (c.id, c.SEL) callconv(.C) c.id = @constCast(@ptrCast(&c.objc_msgSend)); 95 | 96 | return Self{ 97 | .inner = call( 98 | call(@alignCast(@ptrCast(clazz.?)), c.sel_registerName("alloc")), 99 | c.sel_registerName("init"), 100 | ), 101 | .allocator = allocator, 102 | }; 103 | } 104 | 105 | fn getStatus(self: Self) !*Status { 106 | const status = try self.allocator.create(Status); 107 | const call: *fn (c.id, c.SEL, *Status) callconv(.C) bool = 108 | @constCast(@ptrCast(&c.objc_msgSend)); 109 | const ret = call(self.inner, c.sel_registerName("getBlueLightStatus:"), status); 110 | if (!ret) { 111 | return error.getBlueLightStatus; 112 | } 113 | 114 | return status; 115 | } 116 | 117 | fn setSchedule(self: Self, schedule: Schedule) !void { 118 | { 119 | const call: *fn (c.id, c.SEL, c_int) callconv(.C) bool = @constCast(@ptrCast(&c.objc_msgSend)); 120 | const ret = call(self.inner, c.sel_registerName("setMode:"), schedule.toMode()); 121 | if (!ret) { 122 | 
return error.setMode; 123 | } 124 | } 125 | 126 | switch (schedule) { 127 | .SunSetToSunRise => {}, 128 | .Custom => |custom| { 129 | const ptr = try self.allocator.create(CustomSchedule); 130 | ptr.* = custom; 131 | const call: *fn (c.id, c.SEL, [*c]CustomSchedule) callconv(.C) bool = @constCast(@ptrCast(&c.objc_msgSend)); 132 | const ret = call(self.inner, c.sel_registerName("setSchedule:"), ptr); 133 | if (!ret) { 134 | return error.setSchedule; 135 | } 136 | }, 137 | } 138 | } 139 | 140 | fn setEnabled(self: Self, enabled: bool) !void { 141 | const call: *fn (c.id, c.SEL, bool) callconv(.C) bool = @constCast(@ptrCast(&c.objc_msgSend)); 142 | const ret = call(self.inner, c.sel_registerName("setEnabled:"), enabled); 143 | if (!ret) { 144 | return error.getStrength; 145 | } 146 | } 147 | 148 | fn turnOn(self: Self) !void { 149 | return self.setEnabled(true); 150 | } 151 | 152 | fn turnOff(self: Self) !void { 153 | return self.setEnabled(false); 154 | } 155 | 156 | fn getStrength(self: Self) !f32 { 157 | var strength: f32 = 0; 158 | const call: *fn (c.id, c.SEL, *f32) callconv(.C) bool = @constCast(@ptrCast(&c.objc_msgSend)); 159 | const ret = call(self.inner, c.sel_registerName("getStrength:"), &strength); 160 | if (!ret) { 161 | return error.getStrength; 162 | } 163 | 164 | return strength; 165 | } 166 | 167 | fn setStrength(self: Self, strength: f32) !void { 168 | const call: *fn (c.id, c.SEL, f32, bool) callconv(.C) bool = @constCast(@ptrCast(&c.objc_msgSend)); 169 | const ret = call(self.inner, c.sel_registerName("setStrength:commit:"), strength, true); 170 | if (!ret) { 171 | return error.setStrength; 172 | } 173 | } 174 | 175 | fn destroyStatus(self: Self, status: *Status) void { 176 | self.allocator.destroy(status); 177 | } 178 | }; 179 | 180 | const Command = enum { 181 | Status, 182 | On, 183 | Off, 184 | Toggle, 185 | Temp, 186 | Schedule, 187 | 188 | const FromString = std.StaticStringMap(Command).initComptime(.{ 189 | .{ "status", .Status }, 190 | .{ "on", .On }, 191 | .{ "off", .Off }, 192 | .{ "toggle", .Toggle }, 193 | .{ "temp", .Temp }, 194 | .{ "schedule", .Schedule }, 195 | }); 196 | }; 197 | 198 | pub fn main() !void { 199 | var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); 200 | defer arena.deinit(); 201 | const allocator = arena.allocator(); 202 | 203 | const opt = try simargs.parse(allocator, struct { 204 | version: bool = false, 205 | help: bool = false, 206 | 207 | pub const __shorts__ = .{ 208 | .version = .v, 209 | .help = .h, 210 | }; 211 | 212 | pub const __messages__ = .{ 213 | .help = "Print help information", 214 | .version = "Print version", 215 | }; 216 | }, 217 | \\ 218 | \\ 219 | \\ Available commands by category: 220 | \\ Manual on/off control: 221 | \\ status View current Night Shift status 222 | \\ on Turn Night Shift on 223 | \\ off Turn Night Shift off 224 | \\ toggle Toggle Night Shift 225 | \\ 226 | \\ Color temperature: 227 | \\ temp View temperature preference 228 | \\ temp <0-100> Set temperature preference 229 | \\ 230 | \\ Schedule: 231 | \\ schedule View current schedule 232 | \\ schedule sun Start schedule from sunset to sunrise 233 | \\ schedule off Stop the current schedule 234 | \\ schedule Start a custom schedule(HH:mm, 24-hour format) 235 | , util.get_build_info()); 236 | defer opt.deinit(); 237 | 238 | var args_iter = util.SliceIter([]const u8).init(opt.positional_args); 239 | const cmd: Command = if (args_iter.next()) |v| 240 | Command.FromString.get(v) orelse return error.UnknownCommand 241 | else 242 | .Status; 
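`Client` above drives the private CBBlueLightClient API by casting `objc_msgSend` to a differently typed function pointer for each selector. A minimal sketch of that calling pattern (illustrative only; it uses plain `NSObject` instead of the real CoreBrightness class):

    const objc = @cImport({
        @cInclude("objc/objc.h");
        @cInclude("objc/message.h");
    });

    // Illustrative: "[[NSObject alloc] init]" spelled out with one typed view
    // of objc_msgSend per call, mirroring Client.init above.
    fn allocInitNSObject() objc.id {
        const send: *fn (objc.id, objc.SEL) callconv(.C) objc.id = @constCast(@ptrCast(&objc.objc_msgSend));
        const cls = objc.objc_getClass("NSObject");
        return send(
            send(@alignCast(@ptrCast(cls.?)), objc.sel_registerName("alloc")),
            objc.sel_registerName("init"),
        );
    }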
243 | 244 | const client = Client.init(allocator); 245 | var wtr = std.io.getStdOut().writer(); 246 | 247 | switch (cmd) { 248 | .Status => { 249 | var status = try client.getStatus(); 250 | defer client.destroyStatus(status); 251 | try status.display(wtr); 252 | if (status.enabled) { 253 | try wtr.print( 254 | \\ 255 | \\Temperature: {d:.0} 256 | , .{try client.getStrength() * 100}); 257 | } 258 | }, 259 | .Temp => { 260 | if (args_iter.next()) |v| { 261 | const strength = try std.fmt.parseFloat(f32, v); 262 | try client.setStrength(strength / 100.0); 263 | } else { 264 | const strength = try client.getStrength(); 265 | try wtr.print("{d:.0}\n", .{strength * 100}); 266 | } 267 | }, 268 | .Toggle => { 269 | const status = try client.getStatus(); 270 | if (status.enabled) { 271 | try client.turnOff(); 272 | } else { 273 | try client.turnOn(); 274 | } 275 | }, 276 | .On => { 277 | try client.turnOn(); 278 | }, 279 | .Off => { 280 | try client.turnOff(); 281 | }, 282 | .Schedule => { 283 | const sub_cmd = args_iter.next() orelse { 284 | var status = try client.getStatus(); 285 | defer client.destroyStatus(status); 286 | var buf = std.mem.zeroes([32]u8); 287 | try wtr.writeAll(try status.formatSchedule(&buf)); 288 | return; 289 | }; 290 | 291 | if (std.mem.eql(u8, "off", sub_cmd)) { 292 | try client.setSchedule(.{ .SunSetToSunRise = false }); 293 | } else if (std.mem.eql(u8, "sun", sub_cmd)) { 294 | try client.setSchedule(.{ .SunSetToSunRise = true }); 295 | } else { 296 | const from = sub_cmd; 297 | const to = args_iter.next() orelse return error.MissingTo; 298 | const schedule = Schedule{ .Custom = .{ 299 | .from_time = try Time.fromString(from), 300 | .to_time = try Time.fromString(to), 301 | } }; 302 | try client.setSchedule(schedule); 303 | } 304 | }, 305 | } 306 | } 307 | -------------------------------------------------------------------------------- /src/bin/pidof.zig: -------------------------------------------------------------------------------- 1 | //! Pidof for macOS 2 | //! 3 | //! 
https://man7.org/linux/man-pages/man1/pidof.1.html 4 | 5 | const std = @import("std"); 6 | const simargs = @import("simargs"); 7 | const util = @import("util.zig"); 8 | const c = @cImport({ 9 | @cInclude("sys/sysctl.h"); 10 | @cInclude("unistd.h"); 11 | }); 12 | 13 | pub const Options = struct { 14 | single: bool = false, 15 | delimiter: []const u8 = " ", 16 | strict: bool = false, 17 | user_only: bool = false, 18 | version: bool = false, 19 | help: bool = false, 20 | 21 | pub const __shorts__ = .{ 22 | .single = .s, 23 | .delimiter = .d, 24 | .strict = .S, 25 | .user_only = .u, 26 | .version = .v, 27 | .help = .h, 28 | }; 29 | pub const __messages__ = .{ 30 | .single = "Only return the first matching pid.", 31 | .delimiter = "Delimiter used if more than one PID is shown.", 32 | .strict = "Case sensitive when matching program name.", 33 | .user_only = "Only show process belonging to current user.", 34 | .version = "Print version.", 35 | .help = "Print help message.", 36 | }; 37 | }; 38 | 39 | pub fn searchPids(allocator: std.mem.Allocator, opt: Options, program: []const u8) !std.ArrayList(c.pid_t) { 40 | var mib = [_]c_int{ 41 | c.CTL_KERN, 42 | c.KERN_PROC, 43 | c.KERN_PROC_ALL, 44 | }; 45 | var procSize: usize = 0; 46 | // sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); 47 | var rc = c.sysctl(&mib, mib.len, null, &procSize, null, 0); 48 | if (rc != 0) { 49 | std.log.err("get proc size, err:{any}", .{std.posix.errno(rc)}); 50 | return error.sysctl; 51 | } 52 | 53 | const procList = try allocator.alloc(c.struct_kinfo_proc, procSize / @sizeOf(c.struct_kinfo_proc)); 54 | // https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man3/sysctl.3.html 55 | rc = c.sysctl(&mib, mib.len, @ptrCast(procList), &procSize, null, 0); 56 | if (rc != 0) { 57 | std.log.err("get proc list failed, err:{any}", .{std.posix.errno(rc)}); 58 | return error.sysctl; 59 | } 60 | 61 | // procSize may change between two calls of sysctl, so we cannot iterate 62 | // procList directly with for(procList) |proc|. 63 | var pids = std.ArrayList(c.pid_t).init(allocator); 64 | const uid = if (opt.user_only) c.getuid() else null; 65 | for (0..procSize / @sizeOf(c.struct_kinfo_proc)) |i| { 66 | if (opt.single and pids.items.len == 1) { 67 | break; 68 | } 69 | const proc = procList[i]; 70 | if (uid) |id| { 71 | if (id != proc.kp_eproc.e_pcred.p_ruid) { 72 | continue; 73 | } 74 | } 75 | 76 | const name = std.mem.sliceTo(&proc.kp_proc.p_comm, 0); 77 | if (opt.strict) { 78 | if (std.mem.eql(u8, name, program)) { 79 | try pids.append(proc.kp_proc.p_pid); 80 | } 81 | } else { 82 | if (std.ascii.eqlIgnoreCase(name, program)) { 83 | try pids.append(proc.kp_proc.p_pid); 84 | } 85 | } 86 | } 87 | 88 | return pids; 89 | } 90 | 91 | pub fn main() !void { 92 | var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); 93 | defer arena.deinit(); 94 | const allocator = arena.allocator(); 95 | 96 | const opt = try simargs.parse(allocator, Options, "[program]", util.get_build_info()); 97 | defer opt.deinit(); 98 | 99 | if (opt.positional_args.len == 0) { 100 | std.log.err("program is not given", .{}); 101 | std.posix.exit(1); 102 | } 103 | 104 | const program = opt.positional_args[0]; 105 | const pids = try searchPids(allocator, opt.args, program); 106 | if (pids.items.len == 0) { 107 | std.posix.exit(1); 108 | } 109 | 110 | var stdout = std.io.getStdOut().writer(); 111 | for (pids.items, 0..) 
|pid, i| { 112 | if (i > 0) { 113 | try stdout.writeAll(opt.args.delimiter); 114 | } 115 | try stdout.print("{d}", .{pid}); 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /src/bin/pkg/Manifest.zig: -------------------------------------------------------------------------------- 1 | pub const max_bytes = 10 * 1024 * 1024; 2 | pub const basename = "build.zig.zon"; 3 | pub const Hash = std.crypto.hash.sha2.Sha256; 4 | pub const Digest = [Hash.digest_length]u8; 5 | pub const multihash_len = 1 + 1 + Hash.digest_length; 6 | pub const multihash_hex_digest_len = 2 * multihash_len; 7 | pub const MultiHashHexDigest = [multihash_hex_digest_len]u8; 8 | 9 | pub const Fingerprint = packed struct(u64) { 10 | id: u32, 11 | checksum: u32, 12 | 13 | pub fn generate(name: []const u8) Fingerprint { 14 | return .{ 15 | .id = std.crypto.random.intRangeLessThan(u32, 1, 0xffffffff), 16 | .checksum = std.hash.Crc32.hash(name), 17 | }; 18 | } 19 | 20 | pub fn validate(n: Fingerprint, name: []const u8) bool { 21 | switch (n.id) { 22 | 0x00000000, 0xffffffff => return false, 23 | else => return std.hash.Crc32.hash(name) == n.checksum, 24 | } 25 | } 26 | 27 | pub fn int(n: Fingerprint) u64 { 28 | return @bitCast(n); 29 | } 30 | }; 31 | 32 | pub const Dependency = struct { 33 | location: Location, 34 | location_tok: Ast.TokenIndex, 35 | hash: ?[]const u8, 36 | hash_tok: Ast.TokenIndex, 37 | node: Ast.Node.Index, 38 | name_tok: Ast.TokenIndex, 39 | lazy: bool, 40 | 41 | pub const Location = union(enum) { 42 | url: []const u8, 43 | path: []const u8, 44 | }; 45 | }; 46 | 47 | pub const ErrorMessage = struct { 48 | msg: []const u8, 49 | tok: Ast.TokenIndex, 50 | off: u32, 51 | }; 52 | 53 | pub const MultihashFunction = enum(u16) { 54 | identity = 0x00, 55 | sha1 = 0x11, 56 | @"sha2-256" = 0x12, 57 | @"sha2-512" = 0x13, 58 | @"sha3-512" = 0x14, 59 | @"sha3-384" = 0x15, 60 | @"sha3-256" = 0x16, 61 | @"sha3-224" = 0x17, 62 | @"sha2-384" = 0x20, 63 | @"sha2-256-trunc254-padded" = 0x1012, 64 | @"sha2-224" = 0x1013, 65 | @"sha2-512-224" = 0x1014, 66 | @"sha2-512-256" = 0x1015, 67 | @"blake2b-256" = 0xb220, 68 | _, 69 | }; 70 | 71 | pub const multihash_function: MultihashFunction = switch (Hash) { 72 | std.crypto.hash.sha2.Sha256 => .@"sha2-256", 73 | else => @compileError("unreachable"), 74 | }; 75 | comptime { 76 | // We avoid unnecessary uleb128 code in hexDigest by asserting here the 77 | // values are small enough to be contained in the one-byte encoding. 
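Concretely, with sha2-256 (multihash code 0x12) and a 32-byte (0x20) digest, every value returned by `hexDigest` below starts with "1220". A hypothetical test sketch, reusing the declarations in this file:

    test "multihash prefix (sketch)" {
        const zero_digest: Digest = [_]u8{0} ** Hash.digest_length;
        const hex = hexDigest(zero_digest);
        // 0x12 -> "12" (sha2-256), 0x20 -> "20" (32-byte digest length).
        try std.testing.expectEqualStrings("1220", hex[0..4]);
    }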
78 | assert(@intFromEnum(multihash_function) < 127); 79 | assert(Hash.digest_length < 127); 80 | } 81 | 82 | name: []const u8, 83 | id: u32, 84 | version: std.SemanticVersion, 85 | version_node: Ast.Node.Index, 86 | dependencies: std.StringArrayHashMapUnmanaged(Dependency), 87 | dependencies_node: Ast.Node.Index, 88 | paths: std.StringArrayHashMapUnmanaged(void), 89 | minimum_zig_version: ?std.SemanticVersion, 90 | 91 | errors: []ErrorMessage, 92 | arena_state: std.heap.ArenaAllocator.State, 93 | 94 | pub const ParseOptions = struct { 95 | allow_missing_paths_field: bool = false, 96 | }; 97 | 98 | pub const Error = Allocator.Error; 99 | 100 | pub fn parse(gpa: Allocator, ast: Ast, options: ParseOptions) Error!Manifest { 101 | const node_tags = ast.nodes.items(.tag); 102 | const node_datas = ast.nodes.items(.data); 103 | assert(node_tags[0] == .root); 104 | const main_node_index = node_datas[0].lhs; 105 | 106 | var arena_instance = std.heap.ArenaAllocator.init(gpa); 107 | errdefer arena_instance.deinit(); 108 | 109 | var p: Parse = .{ 110 | .gpa = gpa, 111 | .ast = ast, 112 | .arena = arena_instance.allocator(), 113 | .errors = .{}, 114 | 115 | .name = undefined, 116 | .id = undefined, 117 | .version = undefined, 118 | .version_node = 0, 119 | .dependencies = .{}, 120 | .dependencies_node = 0, 121 | .paths = .{}, 122 | .allow_missing_paths_field = options.allow_missing_paths_field, 123 | .minimum_zig_version = null, 124 | .buf = .{}, 125 | }; 126 | defer p.buf.deinit(gpa); 127 | defer p.errors.deinit(gpa); 128 | defer p.dependencies.deinit(gpa); 129 | defer p.paths.deinit(gpa); 130 | 131 | p.parseRoot(main_node_index) catch |err| switch (err) { 132 | error.ParseFailure => assert(p.errors.items.len > 0), 133 | else => |e| return e, 134 | }; 135 | 136 | return .{ 137 | .name = try p.arena.dupe(u8, p.name), 138 | .id = p.id, 139 | .version = p.version, 140 | .version_node = p.version_node, 141 | .dependencies = try p.dependencies.clone(p.arena), 142 | .dependencies_node = p.dependencies_node, 143 | .paths = try p.paths.clone(p.arena), 144 | .minimum_zig_version = p.minimum_zig_version, 145 | .errors = try p.arena.dupe(ErrorMessage, p.errors.items), 146 | .arena_state = arena_instance.state, 147 | }; 148 | } 149 | 150 | pub fn deinit(man: *Manifest, gpa: Allocator) void { 151 | man.arena_state.promote(gpa).deinit(); 152 | man.* = undefined; 153 | } 154 | 155 | pub fn copyErrorsIntoBundle( 156 | man: Manifest, 157 | ast: Ast, 158 | /// ErrorBundle null-terminated string index 159 | src_path: u32, 160 | eb: *std.zig.ErrorBundle.Wip, 161 | ) Allocator.Error!void { 162 | const token_starts = ast.tokens.items(.start); 163 | 164 | for (man.errors) |msg| { 165 | const start_loc = ast.tokenLocation(0, msg.tok); 166 | 167 | try eb.addRootErrorMessage(.{ 168 | .msg = try eb.addString(msg.msg), 169 | .src_loc = try eb.addSourceLocation(.{ 170 | .src_path = src_path, 171 | .span_start = token_starts[msg.tok], 172 | .span_end = @intCast(token_starts[msg.tok] + ast.tokenSlice(msg.tok).len), 173 | .span_main = token_starts[msg.tok] + msg.off, 174 | .line = @intCast(start_loc.line), 175 | .column = @intCast(start_loc.column), 176 | .source_line = try eb.addString(ast.source[start_loc.line_start..start_loc.line_end]), 177 | }), 178 | }); 179 | } 180 | } 181 | 182 | const hex_charset = "0123456789abcdef"; 183 | 184 | pub fn hex64(x: u64) [16]u8 { 185 | var result: [16]u8 = undefined; 186 | var i: usize = 0; 187 | while (i < 8) : (i += 1) { 188 | const byte = @as(u8, @truncate(x >> @as(u6, @intCast(8 * i)))); 189 
| result[i * 2 + 0] = hex_charset[byte >> 4]; 190 | result[i * 2 + 1] = hex_charset[byte & 15]; 191 | } 192 | return result; 193 | } 194 | 195 | test hex64 { 196 | const s = "[" ++ hex64(0x12345678_abcdef00) ++ "]"; 197 | try std.testing.expectEqualStrings("[00efcdab78563412]", s); 198 | } 199 | 200 | pub fn hexDigest(digest: Digest) MultiHashHexDigest { 201 | var result: MultiHashHexDigest = undefined; 202 | 203 | result[0] = hex_charset[@intFromEnum(multihash_function) >> 4]; 204 | result[1] = hex_charset[@intFromEnum(multihash_function) & 15]; 205 | 206 | result[2] = hex_charset[Hash.digest_length >> 4]; 207 | result[3] = hex_charset[Hash.digest_length & 15]; 208 | 209 | for (digest, 0..) |byte, i| { 210 | result[4 + i * 2] = hex_charset[byte >> 4]; 211 | result[5 + i * 2] = hex_charset[byte & 15]; 212 | } 213 | return result; 214 | } 215 | 216 | const Parse = struct { 217 | gpa: Allocator, 218 | ast: Ast, 219 | arena: Allocator, 220 | buf: std.ArrayListUnmanaged(u8), 221 | errors: std.ArrayListUnmanaged(ErrorMessage), 222 | 223 | name: []const u8, 224 | id: u32, 225 | version: std.SemanticVersion, 226 | version_node: Ast.Node.Index, 227 | dependencies: std.StringArrayHashMapUnmanaged(Dependency), 228 | dependencies_node: Ast.Node.Index, 229 | paths: std.StringArrayHashMapUnmanaged(void), 230 | allow_missing_paths_field: bool, 231 | minimum_zig_version: ?std.SemanticVersion, 232 | 233 | const InnerError = error{ ParseFailure, OutOfMemory }; 234 | 235 | fn parseRoot(p: *Parse, node: Ast.Node.Index) !void { 236 | const ast = p.ast; 237 | const main_tokens = ast.nodes.items(.main_token); 238 | const main_token = main_tokens[node]; 239 | 240 | var buf: [2]Ast.Node.Index = undefined; 241 | const struct_init = ast.fullStructInit(&buf, node) orelse { 242 | return fail(p, main_token, "expected top level expression to be a struct", .{}); 243 | }; 244 | 245 | var have_name = false; 246 | var have_version = false; 247 | var have_included_paths = false; 248 | var fingerprint: ?Fingerprint = null; 249 | 250 | for (struct_init.ast.fields) |field_init| { 251 | const name_token = ast.firstToken(field_init) - 2; 252 | const field_name = try identifierTokenString(p, name_token); 253 | // We could get fancy with reflection and comptime logic here but doing 254 | // things manually provides an opportunity to do any additional verification 255 | // that is desirable on a per-field basis. 
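For orientation, a minimal `build.zig.zon` exercising the fields handled below could look like the following (package and dependency names are made up; `fingerprint` is optional here and omitted):

    // Illustrative manifest only; names, versions and paths are invented.
    .{
        .name = .example,
        .version = "0.1.0",
        .minimum_zig_version = "0.14.0",
        .paths = .{""},
        .dependencies = .{
            .simargs = .{ .path = "../simargs" },
        },
    }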
256 | if (mem.eql(u8, field_name, "dependencies")) { 257 | p.dependencies_node = field_init; 258 | try parseDependencies(p, field_init); 259 | } else if (mem.eql(u8, field_name, "paths")) { 260 | have_included_paths = true; 261 | try parseIncludedPaths(p, field_init); 262 | } else if (mem.eql(u8, field_name, "name")) { 263 | p.name = try parseName(p, field_init); 264 | have_name = true; 265 | } else if (mem.eql(u8, field_name, "fingerprint")) { 266 | fingerprint = try parseFingerprint(p, field_init); 267 | } else if (mem.eql(u8, field_name, "version")) { 268 | p.version_node = field_init; 269 | const version_text = try parseString(p, field_init); 270 | p.version = std.SemanticVersion.parse(version_text) catch |err| v: { 271 | try appendError(p, main_tokens[field_init], "unable to parse semantic version: {s}", .{@errorName(err)}); 272 | break :v undefined; 273 | }; 274 | have_version = true; 275 | } else if (mem.eql(u8, field_name, "minimum_zig_version")) { 276 | const version_text = try parseString(p, field_init); 277 | p.minimum_zig_version = std.SemanticVersion.parse(version_text) catch |err| v: { 278 | try appendError(p, main_tokens[field_init], "unable to parse semantic version: {s}", .{@errorName(err)}); 279 | break :v null; 280 | }; 281 | } else { 282 | // Ignore unknown fields so that we can add fields in future zig 283 | // versions without breaking older zig versions. 284 | } 285 | } 286 | 287 | if (!have_name) { 288 | try appendError(p, main_token, "missing top-level 'name' field", .{}); 289 | } else { 290 | if (fingerprint) |n| { 291 | if (!n.validate(p.name)) { 292 | return fail(p, main_token, "invalid fingerprint: 0x{x}; if this is a new or forked package, use this value: 0x{x}", .{ 293 | n.int(), Fingerprint.generate(p.name).int(), 294 | }); 295 | } 296 | p.id = n.id; 297 | } else { 298 | p.id = 0; 299 | } 300 | } 301 | 302 | if (!have_version) { 303 | try appendError(p, main_token, "missing top-level 'version' field", .{}); 304 | } 305 | 306 | if (!have_included_paths) { 307 | if (p.allow_missing_paths_field) { 308 | try p.paths.put(p.gpa, "", {}); 309 | } else { 310 | try appendError(p, main_token, "missing top-level 'paths' field", .{}); 311 | } 312 | } 313 | } 314 | 315 | fn parseDependencies(p: *Parse, node: Ast.Node.Index) !void { 316 | const ast = p.ast; 317 | const main_tokens = ast.nodes.items(.main_token); 318 | 319 | var buf: [2]Ast.Node.Index = undefined; 320 | const struct_init = ast.fullStructInit(&buf, node) orelse { 321 | const tok = main_tokens[node]; 322 | return fail(p, tok, "expected dependencies expression to be a struct", .{}); 323 | }; 324 | 325 | for (struct_init.ast.fields) |field_init| { 326 | const name_token = ast.firstToken(field_init) - 2; 327 | const dep_name = try identifierTokenString(p, name_token); 328 | const dep = try parseDependency(p, field_init); 329 | try p.dependencies.put(p.gpa, dep_name, dep); 330 | } 331 | } 332 | 333 | fn parseDependency(p: *Parse, node: Ast.Node.Index) !Dependency { 334 | const ast = p.ast; 335 | const main_tokens = ast.nodes.items(.main_token); 336 | 337 | var buf: [2]Ast.Node.Index = undefined; 338 | const struct_init = ast.fullStructInit(&buf, node) orelse { 339 | const tok = main_tokens[node]; 340 | return fail(p, tok, "expected dependency expression to be a struct", .{}); 341 | }; 342 | 343 | var dep: Dependency = .{ 344 | .location = undefined, 345 | .location_tok = 0, 346 | .hash = null, 347 | .hash_tok = 0, 348 | .node = node, 349 | .name_tok = 0, 350 | .lazy = false, 351 | }; 352 | var has_location = 
false; 353 | 354 | for (struct_init.ast.fields) |field_init| { 355 | const name_token = ast.firstToken(field_init) - 2; 356 | dep.name_tok = name_token; 357 | const field_name = try identifierTokenString(p, name_token); 358 | // We could get fancy with reflection and comptime logic here but doing 359 | // things manually provides an opportunity to do any additional verification 360 | // that is desirable on a per-field basis. 361 | if (mem.eql(u8, field_name, "url")) { 362 | if (has_location) { 363 | return fail(p, main_tokens[field_init], "dependency should specify only one of 'url' and 'path' fields.", .{}); 364 | } 365 | dep.location = .{ 366 | .url = parseString(p, field_init) catch |err| switch (err) { 367 | error.ParseFailure => continue, 368 | else => |e| return e, 369 | }, 370 | }; 371 | has_location = true; 372 | dep.location_tok = main_tokens[field_init]; 373 | } else if (mem.eql(u8, field_name, "path")) { 374 | if (has_location) { 375 | return fail(p, main_tokens[field_init], "dependency should specify only one of 'url' and 'path' fields.", .{}); 376 | } 377 | dep.location = .{ 378 | .path = parseString(p, field_init) catch |err| switch (err) { 379 | error.ParseFailure => continue, 380 | else => |e| return e, 381 | }, 382 | }; 383 | has_location = true; 384 | dep.location_tok = main_tokens[field_init]; 385 | } else if (mem.eql(u8, field_name, "hash")) { 386 | dep.hash = parseHash(p, field_init) catch |err| switch (err) { 387 | error.ParseFailure => continue, 388 | else => |e| return e, 389 | }; 390 | dep.hash_tok = main_tokens[field_init]; 391 | } else if (mem.eql(u8, field_name, "lazy")) { 392 | dep.lazy = parseBool(p, field_init) catch |err| switch (err) { 393 | error.ParseFailure => continue, 394 | else => |e| return e, 395 | }; 396 | } else { 397 | // Ignore unknown fields so that we can add fields in future zig 398 | // versions without breaking older zig versions. 399 | } 400 | } 401 | 402 | if (!has_location) { 403 | try appendError(p, main_tokens[node], "dependency requires location field, one of 'url' or 'path'.", .{}); 404 | } 405 | 406 | return dep; 407 | } 408 | 409 | fn parseIncludedPaths(p: *Parse, node: Ast.Node.Index) !void { 410 | const ast = p.ast; 411 | const main_tokens = ast.nodes.items(.main_token); 412 | 413 | var buf: [2]Ast.Node.Index = undefined; 414 | const array_init = ast.fullArrayInit(&buf, node) orelse { 415 | const tok = main_tokens[node]; 416 | return fail(p, tok, "expected paths expression to be a list of strings", .{}); 417 | }; 418 | 419 | for (array_init.ast.elements) |elem_node| { 420 | const path_string = try parseString(p, elem_node); 421 | // This is normalized so that it can be used in string comparisons 422 | // against file system paths. 
423 | const normalized = try std.fs.path.resolve(p.arena, &.{path_string}); 424 | try p.paths.put(p.gpa, normalized, {}); 425 | } 426 | } 427 | 428 | fn parseBool(p: *Parse, node: Ast.Node.Index) !bool { 429 | const ast = p.ast; 430 | const node_tags = ast.nodes.items(.tag); 431 | const main_tokens = ast.nodes.items(.main_token); 432 | if (node_tags[node] != .identifier) { 433 | return fail(p, main_tokens[node], "expected identifier", .{}); 434 | } 435 | const ident_token = main_tokens[node]; 436 | const token_bytes = ast.tokenSlice(ident_token); 437 | if (mem.eql(u8, token_bytes, "true")) { 438 | return true; 439 | } else if (mem.eql(u8, token_bytes, "false")) { 440 | return false; 441 | } else { 442 | return fail(p, ident_token, "expected boolean", .{}); 443 | } 444 | } 445 | 446 | fn parseName(p: *Parse, node: Ast.Node.Index) ![]const u8 { 447 | const ast = p.ast; 448 | const node_tags = ast.nodes.items(.tag); 449 | const main_tokens = ast.nodes.items(.main_token); 450 | const main_token = main_tokens[node]; 451 | 452 | if (node_tags[node] == .enum_literal) { 453 | const ident_name = ast.tokenSlice(main_token); 454 | if (mem.startsWith(u8, ident_name, "@")) 455 | return fail(p, main_token, "name must be a valid bare zig identifier", .{}); 456 | 457 | return ident_name; 458 | } 459 | 460 | // try string name, used before zig 0.14. 461 | return p.parseString(node); 462 | } 463 | 464 | fn parseFingerprint(p: *Parse, node: Ast.Node.Index) !Fingerprint { 465 | const ast = p.ast; 466 | const node_tags = ast.nodes.items(.tag); 467 | const main_tokens = ast.nodes.items(.main_token); 468 | const main_token = main_tokens[node]; 469 | 470 | if (node_tags[node] != .number_literal) { 471 | return fail(p, main_token, "expected integer literal", .{}); 472 | } 473 | const token_bytes = ast.tokenSlice(main_token); 474 | const parsed = std.zig.parseNumberLiteral(token_bytes); 475 | switch (parsed) { 476 | .int => |n| return @bitCast(n), 477 | .big_int, .float => return fail(p, main_token, "expected u64 integer literal, found {s}", .{ 478 | @tagName(parsed), 479 | }), 480 | .failure => |err| return fail(p, main_token, "bad integer literal: {s}", .{@tagName(err)}), 481 | } 482 | } 483 | 484 | fn parseString(p: *Parse, node: Ast.Node.Index) ![]const u8 { 485 | const ast = p.ast; 486 | const node_tags = ast.nodes.items(.tag); 487 | const main_tokens = ast.nodes.items(.main_token); 488 | if (node_tags[node] != .string_literal) { 489 | return fail(p, main_tokens[node], "expected string literal", .{}); 490 | } 491 | const str_lit_token = main_tokens[node]; 492 | const token_bytes = ast.tokenSlice(str_lit_token); 493 | p.buf.clearRetainingCapacity(); 494 | try parseStrLit(p, str_lit_token, &p.buf, token_bytes, 0); 495 | const duped = try p.arena.dupe(u8, p.buf.items); 496 | return duped; 497 | } 498 | 499 | fn parseHash(p: *Parse, node: Ast.Node.Index) ![]const u8 { 500 | const h = try parseString(p, node); 501 | return h; 502 | } 503 | 504 | /// TODO: try to DRY this with AstGen.identifierTokenString 505 | fn identifierTokenString(p: *Parse, token: Ast.TokenIndex) InnerError![]const u8 { 506 | const ast = p.ast; 507 | const token_tags = ast.tokens.items(.tag); 508 | assert(token_tags[token] == .identifier); 509 | const ident_name = ast.tokenSlice(token); 510 | if (!mem.startsWith(u8, ident_name, "@")) { 511 | return ident_name; 512 | } 513 | p.buf.clearRetainingCapacity(); 514 | try parseStrLit(p, token, &p.buf, ident_name, 1); 515 | const duped = try p.arena.dupe(u8, p.buf.items); 516 | return duped; 517 | } 
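`parseIncludedPaths` above funnels each entry through `std.fs.path.resolve`, so different spellings of the same directory collapse to one canonical relative path; a quick hypothetical test of that effect:

    test "included path normalization (sketch)" {
        const gpa = std.testing.allocator;
        // Illustrative: "./src/" and "src" should normalize to the same relative path.
        const normalized = try std.fs.path.resolve(gpa, &.{"./src/"});
        defer gpa.free(normalized);
        try std.testing.expectEqualStrings("src", normalized);
    }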
518 | 519 | /// TODO: try to DRY this with AstGen.parseStrLit 520 | fn parseStrLit( 521 | p: *Parse, 522 | token: Ast.TokenIndex, 523 | buf: *std.ArrayListUnmanaged(u8), 524 | bytes: []const u8, 525 | offset: u32, 526 | ) InnerError!void { 527 | const raw_string = bytes[offset..]; 528 | var buf_managed = buf.toManaged(p.gpa); 529 | const result = std.zig.string_literal.parseWrite(buf_managed.writer(), raw_string); 530 | buf.* = buf_managed.moveToUnmanaged(); 531 | switch (try result) { 532 | .success => {}, 533 | .failure => |e| { 534 | std.log.err("parse str lit failed, err:{any}, token:{any}, bytes:{any}, offset:{any}", .{ 535 | e, 536 | token, 537 | bytes, 538 | offset, 539 | }); 540 | return error.ParseFailure; 541 | }, 542 | } 543 | } 544 | 545 | fn fail( 546 | p: *Parse, 547 | tok: Ast.TokenIndex, 548 | comptime fmt: []const u8, 549 | args: anytype, 550 | ) InnerError { 551 | try appendError(p, tok, fmt, args); 552 | return error.ParseFailure; 553 | } 554 | 555 | fn appendError(p: *Parse, tok: Ast.TokenIndex, comptime fmt: []const u8, args: anytype) !void { 556 | return appendErrorOff(p, tok, 0, fmt, args); 557 | } 558 | 559 | fn appendErrorOff( 560 | p: *Parse, 561 | tok: Ast.TokenIndex, 562 | byte_offset: u32, 563 | comptime fmt: []const u8, 564 | args: anytype, 565 | ) Allocator.Error!void { 566 | try p.errors.append(p.gpa, .{ 567 | .msg = try std.fmt.allocPrint(p.arena, fmt, args), 568 | .tok = tok, 569 | .off = byte_offset, 570 | }); 571 | } 572 | }; 573 | 574 | const Manifest = @This(); 575 | const std = @import("std"); 576 | const mem = std.mem; 577 | const Allocator = std.mem.Allocator; 578 | const assert = std.debug.assert; 579 | const Ast = std.zig.Ast; 580 | const testing = std.testing; 581 | 582 | test "basic" { 583 | const gpa = testing.allocator; 584 | 585 | const example = 586 | \\.{ 587 | \\ .name = "foo", 588 | \\ .version = "3.2.1", 589 | \\ .paths = .{""}, 590 | \\ .dependencies = .{ 591 | \\ .bar = .{ 592 | \\ .url = "https://example.com/baz.tar.gz", 593 | \\ .hash = "1220f1b680b6065fcfc94fe777f22e73bcb7e2767e5f4d99d4255fe76ded69c7a35f", 594 | \\ }, 595 | \\ }, 596 | \\} 597 | ; 598 | 599 | var ast = try Ast.parse(gpa, example, .zon); 600 | defer ast.deinit(gpa); 601 | 602 | try testing.expect(ast.errors.len == 0); 603 | 604 | var manifest = try Manifest.parse(gpa, ast, .{}); 605 | defer manifest.deinit(gpa); 606 | 607 | try testing.expect(manifest.errors.len == 0); 608 | try testing.expectEqualStrings("foo", manifest.name); 609 | 610 | try testing.expectEqual(@as(std.SemanticVersion, .{ 611 | .major = 3, 612 | .minor = 2, 613 | .patch = 1, 614 | }), manifest.version); 615 | 616 | try testing.expect(manifest.dependencies.count() == 1); 617 | try testing.expectEqualStrings("bar", manifest.dependencies.keys()[0]); 618 | try testing.expectEqualStrings( 619 | "https://example.com/baz.tar.gz", 620 | manifest.dependencies.values()[0].location.url, 621 | ); 622 | try testing.expectEqualStrings( 623 | "1220f1b680b6065fcfc94fe777f22e73bcb7e2767e5f4d99d4255fe76ded69c7a35f", 624 | manifest.dependencies.values()[0].hash orelse return error.TestFailed, 625 | ); 626 | 627 | try testing.expect(manifest.minimum_zig_version == null); 628 | } 629 | 630 | test "minimum_zig_version" { 631 | const gpa = testing.allocator; 632 | 633 | const example = 634 | \\.{ 635 | \\ .name = "foo", 636 | \\ .version = "3.2.1", 637 | \\ .paths = .{""}, 638 | \\ .minimum_zig_version = "0.11.1", 639 | \\} 640 | ; 641 | 642 | var ast = try Ast.parse(gpa, example, .zon); 643 | defer ast.deinit(gpa); 644 
| 645 | try testing.expect(ast.errors.len == 0); 646 | 647 | var manifest = try Manifest.parse(gpa, ast, .{}); 648 | defer manifest.deinit(gpa); 649 | 650 | try testing.expect(manifest.errors.len == 0); 651 | try testing.expect(manifest.dependencies.count() == 0); 652 | 653 | try testing.expect(manifest.minimum_zig_version != null); 654 | 655 | try testing.expectEqual(@as(std.SemanticVersion, .{ 656 | .major = 0, 657 | .minor = 11, 658 | .patch = 1, 659 | }), manifest.minimum_zig_version.?); 660 | } 661 | 662 | test "minimum_zig_version - invalid version" { 663 | const gpa = testing.allocator; 664 | 665 | const example = 666 | \\.{ 667 | \\ .name = "foo", 668 | \\ .version = "3.2.1", 669 | \\ .minimum_zig_version = "X.11.1", 670 | \\ .paths = .{""}, 671 | \\} 672 | ; 673 | 674 | var ast = try Ast.parse(gpa, example, .zon); 675 | defer ast.deinit(gpa); 676 | 677 | try testing.expect(ast.errors.len == 0); 678 | 679 | var manifest = try Manifest.parse(gpa, ast, .{}); 680 | defer manifest.deinit(gpa); 681 | 682 | try testing.expect(manifest.errors.len == 1); 683 | try testing.expect(manifest.dependencies.count() == 0); 684 | 685 | try testing.expect(manifest.minimum_zig_version == null); 686 | } 687 | -------------------------------------------------------------------------------- /src/bin/pkg/package.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const manifest = @import("./Manifest.zig"); 3 | const MultihashFunction = manifest.MultihashFunction; 4 | const multihash_function = manifest.multihash_function; 5 | const multihash_hex_digest_len = manifest.multihash_hex_digest_len; 6 | 7 | pub const Hash = struct { 8 | /// Maximum size of a package hash. Unused bytes at the end are 9 | /// filled with zeroes. 10 | bytes: [max_len]u8, 11 | 12 | pub const Algo = std.crypto.hash.sha2.Sha256; 13 | pub const Digest = [Algo.digest_length]u8; 14 | 15 | /// Example: "nnnn-vvvv-hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh" 16 | pub const max_len = 32 + 1 + 32 + 1 + (32 + 32 + 200) / 6; 17 | 18 | pub fn fromSlice(s: []const u8) Hash { 19 | var result: Hash = undefined; 20 | @memcpy(result.bytes[0..s.len], s); 21 | @memset(result.bytes[s.len..], 0); 22 | return result; 23 | } 24 | 25 | pub fn toSlice(ph: *const Hash) []const u8 { 26 | var end: usize = ph.bytes.len; 27 | while (true) { 28 | end -= 1; 29 | if (ph.bytes[end] != 0) return ph.bytes[0 .. end + 1]; 30 | } 31 | } 32 | 33 | pub fn eql(a: *const Hash, b: *const Hash) bool { 34 | return std.mem.eql(u8, &a.bytes, &b.bytes); 35 | } 36 | 37 | /// Distinguishes whether the legacy multihash format is being stored here. 38 | pub fn isOld(h: *const Hash) bool { 39 | if (h.bytes.len < 2) return false; 40 | const their_multihash_func = std.fmt.parseInt(u8, h.bytes[0..2], 16) catch return false; 41 | if (@as(MultihashFunction, @enumFromInt(their_multihash_func)) != multihash_function) return false; 42 | if (h.toSlice().len != multihash_hex_digest_len) return false; 43 | return std.mem.indexOfScalar(u8, &h.bytes, '-') == null; 44 | } 45 | 46 | test isOld { 47 | const h: Hash = .fromSlice("1220138f4aba0c01e66b68ed9e1e1e74614c06e4743d88bc58af4f1c3dd0aae5fea7"); 48 | try std.testing.expect(h.isOld()); 49 | } 50 | 51 | /// Produces "$name-$semver-$hashplus". 
52 | /// * name is the name field from build.zig.zon, asserted to be at most 32 53 | /// bytes and assumed be a valid zig identifier 54 | /// * semver is the version field from build.zig.zon, asserted to be at 55 | /// most 32 bytes 56 | /// * hashplus is the following 33-byte array, base64 encoded using -_ to make 57 | /// it filesystem safe: 58 | /// - (4 bytes) LE u32 Package ID 59 | /// - (4 bytes) LE u32 total decompressed size in bytes, overflow saturated 60 | /// - (25 bytes) truncated SHA-256 digest of hashed files of the package 61 | pub fn init(digest: Digest, name: []const u8, ver: []const u8, id: u32, size: u32) Hash { 62 | var result: Hash = undefined; 63 | var buf: std.ArrayListUnmanaged(u8) = .initBuffer(&result.bytes); 64 | buf.appendSliceAssumeCapacity(name); 65 | buf.appendAssumeCapacity('-'); 66 | buf.appendSliceAssumeCapacity(ver); 67 | buf.appendAssumeCapacity('-'); 68 | var hashplus: [33]u8 = undefined; 69 | std.mem.writeInt(u32, hashplus[0..4], id, .little); 70 | std.mem.writeInt(u32, hashplus[4..8], size, .little); 71 | hashplus[8..].* = digest[0..25].*; 72 | _ = std.base64.url_safe_no_pad.Encoder.encode(buf.addManyAsArrayAssumeCapacity(44), &hashplus); 73 | @memset(buf.unusedCapacitySlice(), 0); 74 | return result; 75 | } 76 | 77 | /// Produces a unique hash based on the path provided. The result should 78 | /// not be user-visible. 79 | pub fn initPath(sub_path: []const u8, is_global: bool) Hash { 80 | var result: Hash = .{ .bytes = @splat(0) }; 81 | var i: usize = 0; 82 | if (is_global) { 83 | result.bytes[0] = '/'; 84 | i += 1; 85 | } 86 | if (i + sub_path.len <= result.bytes.len) { 87 | @memcpy(result.bytes[i..][0..sub_path.len], sub_path); 88 | return result; 89 | } 90 | var bin_digest: [Algo.digest_length]u8 = undefined; 91 | Algo.hash(sub_path, &bin_digest, .{}); 92 | _ = std.fmt.bufPrint(result.bytes[i..], "{}", .{std.fmt.fmtSliceHexLower(&bin_digest)}) catch unreachable; 93 | return result; 94 | } 95 | }; 96 | -------------------------------------------------------------------------------- /src/bin/repeat.zig: -------------------------------------------------------------------------------- 1 | //! Repeat a command until it succeeds. 
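//! Illustrative invocation (exact flag syntax depends on simargs):
//!   repeat --max=10 --interval=2 curl https://example.com
//! retries curl up to 10 times, sleeping 2 seconds between failed attempts.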
2 | 3 | const std = @import("std"); 4 | const simargs = @import("simargs"); 5 | const util = @import("util.zig"); 6 | const os = std.os; 7 | const process = std.process; 8 | const mem = std.mem; 9 | const time = std.time; 10 | 11 | pub fn main() !void { 12 | var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); 13 | defer arena.deinit(); 14 | const allocator = arena.allocator(); 15 | 16 | const opt = try simargs.parse(allocator, struct { 17 | max: ?usize, 18 | interval: ?usize, 19 | version: bool = false, 20 | help: bool = false, 21 | 22 | pub const __shorts__ = .{ 23 | .max = .m, 24 | .interval = .i, 25 | .version = .v, 26 | .help = .h, 27 | }; 28 | 29 | pub const __messages__ = .{ 30 | .max = "Max times to repeat", 31 | .interval = "Pause interval(in seconds) between repeats", 32 | .version = "Print version", 33 | .help = "Print help information", 34 | }; 35 | }, "command", util.get_build_info()); 36 | defer opt.deinit(); 37 | 38 | const argv = if (opt.positional_args.len == 0) { 39 | return error.NoCommand; 40 | } else opt.positional_args; 41 | 42 | var keep_running = true; 43 | var i: usize = 0; 44 | while (keep_running) { 45 | i += 1; 46 | if (opt.args.max) |max| { 47 | if (max != 0 and i >= max) { 48 | keep_running = false; 49 | } 50 | } 51 | const term = try run(allocator, argv); 52 | switch (term) { 53 | .Exited => |rc| { 54 | if (rc == 0) { 55 | keep_running = false; 56 | } 57 | }, 58 | else => {}, 59 | } 60 | 61 | if (keep_running) { 62 | if (opt.args.interval) |pause| { 63 | time.sleep(pause * time.ns_per_s); 64 | } 65 | } 66 | } 67 | } 68 | 69 | fn run(allocator: mem.Allocator, argv: []const []const u8) !process.Child.Term { 70 | var child = process.Child.init(argv, allocator); 71 | // By default, child will inherit stdout & stderr from its parents, 72 | // so child's output will be redirect to output of parents. 
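    // A variant that captures the output instead (sketch, not used by repeat):
    //
    //     var captured = process.Child.init(argv, allocator);
    //     captured.stdout_behavior = .Pipe;
    //     try captured.spawn();
    //     const out = try captured.stdout.?.readToEndAlloc(allocator, 1 << 20);
    //     defer allocator.free(out);
    //     _ = try captured.wait();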
73 | return try child.spawnAndWait(); 74 | } 75 | -------------------------------------------------------------------------------- /src/bin/tcp-proxy.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const simargs = @import("simargs"); 3 | const util = @import("util.zig"); 4 | const debugPrint = util.debugPrint; 5 | const net = std.net; 6 | const mem = std.mem; 7 | 8 | pub const std_options: std.Options = .{ 9 | .log_level = .debug, 10 | }; 11 | 12 | const isLinux = util.isLinux(); 13 | const isWindows = util.isWindows(); 14 | 15 | pub fn main() !void { 16 | const allocator = std.heap.page_allocator; 17 | 18 | const opt = try simargs.parse(allocator, struct { 19 | bind_host: []const u8 = "0.0.0.0", 20 | local_port: u16 = 8081, 21 | remote_host: []const u8, 22 | remote_port: u16, 23 | buf_size: usize = 1024 * 16, 24 | server_threads: u32 = 24, 25 | help: bool = false, 26 | version: bool = false, 27 | verbose: bool = false, 28 | 29 | pub const __shorts__ = .{ 30 | .bind_host = .b, 31 | .local_port = .p, 32 | .remote_host = .H, 33 | .remote_port = .P, 34 | .help = .h, 35 | .version = .v, 36 | }; 37 | 38 | pub const __messages__ = .{ 39 | .bind_host = "Local bind host", 40 | .local_port = "Local bind port", 41 | .remote_host = "Remote host", 42 | .remote_port = "Remote port", 43 | .buf_size = "Buffer size for tcp read/write", 44 | .server_threads = "Server worker threads num", 45 | }; 46 | }, null, util.get_build_info()); 47 | 48 | if (opt.args.verbose) { 49 | util.enableVerbose.call(); 50 | } 51 | 52 | const bind_addr = try parseIp(opt.args.bind_host, opt.args.local_port); 53 | const remote_addr = try parseIp(opt.args.remote_host, opt.args.remote_port); 54 | var server = try bind_addr.listen(.{ 55 | .kernel_backlog = 128, 56 | .reuse_address = true, 57 | }); 58 | std.log.info("Tcp proxy listen on {any}", .{bind_addr}); 59 | 60 | var pool = try allocator.create(std.Thread.Pool); 61 | defer pool.deinit(); 62 | 63 | try pool.init(.{ 64 | .allocator = allocator, 65 | .n_jobs = opt.args.server_threads, 66 | }); 67 | while (true) { 68 | const client = try server.accept(); 69 | debugPrint("Got new connection, addr:{any}", .{client.address}); 70 | 71 | const proxy = Proxy.init(allocator, client, remote_addr, opt.args.buf_size) catch |e| { 72 | std.log.err("Init proxy failed, remote:{any}, err:{any}", .{ remote_addr, e }); 73 | client.stream.close(); 74 | continue; 75 | }; 76 | proxy.nonblockingWork(pool) catch |e| { 77 | std.log.err("Proxy do work failed, remote:{any}, err:{any}", .{ remote_addr, e }); 78 | }; 79 | } 80 | } 81 | 82 | const Pipes = struct { 83 | src_to_remote: [2]std.posix.fd_t, 84 | remote_to_src: [2]std.posix.fd_t, 85 | }; 86 | 87 | const DoubleBuf = struct { 88 | src_to_remote: []u8, 89 | remote_to_src: []u8, 90 | }; 91 | const CopyContext = if (isLinux) Pipes else DoubleBuf; 92 | 93 | const Proxy = struct { 94 | source: net.Server.Connection, 95 | remote_conn: net.Stream, 96 | remote_addr: net.Address, 97 | allocator: mem.Allocator, 98 | 99 | context: CopyContext, 100 | 101 | pub fn init(allocator: mem.Allocator, source: net.Server.Connection, remote: net.Address, buf_size: usize) !Proxy { 102 | // this may block 103 | const remote_conn = try net.tcpConnectToAddress(remote); 104 | const context = if (isLinux) Pipes{ 105 | .src_to_remote = try std.posix.pipe(), 106 | .remote_to_src = try std.posix.pipe(), 107 | } else blk: { 108 | const buf = try allocator.alloc(u8, buf_size * 2); 109 | break :blk DoubleBuf{ 
110 | .src_to_remote = buf[0..buf_size], 111 | .remote_to_src = buf[buf_size..], 112 | }; 113 | }; 114 | return .{ 115 | .allocator = allocator, 116 | .source = source, 117 | .remote_conn = remote_conn, 118 | .remote_addr = remote, 119 | .context = context, 120 | }; 121 | } 122 | 123 | fn copyStreamLinux( 124 | fds: [2]std.posix.fd_t, 125 | src: net.Stream, 126 | src_addr: net.Address, 127 | dst: net.Stream, 128 | dst_addr: net.Address, 129 | ) void { 130 | const c = @cImport({ 131 | // https://man7.org/linux/man-pages/man2/splice.2.html 132 | @cDefine("_GNU_SOURCE", {}); 133 | @cInclude("fcntl.h"); 134 | }); 135 | while (true) { 136 | const rc = c.splice(src.handle, null, fds[1], null, util.MAX_I32, c.SPLICE_F_NONBLOCK | c.SPLICE_F_MOVE); 137 | const read = util.checkCErr(rc) catch { 138 | std.log.err("Read stream into pipe failed, addr:{any}, err:{any}", .{ src_addr, std.posix.errno(rc) }); 139 | return; 140 | }; 141 | if (read == 0) { 142 | return; 143 | } 144 | 145 | const rc2 = c.splice(fds[0], null, dst.handle, null, util.MAX_I32, c.SPLICE_F_MOVE); 146 | _ = util.checkCErr(rc2) catch { 147 | std.log.err("Write stream from pipe failed, addr:{any}, err:{any}", .{ dst_addr, std.posix.errno(rc2) }); 148 | return; 149 | }; 150 | } 151 | } 152 | 153 | fn copyStream( 154 | buf: []u8, 155 | src: net.Stream, 156 | src_addr: net.Address, 157 | dst: net.Stream, 158 | dst_addr: net.Address, 159 | ) void { 160 | while (true) { 161 | const read = src.read(buf) catch |e| { 162 | if (e != error.NotOpenForReading) { 163 | std.log.err("Read stream failed, addr:{any}, err:{any}", .{ src_addr, e }); 164 | } 165 | return; 166 | }; 167 | if (read == 0) { 168 | return; 169 | } 170 | 171 | _ = dst.writeAll(buf[0..read]) catch |e| { 172 | std.log.err("Write stream failed, addr:{any}, err:{any}", .{ dst_addr, e }); 173 | return; 174 | }; 175 | } 176 | } 177 | 178 | pub fn nonblockingWork( 179 | self: Proxy, 180 | pool: *std.Thread.Pool, 181 | ) !void { 182 | const copyFn = if (isLinux) 183 | Proxy.copyStreamLinux 184 | else 185 | Proxy.copyStream; 186 | { 187 | errdefer self.deinit(); 188 | // task1. copy source to remote 189 | try pool.spawn(struct { 190 | fn run( 191 | proxy: Proxy, 192 | ) void { 193 | copyFn( 194 | proxy.context.src_to_remote, 195 | proxy.source.stream, 196 | proxy.source.address, 197 | proxy.remote_conn, 198 | proxy.remote_addr, 199 | ); 200 | // When source disconnected, `source.read` will return 0, this means copyStream will return, 201 | // and task1 is finished. When we close remote conn here, task2 below will also exit. 202 | // If task2 exit earlier than task1, copyStream in task1 will also return, so we don't leak resources. 203 | proxy.deinit(); 204 | } 205 | }.run, .{self}); 206 | } 207 | 208 | // task2. 
copy remote to source 209 | try pool.spawn(struct { 210 | fn run( 211 | proxy: Proxy, 212 | ) void { 213 | copyFn( 214 | proxy.context.remote_to_src, 215 | proxy.remote_conn, 216 | proxy.remote_addr, 217 | proxy.source.stream, 218 | proxy.source.address, 219 | ); 220 | } 221 | }.run, .{self}); 222 | } 223 | 224 | fn deinit(self: Proxy) void { 225 | debugPrint("Close proxy, src:{any}, remote:{any}.", .{ self.source.address, self.remote_addr }); 226 | 227 | self.source.stream.close(); 228 | self.remote_conn.close(); 229 | if (isLinux) { 230 | std.posix.close(self.context.src_to_remote[0]); 231 | std.posix.close(self.context.src_to_remote[1]); 232 | std.posix.close(self.context.remote_to_src[0]); 233 | std.posix.close(self.context.remote_to_src[1]); 234 | } else { 235 | self.allocator.free(self.context.src_to_remote); 236 | } 237 | } 238 | }; 239 | 240 | // resolveIp can't be used in windows, so add this hack! 241 | // 0.13.0\x64\lib\std\net.zig:756:5: error: std.net.if_nametoindex unimplemented for this OS 242 | fn parseIp(name: []const u8, port: u16) !net.Address { 243 | return if (isWindows) 244 | net.Address.parseIp4(name, port) catch 245 | try net.Address.parseIp6(name, port) 246 | else 247 | try net.Address.resolveIp(name, port); 248 | } 249 | -------------------------------------------------------------------------------- /src/bin/timeout.zig: -------------------------------------------------------------------------------- 1 | //! Run a command with bounded time 2 | //! https://github.com/coreutils/coreutils/blob/v9.6/src/timeout.c 3 | 4 | const std = @import("std"); 5 | const posix = std.posix; 6 | const Child = std.process.Child; 7 | 8 | pub var child: Child = undefined; 9 | pub var spawn_success = false; 10 | 11 | pub fn main() !void { 12 | posix.sigaction(posix.SIG.ALRM, &posix.Sigaction{ 13 | .handler = .{ 14 | .handler = struct { 15 | pub fn handler(got: c_int) callconv(.C) void { 16 | std.debug.assert(got == posix.SIG.ALRM); 17 | _ = child.kill() catch |e| { 18 | std.log.err("Kill child failed, err:{any}", .{e}); 19 | return; 20 | }; 21 | posix.exit(124); // timeout 22 | } 23 | }.handler, 24 | }, 25 | .mask = posix.empty_sigset, 26 | .flags = 0, 27 | }, null); 28 | 29 | var gpa = std.heap.GeneralPurposeAllocator(.{}){}; 30 | defer if (gpa.deinit() != .ok) @panic("leak"); 31 | const allocator = gpa.allocator(); 32 | 33 | const args = try std.process.argsAlloc(allocator); 34 | defer std.process.argsFree(allocator, args); 35 | 36 | if (args.len < 3) { 37 | std.debug.print( 38 | \\Usage: 39 | \\ {s} SECONDS COMMAND [ARG]... 40 | \\ 41 | , .{args[0]}); 42 | posix.exit(1); 43 | } 44 | 45 | const ttl_seconds = try std.fmt.parseInt(c_uint, args[1], 10); 46 | const cmds = args[2..]; 47 | const ret = std.c.alarm(ttl_seconds); 48 | if (ret != 0) { 49 | std.log.err("Set alarm signal failed, retcode:{d}", .{ret}); 50 | posix.exit(1); 51 | } 52 | 53 | child = Child.init(cmds, allocator); 54 | try child.spawn(); 55 | spawn_success = true; 56 | const term = try child.wait(); 57 | switch (term) { 58 | .Exited => |status| { 59 | posix.exit(status); 60 | }, 61 | else => { 62 | std.log.err("Child internal error, term:{any}", .{term}); 63 | posix.exit(125); 64 | }, 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /src/bin/tree.zig: -------------------------------------------------------------------------------- 1 | //! Tree(1) in Zig 2 | //! https://linux.die.net/man/1/tree 3 | //! 4 | //! Order: 5 | //! - Files first, directory last 6 | //! 
- Asc 7 | 8 | const std = @import("std"); 9 | const simargs = @import("simargs"); 10 | const util = @import("util.zig"); 11 | const StringUtil = util.StringUtil; 12 | const process = std.process; 13 | const fs = std.fs; 14 | const mem = std.mem; 15 | const testing = std.testing; 16 | const fmt = std.fmt; 17 | 18 | pub const std_options: std.Options = .{ 19 | .log_level = .info, 20 | }; 21 | 22 | const Mode = enum { 23 | ascii, 24 | box, 25 | dos, 26 | }; 27 | 28 | const Position = enum { 29 | Normal, 30 | // last file in current dir 31 | Last, 32 | UpperNormal, 33 | // last file is upper dir 34 | UpperLast, 35 | }; 36 | 37 | const PREFIX_ARR = [_][4][]const u8{ // mode -> position 38 | .{ "|--", "\\--", "| ", " " }, 39 | .{ "├──", "└──", "│ ", " " }, 40 | // https://en.m.wikipedia.org/wiki/Box-drawing_character#DOS 41 | .{ "╠══", "╚══", "║ ", " " }, 42 | }; 43 | 44 | fn getPrefix(mode: Mode, pos: Position) []const u8 { 45 | return PREFIX_ARR[@intFromEnum(mode)][@intFromEnum(pos)]; 46 | } 47 | 48 | pub const WalkOptions = struct { 49 | mode: Mode = .box, 50 | all: bool = false, 51 | size: bool = false, 52 | directory: bool = false, 53 | level: ?usize, 54 | version: bool = false, 55 | help: bool = false, 56 | 57 | pub const __shorts__ = .{ 58 | .all = .a, 59 | .mode = .m, 60 | .size = .s, 61 | .directory = .d, 62 | .level = .L, 63 | .version = .v, 64 | .help = .h, 65 | }; 66 | 67 | pub const __messages__ = .{ 68 | .mode = "Line drawing characters.", 69 | .all = "All files are printed.", 70 | .size = "Print the size of each file in bytes along with the name.", 71 | .directory = "List directories only.", 72 | .level = "Max display depth of the directory tree.", 73 | .version = "Print version.", 74 | .help = "Print help information.", 75 | }; 76 | }; 77 | 78 | pub fn main() anyerror!void { 79 | var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); 80 | defer arena.deinit(); 81 | const allocator = arena.allocator(); 82 | 83 | const opt = try simargs.parse( 84 | allocator, 85 | WalkOptions, 86 | "[directory]", 87 | util.get_build_info(), 88 | ); 89 | defer opt.deinit(); 90 | 91 | const root_dir = if (opt.positional_args.len == 0) 92 | "." 
93 | else 94 | opt.positional_args[0]; 95 | 96 | var writer = std.io.bufferedWriter(std.io.getStdOut().writer()); 97 | _ = try writer.write(root_dir); 98 | _ = try writer.write("\n"); 99 | 100 | var dir = try fs.cwd().openDir(root_dir, .{ .iterate = true }); 101 | defer dir.close(); 102 | var iter = dir.iterate(); 103 | const ret = try walk(allocator, opt.args, &iter, &writer, "", 1); 104 | 105 | _ = try writer.write(try std.fmt.allocPrint(allocator, "\n{d} directories, {d} files\n", .{ 106 | ret.directories, 107 | ret.files, 108 | })); 109 | try writer.flush(); 110 | } 111 | 112 | fn stringLessThan(a: []const u8, b: []const u8) bool { 113 | var i: usize = 0; 114 | while (i < a.len and i < b.len) { 115 | if (a[i] != b[i]) { 116 | return a[i] < b[i]; 117 | } 118 | i += 1; 119 | } 120 | return a.len < b.len; 121 | } 122 | 123 | test "testing string lessThan" { 124 | const testcases = .{ 125 | .{ "a", "a", false }, 126 | .{ "a", "aa", true }, 127 | .{ "a", "b", true }, 128 | .{ "b", "a", false }, 129 | .{ "a", "A", false }, // A > a 130 | }; 131 | inline for (testcases) |case| { 132 | try testing.expectEqual(case.@"2", stringLessThan(case.@"0", case.@"1")); 133 | } 134 | } 135 | 136 | const WalkResult = struct { 137 | files: usize, 138 | directories: usize, 139 | 140 | fn add(self: *@This(), other: @This()) void { 141 | self.directories += other.directories; 142 | self.files += other.files; 143 | } 144 | }; 145 | 146 | fn walk( 147 | allocator: mem.Allocator, 148 | walk_ctx: anytype, 149 | iter: *fs.Dir.Iterator, 150 | writer: anytype, 151 | prefix: []const u8, 152 | level: usize, 153 | ) !WalkResult { 154 | var ret = WalkResult{ .files = 0, .directories = 0 }; 155 | if (walk_ctx.level) |max| { 156 | if (level > max) { 157 | return ret; 158 | } 159 | } 160 | 161 | var files = std.ArrayList(fs.Dir.Entry).init(allocator); 162 | defer { 163 | for (files.items) |entry| { 164 | allocator.free(entry.name); 165 | } 166 | files.deinit(); 167 | } 168 | 169 | while (try iter.next()) |entry| { 170 | const dupe_name = try allocator.dupe(u8, entry.name); 171 | errdefer allocator.free(dupe_name); 172 | 173 | if (walk_ctx.directory) { 174 | if (entry.kind != .directory) { 175 | continue; 176 | } 177 | } 178 | 179 | if (!walk_ctx.all) { 180 | if ('.' == entry.name[0]) { 181 | continue; 182 | } 183 | } 184 | 185 | try files.append(.{ .name = dupe_name, .kind = entry.kind }); 186 | } 187 | 188 | std.sort.heap(fs.Dir.Entry, files.items, {}, struct { 189 | fn lessThan(ctx: void, a: fs.Dir.Entry, b: fs.Dir.Entry) bool { 190 | _ = ctx; 191 | 192 | // file < directory 193 | if (a.kind != b.kind) { 194 | if (a.kind == .directory) { 195 | return false; 196 | } 197 | if (b.kind == .directory) { 198 | return true; 199 | } 200 | } 201 | 202 | return stringLessThan(a.name, b.name); 203 | } 204 | }.lessThan); 205 | 206 | var buf: [fs.max_path_bytes]u8 = undefined; 207 | for (files.items, 0..) 
|entry, i| { 208 | _ = try writer.write(prefix); 209 | 210 | if (i < files.items.len - 1) { 211 | _ = try writer.write(getPrefix(walk_ctx.mode, Position.Normal)); 212 | } else { 213 | _ = try writer.write(getPrefix(walk_ctx.mode, Position.Last)); 214 | } 215 | _ = try writer.write(entry.name); 216 | 217 | if (walk_ctx.size) { 218 | const stat = try iter.dir.statFile(entry.name); 219 | _ = try writer.write(" ["); 220 | _ = try writer.write(try StringUtil.humanSize(allocator, stat.size)); 221 | _ = try writer.write("]"); 222 | } 223 | switch (entry.kind) { 224 | .directory => { 225 | _ = try writer.write("\n"); 226 | ret.directories += 1; 227 | var sub_dir = try iter.dir.openDir(entry.name, .{ .iterate = true }); 228 | defer sub_dir.close(); 229 | var sub_iter_dir = sub_dir.iterate(); 230 | 231 | const new_prefix = 232 | if (i < files.items.len - 1) 233 | try std.fmt.allocPrint(allocator, "{s}{s}", .{ prefix, getPrefix(walk_ctx.mode, Position.UpperNormal) }) 234 | else 235 | try std.fmt.allocPrint(allocator, "{s}{s}", .{ prefix, getPrefix(walk_ctx.mode, Position.UpperLast) }); 236 | 237 | ret.add(try walk(allocator, walk_ctx, &sub_iter_dir, writer, new_prefix, level + 1)); 238 | }, 239 | .sym_link => { 240 | ret.files += 1; 241 | const linked_name = try iter.dir.readLink(entry.name, &buf); 242 | _ = try writer.write(" -> "); 243 | _ = try writer.write(linked_name); 244 | _ = try writer.write("\n"); 245 | }, 246 | else => { 247 | ret.files += 1; 248 | _ = try writer.write("\n"); 249 | }, 250 | } 251 | } 252 | 253 | return ret; 254 | } 255 | -------------------------------------------------------------------------------- /src/bin/util.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const info = @import("build_info"); 3 | const builtin = @import("builtin"); 4 | const mem = std.mem; 5 | const fmt = std.fmt; 6 | 7 | pub const MAX_I32: i32 = std.math.maxInt(i32); 8 | pub const StringUtil = struct { 9 | const SIZE_UNIT = [_][]const u8{ "B", "K", "M", "G", "T" }; 10 | 11 | pub fn humanSize(allocator: mem.Allocator, n: u64) ![]const u8 { 12 | var remaining: f64 = @floatFromInt(n); 13 | var i: usize = 0; 14 | while (remaining > 1024) { 15 | remaining /= 1024; 16 | i += 1; 17 | } 18 | return fmt.allocPrint(allocator, "{d:.2}{s}", .{ remaining, SIZE_UNIT[i] }); 19 | } 20 | }; 21 | 22 | pub fn get_build_info() []const u8 { 23 | return fmt.comptimePrint( 24 | \\Zigcli 25 | \\ - version: {s} 26 | \\ - commit: https://github.com/jiacai2050/zigcli/commit/{s} 27 | \\ 28 | \\Build Config: 29 | \\ - build date: {s} 30 | \\ - build mode: {s} 31 | \\ - zig version: {s} 32 | \\ - zig backend: {s} 33 | , .{ 34 | info.version, 35 | info.git_commit, 36 | info.build_date, 37 | info.build_mode, 38 | builtin.zig_version_string, 39 | builtin.zig_backend, 40 | }); 41 | } 42 | 43 | pub fn SliceIter(comptime T: type) type { 44 | return struct { 45 | slice: []const T, 46 | idx: usize, 47 | 48 | const Self = @This(); 49 | 50 | pub fn init(slice: []const T) Self { 51 | return .{ 52 | .slice = slice, 53 | .idx = 0, 54 | }; 55 | } 56 | 57 | pub fn next(self: *Self) ?T { 58 | if (self.idx == self.slice.len) { 59 | return null; 60 | } 61 | const value = self.slice[self.idx]; 62 | self.idx += 1; 63 | return value; 64 | } 65 | }; 66 | } 67 | 68 | test "slice iter" { 69 | var iter = SliceIter(u8).init(&[_]u8{ 1, 2, 3 }); 70 | try std.testing.expectEqual(iter.next().?, 1); 71 | try std.testing.expectEqual(iter.next().?, 2); 72 | try 
std.testing.expectEqual(iter.next().?, 3); 73 | try std.testing.expectEqual(iter.next(), null); 74 | } 75 | 76 | // global var, used in one binary program. 77 | var verbose: bool = false; 78 | 79 | pub var enableVerbose = std.once(struct { 80 | fn do() void { 81 | verbose = true; 82 | } 83 | }.do); 84 | 85 | pub fn debugPrint( 86 | comptime format: []const u8, 87 | args: anytype, 88 | ) void { 89 | if (verbose) { 90 | std.log.debug(format, args); 91 | } 92 | } 93 | 94 | pub fn getCpuCount() u32 { 95 | return std.Thread.getCpuCount() orelse 1; 96 | } 97 | 98 | pub fn isLinux() bool { 99 | return builtin.os.tag == .linux; 100 | } 101 | 102 | pub fn isWindows() bool { 103 | return builtin.os.tag == .windows; 104 | } 105 | 106 | pub fn checkCErr(ret: isize) !isize { 107 | if (ret < 0) { 108 | return error.CErr; 109 | } 110 | 111 | return ret; 112 | } 113 | -------------------------------------------------------------------------------- /src/bin/yes.zig: -------------------------------------------------------------------------------- 1 | //! Yes in Zig 2 | //! Output a string repeatedly until killed 3 | //! https://man7.org/linux/man-pages/man1/yes.1.html 4 | //! 5 | const std = @import("std"); 6 | 7 | const BUFFER_CAP = 32 * 1024; 8 | 9 | fn fillBuffer(buf: []u8, text: []const u8) usize { 10 | std.mem.copyForwards(u8, buf, text); 11 | std.mem.copyForwards(u8, buf[text.len..], "\n"); 12 | 13 | if (text.len + 1 > buf.len / 2) { // plus one newline 14 | return buf.len; 15 | } 16 | 17 | var buffer_size = text.len + 1; 18 | while (buffer_size < buf.len / 2) { 19 | std.mem.copyForwards(u8, buf[buffer_size..], buf[0..buffer_size]); 20 | buffer_size *= 2; 21 | } 22 | 23 | return buffer_size; 24 | } 25 | 26 | pub fn main() !void { 27 | var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); 28 | defer arena.deinit(); 29 | const allocator = arena.allocator(); 30 | 31 | var iter = try std.process.argsWithAllocator(allocator); 32 | _ = iter.next() orelse unreachable; // program 33 | const input = iter.next() orelse "y"; 34 | 35 | var buffer: [BUFFER_CAP]u8 = undefined; 36 | const size = fillBuffer(&buffer, input); 37 | const stdout = std.io.getStdOut(); 38 | var writer = stdout.writer(); 39 | while ((try writer.write(buffer[0..size])) > 0) {} 40 | } 41 | -------------------------------------------------------------------------------- /src/bin/zigfetch.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | const curl = @import("curl"); 3 | const simargs = @import("simargs"); 4 | const util = @import("util.zig"); 5 | const Manifest = @import("./pkg/Manifest.zig"); 6 | const package = @import("./pkg/package.zig"); 7 | const builtin = @import("builtin"); 8 | const fs = std.fs; 9 | const ascii = std.ascii; 10 | const log = std.log; 11 | const mem = std.mem; 12 | const print = std.debug.print; 13 | const Allocator = mem.Allocator; 14 | const Child = std.process.Child; 15 | const ArrayList = std.ArrayList; 16 | 17 | pub const std_options: std.Options = .{ 18 | .log_level = if (builtin.mode == .Debug) .debug else .info, 19 | }; 20 | 21 | const Args = struct { 22 | help: bool = false, 23 | version: bool = false, 24 | verbose: bool = false, 25 | timeout: usize = 60, 26 | @"no-dep": bool = false, 27 | @"debug-hash": bool = false, 28 | @"skip-check": bool = false, 29 | 30 | pub const __shorts__ = .{ 31 | .version = .V, 32 | .verbose = .v, 33 | .timeout = .t, 34 | .help = .h, 35 | .@"no-dep" = .n, 36 | .@"debug-hash" = .d, 37 | 
.@"skip-check" = .s, 38 | }; 39 | pub const __messages__ = .{ 40 | .help = "Show help", 41 | .version = "Show version", 42 | .verbose = "Show verbose log", 43 | .timeout = "Libcurl http timeout in seconds", 44 | .@"debug-hash" = "Print hash for each file", 45 | .@"no-dep" = "Disable fetch dependencies", 46 | .@"skip-check" = "Skip hash field check", 47 | }; 48 | }; 49 | 50 | var args: Args = undefined; 51 | var cache_dirname: []const u8 = undefined; 52 | var cache_dep_dir: fs.Dir = undefined; 53 | var thread_pool: std.Thread.Pool = undefined; 54 | var fetched_packages = std.StringHashMapUnmanaged(void){}; 55 | var easy: curl.Easy = undefined; 56 | 57 | pub fn main() !void { 58 | const allocator = std.heap.page_allocator; 59 | const opt = try simargs.parse( 60 | allocator, 61 | Args, 62 | "[package-dir or url]", 63 | util.get_build_info(), 64 | ); 65 | defer opt.deinit(); 66 | 67 | if (opt.positional_args.len == 0) { 68 | const stdout = std.io.getStdOut(); 69 | try opt.printHelp(stdout.writer()); 70 | return; 71 | } 72 | // Init global vars 73 | args = opt.args; 74 | const ca_bundle = try curl.allocCABundle(allocator); 75 | defer ca_bundle.deinit(); 76 | easy = try curl.Easy.init(allocator, .{ 77 | .default_timeout_ms = args.timeout * 1000, 78 | .default_user_agent = "zigfetch", 79 | .ca_bundle = ca_bundle, 80 | }); 81 | 82 | { 83 | cache_dirname = try resolveGlobalCacheDir(allocator); 84 | const p_dirname = try std.fmt.allocPrint(allocator, "{s}/p", .{cache_dirname}); 85 | cache_dep_dir = fs.openDirAbsolute(p_dirname, .{}) catch |e| switch (e) { 86 | error.FileNotFound => { 87 | log.err("{s} not exists, please create it first!", .{p_dirname}); 88 | return e; 89 | }, 90 | else => return e, 91 | }; 92 | } 93 | try thread_pool.init(.{ .allocator = allocator }); 94 | defer thread_pool.deinit(); 95 | 96 | const url_or_path = opt.positional_args[0]; 97 | defer allocator.free(cache_dirname); 98 | if (std.mem.startsWith(u8, url_or_path, "http")) { 99 | try handleHTTP(allocator, url_or_path); 100 | } else if (std.mem.startsWith(u8, url_or_path, "git+")) { 101 | try handleGit(allocator, url_or_path); 102 | } else { 103 | // it's a directory 104 | const path = try fs.path.resolve(allocator, &.{url_or_path}); 105 | defer allocator.free(path); 106 | try handleDir(allocator, path); 107 | } 108 | } 109 | 110 | fn calcHash(allocator: Allocator, dir: fs.Dir, root_dirname: []const u8, deleteIgnore: bool) anyerror![]const u8 { 111 | var manifest = try loadManifest(allocator, dir); 112 | defer if (manifest) |*m| m.deinit(allocator); 113 | 114 | const filter: Filter = .{ 115 | .include_paths = if (manifest) |m| m.paths else .{}, 116 | }; 117 | const actual_hash = try computeHash( 118 | allocator, 119 | dir, 120 | root_dirname, 121 | filter, 122 | deleteIgnore, 123 | ); 124 | const computed_package_hash = computedPackageHash(actual_hash, manifest).toSlice(); 125 | if (args.@"no-dep") { 126 | return try allocator.dupe(u8, computed_package_hash); 127 | } 128 | 129 | if (manifest) |m| { 130 | var it = m.dependencies.iterator(); 131 | while (it.next()) |entry| { 132 | const dep = entry.value_ptr; 133 | switch (dep.location) { 134 | .url => |pkg_url| { 135 | if (fetched_packages.contains(pkg_url)) { 136 | continue; 137 | } 138 | const cache_key = try std.fmt.allocPrint(allocator, "{s}", .{pkg_url}); 139 | try fetched_packages.put(allocator, cache_key, {}); 140 | 141 | if (dep.hash) |hash| { 142 | if (std.mem.startsWith(u8, pkg_url, "git+")) { 143 | _ = try cachePackageFromGit(allocator, pkg_url, hash); 144 | } else 
{ 145 | const u = try std.fmt.allocPrintZ(allocator, "{s}", .{pkg_url}); 146 | defer allocator.free(u); 147 | 148 | _ = try cachePackageFromUrl(allocator, u, hash); 149 | } 150 | } else { 151 | log.err("{s} has no hash field, url:{s}", .{ entry.key_ptr.*, pkg_url }); 152 | } 153 | }, 154 | .path => |local_path| { 155 | log.info("Cache from dir dep: {s}", .{local_path}); 156 | var local_dir = try dir.openDir(local_path, .{ .iterate = true }); 157 | defer local_dir.close(); 158 | 159 | _ = try cachePackageFromLocal(allocator, local_dir); 160 | }, 161 | } 162 | } 163 | } 164 | 165 | return try allocator.dupe(u8, computed_package_hash); 166 | } 167 | 168 | fn handleDir(allocator: Allocator, path: []const u8) !void { 169 | log.info("Cache from dir: {s}", .{path}); 170 | try fetched_packages.put(allocator, path, {}); 171 | 172 | var dir = try fs.cwd().openDir(path, .{ .iterate = true }); 173 | defer dir.close(); 174 | 175 | const hash = try cachePackageFromLocal(allocator, dir); 176 | print("{s}", .{hash}); 177 | } 178 | 179 | fn cachePackageFromLocal( 180 | allocator: Allocator, 181 | dir: fs.Dir, 182 | ) anyerror![]const u8 { 183 | const hash = try calcHash(allocator, dir, "", false); 184 | return hash; 185 | } 186 | 187 | fn handleHTTP(allocator: Allocator, url: [:0]const u8) !void { 188 | try fetched_packages.put(allocator, url, {}); 189 | 190 | const hash = try cachePackageFromUrl(allocator, url, null); 191 | print("{s}", .{hash}); 192 | } 193 | 194 | fn cachePackageFromUrl( 195 | allocator: Allocator, 196 | url: [:0]const u8, 197 | expected_hash: ?[]const u8, 198 | ) anyerror![]const u8 { 199 | log.info("Cache from url: {s}", .{url}); 200 | if (expected_hash) |hash| blk: { 201 | cache_dep_dir.access(hash, .{}) catch { 202 | break :blk; 203 | }; 204 | // If reach here, it means it already in global caches 205 | if (args.verbose) { 206 | log.info("Already cached, skip", .{}); 207 | } 208 | return hash; 209 | } 210 | 211 | const tmp_dirname = try makeTmpDir(allocator); 212 | defer allocator.free(tmp_dirname); 213 | defer fs.deleteTreeAbsolute(tmp_dirname) catch |e| { 214 | if (args.verbose) { 215 | log.err("Delete dir({s}) failed, err:{any}", .{ tmp_dirname, e }); 216 | } 217 | }; 218 | 219 | var out_dir = try fs.openDirAbsolute(tmp_dirname, .{ .iterate = true }); 220 | defer out_dir.close(); 221 | 222 | // This is the directory we need to strip. 
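// (Forge archives typically unpack into a single top-level folder, e.g. `<repo>-<commit>/`
// for GitHub tarballs; fetchPackage returns that folder name so hashing and caching
// start from the real package root.)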
223 | const sub_dirname = try fetchPackage(allocator, url, out_dir); 224 | defer allocator.free(sub_dirname); 225 | 226 | var sub_dir = try out_dir.openDir(sub_dirname, .{ .iterate = true }); 227 | defer sub_dir.close(); 228 | const src_dirname = try fs.path.join(allocator, &[_][]const u8{ tmp_dirname, sub_dirname }); 229 | defer allocator.free(src_dirname); 230 | const actual_hash = try calcHash(allocator, sub_dir, sub_dirname, true); 231 | if (expected_hash) |expected| { 232 | if (args.@"skip-check") { 233 | try moveToCache(allocator, src_dirname, expected); 234 | return expected; 235 | } 236 | if (!std.mem.eql(u8, expected, actual_hash)) { 237 | log.err("Hash incorrect for {s}, expected:{s}, actual:{s}", .{ 238 | url, expected, actual_hash, 239 | }); 240 | return error.HashNotExpected; 241 | } 242 | } 243 | 244 | try moveToCache(allocator, src_dirname, actual_hash); 245 | return actual_hash; 246 | } 247 | 248 | fn handleGit(allocator: Allocator, git_url: [:0]const u8) !void { 249 | try fetched_packages.put(allocator, git_url, {}); 250 | 251 | const hash = try cachePackageFromGit(allocator, git_url, null); 252 | defer allocator.free(hash); 253 | print("{s}", .{hash}); 254 | } 255 | 256 | fn cachePackageFromGit( 257 | allocator: Allocator, 258 | git_url: []const u8, 259 | expected_hash: ?[]const u8, 260 | ) anyerror![]const u8 { 261 | const uri = try std.Uri.parse(git_url); 262 | const commit_id = if (uri.fragment) |fragment| 263 | try fragment.toRawMaybeAlloc(allocator) 264 | else 265 | return error.MissingFragment; 266 | const host = if (uri.host) |host| 267 | try host.toRawMaybeAlloc(allocator) 268 | else 269 | return error.MissingHost; 270 | 271 | // Convert this git dep to http dep, since it's more efficient. 272 | if (std.mem.eql(u8, host, "github.com") or std.mem.eql(u8, host, "codeberg.org")) { 273 | const archive_url = try std.fmt.allocPrintZ(allocator, "{s}://{s}{s}/archive/{s}.tar.gz", .{ 274 | uri.scheme["git+".len..], 275 | host, 276 | try uri.path.toRawMaybeAlloc(allocator), 277 | commit_id, 278 | }); 279 | defer allocator.free(archive_url); 280 | 281 | return cachePackageFromUrl(allocator, archive_url, expected_hash); 282 | } 283 | 284 | const repo_url = try std.fmt.allocPrint(allocator, "{s}://{s}{s}", .{ 285 | uri.scheme["git+".len..], 286 | host, 287 | try uri.path.toRawMaybeAlloc(allocator), 288 | }); 289 | defer allocator.free(repo_url); 290 | 291 | log.info("Fetch from git, repo_url:{s}, commit_id:{s}...", .{ repo_url, commit_id }); 292 | if (expected_hash) |hash| blk: { 293 | cache_dep_dir.access(hash, .{}) catch { 294 | break :blk; 295 | }; 296 | if (args.verbose) { 297 | log.info("Already cached, skip", .{}); 298 | } 299 | return hash; 300 | } 301 | 302 | const rand_int = std.crypto.random.int(u64); 303 | const tmp_dirname = try std.fmt.allocPrint(allocator, "{s}{s}zigfetch-{s}", .{ 304 | cache_dirname, 305 | fs.path.sep_str, 306 | Manifest.hex64(rand_int), 307 | }); 308 | defer allocator.free(tmp_dirname); 309 | defer fs.deleteTreeAbsolute(tmp_dirname) catch |e| { 310 | log.err("Delete dir({s}) failed, err:{any}", .{ tmp_dirname, e }); 311 | }; 312 | const clone_argv = [_][]const u8{ 313 | "git", 314 | "clone", 315 | repo_url, 316 | tmp_dirname, 317 | }; 318 | try execShell(allocator, &clone_argv); 319 | 320 | const checkout_argv = [_][]const u8{ 321 | "git", 322 | "-C", 323 | tmp_dirname, 324 | "checkout", 325 | commit_id, 326 | }; 327 | try execShell(allocator, &checkout_argv); 328 | 329 | const git_dirname = try std.fmt.allocPrint(allocator, "{s}/.git", .{ 330 
| tmp_dirname, 331 | }); 332 | defer allocator.free(git_dirname); 333 | 334 | fs.deleteTreeAbsolute(git_dirname) catch |e| { 335 | log.err("Delete dir({s}) failed, err:{any}", .{ tmp_dirname, e }); 336 | return error.DeleteDotGit; 337 | }; 338 | 339 | var dir = try fs.openDirAbsolute(tmp_dirname, .{ .iterate = true }); 340 | defer dir.close(); 341 | const actual_hash = try calcHash(allocator, dir, "", true); 342 | if (expected_hash) |expected| { 343 | if (args.@"skip-check") { 344 | try moveToCache(allocator, tmp_dirname, expected); 345 | return expected; 346 | } 347 | if (!std.mem.eql(u8, expected, actual_hash)) { 348 | log.err("Hash incorrect for {s}, expected:{s}, actual:{s}", .{ 349 | repo_url, expected, actual_hash, 350 | }); 351 | return error.HashNotExpected; 352 | } 353 | } 354 | 355 | try moveToCache(allocator, tmp_dirname, actual_hash); 356 | return actual_hash; 357 | } 358 | 359 | fn execShell(allocator: Allocator, argv: []const []const u8) !void { 360 | var child = std.process.Child.init(argv, allocator); 361 | if (!args.verbose) { 362 | child.stdout_behavior = .Pipe; 363 | child.stderr_behavior = .Pipe; 364 | } 365 | const term = try child.spawnAndWait(); 366 | switch (term) { 367 | .Exited => |code| { 368 | if (code != 0) { 369 | return error.ExecShellFailed; 370 | } 371 | }, 372 | else => { 373 | log.err("Exec git clone failed, term:{any}", .{term}); 374 | return error.ExecShellFailed; 375 | }, 376 | } 377 | } 378 | 379 | fn moveToCache(allocator: Allocator, src_dir: []const u8, hex: []const u8) !void { 380 | const dst = try std.fmt.allocPrint(allocator, "{s}/p/{s}", .{ cache_dirname, hex }); 381 | defer allocator.free(dst); 382 | 383 | const found = try checkFileExists(dst); 384 | if (found) { 385 | if (args.verbose) { 386 | log.info("Dir({s}) already exists, skip copy...", .{dst}); 387 | } 388 | return; 389 | } 390 | 391 | try fs.renameAbsolute(src_dir, dst); 392 | } 393 | 394 | fn fetchPackage(allocator: Allocator, url: [:0]const u8, out_dir: fs.Dir) ![]const u8 { 395 | try easy.setFollowLocation(true); 396 | try easy.setVerbose(args.verbose); 397 | 398 | const resp = try easy.get(url); 399 | defer resp.deinit(); 400 | 401 | if (resp.status_code >= 400) { 402 | log.err("Failed to fetch {s}: {d}\n", .{ url, resp.status_code }); 403 | return error.BadFetch; 404 | } 405 | const buffer = resp.body.?; 406 | const header = try resp.getHeader("content-type"); 407 | const mime: ?MimeType = 408 | if (header) |h| blk: { 409 | const mime_type = h.get(); 410 | if (ascii.eqlIgnoreCase(mime_type, "application/x-tar")) { 411 | break :blk .Tar; 412 | } else if (ascii.eqlIgnoreCase(mime_type, "application/gzip") or 413 | ascii.eqlIgnoreCase(mime_type, "application/x-gzip") or 414 | ascii.eqlIgnoreCase(mime_type, "application/tar+gzip") or 415 | ascii.eqlIgnoreCase(mime_type, "application/x-tar-gz") or 416 | ascii.eqlIgnoreCase(mime_type, "application/x-gtar-compressed")) 417 | { 418 | break :blk .TarGz; 419 | } else if (ascii.eqlIgnoreCase(mime_type, "application/x-xz")) { 420 | break :blk .TarXz; 421 | } 422 | if (ascii.eqlIgnoreCase(mime_type, "application/zstd")) { 423 | break :blk .TarZst; 424 | } else if (ascii.eqlIgnoreCase(mime_type, "application/zip")) { 425 | break :blk .Zip; 426 | } else { 427 | break :blk guessMimeType(url); 428 | } 429 | } else guessMimeType(url); 430 | 431 | if (mime) |m| { 432 | switch (m) { 433 | .Tar => { 434 | var stream = std.io.fixedBufferStream(buffer.items); 435 | return try unpackTarball(allocator, out_dir, stream.reader()); 436 | }, 437 | .TarGz => 
{ 438 | var stream = std.io.fixedBufferStream(buffer.items); 439 | var dcp = std.compress.gzip.decompressor(stream.reader()); 440 | return try unpackTarball(allocator, out_dir, dcp.reader()); 441 | }, 442 | .TarXz => { 443 | var stream = std.io.fixedBufferStream(buffer.items); 444 | var dcp = try std.compress.xz.decompress(allocator, stream.reader()); 445 | defer dcp.deinit(); 446 | return try unpackTarball(allocator, out_dir, dcp.reader()); 447 | }, 448 | .TarZst => { 449 | const window_size = std.compress.zstd.DecompressorOptions.default_window_buffer_len; 450 | var stream = std.io.fixedBufferStream(buffer.items); 451 | const window_buffer = try allocator.alloc(u8, window_size); 452 | var dcp = std.compress.zstd.decompressor(stream.reader(), .{ .window_buffer = window_buffer }); 453 | return try unpackTarball(allocator, out_dir, dcp.reader()); 454 | }, 455 | .Zip => { 456 | return try unzip(allocator, out_dir, buffer.items); 457 | }, 458 | } 459 | } else { 460 | return error.UnknownMimeType; 461 | } 462 | } 463 | 464 | fn loadManifest(allocator: Allocator, pkg_dir: fs.Dir) !?Manifest { 465 | var arena = std.heap.ArenaAllocator.init(allocator); 466 | defer arena.deinit(); 467 | const arena_allocator = arena.allocator(); 468 | 469 | const file = pkg_dir.openFile(Manifest.basename, .{}) catch |err| switch (err) { 470 | error.FileNotFound => return null, 471 | else => return err, 472 | }; 473 | defer file.close(); 474 | const bytes = try file.readToEndAllocOptions( 475 | arena_allocator, 476 | Manifest.max_bytes, 477 | null, 478 | 1, 479 | 0, 480 | ); 481 | const ast = try std.zig.Ast.parse(arena_allocator, bytes, .zon); 482 | const manifest = try Manifest.parse(allocator, ast, .{ 483 | .allow_missing_paths_field = true, 484 | }); 485 | return manifest; 486 | } 487 | 488 | fn unzip(allocator: Allocator, out_dir: fs.Dir, src: []const u8) ![]const u8 { 489 | const rand_int = std.crypto.random.int(u64); 490 | const tmp_file = try fs.path.join(allocator, &[_][]const u8{ 491 | cache_dirname, &Manifest.hex64(rand_int), 492 | }); 493 | defer allocator.free(tmp_file); 494 | 495 | const zip_file = try fs.createFileAbsolute(tmp_file, .{ 496 | .exclusive = true, 497 | .read = true, 498 | }); 499 | defer zip_file.close(); 500 | defer fs.deleteFileAbsolute(tmp_file) catch {}; 501 | 502 | try zip_file.writeAll(src); 503 | 504 | var diagnostics: std.zip.Diagnostics = .{ .allocator = allocator }; 505 | std.zip.extract(out_dir, zip_file.seekableStream(), .{ 506 | .allow_backslashes = true, 507 | .diagnostics = &diagnostics, 508 | }) catch |err| { 509 | log.err( 510 | "zip extract failed: {s}", 511 | .{@errorName(err)}, 512 | ); 513 | return err; 514 | }; 515 | return diagnostics.root_dir; 516 | } 517 | 518 | fn unpackTarball(allocator: Allocator, out_dir: fs.Dir, reader: anytype) ![]const u8 { 519 | var diagnostics: std.tar.Diagnostics = .{ .allocator = allocator }; 520 | std.tar.pipeToFileSystem(out_dir, reader, .{ 521 | .diagnostics = &diagnostics, 522 | .strip_components = 0, 523 | .mode_mode = .ignore, 524 | .exclude_empty_directories = true, 525 | }) catch |err| { 526 | log.err( 527 | "unable to unpack tarball to temporary directory: {s}", 528 | .{@errorName(err)}, 529 | ); 530 | return error.Untar; 531 | }; 532 | return diagnostics.root_dir; 533 | } 534 | 535 | const Filter = struct { 536 | include_paths: std.StringArrayHashMapUnmanaged(void) = .{}, 537 | 538 | /// sub_path is relative to the package root. 
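/// For example, with `.paths = .{"src"}` in build.zig.zon, "src/core/unix/SDL_poll.c"
/// is included while ".gitignore" is not (see the test below).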
539 | pub fn includePath(self: Filter, sub_path: []const u8) bool { 540 | if (self.include_paths.count() == 0) return true; 541 | if (self.include_paths.contains("")) return true; 542 | if (self.include_paths.contains(".")) return true; 543 | if (self.include_paths.contains(sub_path)) return true; 544 | 545 | // Check if any included paths are parent directories of sub_path. 546 | var dirname = sub_path; 547 | while (std.fs.path.dirname(dirname)) |next_dirname| { 548 | if (self.include_paths.contains(next_dirname)) return true; 549 | dirname = next_dirname; 550 | } 551 | 552 | return false; 553 | } 554 | 555 | test includePath { 556 | const gpa = std.testing.allocator; 557 | var filter: Filter = .{}; 558 | defer filter.include_paths.deinit(gpa); 559 | 560 | try filter.include_paths.put(gpa, "src", {}); 561 | try std.testing.expect(filter.includePath("src/core/unix/SDL_poll.c")); 562 | try std.testing.expect(!filter.includePath(".gitignore")); 563 | } 564 | }; 565 | 566 | const ComputedHash = struct { 567 | digest: Manifest.Digest, 568 | total_size: u64, 569 | }; 570 | 571 | fn computeHash( 572 | allocator: Allocator, 573 | root_dir: fs.Dir, 574 | root_dirname: []const u8, 575 | filter: Filter, 576 | deleteIgnore: bool, 577 | ) !ComputedHash { 578 | // Collect all files, recursively, then sort. 579 | var all_files = std.ArrayList(*HashedFile).init(allocator); 580 | defer all_files.deinit(); 581 | 582 | var deleted_files = std.ArrayList(*DeletedFile).init(allocator); 583 | defer deleted_files.deinit(); 584 | 585 | // Track directories which had any files deleted from them so that empty directories 586 | // can be deleted. 587 | var sus_dirs: std.StringArrayHashMapUnmanaged(void) = .{}; 588 | defer sus_dirs.deinit(allocator); 589 | 590 | var walker = try root_dir.walk(allocator); 591 | defer walker.deinit(); 592 | 593 | { 594 | // The final hash will be a hash of each file hashed independently. This 595 | // allows hashing in parallel. 596 | var wait_group: std.Thread.WaitGroup = .{}; 597 | // `computeHash` is called from a worker thread so there must not be 598 | // any waiting without working or a deadlock could occur. 599 | defer thread_pool.waitAndWork(&wait_group); 600 | 601 | while (walker.next() catch |err| { 602 | log.err( 603 | "unable to walk temporary directory '{s}': {s}", 604 | .{ root_dirname, @errorName(err) }, 605 | ); 606 | return error.FetchFailed; 607 | }) |entry| { 608 | if (entry.kind == .directory) continue; 609 | 610 | const entry_pkg_path = stripRoot(entry.path, root_dirname); 611 | if (!filter.includePath(entry_pkg_path)) { 612 | if (!deleteIgnore) { 613 | continue; 614 | } 615 | // Delete instead of including in hash calculation. 616 | const fs_path = try allocator.dupe(u8, entry.path); 617 | 618 | // Also track the parent directory in case it becomes empty. 
619 | if (fs.path.dirname(fs_path)) |parent| 620 | try sus_dirs.put(allocator, parent, {}); 621 | 622 | const deleted_file = try allocator.create(DeletedFile); 623 | deleted_file.* = .{ 624 | .fs_path = fs_path, 625 | .failure = undefined, // to be populated by the worker 626 | }; 627 | thread_pool.spawnWg(&wait_group, workerDeleteFile, .{ root_dir, deleted_file }); 628 | try deleted_files.append(deleted_file); 629 | continue; 630 | } 631 | 632 | const kind: HashedFile.Kind = switch (entry.kind) { 633 | .directory => unreachable, 634 | .file => .file, 635 | .sym_link => .link, 636 | else => { 637 | log.err( 638 | "package contains '{s}' which has illegal file type '{s}'", 639 | .{ entry.path, @tagName(entry.kind) }, 640 | ); 641 | return error.NotExpectedFileKind; 642 | }, 643 | }; 644 | 645 | const fs_path = try allocator.dupe(u8, entry.path); 646 | const hashed_file = try allocator.create(HashedFile); 647 | hashed_file.* = .{ 648 | .fs_path = fs_path, 649 | .normalized_path = try normalizePathAlloc(allocator, entry_pkg_path), 650 | .kind = kind, 651 | .hash = undefined, // to be populated by the worker 652 | .failure = undefined, // to be populated by the worker 653 | .size = undefined, // to be populated by the worker 654 | }; 655 | thread_pool.spawnWg(&wait_group, workerHashFile, .{ root_dir, hashed_file }); 656 | try all_files.append(hashed_file); 657 | } 658 | } 659 | 660 | { 661 | // Sort by length, descending, so that child directories get removed first. 662 | sus_dirs.sortUnstable(@as(struct { 663 | keys: []const []const u8, 664 | pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool { 665 | return ctx.keys[b_index].len < ctx.keys[a_index].len; 666 | } 667 | }, .{ .keys = sus_dirs.keys() })); 668 | 669 | // During this loop, more entries will be added, so we must loop by index. 670 | var i: usize = 0; 671 | while (i < sus_dirs.count()) : (i += 1) { 672 | const sus_dir = sus_dirs.keys()[i]; 673 | root_dir.deleteDir(sus_dir) catch |err| switch (err) { 674 | error.DirNotEmpty => continue, 675 | error.FileNotFound => continue, 676 | else => |e| { 677 | log.err( 678 | "unable to delete empty directory '{s}': {s}", 679 | .{ sus_dir, @errorName(e) }, 680 | ); 681 | return error.FetchFailed; 682 | }, 683 | }; 684 | if (fs.path.dirname(sus_dir)) |parent| { 685 | try sus_dirs.put(allocator, parent, {}); 686 | } 687 | } 688 | } 689 | 690 | std.mem.sortUnstable(*HashedFile, all_files.items, {}, HashedFile.lessThan); 691 | 692 | var hasher = Manifest.Hash.init(.{}); 693 | var any_failures = false; 694 | var total_size: u64 = 0; 695 | for (all_files.items) |hashed_file| { 696 | hashed_file.failure catch |err| { 697 | any_failures = true; 698 | log.err("unable to hash '{s}': {s}", .{ 699 | hashed_file.fs_path, @errorName(err), 700 | }); 701 | }; 702 | hasher.update(&hashed_file.hash); 703 | total_size += hashed_file.size; 704 | } 705 | for (deleted_files.items) |deleted_file| { 706 | deleted_file.failure catch |err| { 707 | any_failures = true; 708 | log.err("failed to delete excluded path '{s}' from package: {s}", .{ 709 | deleted_file.fs_path, @errorName(err), 710 | }); 711 | }; 712 | } 713 | 714 | if (any_failures) return error.FetchFailed; 715 | 716 | if (args.@"debug-hash") { 717 | // Print something to stdout that can be text diffed to figure out why 718 | // the package hash is different. 
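// Each entry is printed as "<kind>: <hex digest>: <normalized path>"; see dumpHashInfo below.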
719 | dumpHashInfo(all_files.items) catch |err| { 720 | std.debug.print("unable to write to stdout: {s}\n", .{@errorName(err)}); 721 | std.process.exit(1); 722 | }; 723 | } 724 | 725 | return .{ 726 | .digest = hasher.finalResult(), 727 | .total_size = total_size, 728 | }; 729 | } 730 | 731 | pub fn computedPackageHash(raw: ComputedHash, manifest: ?Manifest) package.Hash { 732 | const saturated_size = std.math.cast(u32, raw.total_size) orelse std.math.maxInt(u32); 733 | if (manifest) |man| { 734 | var version_buffer: [32]u8 = undefined; 735 | const version: []const u8 = std.fmt.bufPrint(&version_buffer, "{}", .{man.version}) catch &version_buffer; 736 | return .init(raw.digest, man.name, version, man.id, saturated_size); 737 | } 738 | // In the future build.zig.zon fields will be added to allow overriding these values 739 | // for naked tarballs. 740 | return .init(raw.digest, "N", "V", 0xffff, saturated_size); 741 | } 742 | 743 | const HashedFile = struct { 744 | fs_path: []const u8, 745 | normalized_path: []const u8, 746 | hash: Manifest.Digest, 747 | failure: Error!void, 748 | kind: Kind, 749 | size: u64, 750 | 751 | const Error = 752 | fs.File.OpenError || 753 | fs.File.ReadError || 754 | fs.File.StatError || 755 | fs.File.ChmodError || 756 | fs.Dir.ReadLinkError; 757 | 758 | const Kind = enum { file, link }; 759 | 760 | fn lessThan(context: void, lhs: *const HashedFile, rhs: *const HashedFile) bool { 761 | _ = context; 762 | return std.mem.lessThan(u8, lhs.normalized_path, rhs.normalized_path); 763 | } 764 | }; 765 | 766 | const DeletedFile = struct { 767 | fs_path: []const u8, 768 | failure: Error!void, 769 | 770 | const Error = 771 | fs.Dir.DeleteFileError || 772 | fs.Dir.DeleteDirError; 773 | }; 774 | 775 | /// Strips root directory name from file system path. 
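/// e.g. stripRoot("pkg-1.2.3/src/main.zig", "pkg-1.2.3") returns "src/main.zig"
/// (assuming '/' is the platform path separator); otherwise the path is returned unchanged.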
776 | fn stripRoot(fs_path: []const u8, root_dir: []const u8) []const u8 { 777 | if (root_dir.len == 0 or fs_path.len <= root_dir.len) return fs_path; 778 | 779 | if (std.mem.eql(u8, fs_path[0..root_dir.len], root_dir) and fs_path[root_dir.len] == fs.path.sep) { 780 | return fs_path[root_dir.len + 1 ..]; 781 | } 782 | 783 | return fs_path; 784 | } 785 | 786 | fn workerHashFile(dir: fs.Dir, hashed_file: *HashedFile) void { 787 | hashed_file.failure = hashFileFallible(dir, hashed_file); 788 | } 789 | 790 | fn workerDeleteFile(dir: fs.Dir, deleted_file: *DeletedFile) void { 791 | deleted_file.failure = deleteFileFallible(dir, deleted_file); 792 | } 793 | 794 | fn hashFileFallible(dir: fs.Dir, hashed_file: *HashedFile) HashedFile.Error!void { 795 | var buf: [8000]u8 = undefined; 796 | var hasher = Manifest.Hash.init(.{}); 797 | hasher.update(hashed_file.normalized_path); 798 | var file_size: u64 = 0; 799 | 800 | switch (hashed_file.kind) { 801 | .file => { 802 | var file = try dir.openFile(hashed_file.fs_path, .{}); 803 | defer file.close(); 804 | // Hard-coded false executable bit: https://github.com/ziglang/zig/issues/17463 805 | hasher.update(&.{ 0, 0 }); 806 | var file_header: FileHeader = .{}; 807 | while (true) { 808 | const bytes_read = try file.read(&buf); 809 | if (bytes_read == 0) break; 810 | file_size += bytes_read; 811 | hasher.update(buf[0..bytes_read]); 812 | file_header.update(buf[0..bytes_read]); 813 | } 814 | if (file_header.isExecutable()) { 815 | try setExecutable(file); 816 | } 817 | }, 818 | .link => { 819 | const link_name = try dir.readLink(hashed_file.fs_path, &buf); 820 | if (fs.path.sep != canonical_sep) { 821 | // Package hashes are intended to be consistent across 822 | // platforms which means we must normalize path separators 823 | // inside symlinks. 824 | normalizePath(link_name); 825 | } 826 | hasher.update(link_name); 827 | }, 828 | } 829 | hasher.final(&hashed_file.hash); 830 | hashed_file.size = file_size; 831 | } 832 | 833 | fn deleteFileFallible(dir: fs.Dir, deleted_file: *DeletedFile) DeletedFile.Error!void { 834 | try dir.deleteFile(deleted_file.fs_path); 835 | } 836 | 837 | fn setExecutable(file: fs.File) !void { 838 | if (!std.fs.has_executable_bit) return; 839 | 840 | const S = std.posix.S; 841 | const mode = fs.File.default_mode | S.IXUSR | S.IXGRP | S.IXOTH; 842 | try file.chmod(mode); 843 | } 844 | 845 | // Detects executable header: ELF magic header or shebang line. 
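// e.g. a "#!/bin/sh" script or an ELF binary (magic bytes "\x7fELF") is detected here,
// and hashFileFallible then restores its executable bit via setExecutable().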
846 | const FileHeader = struct { 847 | const elf_magic = std.elf.MAGIC; 848 | const shebang = "#!"; 849 | 850 | header: [@max(elf_magic.len, shebang.len)]u8 = undefined, 851 | bytes_read: usize = 0, 852 | 853 | pub fn update(self: *FileHeader, buf: []const u8) void { 854 | if (self.bytes_read >= self.header.len) return; 855 | const n = @min(self.header.len - self.bytes_read, buf.len); 856 | @memcpy(self.header[self.bytes_read..][0..n], buf[0..n]); 857 | self.bytes_read += n; 858 | } 859 | 860 | pub fn isExecutable(self: *FileHeader) bool { 861 | return std.mem.eql(u8, self.header[0..shebang.len], shebang) or 862 | std.mem.eql(u8, self.header[0..elf_magic.len], elf_magic); 863 | } 864 | }; 865 | 866 | fn normalizePathAlloc(arena: Allocator, pkg_path: []const u8) ![]const u8 { 867 | const normalized = try arena.dupe(u8, pkg_path); 868 | if (fs.path.sep == canonical_sep) return normalized; 869 | normalizePath(normalized); 870 | return normalized; 871 | } 872 | 873 | const canonical_sep = fs.path.sep_posix; 874 | const assert = std.debug.assert; 875 | 876 | fn normalizePath(bytes: []u8) void { 877 | assert(fs.path.sep != canonical_sep); 878 | std.mem.replaceScalar(u8, bytes, fs.path.sep, canonical_sep); 879 | } 880 | 881 | fn dumpHashInfo(all_files: []const *const HashedFile) !void { 882 | const stdout = std.io.getStdOut(); 883 | var bw = std.io.bufferedWriter(stdout.writer()); 884 | const w = bw.writer(); 885 | 886 | for (all_files) |hashed_file| { 887 | try w.print("{s}: {s}: {s}\n", .{ 888 | @tagName(hashed_file.kind), 889 | std.fmt.fmtSliceHexLower(&hashed_file.hash), 890 | hashed_file.normalized_path, 891 | }); 892 | } 893 | 894 | try bw.flush(); 895 | } 896 | 897 | /// Caller owns returned memory. 898 | pub fn resolveGlobalCacheDir(allocator: Allocator) ![]u8 { 899 | if (builtin.os.tag == .wasi) 900 | @compileError("on WASI the global cache dir must be resolved with preopens"); 901 | 902 | if (try std.zig.EnvVar.ZIG_GLOBAL_CACHE_DIR.get(allocator)) |value| return value; 903 | 904 | const appname = "zig"; 905 | 906 | if (builtin.os.tag != .windows) { 907 | if (std.zig.EnvVar.XDG_CACHE_HOME.getPosix()) |cache_root| { 908 | return fs.path.join(allocator, &[_][]const u8{ cache_root, appname }); 909 | } else if (std.zig.EnvVar.HOME.getPosix()) |home| { 910 | return fs.path.join(allocator, &[_][]const u8{ home, ".cache", appname }); 911 | } 912 | } 913 | 914 | return fs.getAppDataDir(allocator, appname); 915 | } 916 | 917 | const MimeType = enum { 918 | Tar, 919 | TarGz, 920 | TarXz, 921 | TarZst, 922 | Zip, 923 | }; 924 | 925 | fn guessMimeType(url: []const u8) ?MimeType { 926 | if (std.mem.endsWith(u8, url, ".tar")) return .Tar; 927 | if (std.mem.endsWith(u8, url, ".tgz")) return .TarGz; 928 | if (std.mem.endsWith(u8, url, ".tar.gz")) return .TarGz; 929 | if (std.mem.endsWith(u8, url, ".txz")) return .TarXz; 930 | if (std.mem.endsWith(u8, url, ".tar.xz")) return .TarXz; 931 | if (std.mem.endsWith(u8, url, ".tzst")) return .TarZst; 932 | if (std.mem.endsWith(u8, url, ".tar.zst")) return .TarZst; 933 | if (std.mem.endsWith(u8, url, ".zip")) return .Zip; 934 | return null; 935 | } 936 | 937 | /// Caller own returned memory 938 | fn makeTmpDir(allocator: Allocator) ![]const u8 { 939 | const rand_int = std.crypto.random.int(u64); 940 | const tmp_dirname = try std.fmt.allocPrint(allocator, "{s}{s}zigfetch-{s}", .{ 941 | cache_dirname, 942 | fs.path.sep_str, 943 | Manifest.hex64(rand_int), 944 | }); 945 | 946 | try fs.makeDirAbsolute(tmp_dirname); 947 | return tmp_dirname; 948 | } 949 | // 
Recursive directory copy. 950 | fn recursiveDirectoryCopy(allocator: Allocator, dir: fs.Dir, tmp_dir: fs.Dir) anyerror!void { 951 | var it = try dir.walk(allocator); 952 | defer it.deinit(); 953 | while (try it.next()) |entry| { 954 | switch (entry.kind) { 955 | .directory => {}, // omit empty directories 956 | .file => { 957 | dir.copyFile( 958 | entry.path, 959 | tmp_dir, 960 | entry.path, 961 | .{}, 962 | ) catch |err| switch (err) { 963 | error.FileNotFound => { 964 | if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.makePath(dirname); 965 | try dir.copyFile(entry.path, tmp_dir, entry.path, .{}); 966 | }, 967 | else => |e| return e, 968 | }; 969 | }, 970 | .sym_link => { 971 | var buf: [fs.MAX_PATH_BYTES]u8 = undefined; 972 | const link_name = try dir.readLink(entry.path, &buf); 973 | // TODO: if this would create a symlink to outside 974 | // the destination directory, fail with an error instead. 975 | tmp_dir.symLink(link_name, entry.path, .{}) catch |err| switch (err) { 976 | error.FileNotFound => { 977 | if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.makePath(dirname); 978 | try tmp_dir.symLink(link_name, entry.path, .{}); 979 | }, 980 | else => |e| return e, 981 | }; 982 | }, 983 | else => return error.IllegalFileTypeInPackage, 984 | } 985 | } 986 | } 987 | 988 | // Returns true if path exists 989 | fn checkFileExists(path: []const u8) !bool { 990 | fs.cwd().access(path, .{}) catch |e| switch (e) { 991 | error.FileNotFound => return false, 992 | else => return e, 993 | }; 994 | 995 | return true; 996 | } 997 | -------------------------------------------------------------------------------- /src/mod/pretty-table.zig: -------------------------------------------------------------------------------- 1 | const std = @import("std"); 2 | 3 | pub const String = []const u8; 4 | pub fn Row(comptime num: usize) type { 5 | return [num]String; 6 | } 7 | 8 | pub const Separator = struct { 9 | pub const Mode = enum { 10 | ascii, 11 | box, 12 | dos, 13 | }; 14 | 15 | const box = [_][4]String{ 16 | .{ "┌", "─", "┬", "┐" }, 17 | .{ "│", "─", "│", "│" }, 18 | .{ "├", "─", "┼", "┤" }, 19 | .{ "└", "─", "┴", "┘" }, 20 | }; 21 | 22 | const ascii = [_][4]String{ 23 | .{ "+", "-", "+", "+" }, 24 | .{ "|", "-", "|", "|" }, 25 | .{ "+", "-", "+", "+" }, 26 | .{ "+", "-", "+", "+" }, 27 | }; 28 | 29 | const dos = [_][4]String{ 30 | .{ "╔", "═", "╦", "╗" }, 31 | .{ "║", "═", "║", "║" }, 32 | .{ "╠", "═", "╬", "╣" }, 33 | .{ "╚", "═", "╩", "╝" }, 34 | }; 35 | 36 | const Position = enum { First, Text, Sep, Last }; 37 | 38 | fn get(mode: Mode, row_pos: Position, col_pos: Position) []const u8 { 39 | const sep_table = switch (mode) { 40 | .ascii => ascii, 41 | .box => box, 42 | .dos => dos, 43 | }; 44 | 45 | return sep_table[@intFromEnum(row_pos)][@intFromEnum(col_pos)]; 46 | } 47 | }; 48 | 49 | pub fn Table(comptime len: usize) type { 50 | return struct { 51 | header: ?Row(len) = null, 52 | footer: ?Row(len) = null, 53 | rows: []const Row(len), 54 | mode: Separator.Mode = .ascii, 55 | padding: usize = 0, 56 | 57 | const Self = @This(); 58 | 59 | fn writeRowDelimiter(self: Self, writer: anytype, row_pos: Separator.Position, col_lens: [len]usize) !void { 60 | inline for (0..len, col_lens) |col_idx, max_len| { 61 | const first_col = col_idx == 0; 62 | if (first_col) { 63 | try writer.writeAll(Separator.get(self.mode, row_pos, .First)); 64 | } else { 65 | try writer.writeAll(Separator.get(self.mode, row_pos, .Sep)); 66 | } 67 | 68 | for (0..max_len) |_| { 69 | try 
writer.writeAll(Separator.get(self.mode, row_pos, .Text)); 70 | } 71 | } 72 | 73 | try writer.writeAll(Separator.get(self.mode, row_pos, .Last)); 74 | try writer.writeAll("\n"); 75 | } 76 | 77 | fn writeRow( 78 | self: Self, 79 | writer: anytype, 80 | row: []const String, 81 | col_lens: [len]usize, 82 | ) !void { 83 | const m = self.mode; 84 | for (row, col_lens, 0..) |column, col_len, col_idx| { 85 | const first_col = col_idx == 0; 86 | if (first_col) { 87 | try writer.writeAll(Separator.get(m, .Text, .First)); 88 | } else { 89 | try writer.writeAll(Separator.get(m, .Text, .Sep)); 90 | } 91 | 92 | try writer.writeAll(column); 93 | 94 | const left: usize = col_len - column.len; 95 | for (0..left) |_| { 96 | try writer.writeAll(" "); 97 | } 98 | } 99 | try writer.writeAll(Separator.get(m, .Text, .Last)); 100 | try writer.writeAll("\n"); 101 | } 102 | 103 | fn calculateColumnLens(self: Self) [len]usize { 104 | var lens = std.mem.zeroes([len]usize); 105 | if (self.header) |header| { 106 | for (header, &lens) |column, *n| { 107 | n.* = column.len; 108 | } 109 | } 110 | 111 | for (self.rows) |row| { 112 | for (row, &lens) |col, *n| { 113 | n.* = @max(col.len, n.*); 114 | } 115 | } 116 | 117 | if (self.footer) |footer| { 118 | for (footer, &lens) |col, *n| { 119 | n.* = @max(col.len, n.*); 120 | } 121 | } 122 | 123 | for (&lens) |*n| { 124 | n.* += self.padding; 125 | } 126 | return lens; 127 | } 128 | 129 | pub fn format( 130 | self: Self, 131 | comptime fmt: String, 132 | options: std.fmt.FormatOptions, 133 | writer: anytype, 134 | ) !void { 135 | _ = options; 136 | _ = fmt; 137 | _ = options; 138 | _ = fmt; 139 | 140 | const column_lens = self.calculateColumnLens(); 141 | 142 | try self.writeRowDelimiter(writer, .First, column_lens); 143 | if (self.header) |header| { 144 | try self.writeRow( 145 | writer, 146 | &header, 147 | column_lens, 148 | ); 149 | } 150 | 151 | try self.writeRowDelimiter(writer, .Sep, column_lens); 152 | for (self.rows) |row| { 153 | try self.writeRow(writer, &row, column_lens); 154 | } 155 | 156 | if (self.footer) |footer| { 157 | try self.writeRowDelimiter(writer, .Sep, column_lens); 158 | try self.writeRow(writer, &footer, column_lens); 159 | } 160 | 161 | try self.writeRowDelimiter(writer, .Last, column_lens); 162 | } 163 | }; 164 | } 165 | 166 | test "normal usage" { 167 | const t = Table(2){ 168 | .header = [_]String{ "Version", "Date" }, 169 | .rows = &[_][2]String{ 170 | .{ "0.7.1", "2020-12-13" }, 171 | .{ "0.7.0", "2020-11-08" }, 172 | .{ "0.6.0", "2020-04-13" }, 173 | .{ "0.5.0", "2019-09-30" }, 174 | }, 175 | .footer = null, 176 | }; 177 | 178 | var out = std.ArrayList(u8).init(std.testing.allocator); 179 | defer out.deinit(); 180 | try out.writer().print("{}", .{t}); 181 | 182 | try std.testing.expectEqualStrings( 183 | \\+-------+----------+ 184 | \\|Version|Date | 185 | \\+-------+----------+ 186 | \\|0.7.1 |2020-12-13| 187 | \\|0.7.0 |2020-11-08| 188 | \\|0.6.0 |2020-04-13| 189 | \\|0.5.0 |2019-09-30| 190 | \\+-------+----------+ 191 | \\ 192 | , out.items); 193 | } 194 | 195 | test "footer usage" { 196 | const t = Table(2){ 197 | .header = [_]String{ "Language", "Files" }, 198 | .rows = &[_][2]String{ 199 | .{ "Zig", "3" }, 200 | .{ "Python", "2" }, 201 | }, 202 | .footer = [2]String{ "Total", "5" }, 203 | }; 204 | 205 | var out = std.ArrayList(u8).init(std.testing.allocator); 206 | defer out.deinit(); 207 | try out.writer().print("{}", .{t}); 208 | 209 | try std.testing.expectEqualStrings( 210 | \\+--------+-----+ 211 | \\|Language|Files| 212 | 
\\+--------+-----+ 213 | \\|Zig |3 | 214 | \\|Python |2 | 215 | \\+--------+-----+ 216 | \\|Total |5 | 217 | \\+--------+-----+ 218 | \\ 219 | , out.items); 220 | } 221 | -------------------------------------------------------------------------------- /tests/test.c: -------------------------------------------------------------------------------- 1 | ///* 2 | /**/ 3 | /* 4 | 5 | 6 | */ 7 | int main() { 8 | 9 | } -------------------------------------------------------------------------------- /tests/test.py: -------------------------------------------------------------------------------- 1 | # define main function to print out something 2 | def main(): 3 | i = 1 4 | max = 10 5 | while (i < max): 6 | print(i) 7 | i = i + 1 8 | 9 | # call function main 10 | main() 11 | -------------------------------------------------------------------------------- /tests/test.rb: -------------------------------------------------------------------------------- 1 | # typed: true 2 | # frozen_string_literal: true 3 | 4 | print "How old are you ? " 5 | age = gets.chomp 6 | print "How tall are you ?" 7 | height = gets.chomp 8 | puts " You are #{age} year old and your height is #{height} cms" 9 | -------------------------------------------------------------------------------- /tests/test.zig: -------------------------------------------------------------------------------- 1 | //! `yes` unix command in Zig, optimized for speed 2 | //! Reference to: https://github.com/cgati/yes 3 | 4 | const std = @import("std"); 5 | 6 | const BUFFER_CAP = 64 * 1024; 7 | 8 | fn fillBuffer(buf: *[BUFFER_CAP]u8, word: []const u8) []const u8 { 9 | if (word.len > buf.len / 2) { 10 | return word; 11 | } 12 | 13 | std.mem.copy(u8, buf, word); 14 | var buffer_size = word.len; 15 | while (buffer_size < buf.len / 2) { 16 | std.mem.copy(u8, buf[buffer_size..], buf[0..buffer_size]); 17 | buffer_size *= 2; 18 | } 19 | 20 | return buf[0..buffer_size]; 21 | } 22 | 23 | pub fn main() !void { 24 | var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); 25 | defer arena.deinit(); 26 | const allocator = arena.allocator(); 27 | 28 | var args = try std.process.argsWithAllocator(allocator); 29 | _ = args.next(); 30 | const word = if (args.next()) |arg| x: { 31 | var buf = std.ArrayList(u8).init(allocator); 32 | try buf.appendSlice(arg); 33 | try buf.append('\n'); 34 | break :x buf.items; 35 | } else "y\n"; 36 | 37 | var buffer: [BUFFER_CAP]u8 = undefined; 38 | const body = fillBuffer(&buffer, word); 39 | const stdout = std.io.getStdOut(); 40 | var writer = stdout.writer(); 41 | while (true) { 42 | try writer.writeAll(body); 43 | } 44 | } 45 | --------------------------------------------------------------------------------