├── .cargo └── config.toml ├── .github ├── FUNDING.yml ├── renovate.json └── workflows │ ├── CI.yml │ └── lint.yml ├── .gitignore ├── .gitmodules ├── .npmignore ├── .prettierignore ├── .yarn └── releases │ └── yarn-4.3.1.cjs ├── .yarnrc.yml ├── .zed └── tasks.json ├── Cargo.toml ├── LICENSE ├── README.md ├── __test__ ├── index.spec.ts ├── package.json ├── tsconfig.json └── uuid.spec.ts ├── assets └── logo.png ├── benchmark ├── bench.ts ├── package.json └── tsconfig.json ├── browser.js ├── build.rs ├── examples ├── auth.mts ├── basic.mts ├── batch-statements.mts ├── custom-types │ ├── bigint.mts │ ├── double.mts │ ├── floats.mts │ ├── list.mts │ ├── map.mts │ ├── set.mts │ ├── tuple.mts │ ├── udt.mts │ ├── uuid.mts │ └── varint.mts ├── fetch-schema.mts ├── lwt.mts ├── prepared.mts ├── tls.mts ├── tracing.mts └── tsconfig.json ├── index.d.ts ├── index.js ├── npm ├── android-arm-eabi │ ├── README.md │ └── package.json ├── android-arm64 │ ├── README.md │ └── package.json ├── darwin-arm64 │ ├── README.md │ └── package.json ├── darwin-universal │ ├── README.md │ └── package.json ├── darwin-x64 │ ├── README.md │ └── package.json ├── freebsd-x64 │ ├── README.md │ └── package.json ├── linux-arm-gnueabihf │ ├── README.md │ └── package.json ├── linux-arm64-gnu │ ├── README.md │ └── package.json ├── linux-arm64-musl │ ├── README.md │ └── package.json ├── linux-x64-gnu │ ├── README.md │ └── package.json ├── linux-x64-musl │ ├── README.md │ └── package.json ├── wasm32-wasi │ ├── README.md │ └── package.json ├── win32-arm64-msvc │ ├── README.md │ └── package.json ├── win32-ia32-msvc │ ├── README.md │ └── package.json └── win32-x64-msvc │ ├── README.md │ └── package.json ├── package.json ├── pnpm-lock.yaml ├── rust-analyzer.json ├── rust-toolchain.toml ├── rustfmt.toml ├── scripts └── fix-files.mjs ├── simple-test.js ├── src ├── cluster │ ├── cluster_config │ │ ├── compression.rs │ │ └── mod.rs │ ├── execution_profile │ │ ├── consistency.rs │ │ ├── mod.rs │ │ └── 
serial_consistency.rs │ ├── mod.rs │ └── scylla_cluster.rs ├── error.rs ├── helpers │ ├── cql_value_bridge.rs │ ├── mod.rs │ ├── query_parameter.rs │ ├── query_results.rs │ └── to_cql_value.rs ├── lib.rs ├── query │ ├── batch_statement.rs │ ├── mod.rs │ ├── scylla_prepared_statement.rs │ └── scylla_query.rs ├── session │ ├── metrics.rs │ ├── mod.rs │ ├── scylla_session.rs │ └── topology.rs └── types │ ├── decimal.rs │ ├── double.rs │ ├── duration.rs │ ├── float.rs │ ├── list.rs │ ├── map.rs │ ├── mod.rs │ ├── set.rs │ ├── tracing.rs │ ├── uuid.rs │ └── varint.rs ├── tsconfig.json ├── wasi-worker-browser.mjs ├── wasi-worker.mjs └── yarn.lock /.cargo/config.toml: -------------------------------------------------------------------------------- 1 | [target.x86_64-pc-windows-msvc] 2 | rustflags = ["-C", "target-feature=+crt-static"] 3 | 4 | [target.i686-pc-windows-msvc] 5 | rustflags = ["-C", "target-feature=+crt-static"] -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [daniel-boll, danielhe4rt, matozinho] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 12 | polar: # Replace with a single Polar username 13 | buy_me_a_coffee: # Replace with a single Buy Me a Coffee username 14 | 
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 15 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": ["config:base", "group:allNonMajor", ":preserveSemverRanges", ":disablePeerDependencies"], 4 | "labels": ["dependencies"], 5 | "packageRules": [ 6 | { 7 | "matchPackageNames": ["@napi/cli", "napi", "napi-build", "napi-derive"], 8 | "addLabels": ["napi-rs"], 9 | "groupName": "napi-rs" 10 | }, 11 | { 12 | "matchPackagePatterns": ["^eslint", "^@typescript-eslint"], 13 | "groupName": "linter" 14 | } 15 | ], 16 | "commitMessagePrefix": "chore: ", 17 | "commitMessageAction": "bump up", 18 | "commitMessageTopic": "{{depName}} version", 19 | "ignoreDeps": [] 20 | } 21 | -------------------------------------------------------------------------------- /.github/workflows/CI.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | env: 3 | DEBUG: napi:* 4 | APP_NAME: scylladb 5 | MACOSX_DEPLOYMENT_TARGET: "10.13" 6 | CARGO_INCREMENTAL: "1" 7 | permissions: 8 | contents: write 9 | id-token: write 10 | "on": 11 | push: 12 | branches: 13 | - main 14 | tags-ignore: 15 | - "**" 16 | paths-ignore: 17 | - "**/*.md" 18 | - LICENSE 19 | - "**/*.gitignore" 20 | - .editorconfig 21 | - docs/** 22 | pull_request: null 23 | concurrency: 24 | group: ${{ github.workflow }}-${{ github.ref }} 25 | cancel-in-progress: true 26 | jobs: 27 | check-version-in-commit: 28 | runs-on: ubuntu-latest 29 | outputs: 30 | should_publish: ${{ steps.check-commit.outputs.should_publish }} 31 | steps: 32 | - name: Checkout code 33 | uses: actions/checkout@v4 34 | - name: Check commit message for version 35 | id: check-commit 36 | run: | 37 | commit_msg=$(git log --format=%B -n 1) 38 | if [[ $commit_msg =~ 
^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then 39 | echo "should_publish=true" >> $GITHUB_OUTPUT 40 | else 41 | echo "should_publish=false" >> $GITHUB_OUTPUT 42 | fi 43 | build: 44 | if: needs.check-version-in-commit.outputs.should_publish == 'true' 45 | needs: 46 | - check-version-in-commit 47 | strategy: 48 | fail-fast: false 49 | matrix: 50 | settings: 51 | - host: macos-latest 52 | target: x86_64-apple-darwin 53 | build: pnpm build --target x86_64-apple-darwin 54 | # - host: windows-latest 55 | # build: pnpm build --target x86_64-pc-windows-msvc 56 | # target: x86_64-pc-windows-msvc 57 | # - host: windows-latest 58 | # build: pnpm build --target i686-pc-windows-msvc 59 | # target: i686-pc-windows-msvc 60 | - host: ubuntu-latest 61 | target: x86_64-unknown-linux-gnu 62 | build: pnpm build --target x86_64-unknown-linux-gnu --use-napi-cross 63 | - host: ubuntu-latest 64 | target: x86_64-unknown-linux-musl 65 | build: pnpm build --target x86_64-unknown-linux-musl -x 66 | - host: macos-latest 67 | target: aarch64-apple-darwin 68 | build: pnpm build --target aarch64-apple-darwin 69 | - host: ubuntu-latest 70 | target: aarch64-unknown-linux-gnu 71 | build: pnpm build --target aarch64-unknown-linux-gnu --use-napi-cross 72 | - host: ubuntu-latest 73 | target: armv7-unknown-linux-gnueabihf 74 | build: pnpm build --target armv7-unknown-linux-gnueabihf --use-napi-cross 75 | - host: ubuntu-latest 76 | target: aarch64-linux-android 77 | build: pnpm build --target aarch64-linux-android 78 | - host: ubuntu-latest 79 | target: armv7-linux-androideabi 80 | build: pnpm build --target armv7-linux-androideabi 81 | - host: ubuntu-latest 82 | target: aarch64-unknown-linux-musl 83 | build: pnpm build --target aarch64-unknown-linux-musl -x 84 | # - host: windows-latest 85 | # target: aarch64-pc-windows-msvc 86 | # build: pnpm build --target aarch64-pc-windows-msvc 87 | # - host: ubuntu-latest 88 | # target: wasm32-wasip1-threads 89 | # build: pnpm build --target wasm32-wasip1-threads 90 | name: 
stable - ${{ matrix.settings.target }} - node@20 91 | runs-on: ${{ matrix.settings.host }} 92 | steps: 93 | - uses: actions/checkout@v4 94 | - name: setup pnpm 95 | uses: pnpm/action-setup@v4 96 | - name: Setup node 97 | uses: actions/setup-node@v4 98 | with: 99 | node-version: 20 100 | cache: pnpm 101 | - name: Install 102 | uses: dtolnay/rust-toolchain@stable 103 | with: 104 | toolchain: stable 105 | targets: ${{ matrix.settings.target }} 106 | - name: Cache cargo 107 | uses: actions/cache@v4 108 | with: 109 | path: | 110 | ~/.cargo/registry/index/ 111 | ~/.cargo/registry/cache/ 112 | ~/.cargo/git/db/ 113 | ~/.napi-rs 114 | .cargo-cache 115 | target/ 116 | key: ${{ matrix.settings.target }}-cargo-${{ matrix.settings.host }} 117 | - uses: goto-bus-stop/setup-zig@v2 118 | if: ${{ contains(matrix.settings.target, 'musl') }} 119 | with: 120 | version: 0.13.0 121 | - name: Install cargo-zigbuild 122 | uses: taiki-e/install-action@v2 123 | if: ${{ contains(matrix.settings.target, 'musl') }} 124 | env: 125 | GITHUB_TOKEN: ${{ github.token }} 126 | with: 127 | tool: cargo-zigbuild 128 | - name: Setup toolchain 129 | run: ${{ matrix.settings.setup }} 130 | if: ${{ matrix.settings.setup }} 131 | shell: bash 132 | - name: Install dependencies 133 | run: pnpm install 134 | - name: Setup node x86 135 | uses: actions/setup-node@v4 136 | if: matrix.settings.target == 'i686-pc-windows-msvc' 137 | with: 138 | node-version: 20 139 | cache: pnpm 140 | architecture: x86 141 | - name: Build 142 | run: ${{ matrix.settings.build }} 143 | shell: bash 144 | - name: Upload artifact 145 | uses: actions/upload-artifact@v4 146 | if: matrix.settings.target != 'wasm32-wasip1-threads' 147 | with: 148 | name: bindings-${{ matrix.settings.target }} 149 | path: "*.node" 150 | if-no-files-found: error 151 | 152 | - name: Upload artifact 153 | uses: actions/upload-artifact@v4 154 | if: matrix.settings.target == 'wasm32-wasip1-threads' 155 | with: 156 | name: bindings-${{ matrix.settings.target }} 
157 | path: "*.wasm" 158 | if-no-files-found: error 159 | 160 | # build-freebsd: 161 | # runs-on: ubuntu-latest 162 | # name: Build FreeBSD 163 | # if: needs.check-version-in-commit.outputs.should_publish == 'true' 164 | # needs: 165 | # - check-version-in-commit 166 | # steps: 167 | # - uses: actions/checkout@v4 168 | # - name: Build 169 | # id: build 170 | # uses: cross-platform-actions/action@v0.25.0 171 | # env: 172 | # DEBUG: napi:* 173 | # RUSTUP_IO_THREADS: 1 174 | # with: 175 | # operating_system: freebsd 176 | # version: "14.1" 177 | # memory: 8G 178 | # cpu_count: 3 179 | # environment_variables: "DEBUG RUSTUP_IO_THREADS" 180 | # shell: bash 181 | # run: | 182 | # sudo pkg install -y -f curl node libnghttp2 npm 183 | # sudo npm install -g corepack 184 | # curl https://sh.rustup.rs -sSf --output rustup.sh 185 | # sh rustup.sh -y --profile minimal --default-toolchain beta 186 | # corepack prepare 187 | # corepack enable 188 | # source "$HOME/.cargo/env" 189 | # echo "~~~~ rustc --version ~~~~" 190 | # rustc --version 191 | # echo "~~~~ node -v ~~~~" 192 | # node -v 193 | # echo "~~~~ pnpm --version ~~~~" 194 | # pnpm --version 195 | # pwd 196 | # ls -lah 197 | # whoami 198 | # env 199 | # freebsd-version 200 | # pnpm install 201 | # pnpm build 202 | # rm -rf node_modules 203 | # rm -rf target 204 | # - name: Upload artifact 205 | # uses: actions/upload-artifact@v4 206 | # with: 207 | # name: bindings-freebsd 208 | # path: ${{ env.APP_NAME }}.*.node 209 | # if-no-files-found: error 210 | 211 | # test-macOS-windows-binding: 212 | # name: Test bindings on ${{ matrix.settings.target }} - node@${{ matrix.node }} 213 | # needs: 214 | # - build 215 | # strategy: 216 | # fail-fast: false 217 | # matrix: 218 | # settings: 219 | # - host: windows-latest 220 | # target: x86_64-pc-windows-msvc 221 | # architecture: x64 222 | # - host: macos-latest 223 | # target: x86_64-apple-darwin 224 | # architecture: x64 225 | # - host: macos-latest 226 | # target: 
aarch64-apple-darwin 227 | # architecture: arm64 228 | # node: 229 | # - "18" 230 | # - "20" 231 | # runs-on: ${{ matrix.settings.host }} 232 | # steps: 233 | # - uses: actions/checkout@v4 234 | # - name: setup pnpm 235 | # uses: pnpm/action-setup@v4 236 | # - name: Setup node 237 | # uses: actions/setup-node@v4 238 | # with: 239 | # node-version: ${{ matrix.node }} 240 | # cache: pnpm 241 | # architecture: ${{ matrix.settings.architecture }} 242 | # - name: Install dependencies 243 | # run: pnpm install 244 | # - name: Download artifacts 245 | # uses: actions/download-artifact@v4 246 | # with: 247 | # name: bindings-${{ matrix.settings.target }} 248 | # path: . 249 | # - name: List packages 250 | # run: ls -R . 251 | # shell: bash 252 | # - name: Test bindings 253 | # run: pnpm test 254 | # test-linux-binding: 255 | # name: Test ${{ matrix.target }} - node@${{ matrix.node }} 256 | # needs: 257 | # - build 258 | # strategy: 259 | # fail-fast: false 260 | # matrix: 261 | # target: 262 | # - x86_64-unknown-linux-gnu 263 | # - x86_64-unknown-linux-musl 264 | # - aarch64-unknown-linux-gnu 265 | # - aarch64-unknown-linux-musl 266 | # - armv7-unknown-linux-gnueabihf 267 | # node: 268 | # - "18" 269 | # - "20" 270 | # runs-on: ubuntu-latest 271 | # steps: 272 | # - uses: actions/checkout@v4 273 | # - name: setup pnpm 274 | # uses: pnpm/action-setup@v4 275 | # - name: Setup node 276 | # uses: actions/setup-node@v4 277 | # with: 278 | # node-version: ${{ matrix.node }} 279 | # cache: pnpm 280 | # - name: Output docker params 281 | # id: docker 282 | # run: | 283 | # node -e " 284 | # if ('${{ matrix.target }}'.startsWith('aarch64')) { 285 | # console.log('PLATFORM=linux/arm64') 286 | # } else if ('${{ matrix.target }}'.startsWith('armv7')) { 287 | # console.log('PLATFORM=linux/arm/v7') 288 | # } else { 289 | # console.log('PLATFORM=linux/amd64') 290 | # } 291 | # " >> $GITHUB_OUTPUT 292 | # node -e " 293 | # if ('${{ matrix.target }}'.endsWith('-musl')) { 294 | # 
console.log('IMAGE=node:${{ matrix.node }}-alpine') 295 | # } else { 296 | # console.log('IMAGE=node:${{ matrix.node }}-slim') 297 | # } 298 | # " >> $GITHUB_OUTPUT 299 | # echo "PNPM_STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_OUTPUT 300 | # # use --force to download the all platform/arch dependencies 301 | # - name: Install dependencies 302 | # run: pnpm install --force 303 | # - name: Download artifacts 304 | # uses: actions/download-artifact@v4 305 | # with: 306 | # name: bindings-${{ matrix.target }} 307 | # path: . 308 | # - name: List packages 309 | # run: ls -R . 310 | # shell: bash 311 | # - name: Set up QEMU 312 | # uses: docker/setup-qemu-action@v3 313 | # with: 314 | # platforms: all 315 | # - run: docker run --rm --privileged multiarch/qemu-user-static --reset -p yes 316 | # - name: Test bindings 317 | # uses: addnab/docker-run-action@v3 318 | # with: 319 | # image: ${{ steps.docker.outputs.IMAGE }} 320 | # options: -v ${{ steps.docker.outputs.PNPM_STORE_PATH }}:${{ steps.docker.outputs.PNPM_STORE_PATH }} -v ${{ github.workspace }}:${{ github.workspace }} -w ${{ github.workspace }} --platform ${{ steps.docker.outputs.PLATFORM }} 321 | # run: npm run test 322 | # test-wasi: 323 | # name: Test WASI target 324 | # needs: 325 | # - build 326 | # runs-on: ubuntu-latest 327 | # steps: 328 | # - uses: actions/checkout@v4 329 | # - name: setup pnpm 330 | # uses: pnpm/action-setup@v4 331 | # - name: Setup node 332 | # uses: actions/setup-node@v4 333 | # with: 334 | # node-version: 20 335 | # cache: pnpm 336 | # - name: Install dependencies 337 | # run: pnpm install 338 | # - name: Download artifacts 339 | # uses: actions/download-artifact@v4 340 | # with: 341 | # name: bindings-wasm32-wasip1-threads 342 | # path: . 343 | # - name: List packages 344 | # run: ls -R . 
345 | # shell: bash 346 | # - name: Test bindings 347 | # run: pnpm test 348 | # env: 349 | # NAPI_RS_FORCE_WASI: 1 350 | publish: 351 | name: Publish 352 | runs-on: ubuntu-latest 353 | needs: 354 | - build 355 | # - build-freebsd 356 | # - test-macOS-windows-binding 357 | # - test-linux-binding 358 | # - test-wasi 359 | steps: 360 | - uses: actions/checkout@v4 361 | - name: setup pnpm 362 | uses: pnpm/action-setup@v4 363 | - name: Setup node 364 | uses: actions/setup-node@v4 365 | with: 366 | node-version: 20 367 | cache: pnpm 368 | - name: Install dependencies 369 | run: pnpm install 370 | - name: Download all artifacts 371 | uses: actions/download-artifact@v4 372 | with: 373 | path: artifacts 374 | - name: create npm dirs 375 | run: pnpm napi create-npm-dirs 376 | - name: Move artifacts 377 | run: pnpm artifacts 378 | - name: List packages 379 | run: ls -R ./npm 380 | shell: bash 381 | - name: Publish 382 | run: | 383 | npm config set provenance true 384 | if git log -1 --pretty=%B | grep "^v\?[0-9]\+\.[0-9]\+\.[0-9]\+$"; 385 | then 386 | echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ~/.npmrc 387 | npm publish --access public 388 | elif git log -1 --pretty=%B | grep "^v\?[0-9]\+\.[0-9]\+\.[0-9]\+"; 389 | then 390 | echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ~/.npmrc 391 | npm publish --tag next --access public 392 | else 393 | echo "Not a release, skipping publish" 394 | fi 395 | env: 396 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 397 | NPM_TOKEN: ${{ secrets.NPM_TOKEN }} 398 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | tags-ignore: 8 | - "**" 9 | pull_request: 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.ref }} 12 | cancel-in-progress: true 13 | jobs: 14 | lint: 15 | name: Lint 16 | runs-on: ubuntu-latest 17 | 
steps: 18 | - uses: actions/checkout@v4 19 | - name: setup pnpm 20 | uses: pnpm/action-setup@v4 21 | - name: Setup node 22 | uses: actions/setup-node@v4 23 | with: 24 | node-version: 20 25 | cache: "pnpm" 26 | 27 | - name: Install 28 | uses: dtolnay/rust-toolchain@stable 29 | with: 30 | components: clippy, rustfmt 31 | 32 | - name: Install dependencies 33 | run: pnpm install 34 | 35 | - name: ESLint 36 | run: pnpm lint 37 | 38 | - name: Cargo fmt 39 | run: cargo fmt -- --check 40 | 41 | - name: Clippy 42 | run: cargo clippy 43 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ### Created by https://www.gitignore.io 2 | ### Rust ### 3 | # Generated by Cargo 4 | # will have compiled files and executables 5 | debug/ 6 | target/ 7 | 8 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 9 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 10 | Cargo.lock 11 | 12 | # These are backup files generated by rustfmt 13 | **/*.rs.bk 14 | 15 | # MSVC Windows builds of rustc generate these, which store debugging information 16 | *.pdb 17 | 18 | ### Created by https://www.gitignore.io 19 | ### Node ### 20 | # Logs 21 | logs 22 | *.log 23 | npm-debug.log* 24 | yarn-debug.log* 25 | yarn-error.log* 26 | lerna-debug.log* 27 | .pnpm-debug.log* 28 | 29 | # Diagnostic reports (https://nodejs.org/api/report.html) 30 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 31 | 32 | # Runtime data 33 | pids 34 | *.pid 35 | *.seed 36 | *.pid.lock 37 | 38 | # Directory for instrumented libs generated by jscoverage/JSCover 39 | lib-cov 40 | 41 | # Coverage directory used by tools like istanbul 42 | coverage 43 | *.lcov 44 | 45 | # nyc test coverage 46 | .nyc_output 47 | 48 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 49 | .grunt 50 | 51 | # Bower dependency 
directory (https://bower.io/) 52 | bower_components 53 | 54 | # node-waf configuration 55 | .lock-wscript 56 | 57 | # Compiled binary addons (https://nodejs.org/api/addons.html) 58 | build/Release 59 | 60 | # Dependency directories 61 | node_modules/ 62 | jspm_packages/ 63 | 64 | # Snowpack dependency directory (https://snowpack.dev/) 65 | web_modules/ 66 | 67 | # TypeScript cache 68 | *.tsbuildinfo 69 | 70 | # Optional npm cache directory 71 | .npm 72 | 73 | # Optional eslint cache 74 | .eslintcache 75 | 76 | # Optional stylelint cache 77 | .stylelintcache 78 | 79 | # Microbundle cache 80 | .rpt2_cache/ 81 | .rts2_cache_cjs/ 82 | .rts2_cache_es/ 83 | .rts2_cache_umd/ 84 | 85 | # Optional REPL history 86 | .node_repl_history 87 | 88 | # Output of 'npm pack' 89 | *.tgz 90 | 91 | # Yarn Integrity file 92 | .yarn-integrity 93 | 94 | # dotenv environment variable files 95 | .env 96 | .env.development.local 97 | .env.test.local 98 | .env.production.local 99 | .env.local 100 | 101 | # parcel-bundler cache (https://parceljs.org/) 102 | .cache 103 | .parcel-cache 104 | 105 | # Next.js build output 106 | .next 107 | out 108 | 109 | # Nuxt.js build / generate output 110 | .nuxt 111 | dist 112 | 113 | # Gatsby files 114 | .cache/ 115 | # Comment in the public line in if your project uses Gatsby and not Next.js 116 | # https://nextjs.org/blog/next-9-1#public-directory-support 117 | # public 118 | 119 | # vuepress build output 120 | .vuepress/dist 121 | 122 | # vuepress v2.x temp and cache directory 123 | .temp 124 | .cache 125 | 126 | # Docusaurus cache and generated files 127 | .docusaurus 128 | 129 | # Serverless directories 130 | .serverless/ 131 | 132 | # FuseBox cache 133 | .fusebox/ 134 | 135 | # DynamoDB Local files 136 | .dynamodb/ 137 | 138 | # TernJS port file 139 | .tern-port 140 | 141 | # Stores VSCode versions used for testing VSCode extensions 142 | .vscode-test 143 | 144 | # yarn v2 145 | .yarn/cache 146 | .yarn/unplugged 147 | .yarn/build-state.yml 148 | 
.yarn/install-state.gz 149 | .pnp.* 150 | 151 | *.node 152 | *.wasm 153 | scylladb.wasi* 154 | 155 | ### Node Patch ### 156 | # Serverless Webpack directories 157 | .webpack/ 158 | 159 | # Optional stylelint cache 160 | .stylelintcache 161 | 162 | # SvelteKit build / generate output 163 | .svelte-kit 164 | 165 | ### Created by https://www.gitignore.io 166 | ### macOS ### 167 | # General 168 | .DS_Store 169 | .AppleDouble 170 | .LSOverride 171 | 172 | # Icon must end with two \r 173 | Icon 174 | 175 | # Thumbnails 176 | ._* 177 | 178 | # Files that might appear in the root of a volume 179 | .DocumentRevisions-V100 180 | .fseventsd 181 | .Spotlight-V100 182 | .TemporaryItems 183 | .Trashes 184 | .VolumeIcon.icns 185 | .com.apple.timemachine.donotpresent 186 | 187 | # Directories potentially created on remote AFP share 188 | .AppleDB 189 | .AppleDesktop 190 | Network Trash Folder 191 | Temporary Items 192 | .apdisk 193 | 194 | ### macOS Patch ### 195 | # iCloud generated files 196 | *.icloud 197 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "examples/carepet-nestjs"] 2 | path = examples/carepet-nestjs 3 | url = https://github.com/daniel-Boll/carepet-nestjs.git 4 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | .cargo 4 | .github 5 | npm 6 | .eslintrc 7 | .prettierignore 8 | rustfmt.toml 9 | yarn.lock 10 | *.node 11 | .yarn 12 | __test__ 13 | renovate.json 14 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | target 2 | .yarn 3 | /*.js 4 | /*.mjs 5 | /*.cjs 6 | /index.js 7 | index.d.ts 8 | pnpm-lock.yaml 
-------------------------------------------------------------------------------- /.yarnrc.yml: -------------------------------------------------------------------------------- 1 | compressionLevel: mixed 2 | 3 | enableGlobalCache: false 4 | 5 | nodeLinker: node-modules 6 | 7 | yarnPath: .yarn/releases/yarn-4.3.1.cjs 8 | -------------------------------------------------------------------------------- /.zed/tasks.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "label": "Start docker container", 4 | "command": "docker run --name scylladb -d --rm -it -p 9042:9042 scylladb/scylla --smp 2", 5 | "reveal": "never", 6 | "hide": "on_success" 7 | }, 8 | { 9 | "label": "Build driver", 10 | "command": "yarn build", 11 | "hide": "on_success" 12 | }, 13 | { 14 | "label": "Run example udt", 15 | "command": "yarn tsx ./examples/udt.mts", 16 | "reveal": "always", 17 | "hide": "never", 18 | "shell": "system" 19 | } 20 | ] 21 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ["Daniel Boll "] 3 | edition = "2021" 4 | name = "scylladb-driver" 5 | version = "0.1.0" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [lib] 10 | crate-type = ["cdylib"] 11 | 12 | [dependencies] 13 | napi = { version = "2", default-features = false, features = [ 14 | "napi8", 15 | "async", 16 | "serde", 17 | "serde_json", 18 | "serde-json", 19 | ] } 20 | napi-derive = "2" 21 | tokio = { version = "1", features = ["full"] } 22 | scylla = { version = "0.13.1", features = [ 23 | "ssl", 24 | "full-serialization", 25 | "cloud", 26 | ] } 27 | uuid = { version = "1.10", features = ["serde", "v4", "fast-rng"] } 28 | serde_json = "1.0" 29 | serde = { version = "1.0", features = ["derive"] } 30 | openssl = { version = "0.10", features = ["vendored"] } 
31 | 32 | [build-dependencies] 33 | napi-build = "2" 34 | 35 | [profile.release] 36 | lto = true 37 | codegen-units = 1 38 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Daniel Boll 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | 3 | [![ScyllaDB Unofficial Discord Server](https://img.shields.io/badge/ScyllaDB_Developers-Discord_Server-4C388C)](https://discord.gg/CzCT4cyRrr) 4 | 5 |
6 | 7 |
8 | 9 | Scylla Nodejs Driver 10 | 11 | 12 |

🚀 ScyllaDB NodeJS Driver 🧪🔧

13 |
14 | 15 | ## ⚠️ Disclaimer ⚠️ 16 | 17 | This repository and the associated npm package are currently in a 🐣 pre-release state and are being used for testing 🧪 purposes. They are subject to change without notice 📝. Users are encouraged to use this driver with caution ❗ and not in production environments until the official release. 18 | 19 | ## 🚀 Getting Started 🚀 20 | 21 | These instructions will get you a copy of the project up and running 🏃 on your local machine for development and testing purposes. 22 | 23 | ### 📋 Prerequisites 📋 24 | 25 | - Docker: We use Docker 🐳 to run the Scylla database easily without the need for a complex local setup. 26 | - Node.js: Make sure you have Node.js installed on your system to run JavaScript code. 27 | 28 | ### 🌟 Quickstart 🌟 29 | 30 | 1. **Start ScyllaDB in Docker:** 31 | 32 | Run a ScyllaDB instance using the following Docker command: 33 | 34 | ```bash 35 | docker run --name scylladb -d --rm -it -p 9042:9042 scylladb/scylla --smp 2 36 | ``` 37 | 38 | This command pulls the Scylla image if it's not already present on your system, and starts a new 🌟 container with the Scylla database. 39 | 40 | 2. **Use the JavaScript Driver:** 41 | 42 | Here's a simple script to connect to the database and execute a query: 43 | 44 | ```javascript 45 | import { Cluster } from "@lambda-group/scylladb"; 46 | 47 | const cluster = new Cluster({ 48 | nodes: ["127.0.0.1:9042"], 49 | }); 50 | 51 | const session = await cluster.connect("system_schema"); 52 | 53 | const result = await session.execute("SELECT * FROM scylla_tables limit ?", [1]).catch(console.error); 54 | 55 | console.log(result); 56 | ``` 57 | 58 | This script connects to the ScyllaDB instance running on your machine, performs a query, and logs the result. 
59 | 60 | ### 📥 Installing 📥 61 | 62 | To install this package, use the following command: 63 | 64 | ```bash 65 | npm install @lambda-group/scylladb@latest 66 | ``` 67 | 68 | ## 📚 Examples 📚 69 | 70 | Reference wise you can guide yourself through the [examples/](https://github.com/Daniel-Boll/scylla-javascript-driver/tree/main/examples) folder in the repo. 71 | 72 | ## 🙏 Acknowledgments 🙏 73 | 74 | - Thanks to the developers of ScyllaDB for creating such a high-performance database. 75 | - Thanks to the Rust community for providing the robust `scylla` crate. 76 | - Thanks to the `napi-rs` project for enabling efficient Rust and Node.js integrations. 77 | -------------------------------------------------------------------------------- /__test__/index.spec.ts: -------------------------------------------------------------------------------- 1 | import test from "ava"; 2 | 3 | import { plus100 } from "../index.js"; 4 | 5 | test("sync function from native code", (t) => { 6 | const fixture = 42; 7 | t.is(plus100(fixture), fixture + 100); 8 | }); 9 | -------------------------------------------------------------------------------- /__test__/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "type": "module" 3 | } 4 | -------------------------------------------------------------------------------- /__test__/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../tsconfig.json", 3 | "compilerOptions": { 4 | "module": "NodeNext", 5 | "moduleResolution": "NodeNext", 6 | "outDir": "lib" 7 | }, 8 | "include": ["."], 9 | "exclude": ["lib"] 10 | } 11 | -------------------------------------------------------------------------------- /__test__/uuid.spec.ts: -------------------------------------------------------------------------------- 1 | import test from "ava"; 2 | 3 | import { Uuid } from "../index.js"; 4 | 5 | test("Create a random UUID", (t) => { 6 | const 
uuid = Uuid.randomV4(); 7 | t.is(uuid.toString().length, 36); 8 | }); 9 | 10 | test("Create a UUID from a string", (t) => { 11 | const uuid = Uuid.fromString("123e4567-e89b-12d3-a456-426614174000"); 12 | t.is(uuid.toString(), "123e4567-e89b-12d3-a456-426614174000"); 13 | }); 14 | 15 | test("Should error on creating UUID from malformed string", (t) => { 16 | const error = t.throws( 17 | () => { 18 | throw Uuid.fromString("123e4567-e89b-12d3-a456-42661417400"); 19 | }, 20 | { instanceOf: Error }, 21 | ); 22 | 23 | if (!(error instanceof Error)) { 24 | t.fail("error is not an instance of Error"); 25 | return; 26 | } 27 | 28 | t.is( 29 | error.message, 30 | "Failed to parse UUID: invalid group length in group 4: expected 12, found 11", 31 | ); 32 | }); 33 | 34 | test("Should not error on creating UUID from well-formed string", (t) => { 35 | t.notThrows(() => { 36 | return Uuid.fromString("123e4567-e89b-12d3-a456-426614174000"); 37 | }); 38 | }); 39 | -------------------------------------------------------------------------------- /assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Daniel-Boll/scylla-javascript-driver/dc3a11040a61a0149a200db5d37d3bdde6dd2fd0/assets/logo.png -------------------------------------------------------------------------------- /benchmark/bench.ts: -------------------------------------------------------------------------------- 1 | import { Bench } from "tinybench"; 2 | 3 | import { plus100 } from "../index.js"; 4 | 5 | function add(a: number) { 6 | return a + 100; 7 | } 8 | 9 | const bench = new Bench(); 10 | 11 | bench.add("Native a + 100", () => { 12 | plus100(10); 13 | }); 14 | 15 | bench.add("JavaScript a + 100", () => { 16 | add(10); 17 | }); 18 | 19 | await bench.run(); 20 | 21 | console.table(bench.table()); 22 | -------------------------------------------------------------------------------- /benchmark/package.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "type": "module" 3 | } 4 | -------------------------------------------------------------------------------- /benchmark/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../tsconfig.json", 3 | "compilerOptions": { 4 | "module": "NodeNext", 5 | "moduleResolution": "NodeNext", 6 | "outDir": "lib" 7 | }, 8 | "include": ["."], 9 | "exclude": ["lib"] 10 | } 11 | -------------------------------------------------------------------------------- /browser.js: -------------------------------------------------------------------------------- 1 | export * from '@lambda-group/scylladb-wasm32-wasi' 2 | -------------------------------------------------------------------------------- /build.rs: -------------------------------------------------------------------------------- 1 | extern crate napi_build; 2 | 3 | fn main() { 4 | napi_build::setup(); 5 | } 6 | -------------------------------------------------------------------------------- /examples/auth.mts: -------------------------------------------------------------------------------- 1 | import { Cluster } from "../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? 
["127.0.0.1:9042"]; 4 | 5 | const cluster = new Cluster({ 6 | nodes, 7 | auth: { 8 | username: "cassandra", 9 | password: "cassandra", 10 | }, 11 | }); 12 | 13 | const session = await cluster.connect(); 14 | 15 | await session.execute( 16 | "CREATE KEYSPACE IF NOT EXISTS auth WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", 17 | ); 18 | await session.execute("DROP TABLE IF EXISTS auth.auth"); 19 | 20 | console.log("Ok."); 21 | -------------------------------------------------------------------------------- /examples/basic.mts: -------------------------------------------------------------------------------- 1 | import { Cluster } from "../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? ["127.0.0.1:9042"]; 4 | 5 | console.log(`Connecting to ${nodes}`); 6 | 7 | const cluster = new Cluster({ nodes }); 8 | const session = await cluster.connect(); 9 | 10 | await session.execute( 11 | "CREATE KEYSPACE IF NOT EXISTS basic WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }", 12 | ); 13 | await session.useKeyspace("basic"); 14 | 15 | await session.execute("CREATE TABLE IF NOT EXISTS basic (a int, b int, c text, primary key (a, b))"); 16 | 17 | await session.execute("INSERT INTO basic (a, b, c) VALUES (1, 2, 'abc')"); 18 | await session.execute("INSERT INTO basic (a, b, c) VALUES (?, ?, ?)", [3, 4, "def"]); 19 | 20 | const prepared = await session.prepare("INSERT INTO basic (a, b, c) VALUES (?, 7, ?)"); 21 | await session.execute(prepared, [42, "I'm prepared!"]); 22 | await session.execute(prepared, [43, "I'm prepared 2!"]); 23 | await session.execute(prepared, [44, "I'm prepared 3!"]); 24 | 25 | interface RowData { 26 | a: number; 27 | b: number; 28 | c: string; 29 | } 30 | const result = await session.execute("SELECT a, b, c FROM basic"); 31 | console.log(result); 32 | 33 | const metrics = session.metrics(); 34 | console.log(`Queries requested: ${metrics.getQueriesNum()}`); 35 | 
console.log(`Iter queries requested: ${metrics.getQueriesIterNum()}`); 36 | console.log(`Errors occurred: ${metrics.getErrorsNum()}`); 37 | console.log(`Iter errors occurred: ${metrics.getErrorsIterNum()}`); 38 | console.log(`Average latency: ${metrics.getLatencyAvgMs()}`); 39 | console.log(`99.9 latency percentile: ${metrics.getLatencyPercentileMs(99.9)}`); 40 | -------------------------------------------------------------------------------- /examples/batch-statements.mts: -------------------------------------------------------------------------------- 1 | import { Cluster, BatchStatement, Query, Uuid } from "../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? ["127.0.0.1:9042"]; 4 | 5 | const cluster = new Cluster({ nodes }); 6 | const session = await cluster.connect(); 7 | 8 | const batch = new BatchStatement(); 9 | 10 | await session.execute( 11 | "CREATE KEYSPACE IF NOT EXISTS batch_statements WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }", 12 | ); 13 | await session.useKeyspace("batch_statements"); 14 | await session.execute("CREATE TABLE IF NOT EXISTS users (id UUID PRIMARY KEY, name TEXT)"); 15 | 16 | const simpleStatement = new Query("INSERT INTO users (id, name) VALUES (?, ?)"); 17 | const preparedStatement = await session.prepare("INSERT INTO users (id, name) VALUES (?, ?)"); 18 | 19 | batch.appendStatement(simpleStatement); 20 | batch.appendStatement(preparedStatement); 21 | 22 | await session.batch(batch, [ 23 | [Uuid.randomV4(), "Alice"], 24 | [Uuid.randomV4(), "Bob"], 25 | ]); 26 | 27 | console.log(await session.execute("SELECT * FROM users")); 28 | -------------------------------------------------------------------------------- /examples/custom-types/bigint.mts: -------------------------------------------------------------------------------- 1 | import { Cluster } from "../../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? 
["127.0.0.1:9042"]; 4 | 5 | console.log(`Connecting to ${nodes}`); 6 | 7 | const cluster = new Cluster({ nodes }); 8 | const session = await cluster.connect(); 9 | 10 | await session.execute( 11 | "CREATE KEYSPACE IF NOT EXISTS bigints WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }", 12 | ); 13 | await session.useKeyspace("bigints"); 14 | 15 | await session.execute("CREATE TABLE IF NOT EXISTS bigints (a bigint, primary key (a))"); 16 | 17 | await session.execute("INSERT INTO bigints (a) VALUES (?)", [1238773128n]); 18 | 19 | const results = await session.execute("SELECT a FROM bigints"); 20 | console.log(results); 21 | -------------------------------------------------------------------------------- /examples/custom-types/double.mts: -------------------------------------------------------------------------------- 1 | import { Cluster, Double } from "../../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? ["127.0.0.1:9042"]; 4 | 5 | console.log(`Connecting to ${nodes}`); 6 | 7 | const cluster = new Cluster({ nodes }); 8 | const session = await cluster.connect(); 9 | 10 | await session.execute( 11 | "CREATE KEYSPACE IF NOT EXISTS double WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }", 12 | ); 13 | await session.useKeyspace("double"); 14 | 15 | await session.execute("CREATE TABLE IF NOT EXISTS double (a double, primary key (a))"); 16 | 17 | const input = new Double(1.1127830921); 18 | await session.execute("INSERT INTO double (a) VALUES (?)", [input]); 19 | 20 | const results = await session.execute("SELECT a FROM double"); 21 | console.log(results); 22 | -------------------------------------------------------------------------------- /examples/custom-types/floats.mts: -------------------------------------------------------------------------------- 1 | import { Cluster, Float } from "../../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? 
["127.0.0.1:9042"]; 4 | 5 | console.log(`Connecting to ${nodes}`); 6 | 7 | const cluster = new Cluster({ nodes }); 8 | const session = await cluster.connect(); 9 | 10 | await session.execute( 11 | "CREATE KEYSPACE IF NOT EXISTS floats WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }", 12 | ); 13 | await session.useKeyspace("floats"); 14 | 15 | await session.execute("CREATE TABLE IF NOT EXISTS floats (a float, primary key (a))"); 16 | 17 | const input = new Float(1.1127830921); 18 | await session.execute("INSERT INTO floats (a) VALUES (?)", [input]); 19 | 20 | const results = await session.execute("SELECT a FROM floats"); 21 | console.log(results); 22 | -------------------------------------------------------------------------------- /examples/custom-types/list.mts: -------------------------------------------------------------------------------- 1 | import { Cluster, List, Uuid } from "../../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? ["127.0.0.1:9042"]; 4 | 5 | console.log(`Connecting to ${nodes}`); 6 | 7 | const cluster = new Cluster({ nodes }); 8 | const session = await cluster.connect(); 9 | 10 | await session.execute( 11 | "CREATE KEYSPACE IF NOT EXISTS lists WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }", 12 | ); 13 | await session.useKeyspace("lists"); 14 | 15 | await session.execute("CREATE TABLE IF NOT EXISTS lists (a uuid, b list, primary key (a))"); 16 | 17 | // NOTE: driver is not throwing errors if the return of the function is not used. 
18 | await session.execute("INSERT INTO lists (a, b) VALUES (?, ?)", [Uuid.randomV4(), new List([1, 2, 3])]); 19 | 20 | const results = await session.execute("SELECT * FROM lists"); 21 | console.log(results); 22 | -------------------------------------------------------------------------------- /examples/custom-types/map.mts: -------------------------------------------------------------------------------- 1 | import { Cluster, Map, Uuid } from "../../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? ["127.0.0.1:9042"]; 4 | 5 | console.log(`Connecting to ${nodes}`); 6 | 7 | const cluster = new Cluster({ nodes }); 8 | const session = await cluster.connect(); 9 | 10 | await session.execute( 11 | "CREATE KEYSPACE IF NOT EXISTS maps WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }", 12 | ); 13 | await session.useKeyspace("maps"); 14 | 15 | await session.execute("CREATE TABLE IF NOT EXISTS maps (a uuid, b map, primary key (a))"); 16 | 17 | await session.execute("INSERT INTO maps (a, b) VALUES (?, ?)", [ 18 | Uuid.randomV4(), 19 | new Map([ 20 | ["a", 1], 21 | ["b", 2], 22 | ["c", 3], 23 | ]), 24 | ]); 25 | 26 | const results = await session.execute("SELECT * FROM maps"); 27 | console.log(results); 28 | -------------------------------------------------------------------------------- /examples/custom-types/set.mts: -------------------------------------------------------------------------------- 1 | import { Cluster, Set, Uuid } from "../../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? 
["127.0.0.1:9042"]; 4 | 5 | console.log(`Connecting to ${nodes}`); 6 | 7 | const cluster = new Cluster({ nodes }); 8 | const session = await cluster.connect(); 9 | 10 | await session.execute( 11 | "CREATE KEYSPACE IF NOT EXISTS sets WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }", 12 | ); 13 | await session.useKeyspace("sets"); 14 | 15 | await session.execute("CREATE TABLE IF NOT EXISTS sets (a uuid, b set, primary key (a))"); 16 | 17 | await session.execute("INSERT INTO sets (a, b) VALUES (?, ?)", [Uuid.randomV4(), new Set([1, 2, 3, 1])]); 18 | 19 | const results = await session.execute("SELECT * FROM sets"); 20 | console.log(results); 21 | -------------------------------------------------------------------------------- /examples/custom-types/tuple.mts: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Daniel-Boll/scylla-javascript-driver/dc3a11040a61a0149a200db5d37d3bdde6dd2fd0/examples/custom-types/tuple.mts -------------------------------------------------------------------------------- /examples/custom-types/udt.mts: -------------------------------------------------------------------------------- 1 | import { Cluster } from "../../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? 
["127.0.0.1:9042"]; 4 | 5 | console.log(`Connecting to ${nodes}`); 6 | 7 | const cluster = new Cluster({ nodes }); 8 | const session = await cluster.connect(); 9 | 10 | await session.execute( 11 | "CREATE KEYSPACE IF NOT EXISTS udt WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }", 12 | ); 13 | await session.useKeyspace("udt"); 14 | 15 | await session.execute("CREATE TYPE IF NOT EXISTS address (street text, neighbor text)"); 16 | await session.execute("CREATE TABLE IF NOT EXISTS user (name text, address address, primary key (name))"); 17 | 18 | interface User { 19 | name: string; 20 | address: { 21 | street: string; 22 | neighbor: string; 23 | }; 24 | } 25 | 26 | const user: User = { 27 | name: "John Doe", 28 | address: { 29 | street: "123 Main St", 30 | neighbor: "Downtown", 31 | }, 32 | }; 33 | 34 | await session.execute("INSERT INTO user (name, address) VALUES (?, ?)", [user.name, user.address]); 35 | 36 | const users = (await session.execute("SELECT * FROM user")) as User[]; 37 | console.log(users); 38 | -------------------------------------------------------------------------------- /examples/custom-types/uuid.mts: -------------------------------------------------------------------------------- 1 | import { Cluster, Uuid } from "../../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? 
["127.0.0.1:9042"]; 4 | 5 | console.log(`Connecting to ${nodes}`); 6 | 7 | const cluster = new Cluster({ nodes }); 8 | const session = await cluster.connect(); 9 | 10 | await session.execute( 11 | "CREATE KEYSPACE IF NOT EXISTS uuids WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }", 12 | ); 13 | await session.useKeyspace("uuids"); 14 | 15 | await session.execute("CREATE TABLE IF NOT EXISTS uuids (a uuid, primary key (a))"); 16 | 17 | await session.execute("INSERT INTO uuids (a) VALUES (?)", [Uuid.randomV4()]); 18 | 19 | const results = await session.execute("SELECT a FROM uuids"); 20 | console.log(results); 21 | -------------------------------------------------------------------------------- /examples/custom-types/varint.mts: -------------------------------------------------------------------------------- 1 | import { Cluster, Varint } from "../../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? ["127.0.0.1:9042"]; 4 | 5 | console.log(`Connecting to ${nodes}`); 6 | 7 | const cluster = new Cluster({ nodes }); 8 | const session = await cluster.connect(); 9 | 10 | await session.execute( 11 | "CREATE KEYSPACE IF NOT EXISTS varints WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }", 12 | ); 13 | await session.useKeyspace("varints"); 14 | 15 | await session.execute("CREATE TABLE IF NOT EXISTS varints (a varint, primary key (a))"); 16 | 17 | await session.execute("INSERT INTO varints (a) VALUES (?)", [new Varint([0x00, 0x01, 0x02])]); 18 | 19 | const results = await session.execute("SELECT a FROM varints"); 20 | console.log(results); 21 | -------------------------------------------------------------------------------- /examples/fetch-schema.mts: -------------------------------------------------------------------------------- 1 | import { Cluster } from "../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? 
["127.0.0.1:9042"]; 4 | 5 | console.log(`Connecting to ${nodes}`); 6 | 7 | const cluster = new Cluster({ nodes }); 8 | const session = await cluster.connect(); 9 | 10 | const clusterData = await session.getClusterData(); 11 | const keyspaceInfo = clusterData.getKeyspaceInfo(); 12 | 13 | if (!keyspaceInfo) throw new Error("No data found"); 14 | 15 | console.log("ALL KEYSPACES"); 16 | for (const keyspaceName in keyspaceInfo) { 17 | console.log("========================================================"); 18 | const keyspaceData = keyspaceInfo[keyspaceName]; 19 | console.log("Keyspace: ", keyspaceName); 20 | console.log("replication strategy: ", keyspaceData.strategy.kind, keyspaceData.strategy.data); 21 | for (const tableName in keyspaceData.tables) { 22 | console.log("-----------------------"); 23 | const tableData = keyspaceData.tables[tableName]; 24 | console.log("Table: ", tableName); 25 | console.log("partitionKey: ", tableData.partitionKey); 26 | console.log("clusteringKey: ", tableData.clusteringKey); 27 | console.log("columns: ", tableData.columns); 28 | console.log("-----------------------"); 29 | } 30 | console.log("========================================================"); 31 | } 32 | 33 | console.log("================== SPECIFIC KEYSPACES =================="); 34 | console.log("keyspace: system_auth | strategy: ", keyspaceInfo.system_auth.strategy); 35 | console.log("keyspace: system_traces | strategy: ", keyspaceInfo.system_traces.strategy); 36 | console.log( 37 | "keyspace: system_distributed_everywhere | strategy: ", 38 | keyspaceInfo.system_distributed_everywhere.strategy, 39 | ); 40 | console.log("keyspace: system_distributed | strategy: ", keyspaceInfo.system_distributed.strategy); 41 | -------------------------------------------------------------------------------- /examples/lwt.mts: -------------------------------------------------------------------------------- 1 | import { Cluster, Consistency, Query, SerialConsistency } from "../index.js"; 2 | 
3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? ["127.0.0.1:9042"]; 4 | 5 | const cluster = new Cluster({ nodes }); 6 | 7 | const session = await cluster.connect(); 8 | 9 | await session.execute( 10 | "CREATE KEYSPACE IF NOT EXISTS lwt WITH REPLICATION = {'class' : 'NetworkTopologyStrategy', 'replication_factor' : 1}", 11 | ); 12 | await session.execute("CREATE TABLE IF NOT EXISTS lwt.tab (a int PRIMARY KEY)"); 13 | 14 | const query = new Query("INSERT INTO lwt.tab (a) VALUES(?) IF NOT EXISTS"); 15 | query.setConsistency(Consistency.One); 16 | query.setSerialConsistency(SerialConsistency.Serial); 17 | 18 | await session.execute(query, [12345]); 19 | 20 | console.log("Ok."); 21 | -------------------------------------------------------------------------------- /examples/prepared.mts: -------------------------------------------------------------------------------- 1 | import { Cluster } from "../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? ["127.0.0.1:9042"]; 4 | 5 | console.log(`Connecting to ${nodes}`); 6 | 7 | const cluster = new Cluster({ nodes }); 8 | const session = await cluster.connect(); 9 | 10 | await session.execute( 11 | "CREATE KEYSPACE IF NOT EXISTS prepared WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }", 12 | ); 13 | await session.useKeyspace("prepared"); 14 | 15 | await session.execute("CREATE TABLE IF NOT EXISTS prepared (a int, b int, c text, primary key (a, b))"); 16 | 17 | const prepared = await session.prepare("INSERT INTO basic (a, b, c) VALUES (?, 7, ?)"); 18 | await session.execute(prepared, [42, "I'm prepared!"]); 19 | await session.execute(prepared, [43, "I'm prepared 2!"]); 20 | await session.execute(prepared, [44, "I'm prepared 3!"]); 21 | 22 | await session.execute("INSERT INTO basic (a, b, c) VALUES (?, 7, ?)", [45, "I'm also prepared"], { prepare: true }); 23 | 24 | const metrics = session.metrics(); 25 | console.log(`Queries requested: ${metrics.getQueriesNum()}`); 26 
| console.log(`Iter queries requested: ${metrics.getQueriesIterNum()}`); 27 | console.log(`Errors occurred: ${metrics.getErrorsNum()}`); 28 | console.log(`Iter errors occurred: ${metrics.getErrorsIterNum()}`); 29 | console.log(`Average latency: ${metrics.getLatencyAvgMs()}`); 30 | console.log(`99.9 latency percentile: ${metrics.getLatencyPercentileMs(99.9)}`); 31 | -------------------------------------------------------------------------------- /examples/tls.mts: -------------------------------------------------------------------------------- 1 | import { Cluster, VerifyMode } from "../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? ["localhost:9142"]; 4 | console.log(`Connecting to ${nodes}`); 5 | 6 | const cluster = new Cluster({ 7 | nodes, 8 | ssl: { 9 | enabled: true, 10 | truststoreFilepath: "/your/path/to/certificates/client_cert.pem", 11 | privateKeyFilepath: "/your/path/to/certificates/client_key.pem", 12 | caFilepath: "/your/path/to/certificates/client_truststore.pem", 13 | verifyMode: VerifyMode.Peer, 14 | }, 15 | }); 16 | 17 | const session = await cluster.connect(); 18 | 19 | interface ConnectedClient { 20 | address: String; 21 | port: number; 22 | username: String; 23 | driver_name: String; 24 | driver_version: String; 25 | } 26 | 27 | // @ts-ignore 28 | let result = await session.execute( 29 | "SELECT address, port, username, driver_name, driver_version FROM system.clients", 30 | ); 31 | 32 | console.log(result); 33 | // [ 34 | // { 35 | // address: '127.0.0.1', 36 | // driver_name: 'scylla-rust-driver', 37 | // driver_version: '0.10.1', 38 | // port: 58846, 39 | // username: 'developer' 40 | // } 41 | // ] 42 | -------------------------------------------------------------------------------- /examples/tracing.mts: -------------------------------------------------------------------------------- 1 | import { Cluster } from "../index.js"; 2 | 3 | const nodes = process.env.CLUSTER_NODES?.split(",") ?? 
["127.0.0.1:9042"]; 4 | 5 | console.log(`Connecting to ${nodes}`); 6 | 7 | const cluster = new Cluster({ nodes }); 8 | const session = await cluster.connect(); 9 | 10 | const { tracing, result } = await session.executeWithTracing( 11 | "SELECT * FROM system_schema.scylla_tables", 12 | [], 13 | // { 14 | // prepare: true, 15 | // }, 16 | ); 17 | 18 | console.log(result, tracing); 19 | -------------------------------------------------------------------------------- /examples/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../tsconfig.json", 3 | "compilerOptions": { 4 | "module": "NodeNext", 5 | "target": "ESNext", 6 | "moduleResolution": "NodeNext" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /index.d.ts: -------------------------------------------------------------------------------- 1 | /* auto-generated by NAPI-RS */ 2 | /* eslint-disable */ 3 | /** 4 | * Batch statements 5 | * 6 | * A batch statement allows to execute many data-modifying statements at once. 7 | * These statements can be simple or prepared. 8 | * Only INSERT, UPDATE and DELETE statements are allowed. 9 | */ 10 | export declare class BatchStatement { 11 | constructor() 12 | /** 13 | * Appends a statement to the batch. 14 | * 15 | * _Warning_ 16 | * Using simple statements with bind markers in batches is strongly discouraged. For each simple statement with a non-empty list of values in the batch, the driver will send a prepare request, and it will be done sequentially. Results of preparation are not cached between `session.batch` calls. Consider preparing the statements before putting them into the batch. 
17 | */ 18 | appendStatement(statement: Query | PreparedStatement): void 19 | } 20 | export type ScyllaBatchStatement = BatchStatement 21 | 22 | export declare class Cluster { 23 | /** 24 | * Object config is in the format: 25 | * { 26 | * nodes: Array, 27 | * } 28 | */ 29 | constructor(clusterConfig: ClusterConfig) 30 | /** Connect to the cluster */ 31 | connect(keyspaceOrOptions?: string | ConnectionOptions | undefined | null, options?: ConnectionOptions | undefined | null): Promise 32 | } 33 | export type ScyllaCluster = Cluster 34 | 35 | export declare class Decimal { 36 | constructor(intVal: Array, scale: number) 37 | /** Returns the string representation of the Decimal. */ 38 | toString(): string 39 | } 40 | 41 | /** 42 | * A double precision float number. 43 | * 44 | * Due to the nature of numbers in JavaScript, it's hard to distinguish between integers and floats, so this type is used to represent 45 | * double precision float numbers while any other JS number will be treated as an integer. (This is not the case for BigInts, which are always treated as BigInts). 46 | */ 47 | export declare class Double { 48 | constructor(inner: number) 49 | toString(): string 50 | } 51 | 52 | export declare class Duration { 53 | months: number 54 | days: number 55 | nanoseconds: number 56 | constructor(months: number, days: number, nanoseconds: number) 57 | /** Returns the string representation of the Duration. */ 58 | toString(): string 59 | } 60 | 61 | /** 62 | * A float number. 63 | * 64 | * Due to the nature of numbers in JavaScript, it's hard to distinguish between integers and floats, so this type is used to represent 65 | * float numbers while any other JS number will be treated as an integer. (This is not the case for BigInts, which are always treated as BigInts). 
66 | */ 67 | export declare class Float { 68 | constructor(inner: number) 69 | toString(): string 70 | } 71 | 72 | /** A list of any CqlType */ 73 | export declare class List { 74 | constructor(values: T[]) 75 | toString(): string 76 | } 77 | 78 | /** A map of any CqlType to any CqlType */ 79 | export declare class Map { 80 | constructor(values: Array>) 81 | toString(): string 82 | } 83 | 84 | export declare class Metrics { 85 | /** Returns counter for nonpaged queries */ 86 | getQueriesNum(): bigint 87 | /** Returns counter for pages requested in paged queries */ 88 | getQueriesIterNum(): bigint 89 | /** Returns counter for errors occurred in nonpaged queries */ 90 | getErrorsNum(): bigint 91 | /** Returns counter for errors occurred in paged queries */ 92 | getErrorsIterNum(): bigint 93 | /** Returns average latency in milliseconds */ 94 | getLatencyAvgMs(): bigint 95 | /** 96 | * Returns latency from histogram for a given percentile 97 | * 98 | * # Arguments 99 | * 100 | * * `percentile` - float value (0.0 - 100.0), value will be clamped to this range 101 | */ 102 | getLatencyPercentileMs(percentile: number): bigint 103 | } 104 | 105 | export declare class PreparedStatement { 106 | setConsistency(consistency: Consistency): void 107 | setSerialConsistency(serialConsistency: SerialConsistency): void 108 | } 109 | 110 | export declare class Query { 111 | constructor(query: string) 112 | setConsistency(consistency: Consistency): void 113 | setSerialConsistency(serialConsistency: SerialConsistency): void 114 | setPageSize(pageSize: number): void 115 | } 116 | 117 | export declare class ScyllaClusterData { 118 | /** 119 | * Access keyspaces details collected by the driver Driver collects various schema details like 120 | * tables, partitioners, columns, types. 
They can be read using this method 121 | */ 122 | getKeyspaceInfo(): Record | null 123 | } 124 | 125 | export declare class ScyllaSession { 126 | metrics(): Metrics 127 | getClusterData(): Promise 128 | executeWithTracing(query: string | Query | PreparedStatement, parameters?: Array | undefined | null, options?: QueryOptions | undefined | null): Promise 129 | /** 130 | * Sends a query to the database and receives a response.\ 131 | * Returns only a single page of results, to receive multiple pages use (TODO: Not implemented yet) 132 | * 133 | * This is the easiest way to make a query, but performance is worse than that of prepared queries. 134 | * 135 | * It is discouraged to use this method with non-empty values argument. In such case, query first needs to be prepared (on a single connection), so 136 | * driver will perform 2 round trips instead of 1. Please use `PreparedStatement` object or `{ prepared: true }` option instead. 137 | * 138 | * # Notes 139 | * 140 | * ## UDT 141 | * Order of fields in the object must match the order of fields as defined in the UDT. The 142 | * driver does not check it by itself, so incorrect data will be written if the order is 143 | * wrong. 
144 | */ 145 | execute(query: string | Query | PreparedStatement, parameters?: Array | undefined | null, options?: QueryOptions | undefined | null): Promise 146 | query(scyllaQuery: Query, parameters?: Array | undefined | null): Promise 147 | prepare(query: string): Promise 148 | /** 149 | * Perform a batch query\ 150 | * Batch contains many `simple` or `prepared` queries which are executed at once\ 151 | * Batch doesn't return any rows 152 | * 153 | * Batch values must contain values for each of the queries 154 | * 155 | * See [the book](https://rust-driver.docs.scylladb.com/stable/queries/batch.html) for more information 156 | * 157 | * # Arguments 158 | * * `batch` - Batch to be performed 159 | * * `values` - List of values for each query, it's the easiest to use an array of arrays 160 | * 161 | * # Example 162 | * ```javascript 163 | * const nodes = process.env.CLUSTER_NODES?.split(",") ?? ["127.0.0.1:9042"]; 164 | * 165 | * const cluster = new Cluster({ nodes }); 166 | * const session = await cluster.connect(); 167 | * 168 | * const batch = new BatchStatement(); 169 | * 170 | * await session.execute("CREATE KEYSPACE IF NOT EXISTS batch_statements WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); 171 | * await session.useKeyspace("batch_statements"); 172 | * await session.execute("CREATE TABLE IF NOT EXISTS users (id UUID PRIMARY KEY, name TEXT)"); 173 | * 174 | * const simpleStatement = new Query("INSERT INTO users (id, name) VALUES (?, ?)"); 175 | * const preparedStatement = await session.prepare("INSERT INTO users (id, name) VALUES (?, ?)"); 176 | * 177 | * batch.appendStatement(simpleStatement); 178 | * batch.appendStatement(preparedStatement); 179 | * 180 | * await session.batch(batch, [[Uuid.randomV4(), "Alice"], [Uuid.randomV4(), "Bob"]]); 181 | * 182 | * console.log(await session.execute("SELECT * FROM users")); 183 | * ``` 184 | */ 185 | batch(batch: BatchStatement, parameters: Array | undefined | null>): Promise 186 | /** 
187 | * Sends `USE ` request on all connections\ 188 | * This allows to write `SELECT * FROM table` instead of `SELECT * FROM keyspace.table`\ 189 | * 190 | * Note that even failed `useKeyspace` can change currently used keyspace - the request is sent on all connections and 191 | * can overwrite previously used keyspace. 192 | * 193 | * Call only one `useKeyspace` at a time.\ 194 | * Trying to do two `useKeyspace` requests simultaneously with different names 195 | * can end with some connections using one keyspace and the rest using the other. 196 | * 197 | * # Arguments 198 | * 199 | * * `keyspaceName` - keyspace name to use, 200 | * keyspace names can have up to 48 alphanumeric characters and contain underscores 201 | * * `caseSensitive` - if set to true the generated query will put keyspace name in quotes 202 | * 203 | * # Errors 204 | * 205 | * * `InvalidArg` - if the keyspace name is invalid 206 | * 207 | * # Example 208 | * 209 | * ```javascript 210 | * import { Cluster } from "."; 211 | * 212 | * const cluster = new Cluster({ 213 | * nodes: ["127.0.0.1:9042"], 214 | * }); 215 | * 216 | * const session = await cluster.connect(); 217 | * 218 | * await session.useKeyspace("system_schema"); 219 | * 220 | * const result = await session 221 | * .execute("SELECT * FROM scylla_tables limit ?", [1]) 222 | * .catch(console.error); 223 | * ``` 224 | */ 225 | useKeyspace(keyspaceName: string, caseSensitive?: boolean | undefined | null): Promise 226 | /** 227 | * session.awaitSchemaAgreement returns a Promise that can be awaited as long as schema is not in an agreement. 228 | * However, it won’t wait forever; ClusterConfig defines a timeout that limits the time of waiting. If the timeout elapses, 229 | * the return value is an error, otherwise it is the schema_version. 
230 | * 231 | * # Returns 232 | * 233 | * * `Promise` - schema_version 234 | * 235 | * # Errors 236 | * * `GenericFailure` - if the timeout elapses 237 | * 238 | * # Example 239 | * ```javascript 240 | * import { Cluster } from "."; 241 | * 242 | * const cluster = new Cluster({ nodes: ["127.0.0.1:9042"] }); 243 | * const session = await cluster.connect(); 244 | * 245 | * const schemaVersion = await session.awaitSchemaAgreement().catch(console.error); 246 | * console.log(schemaVersion); 247 | * 248 | * const isAgreed = await session.checkSchemaAgreement().catch(console.error); 249 | * console.log(isAgreed); 250 | * ``` 251 | */ 252 | awaitSchemaAgreement(): Promise 253 | checkSchemaAgreement(): Promise 254 | } 255 | 256 | /** A list of any CqlType */ 257 | export declare class Set { 258 | constructor(values: T[]) 259 | toString(): string 260 | } 261 | 262 | export declare class Uuid { 263 | /** Generates a random UUID v4. */ 264 | static randomV4(): Uuid 265 | /** Parses a UUID from a string. It may fail if the string is not a valid UUID. */ 266 | static fromString(str: string): Uuid 267 | /** Returns the string representation of the UUID. */ 268 | toString(): string 269 | } 270 | 271 | /** 272 | * Native CQL `varint` representation. 273 | * 274 | * Represented as two's-complement binary in big-endian order. 275 | * 276 | * This type is a raw representation in bytes. It's the default 277 | * implementation of `varint` type - independent of any 278 | * external crates and crate features. 279 | * 280 | * # DB data format 281 | * Notice that constructors don't perform any normalization 282 | * on the provided data. This means that underlying bytes may 283 | * contain leading zeros. 284 | * 285 | * Currently, Scylla and Cassandra support non-normalized `varint` values. 286 | * Bytes provided by the user via constructor are passed to DB as is. 
287 | */ 288 | export declare class Varint { 289 | constructor(inner: Array<number>) 290 | toString(): string 291 | } 292 | 293 | export interface Auth { 294 | username: string 295 | password: string 296 | } 297 | 298 | export interface ClusterConfig { 299 | nodes: Array<string> 300 | compression?: Compression 301 | defaultExecutionProfile?: ExecutionProfile 302 | keyspace?: string 303 | auth?: Auth 304 | ssl?: Ssl 305 | /** The driver automatically awaits schema agreement after a schema-altering query is executed. Waiting for schema agreement more than necessary is never a bug, but might slow down applications which do a lot of schema changes (e.g. a migration). For instance, in case where somebody wishes to create a keyspace and then a lot of tables in it, it makes sense only to wait after creating a keyspace and after creating all the tables rather than after every query. */ 306 | autoAwaitSchemaAgreement?: boolean 307 | /** If the schema is not agreed upon, the driver sleeps for a duration in seconds before checking it again.
The default value is 0.2 (200 milliseconds) */ 308 | schemaAgreementInterval?: number 309 | } 310 | 311 | export declare const enum Compression { 312 | None = 0, 313 | Lz4 = 1, 314 | Snappy = 2 315 | } 316 | 317 | export interface ConnectionOptions { 318 | keyspace?: string 319 | auth?: Auth 320 | ssl?: Ssl 321 | } 322 | 323 | export declare const enum Consistency { 324 | Any = 0, 325 | One = 1, 326 | Two = 2, 327 | Three = 3, 328 | Quorum = 4, 329 | All = 5, 330 | LocalQuorum = 6, 331 | EachQuorum = 7, 332 | LocalOne = 10, 333 | Serial = 8, 334 | LocalSerial = 9 335 | } 336 | 337 | export interface ExecutionProfile { 338 | consistency?: Consistency 339 | serialConsistency?: SerialConsistency 340 | requestTimeout?: number 341 | } 342 | 343 | export interface NetworkTopologyStrategy { 344 | datacenterRepfactors: Record<string, number> 345 | } 346 | 347 | export interface Other { 348 | name: string 349 | data: Record<string, unknown> 350 | } 351 | 352 | export interface QueryOptions { 353 | prepare?: boolean 354 | } 355 | 356 | export interface ScyllaKeyspace { 357 | strategy: ScyllaStrategy 358 | tables: Record<string, ScyllaTable> 359 | views: Record<string, ScyllaMaterializedView> 360 | } 361 | 362 | export interface ScyllaMaterializedView { 363 | viewMetadata: ScyllaTable 364 | baseTableName: string 365 | } 366 | 367 | export interface ScyllaStrategy { 368 | kind: string 369 | data?: SimpleStrategy | NetworkTopologyStrategy | Other 370 | } 371 | 372 | export interface ScyllaTable { 373 | columns: Array<string> 374 | partitionKey: Array<string> 375 | clusteringKey: Array<string> 376 | partitioner?: string 377 | } 378 | 379 | export declare const enum SerialConsistency { 380 | Serial = 8, 381 | LocalSerial = 9 382 | } 383 | 384 | export interface SimpleStrategy { 385 | replicationFactor: number 386 | } 387 | 388 | export interface Ssl { 389 | enabled: boolean 390 | caFilepath?: string 391 | privateKeyFilepath?: string 392 | truststoreFilepath?: string 393 | verifyMode?: VerifyMode 394 | } 395 | 396 | export declare const enum VerifyMode { 397 | None = 0, 398 | Peer = 1 399
| } 400 | 401 | type NativeTypes = number | string | Uuid | bigint | Duration | Decimal | Float | List; 402 | type WithMapType = NativeTypes | Record | NativeTypes[]; 403 | type ParameterWithMapType = WithMapType; 404 | type JSQueryResult = Record[]; 405 | type TracingReturn = { result: JSQueryResult; tracing: TracingInfo }; 406 | 407 | export interface TracingInfo { 408 | client?: string; // IP address as a string 409 | command?: string; 410 | coordinator?: string; // IP address as a string 411 | duration?: number; 412 | parameters?: Record; 413 | request?: string; 414 | /** 415 | * started_at is a timestamp - time since unix epoch 416 | */ 417 | started_at?: string; 418 | events: TracingEvent[]; 419 | } 420 | 421 | /** 422 | * A single event happening during a traced query 423 | */ 424 | export interface TracingEvent { 425 | event_id: string; 426 | activity?: string; 427 | source?: string; // IP address as a string 428 | source_elapsed?: number; 429 | thread?: string; 430 | } -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | // prettier-ignore 2 | /* eslint-disable */ 3 | // @ts-nocheck 4 | /* auto-generated by NAPI-RS */ 5 | 6 | const { createRequire } = require('node:module') 7 | require = createRequire(__filename) 8 | 9 | const { readFileSync } = require('node:fs') 10 | let nativeBinding = null 11 | const loadErrors = [] 12 | 13 | const isMusl = () => { 14 | let musl = false 15 | if (process.platform === 'linux') { 16 | musl = isMuslFromFilesystem() 17 | if (musl === null) { 18 | musl = isMuslFromReport() 19 | } 20 | if (musl === null) { 21 | musl = isMuslFromChildProcess() 22 | } 23 | } 24 | return musl 25 | } 26 | 27 | const isFileMusl = (f) => f.includes('libc.musl-') || f.includes('ld-musl-') 28 | 29 | const isMuslFromFilesystem = () => { 30 | try { 31 | return readFileSync('/usr/bin/ldd', 'utf-8').includes('musl') 32 | } catch { 
33 | return null 34 | } 35 | } 36 | 37 | const isMuslFromReport = () => { 38 | const report = typeof process.report.getReport === 'function' ? process.report.getReport() : null 39 | if (!report) { 40 | return null 41 | } 42 | if (report.header && report.header.glibcVersionRuntime) { 43 | return false 44 | } 45 | if (Array.isArray(report.sharedObjects)) { 46 | if (report.sharedObjects.some(isFileMusl)) { 47 | return true 48 | } 49 | } 50 | return false 51 | } 52 | 53 | const isMuslFromChildProcess = () => { 54 | try { 55 | return require('child_process').execSync('ldd --version', { encoding: 'utf8' }).includes('musl') 56 | } catch (e) { 57 | // If we reach this case, we don't know if the system is musl or not, so is better to just fallback to false 58 | return false 59 | } 60 | } 61 | 62 | function requireNative() { 63 | if (process.platform === 'android') { 64 | if (process.arch === 'arm64') { 65 | try { 66 | return require('./scylladb.android-arm64.node') 67 | } catch (e) { 68 | loadErrors.push(e) 69 | } 70 | try { 71 | return require('@lambda-group/scylladb-android-arm64') 72 | } catch (e) { 73 | loadErrors.push(e) 74 | } 75 | 76 | } else if (process.arch === 'arm') { 77 | try { 78 | return require('./scylladb.android-arm-eabi.node') 79 | } catch (e) { 80 | loadErrors.push(e) 81 | } 82 | try { 83 | return require('@lambda-group/scylladb-android-arm-eabi') 84 | } catch (e) { 85 | loadErrors.push(e) 86 | } 87 | 88 | } else { 89 | loadErrors.push(new Error(`Unsupported architecture on Android ${process.arch}`)) 90 | } 91 | } else if (process.platform === 'win32') { 92 | if (process.arch === 'x64') { 93 | try { 94 | return require('./scylladb.win32-x64-msvc.node') 95 | } catch (e) { 96 | loadErrors.push(e) 97 | } 98 | try { 99 | return require('@lambda-group/scylladb-win32-x64-msvc') 100 | } catch (e) { 101 | loadErrors.push(e) 102 | } 103 | 104 | } else if (process.arch === 'ia32') { 105 | try { 106 | return require('./scylladb.win32-ia32-msvc.node') 107 | } catch 
(e) { 108 | loadErrors.push(e) 109 | } 110 | try { 111 | return require('@lambda-group/scylladb-win32-ia32-msvc') 112 | } catch (e) { 113 | loadErrors.push(e) 114 | } 115 | 116 | } else if (process.arch === 'arm64') { 117 | try { 118 | return require('./scylladb.win32-arm64-msvc.node') 119 | } catch (e) { 120 | loadErrors.push(e) 121 | } 122 | try { 123 | return require('@lambda-group/scylladb-win32-arm64-msvc') 124 | } catch (e) { 125 | loadErrors.push(e) 126 | } 127 | 128 | } else { 129 | loadErrors.push(new Error(`Unsupported architecture on Windows: ${process.arch}`)) 130 | } 131 | } else if (process.platform === 'darwin') { 132 | try { 133 | return require('./scylladb.darwin-universal.node') 134 | } catch (e) { 135 | loadErrors.push(e) 136 | } 137 | try { 138 | return require('@lambda-group/scylladb-darwin-universal') 139 | } catch (e) { 140 | loadErrors.push(e) 141 | } 142 | 143 | if (process.arch === 'x64') { 144 | try { 145 | return require('./scylladb.darwin-x64.node') 146 | } catch (e) { 147 | loadErrors.push(e) 148 | } 149 | try { 150 | return require('@lambda-group/scylladb-darwin-x64') 151 | } catch (e) { 152 | loadErrors.push(e) 153 | } 154 | 155 | } else if (process.arch === 'arm64') { 156 | try { 157 | return require('./scylladb.darwin-arm64.node') 158 | } catch (e) { 159 | loadErrors.push(e) 160 | } 161 | try { 162 | return require('@lambda-group/scylladb-darwin-arm64') 163 | } catch (e) { 164 | loadErrors.push(e) 165 | } 166 | 167 | } else { 168 | loadErrors.push(new Error(`Unsupported architecture on macOS: ${process.arch}`)) 169 | } 170 | } else if (process.platform === 'freebsd') { 171 | if (process.arch === 'x64') { 172 | try { 173 | return require('./scylladb.freebsd-x64.node') 174 | } catch (e) { 175 | loadErrors.push(e) 176 | } 177 | try { 178 | return require('@lambda-group/scylladb-freebsd-x64') 179 | } catch (e) { 180 | loadErrors.push(e) 181 | } 182 | 183 | } else if (process.arch === 'arm64') { 184 | try { 185 | return 
require('./scylladb.freebsd-arm64.node') 186 | } catch (e) { 187 | loadErrors.push(e) 188 | } 189 | try { 190 | return require('@lambda-group/scylladb-freebsd-arm64') 191 | } catch (e) { 192 | loadErrors.push(e) 193 | } 194 | 195 | } else { 196 | loadErrors.push(new Error(`Unsupported architecture on FreeBSD: ${process.arch}`)) 197 | } 198 | } else if (process.platform === 'linux') { 199 | if (process.arch === 'x64') { 200 | if (isMusl()) { 201 | try { 202 | return require('./scylladb.linux-x64-musl.node') 203 | } catch (e) { 204 | loadErrors.push(e) 205 | } 206 | try { 207 | return require('@lambda-group/scylladb-linux-x64-musl') 208 | } catch (e) { 209 | loadErrors.push(e) 210 | } 211 | 212 | } else { 213 | try { 214 | return require('./scylladb.linux-x64-gnu.node') 215 | } catch (e) { 216 | loadErrors.push(e) 217 | } 218 | try { 219 | return require('@lambda-group/scylladb-linux-x64-gnu') 220 | } catch (e) { 221 | loadErrors.push(e) 222 | } 223 | 224 | } 225 | } else if (process.arch === 'arm64') { 226 | if (isMusl()) { 227 | try { 228 | return require('./scylladb.linux-arm64-musl.node') 229 | } catch (e) { 230 | loadErrors.push(e) 231 | } 232 | try { 233 | return require('@lambda-group/scylladb-linux-arm64-musl') 234 | } catch (e) { 235 | loadErrors.push(e) 236 | } 237 | 238 | } else { 239 | try { 240 | return require('./scylladb.linux-arm64-gnu.node') 241 | } catch (e) { 242 | loadErrors.push(e) 243 | } 244 | try { 245 | return require('@lambda-group/scylladb-linux-arm64-gnu') 246 | } catch (e) { 247 | loadErrors.push(e) 248 | } 249 | 250 | } 251 | } else if (process.arch === 'arm') { 252 | if (isMusl()) { 253 | try { 254 | return require('./scylladb.linux-arm-musleabihf.node') 255 | } catch (e) { 256 | loadErrors.push(e) 257 | } 258 | try { 259 | return require('@lambda-group/scylladb-linux-arm-musleabihf') 260 | } catch (e) { 261 | loadErrors.push(e) 262 | } 263 | 264 | } else { 265 | try { 266 | return require('./scylladb.linux-arm-gnueabihf.node') 267 | } 
catch (e) { 268 | loadErrors.push(e) 269 | } 270 | try { 271 | return require('@lambda-group/scylladb-linux-arm-gnueabihf') 272 | } catch (e) { 273 | loadErrors.push(e) 274 | } 275 | 276 | } 277 | } else if (process.arch === 'riscv64') { 278 | if (isMusl()) { 279 | try { 280 | return require('./scylladb.linux-riscv64-musl.node') 281 | } catch (e) { 282 | loadErrors.push(e) 283 | } 284 | try { 285 | return require('@lambda-group/scylladb-linux-riscv64-musl') 286 | } catch (e) { 287 | loadErrors.push(e) 288 | } 289 | 290 | } else { 291 | try { 292 | return require('./scylladb.linux-riscv64-gnu.node') 293 | } catch (e) { 294 | loadErrors.push(e) 295 | } 296 | try { 297 | return require('@lambda-group/scylladb-linux-riscv64-gnu') 298 | } catch (e) { 299 | loadErrors.push(e) 300 | } 301 | 302 | } 303 | } else if (process.arch === 'ppc64') { 304 | try { 305 | return require('./scylladb.linux-ppc64-gnu.node') 306 | } catch (e) { 307 | loadErrors.push(e) 308 | } 309 | try { 310 | return require('@lambda-group/scylladb-linux-ppc64-gnu') 311 | } catch (e) { 312 | loadErrors.push(e) 313 | } 314 | 315 | } else if (process.arch === 's390x') { 316 | try { 317 | return require('./scylladb.linux-s390x-gnu.node') 318 | } catch (e) { 319 | loadErrors.push(e) 320 | } 321 | try { 322 | return require('@lambda-group/scylladb-linux-s390x-gnu') 323 | } catch (e) { 324 | loadErrors.push(e) 325 | } 326 | 327 | } else { 328 | loadErrors.push(new Error(`Unsupported architecture on Linux: ${process.arch}`)) 329 | } 330 | } else { 331 | loadErrors.push(new Error(`Unsupported OS: ${process.platform}, architecture: ${process.arch}`)) 332 | } 333 | } 334 | 335 | nativeBinding = requireNative() 336 | 337 | if (!nativeBinding || process.env.NAPI_RS_FORCE_WASI) { 338 | try { 339 | nativeBinding = require('./scylladb.wasi.cjs') 340 | } catch (err) { 341 | if (process.env.NAPI_RS_FORCE_WASI) { 342 | loadErrors.push(err) 343 | } 344 | } 345 | if (!nativeBinding) { 346 | try { 347 | nativeBinding = 
require('@lambda-group/scylladb-wasm32-wasi') 348 | } catch (err) { 349 | if (process.env.NAPI_RS_FORCE_WASI) { 350 | loadErrors.push(err) 351 | } 352 | } 353 | } 354 | } 355 | 356 | if (!nativeBinding) { 357 | if (loadErrors.length > 0) { 358 | // TODO Link to documentation with potential fixes 359 | // - The package owner could build/publish bindings for this arch 360 | // - The user may need to bundle the correct files 361 | // - The user may need to re-install node_modules to get new packages 362 | throw new Error('Failed to load native binding', { cause: loadErrors }) 363 | } 364 | throw new Error(`Failed to load native binding`) 365 | } 366 | 367 | module.exports.BatchStatement = nativeBinding.BatchStatement 368 | module.exports.ScyllaBatchStatement = nativeBinding.ScyllaBatchStatement 369 | module.exports.Cluster = nativeBinding.Cluster 370 | module.exports.ScyllaCluster = nativeBinding.ScyllaCluster 371 | module.exports.Decimal = nativeBinding.Decimal 372 | module.exports.Double = nativeBinding.Double 373 | module.exports.Duration = nativeBinding.Duration 374 | module.exports.Float = nativeBinding.Float 375 | module.exports.List = nativeBinding.List 376 | module.exports.Map = nativeBinding.Map 377 | module.exports.Metrics = nativeBinding.Metrics 378 | module.exports.PreparedStatement = nativeBinding.PreparedStatement 379 | module.exports.Query = nativeBinding.Query 380 | module.exports.ScyllaClusterData = nativeBinding.ScyllaClusterData 381 | module.exports.ScyllaSession = nativeBinding.ScyllaSession 382 | module.exports.Set = nativeBinding.Set 383 | module.exports.Uuid = nativeBinding.Uuid 384 | module.exports.Varint = nativeBinding.Varint 385 | module.exports.Compression = nativeBinding.Compression 386 | module.exports.Consistency = nativeBinding.Consistency 387 | module.exports.SerialConsistency = nativeBinding.SerialConsistency 388 | module.exports.VerifyMode = nativeBinding.VerifyMode 389 | 390 | const customInspectSymbol = 
Symbol.for('nodejs.util.inspect.custom') 391 | 392 | nativeBinding.Uuid.prototype[customInspectSymbol] = function () { return this.toString(); } 393 | nativeBinding.Duration.prototype[customInspectSymbol] = function () { return this.toString(); } 394 | nativeBinding.Decimal.prototype[customInspectSymbol] = function () { return this.toString(); } 395 | nativeBinding.Float.prototype[customInspectSymbol] = function () { return this.toString(); } 396 | nativeBinding.Double.prototype[customInspectSymbol] = function () { return this.toString(); } 397 | nativeBinding.List.prototype[customInspectSymbol] = function () { return this.toString(); } 398 | nativeBinding.Set.prototype[customInspectSymbol] = function () { return this.toString(); } 399 | nativeBinding.Map.prototype[customInspectSymbol] = function () { return this.toString(); } 400 | nativeBinding.Varint.prototype[customInspectSymbol] = function () { return this.toString(); } -------------------------------------------------------------------------------- /npm/android-arm-eabi/README.md: -------------------------------------------------------------------------------- 1 | # `@lambda-group/scylladb-android-arm-eabi` 2 | 3 | This is the **armv7-linux-androideabi** binary for `@lambda-group/scylladb` 4 | -------------------------------------------------------------------------------- /npm/android-arm-eabi/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb-android-arm-eabi", 3 | "version": "0.7.1", 4 | "cpu": [ 5 | "arm" 6 | ], 7 | "main": "scylladb.android-arm-eabi.node", 8 | "files": [ 9 | "scylladb.android-arm-eabi.node" 10 | ], 11 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 
🧪🔧", 12 | "keywords": [ 13 | "napi-rs", 14 | "NAPI", 15 | "N-API", 16 | "Rust", 17 | "node-addon", 18 | "node-addon-api" 19 | ], 20 | "author": { 21 | "name": "Daniel Boll", 22 | "email": "danielboll.dev@proton.me", 23 | "url": "https://daniel-boll.me" 24 | }, 25 | "license": "MIT", 26 | "engines": { 27 | "node": ">= 20" 28 | }, 29 | "publishConfig": { 30 | "registry": "https://registry.npmjs.org/", 31 | "access": "public" 32 | }, 33 | "repository": "https://github.com/Daniel-Boll/scylla-javascript-driver", 34 | "os": [ 35 | "android" 36 | ] 37 | } -------------------------------------------------------------------------------- /npm/android-arm64/README.md: -------------------------------------------------------------------------------- 1 | # `@lambda-group/scylladb-android-arm64` 2 | 3 | This is the **aarch64-linux-android** binary for `@lambda-group/scylladb` 4 | -------------------------------------------------------------------------------- /npm/android-arm64/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb-android-arm64", 3 | "version": "0.7.1", 4 | "cpu": [ 5 | "arm64" 6 | ], 7 | "main": "scylladb.android-arm64.node", 8 | "files": [ 9 | "scylladb.android-arm64.node" 10 | ], 11 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 
🧪🔧", 12 | "keywords": [ 13 | "napi-rs", 14 | "NAPI", 15 | "N-API", 16 | "Rust", 17 | "node-addon", 18 | "node-addon-api" 19 | ], 20 | "author": { 21 | "name": "Daniel Boll", 22 | "email": "danielboll.dev@proton.me", 23 | "url": "https://daniel-boll.me" 24 | }, 25 | "license": "MIT", 26 | "engines": { 27 | "node": ">= 20" 28 | }, 29 | "publishConfig": { 30 | "registry": "https://registry.npmjs.org/", 31 | "access": "public" 32 | }, 33 | "repository": "https://github.com/Daniel-Boll/scylla-javascript-driver", 34 | "os": [ 35 | "android" 36 | ] 37 | } -------------------------------------------------------------------------------- /npm/darwin-arm64/README.md: -------------------------------------------------------------------------------- 1 | # `@lambda-group/scylladb-darwin-arm64` 2 | 3 | This is the **aarch64-apple-darwin** binary for `@lambda-group/scylladb` 4 | -------------------------------------------------------------------------------- /npm/darwin-arm64/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb-darwin-arm64", 3 | "version": "0.7.1", 4 | "cpu": [ 5 | "arm64" 6 | ], 7 | "main": "scylladb.darwin-arm64.node", 8 | "files": [ 9 | "scylladb.darwin-arm64.node" 10 | ], 11 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 
🧪🔧", 12 | "keywords": [ 13 | "napi-rs", 14 | "NAPI", 15 | "N-API", 16 | "Rust", 17 | "node-addon", 18 | "node-addon-api" 19 | ], 20 | "author": { 21 | "name": "Daniel Boll", 22 | "email": "danielboll.dev@proton.me", 23 | "url": "https://daniel-boll.me" 24 | }, 25 | "license": "MIT", 26 | "engines": { 27 | "node": ">= 20" 28 | }, 29 | "publishConfig": { 30 | "registry": "https://registry.npmjs.org/", 31 | "access": "public" 32 | }, 33 | "repository": "https://github.com/Daniel-Boll/scylla-javascript-driver", 34 | "os": [ 35 | "darwin" 36 | ] 37 | } -------------------------------------------------------------------------------- /npm/darwin-universal/README.md: -------------------------------------------------------------------------------- 1 | # `@lambda-group/scylladb-darwin-universal` 2 | 3 | This is the **universal-apple-darwin** binary for `@lambda-group/scylladb` 4 | -------------------------------------------------------------------------------- /npm/darwin-universal/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb-darwin-universal", 3 | "version": "0.7.0", 4 | "main": "scylladb.darwin-universal.node", 5 | "files": [ 6 | "scylladb.darwin-universal.node" 7 | ], 8 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 
🧪🔧", 9 | "author": { 10 | "name": "Daniel Boll", 11 | "email": "danielboll.dev@proton.me", 12 | "url": "https://daniel-boll.me" 13 | }, 14 | "license": "MIT", 15 | "engines": { 16 | "node": ">= 10" 17 | }, 18 | "repository": "https://github.com/Daniel-Boll/scylla-javascript-driver", 19 | "os": [ 20 | "darwin" 21 | ] 22 | } -------------------------------------------------------------------------------- /npm/darwin-x64/README.md: -------------------------------------------------------------------------------- 1 | # `@lambda-group/scylladb-darwin-x64` 2 | 3 | This is the **x86_64-apple-darwin** binary for `@lambda-group/scylladb` 4 | -------------------------------------------------------------------------------- /npm/darwin-x64/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb-darwin-x64", 3 | "version": "0.7.1", 4 | "cpu": [ 5 | "x64" 6 | ], 7 | "main": "scylladb.darwin-x64.node", 8 | "files": [ 9 | "scylladb.darwin-x64.node" 10 | ], 11 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 
🧪🔧", 12 | "keywords": [ 13 | "napi-rs", 14 | "NAPI", 15 | "N-API", 16 | "Rust", 17 | "node-addon", 18 | "node-addon-api" 19 | ], 20 | "author": { 21 | "name": "Daniel Boll", 22 | "email": "danielboll.dev@proton.me", 23 | "url": "https://daniel-boll.me" 24 | }, 25 | "license": "MIT", 26 | "engines": { 27 | "node": ">= 20" 28 | }, 29 | "publishConfig": { 30 | "registry": "https://registry.npmjs.org/", 31 | "access": "public" 32 | }, 33 | "repository": "https://github.com/Daniel-Boll/scylla-javascript-driver", 34 | "os": [ 35 | "darwin" 36 | ] 37 | } -------------------------------------------------------------------------------- /npm/freebsd-x64/README.md: -------------------------------------------------------------------------------- 1 | # `@lambda-group/scylladb-freebsd-x64` 2 | 3 | This is the **x86_64-unknown-freebsd** binary for `@lambda-group/scylladb` 4 | -------------------------------------------------------------------------------- /npm/freebsd-x64/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb-freebsd-x64", 3 | "version": "0.7.0", 4 | "cpu": [ 5 | "x64" 6 | ], 7 | "main": "scylladb.freebsd-x64.node", 8 | "files": [ 9 | "scylladb.freebsd-x64.node" 10 | ], 11 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 
🧪🔧", 12 | "keywords": [ 13 | "napi-rs", 14 | "NAPI", 15 | "N-API", 16 | "Rust", 17 | "node-addon", 18 | "node-addon-api" 19 | ], 20 | "license": "MIT", 21 | "engines": { 22 | "node": ">= 10" 23 | }, 24 | "publishConfig": { 25 | "registry": "https://registry.npmjs.org/", 26 | "access": "public" 27 | }, 28 | "repository": { 29 | "url": "git+ssh://git@github.com/napi-rs/scylladb-pnpm.git", 30 | "type": "git" 31 | }, 32 | "os": [ 33 | "freebsd" 34 | ] 35 | } 36 | -------------------------------------------------------------------------------- /npm/linux-arm-gnueabihf/README.md: -------------------------------------------------------------------------------- 1 | # `@lambda-group/scylladb-linux-arm-gnueabihf` 2 | 3 | This is the **armv7-unknown-linux-gnueabihf** binary for `@lambda-group/scylladb` 4 | -------------------------------------------------------------------------------- /npm/linux-arm-gnueabihf/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb-linux-arm-gnueabihf", 3 | "version": "0.7.1", 4 | "cpu": [ 5 | "arm" 6 | ], 7 | "main": "scylladb.linux-arm-gnueabihf.node", 8 | "files": [ 9 | "scylladb.linux-arm-gnueabihf.node" 10 | ], 11 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 
🧪🔧", 12 | "keywords": [ 13 | "napi-rs", 14 | "NAPI", 15 | "N-API", 16 | "Rust", 17 | "node-addon", 18 | "node-addon-api" 19 | ], 20 | "author": { 21 | "name": "Daniel Boll", 22 | "email": "danielboll.dev@proton.me", 23 | "url": "https://daniel-boll.me" 24 | }, 25 | "license": "MIT", 26 | "engines": { 27 | "node": ">= 20" 28 | }, 29 | "publishConfig": { 30 | "registry": "https://registry.npmjs.org/", 31 | "access": "public" 32 | }, 33 | "repository": "https://github.com/Daniel-Boll/scylla-javascript-driver", 34 | "os": [ 35 | "linux" 36 | ] 37 | } -------------------------------------------------------------------------------- /npm/linux-arm64-gnu/README.md: -------------------------------------------------------------------------------- 1 | # `@lambda-group/scylladb-linux-arm64-gnu` 2 | 3 | This is the **aarch64-unknown-linux-gnu** binary for `@lambda-group/scylladb` 4 | -------------------------------------------------------------------------------- /npm/linux-arm64-gnu/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb-linux-arm64-gnu", 3 | "version": "0.7.1", 4 | "cpu": [ 5 | "arm64" 6 | ], 7 | "main": "scylladb.linux-arm64-gnu.node", 8 | "files": [ 9 | "scylladb.linux-arm64-gnu.node" 10 | ], 11 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 
🧪🔧", 12 | "keywords": [ 13 | "napi-rs", 14 | "NAPI", 15 | "N-API", 16 | "Rust", 17 | "node-addon", 18 | "node-addon-api" 19 | ], 20 | "author": { 21 | "name": "Daniel Boll", 22 | "email": "danielboll.dev@proton.me", 23 | "url": "https://daniel-boll.me" 24 | }, 25 | "license": "MIT", 26 | "engines": { 27 | "node": ">= 20" 28 | }, 29 | "publishConfig": { 30 | "registry": "https://registry.npmjs.org/", 31 | "access": "public" 32 | }, 33 | "repository": "https://github.com/Daniel-Boll/scylla-javascript-driver", 34 | "os": [ 35 | "linux" 36 | ], 37 | "libc": [ 38 | "glibc" 39 | ] 40 | } -------------------------------------------------------------------------------- /npm/linux-arm64-musl/README.md: -------------------------------------------------------------------------------- 1 | # `@lambda-group/scylladb-linux-arm64-musl` 2 | 3 | This is the **aarch64-unknown-linux-musl** binary for `@lambda-group/scylladb` 4 | -------------------------------------------------------------------------------- /npm/linux-arm64-musl/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb-linux-arm64-musl", 3 | "version": "0.7.1", 4 | "cpu": [ 5 | "arm64" 6 | ], 7 | "main": "scylladb.linux-arm64-musl.node", 8 | "files": [ 9 | "scylladb.linux-arm64-musl.node" 10 | ], 11 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 
🧪🔧", 12 | "keywords": [ 13 | "napi-rs", 14 | "NAPI", 15 | "N-API", 16 | "Rust", 17 | "node-addon", 18 | "node-addon-api" 19 | ], 20 | "author": { 21 | "name": "Daniel Boll", 22 | "email": "danielboll.dev@proton.me", 23 | "url": "https://daniel-boll.me" 24 | }, 25 | "license": "MIT", 26 | "engines": { 27 | "node": ">= 20" 28 | }, 29 | "publishConfig": { 30 | "registry": "https://registry.npmjs.org/", 31 | "access": "public" 32 | }, 33 | "repository": "https://github.com/Daniel-Boll/scylla-javascript-driver", 34 | "os": [ 35 | "linux" 36 | ], 37 | "libc": [ 38 | "musl" 39 | ] 40 | } -------------------------------------------------------------------------------- /npm/linux-x64-gnu/README.md: -------------------------------------------------------------------------------- 1 | # `@lambda-group/scylladb-linux-x64-gnu` 2 | 3 | This is the **x86_64-unknown-linux-gnu** binary for `@lambda-group/scylladb` 4 | -------------------------------------------------------------------------------- /npm/linux-x64-gnu/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb-linux-x64-gnu", 3 | "version": "0.7.1", 4 | "cpu": [ 5 | "x64" 6 | ], 7 | "main": "scylladb.linux-x64-gnu.node", 8 | "files": [ 9 | "scylladb.linux-x64-gnu.node" 10 | ], 11 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 
🧪🔧", 12 | "keywords": [ 13 | "napi-rs", 14 | "NAPI", 15 | "N-API", 16 | "Rust", 17 | "node-addon", 18 | "node-addon-api" 19 | ], 20 | "author": { 21 | "name": "Daniel Boll", 22 | "email": "danielboll.dev@proton.me", 23 | "url": "https://daniel-boll.me" 24 | }, 25 | "license": "MIT", 26 | "engines": { 27 | "node": ">= 20" 28 | }, 29 | "publishConfig": { 30 | "registry": "https://registry.npmjs.org/", 31 | "access": "public" 32 | }, 33 | "repository": "https://github.com/Daniel-Boll/scylla-javascript-driver", 34 | "os": [ 35 | "linux" 36 | ], 37 | "libc": [ 38 | "glibc" 39 | ] 40 | } -------------------------------------------------------------------------------- /npm/linux-x64-musl/README.md: -------------------------------------------------------------------------------- 1 | # `@lambda-group/scylladb-linux-x64-musl` 2 | 3 | This is the **x86_64-unknown-linux-musl** binary for `@lambda-group/scylladb` 4 | -------------------------------------------------------------------------------- /npm/linux-x64-musl/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb-linux-x64-musl", 3 | "version": "0.7.1", 4 | "cpu": [ 5 | "x64" 6 | ], 7 | "main": "scylladb.linux-x64-musl.node", 8 | "files": [ 9 | "scylladb.linux-x64-musl.node" 10 | ], 11 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 
🧪🔧", 12 | "keywords": [ 13 | "napi-rs", 14 | "NAPI", 15 | "N-API", 16 | "Rust", 17 | "node-addon", 18 | "node-addon-api" 19 | ], 20 | "author": { 21 | "name": "Daniel Boll", 22 | "email": "danielboll.dev@proton.me", 23 | "url": "https://daniel-boll.me" 24 | }, 25 | "license": "MIT", 26 | "engines": { 27 | "node": ">= 20" 28 | }, 29 | "publishConfig": { 30 | "registry": "https://registry.npmjs.org/", 31 | "access": "public" 32 | }, 33 | "repository": "https://github.com/Daniel-Boll/scylla-javascript-driver", 34 | "os": [ 35 | "linux" 36 | ], 37 | "libc": [ 38 | "musl" 39 | ] 40 | } -------------------------------------------------------------------------------- /npm/wasm32-wasi/README.md: -------------------------------------------------------------------------------- 1 | # `@lambda-group/scylladb-wasm32-wasi` 2 | 3 | This is the **wasm32-wasi-preview1-threads** binary for `@lambda-group/scylladb` 4 | -------------------------------------------------------------------------------- /npm/wasm32-wasi/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb-wasm32-wasi", 3 | "version": "0.7.0", 4 | "cpu": [ 5 | "wasm32" 6 | ], 7 | "main": "scylladb.wasi.cjs", 8 | "files": [ 9 | "scylladb.wasm32-wasi.wasm", 10 | "scylladb.wasi.cjs", 11 | "scylladb.wasi-browser.js", 12 | "wasi-worker.mjs", 13 | "wasi-worker-browser.mjs" 14 | ], 15 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 
🧪🔧", 16 | "keywords": [ 17 | "napi-rs", 18 | "NAPI", 19 | "N-API", 20 | "Rust", 21 | "node-addon", 22 | "node-addon-api" 23 | ], 24 | "license": "MIT", 25 | "engines": { 26 | "node": ">=14.0.0" 27 | }, 28 | "publishConfig": { 29 | "registry": "https://registry.npmjs.org/", 30 | "access": "public" 31 | }, 32 | "repository": "https://github.com/Daniel-Boll/scylla-javascript-driver", 33 | "browser": "scylladb.wasi-browser.js", 34 | "dependencies": { 35 | "@napi-rs/wasm-runtime": "^0.2.5" 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /npm/win32-arm64-msvc/README.md: -------------------------------------------------------------------------------- 1 | # `@lambda-group/scylladb-win32-arm64-msvc` 2 | 3 | This is the **aarch64-pc-windows-msvc** binary for `@lambda-group/scylladb` 4 | -------------------------------------------------------------------------------- /npm/win32-arm64-msvc/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb-win32-arm64-msvc", 3 | "version": "0.7.0", 4 | "cpu": [ 5 | "arm64" 6 | ], 7 | "main": "scylladb.win32-arm64-msvc.node", 8 | "files": [ 9 | "scylladb.win32-arm64-msvc.node" 10 | ], 11 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 
🧪🔧", 12 | "keywords": [ 13 | "napi-rs", 14 | "NAPI", 15 | "N-API", 16 | "Rust", 17 | "node-addon", 18 | "node-addon-api" 19 | ], 20 | "license": "MIT", 21 | "engines": { 22 | "node": ">= 10" 23 | }, 24 | "publishConfig": { 25 | "registry": "https://registry.npmjs.org/", 26 | "access": "public" 27 | }, 28 | "repository": "https://github.com/Daniel-Boll/scylla-javascript-driver", 29 | "os": [ 30 | "win32" 31 | ] 32 | } 33 | -------------------------------------------------------------------------------- /npm/win32-ia32-msvc/README.md: -------------------------------------------------------------------------------- 1 | # `@lambda-group/scylladb-win32-ia32-msvc` 2 | 3 | This is the **i686-pc-windows-msvc** binary for `@lambda-group/scylladb` 4 | -------------------------------------------------------------------------------- /npm/win32-ia32-msvc/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb-win32-ia32-msvc", 3 | "version": "0.7.0", 4 | "cpu": [ 5 | "ia32" 6 | ], 7 | "main": "scylladb.win32-ia32-msvc.node", 8 | "files": [ 9 | "scylladb.win32-ia32-msvc.node" 10 | ], 11 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 
🧪🔧", 12 | "keywords": [ 13 | "napi-rs", 14 | "NAPI", 15 | "N-API", 16 | "Rust", 17 | "node-addon", 18 | "node-addon-api" 19 | ], 20 | "license": "MIT", 21 | "engines": { 22 | "node": ">= 10" 23 | }, 24 | "publishConfig": { 25 | "registry": "https://registry.npmjs.org/", 26 | "access": "public" 27 | }, 28 | "repository": "https://github.com/Daniel-Boll/scylla-javascript-driver", 29 | "os": [ 30 | "win32" 31 | ] 32 | } 33 | -------------------------------------------------------------------------------- /npm/win32-x64-msvc/README.md: -------------------------------------------------------------------------------- 1 | # `@lambda-group/scylladb-win32-x64-msvc` 2 | 3 | This is the **x86_64-pc-windows-msvc** binary for `@lambda-group/scylladb` 4 | -------------------------------------------------------------------------------- /npm/win32-x64-msvc/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb-win32-x64-msvc", 3 | "version": "0.7.0", 4 | "cpu": [ 5 | "x64" 6 | ], 7 | "main": "scylladb.win32-x64-msvc.node", 8 | "files": [ 9 | "scylladb.win32-x64-msvc.node" 10 | ], 11 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 
🧪🔧", 12 | "keywords": [ 13 | "napi-rs", 14 | "NAPI", 15 | "N-API", 16 | "Rust", 17 | "node-addon", 18 | "node-addon-api" 19 | ], 20 | "license": "MIT", 21 | "engines": { 22 | "node": ">= 10" 23 | }, 24 | "publishConfig": { 25 | "registry": "https://registry.npmjs.org/", 26 | "access": "public" 27 | }, 28 | "repository": "https://github.com/Daniel-Boll/scylla-javascript-driver", 29 | "os": [ 30 | "win32" 31 | ] 32 | } 33 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@lambda-group/scylladb", 3 | "version": "0.7.1", 4 | "description": "🚀 JavaScript driver for ScyllaDB, harnessing Rust's power through napi-rs for top performance. Pre-release stage. 🧪🔧", 5 | "main": "index.js", 6 | "types": "index.d.ts", 7 | "browser": "browser.js", 8 | "repository": "https://github.com/Daniel-Boll/scylla-javascript-driver", 9 | "author": { 10 | "name": "Daniel Boll", 11 | "email": "danielboll.dev@proton.me", 12 | "url": "https://daniel-boll.me" 13 | }, 14 | "license": "MIT", 15 | "keywords": [ 16 | "napi-rs", 17 | "NAPI", 18 | "N-API", 19 | "Rust", 20 | "node-addon", 21 | "node-addon-api" 22 | ], 23 | "files": [ 24 | "index.d.ts", 25 | "index.js", 26 | "browser.js" 27 | ], 28 | "napi": { 29 | "binaryName": "scylladb", 30 | "targets": [ 31 | "x86_64-apple-darwin", 32 | "aarch64-apple-darwin", 33 | "x86_64-unknown-linux-gnu", 34 | "x86_64-unknown-linux-musl", 35 | "aarch64-unknown-linux-gnu", 36 | "armv7-unknown-linux-gnueabihf", 37 | "aarch64-linux-android", 38 | "aarch64-unknown-linux-musl", 39 | "armv7-linux-androideabi" 40 | ] 41 | }, 42 | "engines": { 43 | "node": ">= 20" 44 | }, 45 | "publishConfig": { 46 | "registry": "https://registry.npmjs.org/", 47 | "access": "public" 48 | }, 49 | "scripts": { 50 | "artifacts": "napi artifacts", 51 | "bench": "node --import @swc-node/register/esm-register benchmark/bench.ts", 52 | "build": 
"napi build --platform --release --pipe \"node ./scripts/fix-files.mjs\"", 53 | "build:debug": "napi build --platform --pipe \"node ./scripts/fix-files.mjs\"", 54 | "format": "run-p format:prettier format:rs format:toml", 55 | "format:prettier": "prettier . -w", 56 | "format:toml": "taplo format", 57 | "format:rs": "cargo fmt", 58 | "lint": "oxlint", 59 | "prepublishOnly": "napi prepublish -t npm", 60 | "test": "ava", 61 | "version": "napi version" 62 | }, 63 | "devDependencies": { 64 | "@emnapi/core": "^1.2.0", 65 | "@emnapi/runtime": "^1.2.0", 66 | "@napi-rs/cli": "3.0.0-alpha.64", 67 | "@napi-rs/wasm-runtime": "^0.2.4", 68 | "@swc-node/register": "^1.10.6", 69 | "@swc/core": "^1.6.13", 70 | "@taplo/cli": "^0.7.0", 71 | "@tybys/wasm-util": "^0.9.0", 72 | "@types/node": "^22.9.0", 73 | "ava": "^6.1.3", 74 | "chalk": "^5.3.0", 75 | "emnapi": "^1.2.0", 76 | "husky": "^9.0.11", 77 | "lint-staged": "^15.2.7", 78 | "npm-run-all2": "^7.0.0", 79 | "oxlint": "^0.11.0", 80 | "prettier": "^3.3.3", 81 | "tinybench": "^3.0.0", 82 | "tsx": "^4.19.2", 83 | "typescript": "^5.5.3" 84 | }, 85 | "lint-staged": { 86 | "*.@(js|ts|tsx)": [ 87 | "oxlint --fix" 88 | ], 89 | "*.@(js|ts|tsx|yml|yaml|md|json)": [ 90 | "prettier --write" 91 | ], 92 | "*.toml": [ 93 | "taplo format" 94 | ] 95 | }, 96 | "ava": { 97 | "extensions": { 98 | "ts": "module" 99 | }, 100 | "timeout": "2m", 101 | "workerThreads": false, 102 | "environmentVariables": { 103 | "TS_NODE_PROJECT": "./tsconfig.json" 104 | }, 105 | "nodeArguments": [ 106 | "--import", 107 | "@swc-node/register/esm-register" 108 | ] 109 | }, 110 | "prettier": { 111 | "printWidth": 120, 112 | "semi": true, 113 | "trailingComma": "all", 114 | "singleQuote": false, 115 | "arrowParens": "always" 116 | }, 117 | "packageManager": "pnpm@9.12.3" 118 | } 119 | -------------------------------------------------------------------------------- /rust-analyzer.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"procMacro": { 3 | "enable": true 4 | } 5 | } 6 | -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "stable" 3 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | version = "Two" 2 | edition = "2021" 3 | tab_spaces = 2 4 | -------------------------------------------------------------------------------- /scripts/fix-files.mjs: -------------------------------------------------------------------------------- 1 | import { readFileSync, writeFileSync } from "node:fs"; 2 | 3 | function addGenericTypes(filename) { 4 | const content = readFileSync(filename, "utf8"); 5 | const updatedContent = content 6 | .replace(/export declare class List\b(.*){/, "export declare class List$1{") 7 | .replace(/export declare class Map\b(.*){/, "export declare class Map$1{") 8 | .replace(/export declare class Set\b(.*){/, "export declare class Set$1{"); 9 | 10 | writeFileSync(filename, updatedContent); 11 | } 12 | 13 | // Append to filename inspectors for custom types 14 | function addInspector(filename) { 15 | writeFileSync( 16 | filename, 17 | readFileSync(filename, "utf8") 18 | .concat( 19 | ` 20 | const customInspectSymbol = Symbol.for('nodejs.util.inspect.custom') 21 | 22 | nativeBinding.Uuid.prototype[customInspectSymbol] = function () { return this.toString(); } 23 | nativeBinding.Duration.prototype[customInspectSymbol] = function () { return this.toString(); } 24 | nativeBinding.Decimal.prototype[customInspectSymbol] = function () { return this.toString(); } 25 | nativeBinding.Float.prototype[customInspectSymbol] = function () { return this.toString(); } 26 | nativeBinding.Double.prototype[customInspectSymbol] = function () { return this.toString(); } 27 | 
nativeBinding.List.prototype[customInspectSymbol] = function () { return this.toString(); } 28 | nativeBinding.Set.prototype[customInspectSymbol] = function () { return this.toString(); } 29 | nativeBinding.Map.prototype[customInspectSymbol] = function () { return this.toString(); } 30 | nativeBinding.Varint.prototype[customInspectSymbol] = function () { return this.toString(); } 31 | `, 32 | ) 33 | .trim(), 34 | ); 35 | } 36 | 37 | function addJSQueryResultType(filename) { 38 | writeFileSync( 39 | filename, 40 | readFileSync(filename, "utf8") 41 | .concat( 42 | ` 43 | type NativeTypes = number | string | Uuid | bigint | Duration | Decimal | Float | List; 44 | type WithMapType = NativeTypes | Record | NativeTypes[]; 45 | type ParameterWithMapType = WithMapType; 46 | type JSQueryResult = Record[]; 47 | type TracingReturn = { result: JSQueryResult; tracing: TracingInfo }; 48 | 49 | export interface TracingInfo { 50 | client?: string; // IP address as a string 51 | command?: string; 52 | coordinator?: string; // IP address as a string 53 | duration?: number; 54 | parameters?: Record; 55 | request?: string; 56 | /** 57 | * started_at is a timestamp - time since unix epoch 58 | */ 59 | started_at?: string; 60 | events: TracingEvent[]; 61 | } 62 | 63 | /** 64 | * A single event happening during a traced query 65 | */ 66 | export interface TracingEvent { 67 | event_id: string; 68 | activity?: string; 69 | source?: string; // IP address as a string 70 | source_elapsed?: number; 71 | thread?: string; 72 | } 73 | `, 74 | ) 75 | .trim(), 76 | ); 77 | } 78 | 79 | const filename = process.argv[process.argv.length - 1]; 80 | if (filename.endsWith("index.js")) addInspector(filename); 81 | else if (filename.endsWith("index.d.ts")) { 82 | addGenericTypes(filename); 83 | addJSQueryResultType(filename); 84 | } 85 | -------------------------------------------------------------------------------- /simple-test.js: 
-------------------------------------------------------------------------------- 1 | const { plus100 } = require('./index') 2 | 3 | console.assert(plus100(0) === 100, 'Simple test failed') 4 | 5 | console.info('Simple test passed') 6 | -------------------------------------------------------------------------------- /src/cluster/cluster_config/compression.rs: -------------------------------------------------------------------------------- 1 | #[napi] 2 | pub enum Compression { 3 | None, 4 | Lz4, 5 | Snappy, 6 | } 7 | 8 | impl From for Option { 9 | fn from(value: Compression) -> Self { 10 | match value { 11 | Compression::None => None, 12 | Compression::Lz4 => Some(scylla::transport::Compression::Lz4), 13 | Compression::Snappy => Some(scylla::transport::Compression::Snappy), 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /src/cluster/cluster_config/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::cluster::{ 2 | cluster_config::compression::Compression, 3 | execution_profile::ExecutionProfile, 4 | scylla_cluster::{Auth, Ssl}, 5 | }; 6 | 7 | pub mod compression; 8 | 9 | #[napi(object)] 10 | pub struct ClusterConfig { 11 | pub nodes: Vec, 12 | pub compression: Option, 13 | pub default_execution_profile: Option, 14 | 15 | pub keyspace: Option, 16 | pub auth: Option, 17 | pub ssl: Option, 18 | 19 | /// The driver automatically awaits schema agreement after a schema-altering query is executed. Waiting for schema agreement more than necessary is never a bug, but might slow down applications which do a lot of schema changes (e.g. a migration). For instance, in case where somebody wishes to create a keyspace and then a lot of tables in it, it makes sense only to wait after creating a keyspace and after creating all the tables rather than after every query. 
20 | pub auto_await_schema_agreement: Option, 21 | /// If the schema is not agreed upon, the driver sleeps for a duration in seconds before checking it again. The default value is 0.2 (200 milliseconds) 22 | pub schema_agreement_interval: Option, 23 | } 24 | -------------------------------------------------------------------------------- /src/cluster/execution_profile/consistency.rs: -------------------------------------------------------------------------------- 1 | #[napi] 2 | pub enum Consistency { 3 | Any = 0x0000, 4 | One = 0x0001, 5 | Two = 0x0002, 6 | Three = 0x0003, 7 | Quorum = 0x0004, 8 | All = 0x0005, 9 | LocalQuorum = 0x0006, 10 | EachQuorum = 0x0007, 11 | LocalOne = 0x000A, 12 | 13 | // Apparently, Consistency can be set to Serial or LocalSerial in SELECT statements 14 | // to make them use Paxos. 15 | Serial = 0x0008, 16 | LocalSerial = 0x0009, 17 | } 18 | 19 | impl From for scylla::statement::Consistency { 20 | fn from(value: Consistency) -> Self { 21 | match value { 22 | Consistency::Any => Self::Any, 23 | Consistency::One => Self::One, 24 | Consistency::Two => Self::Two, 25 | Consistency::Three => Self::Three, 26 | Consistency::Quorum => Self::Quorum, 27 | Consistency::All => Self::All, 28 | Consistency::LocalQuorum => Self::LocalQuorum, 29 | Consistency::EachQuorum => Self::EachQuorum, 30 | Consistency::LocalOne => Self::LocalOne, 31 | Consistency::Serial => Self::Serial, 32 | Consistency::LocalSerial => Self::LocalSerial, 33 | } 34 | } 35 | } 36 | 37 | impl From for Consistency { 38 | fn from(value: scylla::statement::Consistency) -> Self { 39 | match value { 40 | scylla::statement::Consistency::Any => Self::Any, 41 | scylla::statement::Consistency::One => Self::One, 42 | scylla::statement::Consistency::Two => Self::Two, 43 | scylla::statement::Consistency::Three => Self::Three, 44 | scylla::statement::Consistency::Quorum => Self::Quorum, 45 | scylla::statement::Consistency::All => Self::All, 46 | scylla::statement::Consistency::LocalQuorum => 
Self::LocalQuorum, 47 | scylla::statement::Consistency::EachQuorum => Self::EachQuorum, 48 | scylla::statement::Consistency::LocalOne => Self::LocalOne, 49 | scylla::statement::Consistency::Serial => Self::Serial, 50 | scylla::statement::Consistency::LocalSerial => Self::LocalSerial, 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/cluster/execution_profile/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod consistency; 2 | pub mod serial_consistency; 3 | 4 | use self::consistency::Consistency; 5 | use self::serial_consistency::SerialConsistency; 6 | 7 | #[napi(object)] 8 | #[derive(Copy, Clone)] 9 | pub struct ExecutionProfile { 10 | pub consistency: Option, 11 | pub serial_consistency: Option, 12 | pub request_timeout: Option, 13 | } 14 | 15 | impl ExecutionProfile { 16 | fn create_execution_profile(self) -> scylla::ExecutionProfile { 17 | let mut ec_builder = scylla::transport::ExecutionProfile::builder(); 18 | 19 | if let Some(consistency) = self.consistency { 20 | ec_builder = ec_builder.consistency(consistency.into()); 21 | } 22 | 23 | ec_builder = ec_builder.serial_consistency(self.serial_consistency.map(|sc| sc.into())); 24 | 25 | if let Some(request_timeout) = self.request_timeout { 26 | ec_builder = 27 | ec_builder.request_timeout(Some(std::time::Duration::from_secs(request_timeout.into()))); 28 | } 29 | 30 | ec_builder.build() 31 | } 32 | 33 | pub(crate) fn into_handle(self) -> scylla::execution_profile::ExecutionProfileHandle { 34 | self.create_execution_profile().into_handle() 35 | } 36 | 37 | pub(crate) fn into_handle_with_label( 38 | self, 39 | label: String, 40 | ) -> scylla::execution_profile::ExecutionProfileHandle { 41 | self 42 | .create_execution_profile() 43 | .into_handle_with_label(label) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- 
/src/cluster/execution_profile/serial_consistency.rs: -------------------------------------------------------------------------------- 1 | #[napi] 2 | pub enum SerialConsistency { 3 | Serial = 0x0008, 4 | LocalSerial = 0x0009, 5 | } 6 | 7 | impl From for scylla::statement::SerialConsistency { 8 | fn from(value: SerialConsistency) -> Self { 9 | match value { 10 | SerialConsistency::Serial => Self::Serial, 11 | SerialConsistency::LocalSerial => Self::LocalSerial, 12 | } 13 | } 14 | } 15 | 16 | impl From for SerialConsistency { 17 | fn from(value: scylla::statement::SerialConsistency) -> Self { 18 | match value { 19 | scylla::statement::SerialConsistency::Serial => Self::Serial, 20 | scylla::statement::SerialConsistency::LocalSerial => Self::LocalSerial, 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/cluster/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cluster_config; 2 | pub mod execution_profile; 3 | pub mod scylla_cluster; 4 | -------------------------------------------------------------------------------- /src/cluster/scylla_cluster.rs: -------------------------------------------------------------------------------- 1 | use std::time::Duration; 2 | 3 | use napi::Either; 4 | use openssl::ssl::{SslContextBuilder, SslFiletype}; 5 | 6 | use crate::{ 7 | cluster::{ 8 | cluster_config::{compression::Compression, ClusterConfig}, 9 | execution_profile::ExecutionProfile, 10 | }, 11 | session::scylla_session::ScyllaSession, 12 | }; 13 | 14 | #[napi(js_name = "Cluster")] 15 | struct ScyllaCluster { 16 | uri: String, 17 | compression: Option, 18 | default_execution_profile: Option, 19 | auto_await_schema_agreement: Option, 20 | schema_agreement_interval: Option, 21 | 22 | // connection fields 23 | connection: Option, 24 | } 25 | 26 | #[napi(object)] 27 | struct ConnectionOptions { 28 | pub keyspace: Option, 29 | pub auth: Option, 30 | pub ssl: Option, 31 | 
} 32 | 33 | #[napi(object)] 34 | #[derive(Clone, Debug)] 35 | pub struct Auth { 36 | pub username: String, 37 | pub password: String, 38 | } 39 | 40 | #[napi(object)] 41 | #[derive(Clone)] 42 | pub struct Ssl { 43 | pub enabled: bool, 44 | pub ca_filepath: Option, 45 | pub private_key_filepath: Option, 46 | pub truststore_filepath: Option, 47 | pub verify_mode: Option, 48 | // SSL Filetype: PEM / ASN1 49 | } 50 | 51 | #[napi] 52 | pub enum VerifyMode { 53 | None, 54 | Peer, 55 | } 56 | 57 | #[napi] 58 | impl ScyllaCluster { 59 | /// Object config is in the format: 60 | /// { 61 | /// nodes: Array, 62 | /// } 63 | #[napi(constructor)] 64 | pub fn new(cluster_config: ClusterConfig) -> Self { 65 | let ClusterConfig { 66 | nodes, 67 | compression, 68 | default_execution_profile, 69 | keyspace, 70 | auth, 71 | ssl, 72 | auto_await_schema_agreement, 73 | schema_agreement_interval, 74 | } = cluster_config; 75 | 76 | let uri = nodes.first().expect("at least one node is required"); 77 | 78 | Self { 79 | uri: uri.to_string(), 80 | compression, 81 | default_execution_profile, 82 | connection: Some(ConnectionOptions { 83 | keyspace, 84 | auth, 85 | ssl, 86 | }), 87 | auto_await_schema_agreement, 88 | schema_agreement_interval: schema_agreement_interval.map(|d| Duration::from_secs(d as u64)), 89 | } 90 | } 91 | 92 | #[napi] 93 | /// Connect to the cluster 94 | pub async fn connect( 95 | &self, 96 | keyspace_or_options: Option>, 97 | options: Option, 98 | ) -> napi::Result { 99 | let mut builder = scylla::SessionBuilder::new().known_node(self.uri.as_str()); 100 | 101 | // TODO: We need to think of a better way to deal with keyspace possibly being options 102 | let keyspace: Result, napi::Error> = match (&keyspace_or_options, &options) { 103 | (Some(Either::A(keyspace)), _) => Ok(Some(keyspace.clone())), 104 | (Some(Either::B(options)), _) => { 105 | if options.keyspace.is_none() { 106 | Ok( 107 | self 108 | .connection 109 | .as_ref() 110 | .and_then(|conn| 
conn.keyspace.clone()), 111 | ) 112 | } else { 113 | Ok(options.keyspace.clone()) 114 | } 115 | } 116 | (None, Some(options)) => { 117 | if options.keyspace.is_none() { 118 | Ok( 119 | self 120 | .connection 121 | .as_ref() 122 | .and_then(|conn| conn.keyspace.clone()), 123 | ) 124 | } else { 125 | Ok(options.keyspace.clone()) 126 | } 127 | } 128 | (None, None) => Ok( 129 | self 130 | .connection 131 | .as_ref() 132 | .and_then(|conn| conn.keyspace.clone()), 133 | ), 134 | }; 135 | 136 | let auth = match (&keyspace_or_options, &options) { 137 | (Some(Either::A(_)), Some(options)) => Ok(options.auth.clone()), // when keyspace is provided as a string 138 | (Some(Either::A(_)), None) => Ok(self.connection.as_ref().and_then(|conn| conn.auth.clone())), // when keyspace is provided as a string and options is not provided 139 | (Some(Either::B(options)), None) => { 140 | if options.auth.is_none() { 141 | Ok(self.connection.as_ref().and_then(|conn| conn.auth.clone())) 142 | } else { 143 | Ok(options.auth.clone()) 144 | } 145 | } // when keyspace is provided as an object 146 | (Some(Either::B(_)), Some(_)) => Err(napi::Error::new( 147 | napi::Status::InvalidArg, 148 | "Options cannot be provided twice", 149 | )), // when keyspace is provided as an object and options is already provided 150 | (None, Some(options)) => { 151 | if options.auth.is_none() { 152 | Ok(self.connection.as_ref().and_then(|conn| conn.auth.clone())) 153 | } else { 154 | Ok(options.auth.clone()) 155 | } 156 | } // when keyspace is not provided and options is provided (shouldn't happen) 157 | (None, None) => Ok(self.connection.as_ref().and_then(|conn| conn.auth.clone())), // when keyspace is not provided and options is not provided 158 | }; 159 | 160 | let ssl = match (&keyspace_or_options, &options) { 161 | (Some(Either::A(_)), Some(options)) => { 162 | if options.ssl.is_none() { 163 | Ok(self.connection.as_ref().and_then(|conn| conn.ssl.clone())) 164 | } else { 165 | Ok(options.ssl.clone()) 166 | } 167 
| } 168 | (Some(Either::B(_)), Some(_)) => Err(napi::Error::new( 169 | napi::Status::InvalidArg, 170 | "Options cannot be provided twice", 171 | )), 172 | (Some(Either::B(options)), None) => { 173 | if options.ssl.is_none() { 174 | Ok(self.connection.as_ref().and_then(|conn| conn.ssl.clone())) 175 | } else { 176 | Ok(options.ssl.clone()) 177 | } 178 | } 179 | (None, Some(options)) => { 180 | if options.ssl.is_none() { 181 | Ok(self.connection.as_ref().and_then(|conn| conn.ssl.clone())) 182 | } else { 183 | Ok(options.ssl.clone()) 184 | } 185 | } 186 | (None, None) => Ok(self.connection.as_ref().and_then(|conn| conn.ssl.clone())), 187 | (Some(Either::A(_)), None) => Ok(self.connection.as_ref().and_then(|conn| conn.ssl.clone())), 188 | }; 189 | 190 | if let Some(keyspace) = keyspace.clone()? { 191 | builder = builder.use_keyspace(keyspace, false); 192 | } 193 | 194 | if let Some(auth) = auth? { 195 | builder = builder.user(auth.username, auth.password); 196 | } 197 | 198 | if let Some(ssl) = ssl? 
{ 199 | if ssl.enabled { 200 | let ssl_builder = SslContextBuilder::new(openssl::ssl::SslMethod::tls()); 201 | 202 | if let Err(err) = ssl_builder { 203 | return Err(napi::Error::new( 204 | napi::Status::InvalidArg, 205 | format!("Failed to create SSL context: {}", err), 206 | )); 207 | } 208 | 209 | // Safe to unwrap because we checked for Err above 210 | let mut ssl_builder = ssl_builder.unwrap(); 211 | 212 | if let Some(verify_mode) = ssl.verify_mode { 213 | ssl_builder.set_verify(match verify_mode { 214 | VerifyMode::None => openssl::ssl::SslVerifyMode::NONE, 215 | VerifyMode::Peer => openssl::ssl::SslVerifyMode::PEER, 216 | }); 217 | } else { 218 | ssl_builder.set_verify(openssl::ssl::SslVerifyMode::NONE); 219 | } 220 | 221 | if let Some(private_key_filepath) = ssl.private_key_filepath { 222 | if let Err(err) = ssl_builder.set_private_key_file(private_key_filepath, SslFiletype::PEM) 223 | { 224 | return Err(napi::Error::new( 225 | napi::Status::InvalidArg, 226 | format!("Failed to set private key file: {}", err), 227 | )); 228 | } 229 | } 230 | 231 | if let Some(truststore_filepath) = ssl.truststore_filepath { 232 | if let Err(err) = ssl_builder.set_certificate_chain_file(truststore_filepath) { 233 | return Err(napi::Error::new( 234 | napi::Status::InvalidArg, 235 | format!("Failed to set truststore file: {}", err), 236 | )); 237 | } 238 | } 239 | 240 | if let Some(ca_filepath) = ssl.ca_filepath { 241 | if let Err(err) = ssl_builder.set_ca_file(ca_filepath) { 242 | return Err(napi::Error::new( 243 | napi::Status::InvalidArg, 244 | format!("Failed to set CA file: {}", err), 245 | )); 246 | } 247 | } 248 | 249 | if let Some(auto_await_schema_agreement) = self.auto_await_schema_agreement { 250 | builder = builder.auto_await_schema_agreement(auto_await_schema_agreement); 251 | } 252 | 253 | if let Some(schema_agreement_interval) = self.schema_agreement_interval { 254 | builder = builder.schema_agreement_interval(schema_agreement_interval); 255 | } 256 | 257 | 
builder = builder.ssl_context(Some(ssl_builder.build())); 258 | } 259 | } 260 | 261 | if let Some(default_execution_profile) = &self.default_execution_profile { 262 | builder = builder.default_execution_profile_handle(default_execution_profile.into_handle()); 263 | } 264 | 265 | if let Some(compression) = self.compression { 266 | builder = builder.compression(compression.into()); 267 | } 268 | 269 | let session = builder.build().await; 270 | 271 | match session { 272 | Ok(session) => Ok(ScyllaSession::new(session)), 273 | Err(err) => Err(napi::Error::from_reason(format!( 274 | "Failed to connect to the database: {} - [{uri}] - Keyspace: {keyspace}", 275 | err, 276 | uri = self.uri, 277 | keyspace = keyspace 278 | .unwrap_or(Some("No keyspace provided".to_string())) 279 | .unwrap_or("No keyspace provided".to_string()) 280 | ))), 281 | } 282 | } 283 | } 284 | -------------------------------------------------------------------------------- /src/error.rs: -------------------------------------------------------------------------------- 1 | // https://github.com/surrealdb/surrealdb.node/blob/main/src/error.rs 2 | pub fn pipe_error(err: impl std::error::Error) -> napi::Error { 3 | napi::Error::from_reason(err.to_string()) 4 | } 5 | 6 | pub fn pipe_error_from_string(err: impl std::string::ToString) -> napi::Error { 7 | napi::Error::from_reason(err.to_string()) 8 | } 9 | -------------------------------------------------------------------------------- /src/helpers/cql_value_bridge.rs: -------------------------------------------------------------------------------- 1 | use napi::bindgen_prelude::{BigInt, Either14, Either15}; 2 | use scylla::frame::response::result::CqlValue; 3 | 4 | use std::collections::HashMap; 5 | 6 | use crate::types::{ 7 | decimal::Decimal, double::Double, duration::Duration, float::Float, list::List, map::Map, 8 | set::Set, uuid::Uuid, varint::Varint, 9 | }; 10 | 11 | use super::to_cql_value::ToCqlValue; 12 | 13 | macro_rules! 
define_expected_type { 14 | ($lifetime:lifetime, $($t:ty),+) => { 15 | pub type ParameterNativeTypes<$lifetime> = Either14<$($t),+>; 16 | pub type ParameterWithMapType<$lifetime> = Either15<$($t),+, HashMap>>; 17 | pub type JSQueryParameters<$lifetime> = napi::Result>>>; 18 | }; 19 | } 20 | 21 | define_expected_type!('a, u32, String, &'a Uuid, BigInt, &'a Duration, &'a Decimal, bool, Vec, &'a Float, &'a Varint, &'a List, &'a Set, &'a Map, &'a Double); 22 | 23 | impl<'a> ToCqlValue for ParameterWithMapType<'a> { 24 | fn to_cql_value(&self) -> CqlValue { 25 | match self { 26 | ParameterWithMapType::A(num) => num.to_cql_value(), 27 | ParameterWithMapType::B(str) => str.to_cql_value(), 28 | ParameterWithMapType::C(uuid) => uuid.to_cql_value(), 29 | ParameterWithMapType::D(bigint) => bigint.to_cql_value(), 30 | ParameterWithMapType::E(duration) => duration.to_cql_value(), 31 | ParameterWithMapType::F(decimal) => decimal.to_cql_value(), 32 | ParameterWithMapType::G(bool_val) => bool_val.to_cql_value(), 33 | ParameterWithMapType::H(buffer) => buffer.to_cql_value(), 34 | ParameterWithMapType::I(float) => float.to_cql_value(), 35 | ParameterWithMapType::J(varint) => varint.to_cql_value(), 36 | ParameterWithMapType::K(list) => list.to_cql_value(), 37 | ParameterWithMapType::L(set) => set.to_cql_value(), 38 | ParameterWithMapType::M(map) => map.to_cql_value(), 39 | ParameterWithMapType::N(double) => double.to_cql_value(), 40 | ParameterWithMapType::O(map) => CqlValue::UserDefinedType { 41 | // TODO: think a better way to fill this info here 42 | keyspace: "keyspace".to_string(), 43 | type_name: "type_name".to_string(), 44 | fields: map 45 | .iter() 46 | .map(|(key, value)| (key.clone(), Some(value.to_cql_value()))) 47 | .collect::)>>(), 48 | }, 49 | } 50 | } 51 | } 52 | 53 | impl<'a> ToCqlValue for ParameterNativeTypes<'a> { 54 | fn to_cql_value(&self) -> CqlValue { 55 | match self { 56 | ParameterNativeTypes::A(num) => num.to_cql_value(), 57 | ParameterNativeTypes::B(str) => 
str.to_cql_value(), 58 | ParameterNativeTypes::C(uuid) => uuid.to_cql_value(), 59 | ParameterNativeTypes::D(bigint) => bigint.to_cql_value(), 60 | ParameterNativeTypes::E(duration) => duration.to_cql_value(), 61 | ParameterNativeTypes::F(decimal) => decimal.to_cql_value(), 62 | ParameterNativeTypes::G(bool_val) => bool_val.to_cql_value(), 63 | ParameterNativeTypes::H(buffer) => buffer.to_cql_value(), 64 | ParameterNativeTypes::J(varint) => varint.to_cql_value(), 65 | ParameterNativeTypes::I(float) => float.to_cql_value(), 66 | ParameterNativeTypes::K(list) => list.to_cql_value(), 67 | ParameterNativeTypes::L(set) => set.to_cql_value(), 68 | ParameterNativeTypes::M(map) => map.to_cql_value(), 69 | ParameterNativeTypes::N(double) => double.to_cql_value(), 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /src/helpers/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod cql_value_bridge; 2 | pub mod query_parameter; 3 | pub mod query_results; 4 | pub mod to_cql_value; 5 | -------------------------------------------------------------------------------- /src/helpers/query_parameter.rs: -------------------------------------------------------------------------------- 1 | use scylla::serialize::{ 2 | row::{RowSerializationContext, SerializeRow}, 3 | value::SerializeCql, 4 | RowWriter, SerializationError, 5 | }; 6 | 7 | use super::{cql_value_bridge::ParameterWithMapType, to_cql_value::ToCqlValue}; 8 | 9 | #[derive(Debug, Clone)] 10 | pub struct QueryParameter<'a> { 11 | #[allow(clippy::type_complexity)] 12 | pub(crate) parameters: Option>>, 13 | } 14 | 15 | impl<'a> SerializeRow for QueryParameter<'a> { 16 | fn serialize( 17 | &self, 18 | ctx: &RowSerializationContext<'_>, 19 | writer: &mut RowWriter, 20 | ) -> Result<(), SerializationError> { 21 | if let Some(parameters) = &self.parameters { 22 | for (i, parameter) in parameters.iter().enumerate() { 23 | parameter 24 | 
.to_cql_value()
          .serialize(&ctx.columns()[i].typ, writer.make_cell_writer())?;
      }
    }
    Ok(())
  }

  /// A parameter list is "empty" when it is absent or has zero elements.
  fn is_empty(&self) -> bool {
    self.parameters.is_none() || self.parameters.as_ref().unwrap().is_empty()
  }
}

impl<'a> QueryParameter<'a> {
  /// Normalizes the raw JS-provided parameter list into a `QueryParameter`.
  ///
  /// `None` input is valid (a query without bind values) and yields a holder
  /// with no parameters; otherwise the values are moved into an owned `Vec`.
  // NOTE(review): the generic arguments below were lost in extraction and have
  // been reconstructed from the field type and call sites — confirm.
  #[allow(clippy::type_complexity)]
  pub fn parser(parameters: Option<Vec<ParameterWithMapType<'a>>>) -> Option<Self> {
    match parameters {
      None => Some(QueryParameter { parameters: None }),
      Some(parameters) => Some(QueryParameter {
        parameters: Some(parameters.into_iter().collect()),
      }),
    }
  }
}
--------------------------------------------------------------------------------
/src/helpers/query_results.rs:
--------------------------------------------------------------------------------
use std::collections::HashMap;

use napi::bindgen_prelude::{BigInt, Either10, Either11, Either9};
use scylla::frame::response::result::{ColumnType, CqlValue};

use crate::types::{decimal::Decimal, duration::Duration, uuid::Uuid};

/// Thin wrapper over the driver's `QueryResult`, used to convert rows to JS values.
pub struct QueryResult {
  pub(crate) result: scylla::QueryResult,
}

macro_rules!
define_return_type { 12 | ($($t:ty),+) => { 13 | type BaseTypes = Either9<$($t),+>; 14 | type NativeTypes = Either10<$($t),+, Vec>; 15 | pub type WithMapType = Either11<$($t),+, Vec, HashMap>; 16 | type ReturnType = napi::Result>; 17 | pub type JSQueryResult = napi::Result>>; 18 | }; 19 | } 20 | 21 | define_return_type!( 22 | String, 23 | i64, 24 | f64, 25 | bool, 26 | BigInt, 27 | Uuid, 28 | Duration, 29 | Decimal, 30 | Vec 31 | ); 32 | 33 | impl QueryResult { 34 | pub fn parser(result: scylla::QueryResult) -> JSQueryResult { 35 | if result.result_not_rows().is_ok() || result.rows.is_none() { 36 | return Ok(Default::default()); 37 | } 38 | 39 | let rows = result.rows.unwrap(); 40 | let column_specs = result.col_specs; 41 | 42 | let mut result_json: Vec> = vec![]; 43 | 44 | for row in rows { 45 | let mut row_object: HashMap = HashMap::new(); 46 | 47 | for (i, column) in row.columns.iter().enumerate() { 48 | let column_name = column_specs[i].name.clone(); 49 | let column_value = Self::parse_value(column, &column_specs[i].typ)?; 50 | if let Some(column_value) = column_value { 51 | row_object.insert(column_name, column_value); 52 | } 53 | } 54 | 55 | result_json.push(row_object); 56 | } 57 | 58 | Ok(result_json) 59 | } 60 | 61 | fn parse_value(column: &Option, column_type: &ColumnType) -> ReturnType { 62 | column 63 | .as_ref() 64 | .map(|column| match column_type { 65 | ColumnType::Ascii => Ok(WithMapType::A(column.as_ascii().unwrap().to_string())), 66 | ColumnType::Text => Ok(WithMapType::A(column.as_text().unwrap().to_string())), 67 | ColumnType::Uuid => Ok(WithMapType::F(Uuid { 68 | uuid: column.as_uuid().unwrap(), 69 | })), 70 | ColumnType::BigInt => Ok(WithMapType::E(column.as_bigint().unwrap().into())), 71 | ColumnType::Int => Ok(WithMapType::B(column.as_int().unwrap() as i64)), 72 | ColumnType::Float => Ok(WithMapType::C(column.as_float().unwrap() as f64)), 73 | ColumnType::Double => Ok(WithMapType::C(column.as_double().unwrap())), 74 | ColumnType::Boolean => 
Ok(WithMapType::D(column.as_boolean().unwrap())), 75 | ColumnType::SmallInt => Ok(WithMapType::B(column.as_smallint().unwrap() as i64)), 76 | ColumnType::TinyInt => Ok(WithMapType::B(column.as_tinyint().unwrap() as i64)), 77 | ColumnType::Date | ColumnType::Timestamp => { 78 | Ok(WithMapType::A(column.as_date().unwrap().to_string())) 79 | } 80 | ColumnType::Inet => Ok(WithMapType::A(column.as_inet().unwrap().to_string())), 81 | ColumnType::Duration => Ok(WithMapType::G(column.as_cql_duration().unwrap().into())), 82 | ColumnType::Decimal => Ok(WithMapType::H( 83 | column.clone().into_cql_decimal().unwrap().into(), 84 | )), 85 | ColumnType::Blob => Ok(WithMapType::I(column.as_blob().unwrap().clone())), 86 | ColumnType::Counter => Ok(WithMapType::B(column.as_counter().unwrap().0)), 87 | ColumnType::Varint => Ok(WithMapType::I( 88 | column 89 | .clone() 90 | .into_cql_varint() 91 | .unwrap() 92 | .as_signed_bytes_be_slice() 93 | .into(), 94 | )), 95 | ColumnType::Time => Ok(WithMapType::B(column.as_time().unwrap().nanosecond() as i64)), 96 | ColumnType::Timeuuid => Ok(WithMapType::F(column.as_timeuuid().unwrap().into())), 97 | ColumnType::Map(key, value) => { 98 | let map = column 99 | .as_map() 100 | .unwrap() 101 | .iter() 102 | .map(|(k, v)| { 103 | let key = Self::parse_value(&Some(k.clone()), key).unwrap(); 104 | let value = 105 | Self::remove_map_from_type(Self::parse_value(&Some(v.clone()), value).unwrap())? 106 | .unwrap(); 107 | key 108 | .map(|key| match key { 109 | WithMapType::A(key) => Ok((key, value)), 110 | _ => Err(napi::Error::new( 111 | napi::Status::GenericFailure, 112 | "Map key must be a string", 113 | )), 114 | }) 115 | .transpose() 116 | }) 117 | .collect::>>>(); 118 | 119 | Ok(WithMapType::K(map?.unwrap())) 120 | } 121 | ColumnType::UserDefinedType { field_types, .. 
} => Ok(WithMapType::K(Self::parse_udt( 122 | column.as_udt().unwrap(), 123 | field_types, 124 | )?)), 125 | ColumnType::List(list_type) => Ok(WithMapType::J(Self::extract_base_types( 126 | column 127 | .as_list() 128 | .unwrap() 129 | .iter() 130 | .map(|e| Self::parse_value(&Some(e.clone()), list_type)) 131 | .collect::>(), 132 | )?)), 133 | ColumnType::Set(set_type) => Ok(WithMapType::J(Self::extract_base_types( 134 | column 135 | .as_set() 136 | .unwrap() 137 | .iter() 138 | .map(|e| Self::parse_value(&Some(e.clone()), set_type)) 139 | .collect::>(), 140 | )?)), 141 | ColumnType::Custom(_) => Ok(WithMapType::A( 142 | "ColumnType Custom not supported yet".to_string(), 143 | )), 144 | ColumnType::Tuple(_) => Ok(WithMapType::A( 145 | "ColumnType Tuple not supported yet".to_string(), 146 | )), 147 | }) 148 | .transpose() 149 | } 150 | 151 | fn parse_udt( 152 | udt: &[(String, Option)], 153 | field_types: &[(String, ColumnType)], 154 | ) -> napi::Result> { 155 | let mut result: HashMap = HashMap::new(); 156 | 157 | for (i, (field_name, field_value)) in udt.iter().enumerate() { 158 | let field_type = &field_types[i].1; 159 | let parsed_value = Self::parse_value(field_value, field_type); 160 | if let Some(parsed_value) = Self::remove_map_from_type(parsed_value?)? 
{ 161 | result.insert(field_name.clone(), parsed_value); 162 | } 163 | } 164 | 165 | Ok(result) 166 | } 167 | 168 | fn remove_map_from_type(a: Option) -> napi::Result> { 169 | a.map(|f| match f { 170 | WithMapType::A(a) => Ok(NativeTypes::A(a)), 171 | WithMapType::B(a) => Ok(NativeTypes::B(a)), 172 | WithMapType::C(a) => Ok(NativeTypes::C(a)), 173 | WithMapType::D(a) => Ok(NativeTypes::D(a)), 174 | WithMapType::E(a) => Ok(NativeTypes::E(a)), 175 | WithMapType::F(a) => Ok(NativeTypes::F(a)), 176 | WithMapType::G(a) => Ok(NativeTypes::G(a)), 177 | WithMapType::H(a) => Ok(NativeTypes::H(a)), 178 | WithMapType::I(a) => Ok(NativeTypes::I(a)), 179 | WithMapType::J(a) => Ok(NativeTypes::J(a)), 180 | WithMapType::K(_) => Err(napi::Error::new( 181 | napi::Status::GenericFailure, 182 | "Map type is not supported in this context".to_string(), 183 | )), 184 | }) 185 | .transpose() 186 | } 187 | 188 | fn extract_base_types(return_types: Vec) -> napi::Result> { 189 | return_types 190 | .into_iter() 191 | .filter_map(|return_type| { 192 | return_type.ok().and_then(|opt_with_map_type| { 193 | opt_with_map_type.map(|with_map_type| match with_map_type { 194 | WithMapType::A(a) => Ok(BaseTypes::A(a)), 195 | WithMapType::B(b) => Ok(BaseTypes::B(b)), 196 | WithMapType::C(c) => Ok(BaseTypes::C(c)), 197 | WithMapType::D(d) => Ok(BaseTypes::D(d)), 198 | WithMapType::E(e) => Ok(BaseTypes::E(e)), 199 | WithMapType::F(f) => Ok(BaseTypes::F(f)), 200 | WithMapType::G(g) => Ok(BaseTypes::G(g)), 201 | WithMapType::H(h) => Ok(BaseTypes::H(h)), 202 | WithMapType::I(i) => Ok(BaseTypes::I(i)), 203 | WithMapType::J(_) | WithMapType::K(_) => Err(napi::Error::new( 204 | napi::Status::GenericFailure, 205 | "Nested collections or maps are not supported".to_string(), 206 | )), 207 | }) 208 | }) 209 | }) 210 | .collect::>>() 211 | } 212 | } 213 | -------------------------------------------------------------------------------- /src/helpers/to_cql_value.rs: 
-------------------------------------------------------------------------------- 1 | use napi::bindgen_prelude::BigInt; 2 | use scylla::frame::response::result::CqlValue; 3 | 4 | use crate::types::{ 5 | decimal::Decimal, double::Double, duration::Duration, float::Float, list::List, map::Map, 6 | set::Set, uuid::Uuid, varint::Varint, 7 | }; 8 | 9 | // Trait to abstract the conversion to CqlValue 10 | pub trait ToCqlValue { 11 | fn to_cql_value(&self) -> CqlValue; 12 | } 13 | 14 | // Implement ToCqlValue for various types 15 | impl ToCqlValue for u32 { 16 | fn to_cql_value(&self) -> CqlValue { 17 | CqlValue::Int(*self as i32) 18 | } 19 | } 20 | 21 | impl ToCqlValue for String { 22 | fn to_cql_value(&self) -> CqlValue { 23 | CqlValue::Text(self.clone()) 24 | } 25 | } 26 | 27 | impl ToCqlValue for &Uuid { 28 | fn to_cql_value(&self) -> CqlValue { 29 | CqlValue::Uuid(self.get_inner()) 30 | } 31 | } 32 | 33 | impl ToCqlValue for BigInt { 34 | fn to_cql_value(&self) -> CqlValue { 35 | CqlValue::BigInt(self.get_i64().0) 36 | } 37 | } 38 | 39 | impl ToCqlValue for &Duration { 40 | fn to_cql_value(&self) -> CqlValue { 41 | CqlValue::Duration((**self).into()) 42 | } 43 | } 44 | 45 | impl ToCqlValue for &Decimal { 46 | fn to_cql_value(&self) -> CqlValue { 47 | CqlValue::Decimal((*self).into()) 48 | } 49 | } 50 | 51 | impl ToCqlValue for bool { 52 | fn to_cql_value(&self) -> CqlValue { 53 | CqlValue::Boolean(*self) 54 | } 55 | } 56 | 57 | impl ToCqlValue for &Float { 58 | fn to_cql_value(&self) -> CqlValue { 59 | CqlValue::Float((*self).into()) 60 | } 61 | } 62 | 63 | impl ToCqlValue for &Double { 64 | fn to_cql_value(&self) -> CqlValue { 65 | CqlValue::Double((*self).into()) 66 | } 67 | } 68 | 69 | impl ToCqlValue for &Varint { 70 | fn to_cql_value(&self) -> CqlValue { 71 | CqlValue::Varint((*self).into()) 72 | } 73 | } 74 | 75 | impl ToCqlValue for &List { 76 | fn to_cql_value(&self) -> CqlValue { 77 | CqlValue::List(self.inner.clone()) 78 | } 79 | } 80 | 81 | impl ToCqlValue 
for &Set {
  fn to_cql_value(&self) -> CqlValue {
    CqlValue::Set(self.inner.clone())
  }
}

impl ToCqlValue for &Map {
  fn to_cql_value(&self) -> CqlValue {
    CqlValue::Map(self.inner.clone())
  }
}

// Helper function to convert u32 vector (JS numbers) to u8 vector.
// NOTE(review): values are truncated with `as u8`; confirm callers only pass 0-255.
fn u32_vec_to_u8_vec(input: &[u32]) -> Vec<u8> {
  input.iter().map(|&num| num as u8).collect()
}

impl ToCqlValue for Vec<u32> {
  fn to_cql_value(&self) -> CqlValue {
    CqlValue::Blob(u32_vec_to_u8_vec(self))
  }
}
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
#![allow(dead_code)]

#[macro_use]
extern crate napi_derive;

pub mod cluster;
pub mod error;
pub mod helpers;
pub mod query;
pub mod session;
pub mod types;
--------------------------------------------------------------------------------
/src/query/batch_statement.rs:
--------------------------------------------------------------------------------
use std::fmt::Display;

use napi::Either;
use scylla::batch::Batch;

use super::{scylla_prepared_statement::PreparedStatement, scylla_query::Query};

/// Batch statements
///
/// A batch statement allows to execute many data-modifying statements at once.
/// These statements can be simple or prepared.
/// Only INSERT, UPDATE and DELETE statements are allowed.
#[napi(js_name = "BatchStatement")]
pub struct ScyllaBatchStatement {
  pub(crate) batch: Batch,
}

impl Display for ScyllaBatchStatement {
  /// Formats the batch as the list of its statements' CQL texts
  /// (simple queries print their contents, prepared ones their statement).
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    write!(
      f,
      "ScyllaBatchStatement: {:?}",
      self
        .batch
        .statements
        .iter()
        .map(|s| match s {
          scylla::batch::BatchStatement::Query(q) => q.contents.clone(),
          scylla::batch::BatchStatement::PreparedStatement(p) => p.get_statement().to_string(),
        })
        // NOTE(review): turbofish was lost in extraction; `Vec<String>` matches
        // both map arms above — confirm against the original source.
        .collect::<Vec<String>>()
    )
  }
}

#[napi]
impl ScyllaBatchStatement {
  /// Creates an empty batch with the driver's default batch settings.
  #[napi(constructor)]
  pub fn new() -> Self {
    Self {
      batch: Default::default(),
    }
  }

  /// Appends a statement to the batch.
  ///
  /// _Warning_
  /// Using simple statements with bind markers in batches is strongly discouraged. For each simple statement with a non-empty list of values in the batch, the driver will send a prepare request, and it will be done sequentially. Results of preparation are not cached between `session.batch` calls. Consider preparing the statements before putting them into the batch.
49 | #[napi] 50 | pub fn append_statement(&mut self, statement: Either<&Query, &PreparedStatement>) { 51 | match statement { 52 | Either::A(simple_query) => self.batch.append_statement(simple_query.query.clone()), 53 | Either::B(prepared_statement) => self 54 | .batch 55 | .append_statement(prepared_statement.prepared.clone()), 56 | } 57 | } 58 | } 59 | 60 | impl Default for ScyllaBatchStatement { 61 | fn default() -> Self { 62 | Self::new() 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /src/query/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod batch_statement; 2 | pub mod scylla_prepared_statement; 3 | pub mod scylla_query; 4 | -------------------------------------------------------------------------------- /src/query/scylla_prepared_statement.rs: -------------------------------------------------------------------------------- 1 | use scylla::prepared_statement; 2 | 3 | use crate::cluster::execution_profile::{ 4 | consistency::Consistency, serial_consistency::SerialConsistency, 5 | }; 6 | 7 | #[napi] 8 | pub struct PreparedStatement { 9 | pub(crate) prepared: prepared_statement::PreparedStatement, 10 | } 11 | 12 | #[napi] 13 | impl PreparedStatement { 14 | pub fn new(prepared: prepared_statement::PreparedStatement) -> Self { 15 | Self { prepared } 16 | } 17 | 18 | #[napi] 19 | pub fn set_consistency(&mut self, consistency: Consistency) { 20 | self.prepared.set_consistency(consistency.into()); 21 | } 22 | 23 | #[napi] 24 | pub fn set_serial_consistency(&mut self, serial_consistency: SerialConsistency) { 25 | self 26 | .prepared 27 | .set_serial_consistency(Some(serial_consistency.into())); 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /src/query/scylla_query.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Display; 2 | 3 | use 
crate::cluster::execution_profile::{
  consistency::Consistency, serial_consistency::SerialConsistency,
};
use scylla::query;

/// A simple (unprepared) CQL query, wrapped for exposure to JS via NAPI.
#[napi]
pub struct Query {
  pub(crate) query: query::Query,
}

impl Display for Query {
  fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    write!(f, "ScyllaQuery: {}", self.query.contents)
  }
}

#[napi]
impl Query {
  #[napi(constructor)]
  pub fn new(query: String) -> Self {
    Self {
      query: query::Query::new(query),
    }
  }

  /// Sets the consistency level used when this query is executed.
  #[napi]
  pub fn set_consistency(&mut self, consistency: Consistency) {
    self.query.set_consistency(consistency.into());
  }

  /// Sets the serial consistency level (used by lightweight transactions).
  #[napi]
  pub fn set_serial_consistency(&mut self, serial_consistency: SerialConsistency) {
    self
      .query
      .set_serial_consistency(Some(serial_consistency.into()));
  }

  /// Sets the page size used when fetching this query's results.
  #[napi]
  pub fn set_page_size(&mut self, page_size: i32) {
    self.query.set_page_size(page_size);
  }
}
--------------------------------------------------------------------------------
/src/session/metrics.rs:
--------------------------------------------------------------------------------
use std::sync::Arc;

use crate::error::pipe_error_from_string;

/// Read-only view over the driver session's accumulated metrics.
#[napi]
pub struct Metrics {
  // NOTE(review): the generic argument was lost in extraction; the driver's
  // `Session::get_metrics()` returns `Arc<scylla::transport::metrics::Metrics>`
  // — confirm against the pinned scylla crate version.
  metrics: Arc<scylla::transport::metrics::Metrics>,
}

#[napi]
impl Metrics {
  pub fn new(metrics: Arc<scylla::transport::metrics::Metrics>) -> Self {
    Self { metrics }
  }

  /// Returns counter for nonpaged queries
  #[napi]
  pub fn get_queries_num(&self) -> u64 {
    self.metrics.get_queries_num()
  }

  /// Returns counter for pages requested in paged queries
  #[napi]
  pub fn get_queries_iter_num(&self) -> u64 {
    self.metrics.get_queries_iter_num()
  }

  /// Returns counter for errors occurred in nonpaged queries
  #[napi]
  pub fn get_errors_num(&self) -> u64 {
    self.metrics.get_errors_num()
  }

  /// Returns counter
for errors occurred in paged queries 35 | #[napi] 36 | pub fn get_errors_iter_num(&self) -> u64 { 37 | self.metrics.get_errors_iter_num() 38 | } 39 | 40 | /// Returns average latency in milliseconds 41 | #[napi] 42 | pub fn get_latency_avg_ms(&self) -> napi::Result { 43 | self 44 | .metrics 45 | .get_latency_avg_ms() 46 | .map_err(pipe_error_from_string) 47 | } 48 | 49 | /// Returns latency from histogram for a given percentile 50 | /// 51 | /// # Arguments 52 | /// 53 | /// * `percentile` - float value (0.0 - 100.0), value will be clamped to this range 54 | #[napi] 55 | pub fn get_latency_percentile_ms(&self, percentile: f64) -> napi::Result { 56 | self 57 | .metrics 58 | .get_latency_percentile_ms(percentile.clamp(0.0, 100.0)) 59 | .map_err(pipe_error_from_string) 60 | } 61 | } 62 | -------------------------------------------------------------------------------- /src/session/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod metrics; 2 | pub mod scylla_session; 3 | pub mod topology; 4 | -------------------------------------------------------------------------------- /src/session/scylla_session.rs: -------------------------------------------------------------------------------- 1 | use crate::helpers::cql_value_bridge::ParameterWithMapType; 2 | use crate::helpers::query_parameter::QueryParameter; 3 | use crate::helpers::query_results::{JSQueryResult, QueryResult}; 4 | use crate::query::batch_statement::ScyllaBatchStatement; 5 | use crate::query::scylla_prepared_statement::PreparedStatement; 6 | use crate::query::scylla_query::Query; 7 | use crate::types::tracing::TracingReturn; 8 | use crate::types::uuid::Uuid; 9 | use napi::bindgen_prelude::Either3; 10 | use napi::Either; 11 | use scylla::statement::query::Query as ScyllaQuery; 12 | 13 | use super::metrics; 14 | use super::topology::ScyllaClusterData; 15 | 16 | #[napi(object)] 17 | pub struct QueryOptions { 18 | pub prepare: Option, 19 | } 20 | 21 | #[napi] 22 | 
pub struct ScyllaSession { 23 | session: scylla::Session, 24 | } 25 | 26 | #[napi] 27 | impl ScyllaSession { 28 | pub fn new(session: scylla::Session) -> Self { 29 | Self { session } 30 | } 31 | 32 | #[napi] 33 | pub fn metrics(&self) -> metrics::Metrics { 34 | metrics::Metrics::new(self.session.get_metrics()) 35 | } 36 | 37 | #[napi] 38 | pub async fn get_cluster_data(&self) -> ScyllaClusterData { 39 | self 40 | .session 41 | .refresh_metadata() 42 | .await 43 | .expect("Failed to refresh metadata"); 44 | 45 | let cluster_data = self.session.get_cluster_data(); 46 | cluster_data.into() 47 | } 48 | 49 | #[napi] 50 | pub async fn execute_with_tracing( 51 | &self, 52 | query: Either3, 53 | parameters: Option>>, 54 | options: Option, 55 | ) -> napi::Result { 56 | let values = QueryParameter::parser(parameters.clone()).ok_or_else(|| { 57 | napi::Error::new( 58 | napi::Status::InvalidArg, 59 | format!( 60 | "Something went wrong with your query parameters. {:?}", 61 | parameters 62 | ), 63 | ) 64 | })?; 65 | 66 | let should_prepare = options.map_or(false, |options| options.prepare.unwrap_or(false)); 67 | 68 | match query { 69 | Either3::A(ref query_str) if should_prepare => { 70 | let mut prepared = self.session.prepare(query_str.clone()).await.map_err(|e| { 71 | napi::Error::new( 72 | napi::Status::InvalidArg, 73 | format!( 74 | "Something went wrong preparing your statement. 
- [{}]\n{}", 75 | query_str, e 76 | ), 77 | ) 78 | })?; 79 | prepared.set_tracing(true); 80 | self.execute_prepared(&prepared, values, query_str).await 81 | } 82 | Either3::A(query_str) => { 83 | let mut query = ScyllaQuery::new(query_str); 84 | query.set_tracing(true); 85 | self.execute_query(Either::B(query), values).await 86 | } 87 | Either3::B(query_ref) => { 88 | let mut query = query_ref.query.clone(); 89 | query.set_tracing(true); 90 | 91 | self.execute_query(Either::B(query), values).await 92 | } 93 | Either3::C(prepared_ref) => { 94 | let mut prepared = prepared_ref.prepared.clone(); 95 | prepared.set_tracing(true); 96 | 97 | self 98 | .execute_prepared(&prepared, values, prepared_ref.prepared.get_statement()) 99 | .await 100 | } 101 | } 102 | } 103 | 104 | /// Sends a query to the database and receives a response.\ 105 | /// Returns only a single page of results, to receive multiple pages use (TODO: Not implemented yet) 106 | /// 107 | /// This is the easiest way to make a query, but performance is worse than that of prepared queries. 108 | /// 109 | /// It is discouraged to use this method with non-empty values argument. In such case, query first needs to be prepared (on a single connection), so 110 | /// driver will perform 2 round trips instead of 1. Please use `PreparedStatement` object or `{ prepared: true }` option instead. 111 | /// 112 | /// # Notes 113 | /// 114 | /// ## UDT 115 | /// Order of fields in the object must match the order of fields as defined in the UDT. The 116 | /// driver does not check it by itself, so incorrect data will be written if the order is 117 | /// wrong. 118 | #[napi] 119 | pub async fn execute( 120 | &self, 121 | query: Either3, 122 | parameters: Option>>, 123 | options: Option, 124 | ) -> JSQueryResult { 125 | let values = QueryParameter::parser(parameters.clone()).ok_or_else(|| { 126 | napi::Error::new( 127 | napi::Status::InvalidArg, 128 | format!( 129 | "Something went wrong with your query parameters. 
{:?}", 130 | parameters 131 | ), 132 | ) 133 | })?; 134 | 135 | let should_prepare = options.map_or(false, |options| options.prepare.unwrap_or(false)); 136 | 137 | let result = match query { 138 | Either3::A(ref query_str) if should_prepare => { 139 | let prepared = self.session.prepare(query_str.clone()).await.map_err(|e| { 140 | napi::Error::new( 141 | napi::Status::InvalidArg, 142 | format!( 143 | "Something went wrong preparing your statement. - [{}]\n{}", 144 | query_str, e 145 | ), 146 | ) 147 | })?; 148 | self.execute_prepared(&prepared, values, query_str).await 149 | } 150 | Either3::A(query_str) => self.execute_query(Either::A(query_str), values).await, 151 | Either3::B(query_ref) => { 152 | self 153 | .execute_query(Either::B(query_ref.query.clone()), values) 154 | .await 155 | } 156 | Either3::C(prepared_ref) => { 157 | self 158 | .execute_prepared( 159 | &prepared_ref.prepared, 160 | values, 161 | prepared_ref.prepared.get_statement(), 162 | ) 163 | .await 164 | } 165 | } 166 | .map_err(|e| { 167 | napi::Error::new( 168 | napi::Status::InvalidArg, 169 | format!("Something went wrong with your query. - \n{}", e), // TODO: handle different queries here 170 | ) 171 | })? 
172 | .get("result") 173 | .cloned() 174 | .ok_or(napi::Error::new( 175 | napi::Status::InvalidArg, 176 | r#"Something went wrong with your query."#.to_string(), // TODO: handle different queries here 177 | ))?; 178 | 179 | match result { 180 | Either::A(results) => Ok(results), 181 | Either::B(_tracing) => unreachable!(), 182 | } 183 | } 184 | 185 | // Helper method to handle prepared statements 186 | async fn execute_prepared( 187 | &self, 188 | prepared: &scylla::prepared_statement::PreparedStatement, 189 | values: QueryParameter<'_>, 190 | query: &str, 191 | ) -> napi::Result { 192 | let query_result = self.session.execute(prepared, values).await.map_err(|e| { 193 | napi::Error::new( 194 | napi::Status::InvalidArg, 195 | format!( 196 | "Something went wrong with your prepared statement. - [{}]\n{}", 197 | query, e 198 | ), 199 | ) 200 | })?; 201 | 202 | let tracing = if let Some(tracing_id) = query_result.tracing_id { 203 | Some(crate::types::tracing::TracingInfo::from( 204 | self 205 | .session 206 | .get_tracing_info(&tracing_id) 207 | .await 208 | .map_err(|e| { 209 | napi::Error::new( 210 | napi::Status::InvalidArg, 211 | format!( 212 | "Something went wrong with your tracing info. 
- [{}]\n{}", 213 | query, e 214 | ), 215 | ) 216 | })?, 217 | )) 218 | } else { 219 | None 220 | }; 221 | 222 | let result = QueryResult::parser(query_result)?; 223 | 224 | Ok(TracingReturn::from([ 225 | ("result".to_string(), Either::A(result)), 226 | ("tracing".to_string(), Either::B(tracing.into())), 227 | ])) 228 | } 229 | 230 | // Helper method to handle direct queries 231 | async fn execute_query( 232 | &self, 233 | query: Either, 234 | values: QueryParameter<'_>, 235 | ) -> napi::Result { 236 | let query_result = match &query { 237 | Either::A(query_str) => self.session.query(query_str.clone(), values).await, 238 | Either::B(query_ref) => self.session.query(query_ref.clone(), values).await, 239 | } 240 | .map_err(|e| { 241 | let query_str = match query.clone() { 242 | Either::A(query_str) => query_str, 243 | Either::B(query_ref) => query_ref.contents.clone(), 244 | }; 245 | napi::Error::new( 246 | napi::Status::InvalidArg, 247 | format!( 248 | "Something went wrong with your query. - [{}]\n{}", 249 | query_str, e 250 | ), 251 | ) 252 | })?; 253 | 254 | let tracing_info = if let Some(tracing_id) = query_result.tracing_id { 255 | Some(crate::types::tracing::TracingInfo::from( 256 | self 257 | .session 258 | .get_tracing_info(&tracing_id) 259 | .await 260 | .map_err(|e| { 261 | napi::Error::new( 262 | napi::Status::InvalidArg, 263 | format!( 264 | "Something went wrong with your tracing info. 
- [{}]\n{}", 265 | match query { 266 | Either::A(query_str) => query_str, 267 | Either::B(query_ref) => query_ref.contents.clone(), 268 | }, 269 | e 270 | ), 271 | ) 272 | })?, 273 | )) 274 | } else { 275 | None 276 | }; 277 | 278 | Ok(TracingReturn::from([ 279 | ( 280 | "result".to_string(), 281 | Either::A(QueryResult::parser(query_result)?), 282 | ), 283 | ("tracing".to_string(), Either::B(tracing_info.into())), 284 | ])) 285 | } 286 | 287 | #[allow(clippy::type_complexity)] 288 | #[napi] 289 | pub async fn query( 290 | &self, 291 | scylla_query: &Query, 292 | parameters: Option>>, 293 | ) -> JSQueryResult { 294 | let values = QueryParameter::parser(parameters.clone()).ok_or(napi::Error::new( 295 | napi::Status::InvalidArg, 296 | format!("Something went wrong with your query parameters. {parameters:?}"), 297 | ))?; 298 | 299 | let query_result = self 300 | .session 301 | .query(scylla_query.query.clone(), values) 302 | .await 303 | .map_err(|e| { 304 | napi::Error::new( 305 | napi::Status::InvalidArg, 306 | format!("Something went wrong with your query. - [{scylla_query}] - {parameters:?}\n{e}"), 307 | ) 308 | })?; 309 | 310 | QueryResult::parser(query_result) 311 | } 312 | 313 | #[napi] 314 | pub async fn prepare(&self, query: String) -> napi::Result { 315 | let prepared = self.session.prepare(query.clone()).await.map_err(|e| { 316 | napi::Error::new( 317 | napi::Status::InvalidArg, 318 | format!("Something went wrong with your prepared statement. 
- [{query}]\n{e}"), 319 | ) 320 | })?; 321 | 322 | Ok(PreparedStatement::new(prepared)) 323 | } 324 | 325 | /// Perform a batch query\ 326 | /// Batch contains many `simple` or `prepared` queries which are executed at once\ 327 | /// Batch doesn't return any rows 328 | /// 329 | /// Batch values must contain values for each of the queries 330 | /// 331 | /// See [the book](https://rust-driver.docs.scylladb.com/stable/queries/batch.html) for more information 332 | /// 333 | /// # Arguments 334 | /// * `batch` - Batch to be performed 335 | /// * `values` - List of values for each query, it's the easiest to use an array of arrays 336 | /// 337 | /// # Example 338 | /// ```javascript 339 | /// const nodes = process.env.CLUSTER_NODES?.split(",") ?? ["127.0.0.1:9042"]; 340 | /// 341 | /// const cluster = new Cluster({ nodes }); 342 | /// const session = await cluster.connect(); 343 | /// 344 | /// const batch = new BatchStatement(); 345 | /// 346 | /// await session.execute("CREATE KEYSPACE IF NOT EXISTS batch_statements WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); 347 | /// await session.useKeyspace("batch_statements"); 348 | /// await session.execute("CREATE TABLE IF NOT EXISTS users (id UUID PRIMARY KEY, name TEXT)"); 349 | /// 350 | /// const simpleStatement = new Query("INSERT INTO users (id, name) VALUES (?, ?)"); 351 | /// const preparedStatement = await session.prepare("INSERT INTO users (id, name) VALUES (?, ?)"); 352 | /// 353 | /// batch.appendStatement(simpleStatement); 354 | /// batch.appendStatement(preparedStatement); 355 | /// 356 | /// await session.batch(batch, [[Uuid.randomV4(), "Alice"], [Uuid.randomV4(), "Bob"]]); 357 | /// 358 | /// console.log(await session.execute("SELECT * FROM users")); 359 | /// ``` 360 | #[napi] 361 | #[allow(clippy::type_complexity)] 362 | pub async fn batch( 363 | &self, 364 | batch: &ScyllaBatchStatement, 365 | parameters: Vec>>>, 366 | ) -> JSQueryResult { 367 | let values = parameters 368 
| .iter() 369 | .map(|params| { 370 | QueryParameter::parser(params.clone()).ok_or(napi::Error::new( 371 | napi::Status::InvalidArg, 372 | format!("Something went wrong with your batch parameters. {parameters:?}"), 373 | )) 374 | }) 375 | .collect::>>()?; 376 | 377 | let query_result = self 378 | .session 379 | .batch(&batch.batch, values) 380 | .await 381 | .map_err(|e| { 382 | napi::Error::new( 383 | napi::Status::InvalidArg, 384 | format!("Something went wrong with your batch. - [{batch}] - {parameters:?}\n{e}"), 385 | ) 386 | })?; 387 | 388 | QueryResult::parser(query_result) 389 | } 390 | 391 | /// Sends `USE ` request on all connections\ 392 | /// This allows to write `SELECT * FROM table` instead of `SELECT * FROM keyspace.table`\ 393 | /// 394 | /// Note that even failed `useKeyspace` can change currently used keyspace - the request is sent on all connections and 395 | /// can overwrite previously used keyspace. 396 | /// 397 | /// Call only one `useKeyspace` at a time.\ 398 | /// Trying to do two `useKeyspace` requests simultaneously with different names 399 | /// can end with some connections using one keyspace and the rest using the other. 
400 | /// 401 | /// # Arguments 402 | /// 403 | /// * `keyspaceName` - keyspace name to use, 404 | /// keyspace names can have up to 48 alphanumeric characters and contain underscores 405 | /// * `caseSensitive` - if set to true the generated query will put keyspace name in quotes 406 | /// 407 | /// # Errors 408 | /// 409 | /// * `InvalidArg` - if the keyspace name is invalid 410 | /// 411 | /// # Example 412 | /// 413 | /// ```javascript 414 | /// import { Cluster } from "."; 415 | /// 416 | /// const cluster = new Cluster({ 417 | /// nodes: ["127.0.0.1:9042"], 418 | /// }); 419 | /// 420 | /// const session = await cluster.connect(); 421 | /// 422 | /// await session.useKeyspace("system_schema"); 423 | /// 424 | /// const result = await session 425 | /// .execute("SELECT * FROM scylla_tables limit ?", [1]) 426 | /// .catch(console.error); 427 | /// ``` 428 | #[napi] 429 | pub async fn use_keyspace( 430 | &self, 431 | keyspace_name: String, 432 | case_sensitive: Option, 433 | ) -> napi::Result<()> { 434 | self 435 | .session 436 | .use_keyspace(keyspace_name.clone(), case_sensitive.unwrap_or(false)) 437 | .await 438 | .map_err(|e| { 439 | napi::Error::new( 440 | napi::Status::InvalidArg, 441 | format!("Something went wrong with your keyspace. - [{keyspace_name}]\n{e}"), 442 | ) 443 | })?; 444 | 445 | Ok(()) 446 | } 447 | 448 | /// session.awaitSchemaAgreement returns a Promise that can be awaited as long as schema is not in an agreement. 449 | /// However, it won’t wait forever; ClusterConfig defines a timeout that limits the time of waiting. If the timeout elapses, 450 | /// the return value is an error, otherwise it is the schema_version. 
451 | /// 452 | /// # Returns 453 | /// 454 | /// * `Promise` - schema_version 455 | /// 456 | /// # Errors 457 | /// * `GenericFailure` - if the timeout elapses 458 | /// 459 | /// # Example 460 | /// ```javascript 461 | /// import { Cluster } from "."; 462 | /// 463 | /// const cluster = new Cluster({ nodes: ["127.0.0.1:9042"] }); 464 | /// const session = await cluster.connect(); 465 | /// 466 | /// const schemaVersion = await session.awaitSchemaAgreement().catch(console.error); 467 | /// console.log(schemaVersion); 468 | /// 469 | /// const isAgreed = await session.checkSchemaAgreement().catch(console.error); 470 | /// console.log(isAgreed); 471 | /// ``` 472 | #[napi] 473 | pub async fn await_schema_agreement(&self) -> napi::Result { 474 | Ok( 475 | self 476 | .session 477 | .await_schema_agreement() 478 | .await 479 | .map_err(|e| { 480 | napi::Error::new( 481 | napi::Status::GenericFailure, 482 | format!("Something went wrong with your schema agreement. - {e}"), 483 | ) 484 | })? 485 | .into(), 486 | ) 487 | } 488 | 489 | #[napi] 490 | pub async fn check_schema_agreement(&self) -> napi::Result { 491 | Ok( 492 | self 493 | .session 494 | .check_schema_agreement() 495 | .await 496 | .map_err(|e| { 497 | napi::Error::new( 498 | napi::Status::GenericFailure, 499 | format!("Something went wrong with your schema agreement. - {e}"), 500 | ) 501 | })? 
502 | .is_some(), 503 | ) 504 | } 505 | } 506 | -------------------------------------------------------------------------------- /src/session/topology.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use std::sync::Arc; 3 | 4 | use napi::bindgen_prelude::Either3; 5 | use scylla::transport::topology::{Keyspace, MaterializedView, Strategy, Table}; 6 | use scylla::transport::ClusterData; 7 | 8 | // ============= ClusterData ============= // 9 | #[napi] 10 | pub struct ScyllaClusterData { 11 | inner: Arc, 12 | } 13 | 14 | impl From> for ScyllaClusterData { 15 | fn from(cluster_data: Arc) -> Self { 16 | ScyllaClusterData { 17 | inner: cluster_data, 18 | } 19 | } 20 | } 21 | 22 | #[napi] 23 | impl ScyllaClusterData { 24 | #[napi] 25 | /// Access keyspaces details collected by the driver Driver collects various schema details like 26 | /// tables, partitioners, columns, types. They can be read using this method 27 | pub fn get_keyspace_info(&self) -> Option> { 28 | let keyspaces_info = self.inner.get_keyspace_info(); 29 | 30 | if keyspaces_info.is_empty() { 31 | None 32 | } else { 33 | Some( 34 | keyspaces_info 35 | .iter() 36 | .map(|(k, v)| (k.clone(), ScyllaKeyspace::from((*v).clone()))) 37 | .collect(), 38 | ) 39 | } 40 | } 41 | } 42 | // ======================================= // 43 | 44 | // ============= Keyspace ============= // 45 | #[napi(object)] 46 | #[derive(Clone)] 47 | pub struct ScyllaKeyspace { 48 | pub strategy: ScyllaStrategy, 49 | pub tables: HashMap, 50 | pub views: HashMap, 51 | // pub user_defined_types: HashMap, 52 | } 53 | 54 | impl From for ScyllaKeyspace { 55 | fn from(keyspace: Keyspace) -> Self { 56 | ScyllaKeyspace { 57 | tables: keyspace 58 | .tables 59 | .into_iter() 60 | .map(|(k, v)| (k, ScyllaTable::from(v))) 61 | .collect(), 62 | views: keyspace 63 | .views 64 | .into_iter() 65 | .map(|(k, v)| (k, ScyllaMaterializedView::from(v))) 66 | .collect(), 67 | strategy: 
keyspace.strategy.into(), 68 | // TODO: Implement ScyllaUserDefinedType 69 | // user_defined_types: keyspace.user_defined_types.into_iter().map(|(k, v)| (k, ScyllaUserDefinedType::from(v))).collect(), 70 | } 71 | } 72 | } 73 | // ======================================= // 74 | 75 | // ============= Strategy ============= // 76 | #[napi(object)] 77 | #[derive(Clone)] 78 | pub struct ScyllaStrategy { 79 | pub kind: String, 80 | pub data: Option>, 81 | } 82 | 83 | #[napi(object)] 84 | #[derive(Clone)] 85 | pub struct SimpleStrategy { 86 | pub replication_factor: u32, 87 | } 88 | 89 | #[napi(object)] 90 | #[derive(Clone)] 91 | pub struct NetworkTopologyStrategy { 92 | pub datacenter_repfactors: HashMap, 93 | } 94 | 95 | #[napi(object)] 96 | #[derive(Clone)] 97 | pub struct Other { 98 | pub name: String, 99 | pub data: HashMap, 100 | } 101 | 102 | impl From for ScyllaStrategy { 103 | fn from(strategy: Strategy) -> Self { 104 | match strategy { 105 | Strategy::SimpleStrategy { replication_factor } => ScyllaStrategy { 106 | kind: "SimpleStrategy".to_string(), 107 | data: Some(Either3::A(SimpleStrategy { 108 | replication_factor: replication_factor as u32, 109 | })), 110 | }, 111 | Strategy::NetworkTopologyStrategy { 112 | datacenter_repfactors, 113 | } => ScyllaStrategy { 114 | kind: "NetworkTopologyStrategy".to_string(), 115 | data: Some(Either3::B(NetworkTopologyStrategy { 116 | datacenter_repfactors: datacenter_repfactors 117 | .into_iter() 118 | .map(|(k, v)| (k, v as i32)) 119 | .collect(), 120 | })), 121 | }, 122 | Strategy::Other { name, data } => ScyllaStrategy { 123 | kind: name.clone(), 124 | data: Some(Either3::C(Other { 125 | name: name.clone(), 126 | data, 127 | })), 128 | }, 129 | Strategy::LocalStrategy => ScyllaStrategy { 130 | kind: "LocalStrategy".to_string(), 131 | data: None, 132 | }, 133 | } 134 | } 135 | } 136 | // ======================================= // 137 | 138 | // ============= Table ============= // 139 | #[napi(object)] 140 | 
#[derive(Clone)] 141 | pub struct ScyllaTable { 142 | pub columns: Vec, 143 | pub partition_key: Vec, 144 | pub clustering_key: Vec, 145 | pub partitioner: Option, 146 | } 147 | 148 | impl From for ScyllaTable { 149 | fn from(table: Table) -> Self { 150 | ScyllaTable { 151 | columns: table.columns.clone().into_keys().collect::>(), 152 | partition_key: table.partition_key.clone(), 153 | clustering_key: table.clustering_key.clone(), 154 | partitioner: table.partitioner.clone(), 155 | } 156 | } 157 | } 158 | // ======================================= // 159 | 160 | // ============= MaterializedView ============= // 161 | #[napi(object)] 162 | #[derive(Clone)] 163 | pub struct ScyllaMaterializedView { 164 | pub view_metadata: ScyllaTable, 165 | pub base_table_name: String, 166 | } 167 | 168 | impl From for ScyllaMaterializedView { 169 | fn from(view: MaterializedView) -> Self { 170 | ScyllaMaterializedView { 171 | view_metadata: ScyllaTable::from(view.view_metadata), 172 | base_table_name: view.base_table_name, 173 | } 174 | } 175 | } 176 | // ======================================= // 177 | -------------------------------------------------------------------------------- /src/types/decimal.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | 3 | use scylla::frame::value::CqlDecimal; 4 | 5 | #[napi] 6 | #[derive(Clone, PartialEq, Eq)] 7 | pub struct Decimal { 8 | int_val: Vec, 9 | scale: i32, 10 | } 11 | 12 | impl From for Decimal { 13 | fn from(value: CqlDecimal) -> Self { 14 | let (int_val, scale) = value.as_signed_be_bytes_slice_and_exponent(); 15 | 16 | Self { 17 | int_val: int_val.into(), 18 | scale, 19 | } 20 | } 21 | } 22 | 23 | impl From<&Decimal> for CqlDecimal { 24 | fn from(value: &Decimal) -> Self { 25 | CqlDecimal::from_signed_be_bytes_slice_and_exponent(value.int_val.as_ref(), value.scale) 26 | } 27 | } 28 | 29 | impl Debug for Decimal { 30 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> 
std::fmt::Result {
    f.debug_struct("Decimal")
      .field("int_val", &self.int_val)
      .field("scale", &self.scale)
      .finish()
  }
}

// TODO: implement operations for this wrapper
#[napi]
impl Decimal {
  #[napi(constructor)]
  pub fn new(int_val: Vec<u8>, scale: i32) -> Self {
    Self { int_val, scale }
  }

  /// Returns the base-10 string representation of the Decimal,
  /// BigDecimal-style: the two's-complement unscaled integer rendered in
  /// decimal with the point placed `scale` digits from the right
  /// (value = unscaled * 10^(-scale)), e.g. bytes [0x01, 0x2C] (= 300)
  /// with scale 2 => "3.00".
  #[napi]
  #[allow(clippy::inherent_to_string)]
  pub fn to_string(&self) -> String {
    // Sign bit of the big-endian two's-complement representation.
    let negative = self.int_val.first().is_some_and(|b| b & 0x80 != 0);

    // Magnitude: for negatives, invert and add one (two's complement).
    let mut mag = self.int_val.clone();
    if negative {
      for b in mag.iter_mut() {
        *b = !*b;
      }
      for b in mag.iter_mut().rev() {
        let (v, carry) = b.overflowing_add(1);
        *b = v;
        if !carry {
          break;
        }
      }
    }

    // Base-256 -> base-10 by repeated division; digits come out LSD-first.
    let mut digits: Vec<u8> = Vec::new();
    while mag.iter().any(|&b| b != 0) {
      let mut rem: u32 = 0;
      for b in mag.iter_mut() {
        let cur = (rem << 8) | u32::from(*b);
        *b = (cur / 10) as u8;
        rem = cur % 10;
      }
      digits.push(b'0' + rem as u8);
    }
    if digits.is_empty() {
      digits.push(b'0');
    }
    digits.reverse();
    let unscaled = String::from_utf8(digits).expect("digits are ASCII");

    // Place the decimal point according to the scale.
    let mut rendered = if self.scale <= 0 {
      // Non-positive scale multiplies by 10^|scale|: append zeros.
      format!("{}{}", unscaled, "0".repeat(self.scale.unsigned_abs() as usize))
    } else {
      let scale = self.scale as usize;
      if unscaled.len() <= scale {
        // All digits are fractional; left-pad with zeros.
        format!("0.{}{}", "0".repeat(scale - unscaled.len()), unscaled)
      } else {
        let (int_part, frac_part) = unscaled.split_at(unscaled.len() - scale);
        format!("{int_part}.{frac_part}")
      }
    };
    // Only prefix '-' when the magnitude is non-zero.
    if negative && rendered.bytes().any(|c| c != b'0' && c != b'.') {
      rendered.insert(0, '-');
    }
    rendered
  }
}
--------------------------------------------------------------------------------
/src/types/double.rs:
--------------------------------------------------------------------------------
/// A double precision float number.
///
/// Due to the nature of numbers in JavaScript, it's hard to distinguish between integers and floats, so this type is used to represent
/// double precision float numbers while any other JS number will be treated as an integer. (This is not the case for BigInts, which are always treated as BigInts).
#[napi]
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct Double {
  pub(crate) inner: f64,
}

impl From<f64> for Double {
  fn from(inner: f64) -> Self {
    Self { inner }
  }
}

impl From<&Double> for f64 {
  fn from(float: &Double) -> Self {
    float.inner
  }
}

#[napi]
impl Double {
  #[napi(constructor)]
  pub fn new_float(inner: f64) -> Double {
    Double::from(inner)
  }

  /// Returns the string representation of the wrapped f64.
  #[napi]
  #[allow(clippy::inherent_to_string)]
  pub fn to_string(&self) -> String {
    self.inner.to_string()
  }
}
--------------------------------------------------------------------------------
/src/types/duration.rs:
--------------------------------------------------------------------------------
use scylla::frame::value::CqlDuration;

/// CQL `duration`: months, days and nanoseconds stored independently
/// (they are not normalized into each other, mirroring CqlDuration).
#[napi]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Duration {
  pub months: i32,
  pub days: i32,
  pub nanoseconds: i64,
}

impl From<CqlDuration> for Duration {
  fn from(value: CqlDuration) -> Self {
    Self {
      months: value.months,
      days: value.days,
      nanoseconds: value.nanoseconds,
    }
  }
}

impl From<Duration> for CqlDuration {
  fn from(value: Duration) -> Self {
    Self {
      months: value.months,
      days: value.days,
      nanoseconds: value.nanoseconds,
    }
  }
}

#[napi]
impl Duration {
  #[napi(constructor)]
  pub fn new(months: i32, days: i32, nanoseconds: i64) -> Self {
    Self {
      months,
      days,
      nanoseconds,
    }
  }

  /// Returns the string representation of the Duration.
43 | // TODO: Check really how this is supposed to be displayed 44 | #[napi] 45 | #[allow(clippy::inherent_to_string)] 46 | pub fn to_string(&self) -> String { 47 | format!( 48 | "{} months, {} days, {} nanoseconds", 49 | self.months, self.days, self.nanoseconds 50 | ) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/types/float.rs: -------------------------------------------------------------------------------- 1 | /// A float number. 2 | /// 3 | /// Due to the nature of numbers in JavaScript, it's hard to distinguish between integers and floats, so this type is used to represent 4 | /// float numbers while any other JS number will be treated as an integer. (This is not the case for BigInts, which are always treated as BigInts). 5 | #[napi] 6 | #[derive(Debug, Copy, Clone, PartialEq)] 7 | pub struct Float { 8 | pub(crate) inner: f32, 9 | } 10 | 11 | impl From for Float { 12 | fn from(inner: f32) -> Self { 13 | Self { inner } 14 | } 15 | } 16 | 17 | impl From<&Float> for f32 { 18 | fn from(float: &Float) -> Self { 19 | float.inner 20 | } 21 | } 22 | 23 | #[napi] 24 | impl Float { 25 | #[napi(constructor)] 26 | pub fn new_float(inner: f64) -> Float { 27 | Float::from(inner as f32) 28 | } 29 | 30 | #[napi] 31 | #[allow(clippy::inherent_to_string)] 32 | pub fn to_string(&self) -> String { 33 | self.inner.to_string() 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /src/types/list.rs: -------------------------------------------------------------------------------- 1 | use scylla::frame::response::result::CqlValue; 2 | 3 | use crate::helpers::{cql_value_bridge::ParameterWithMapType, to_cql_value::ToCqlValue}; 4 | 5 | /// A list of any CqlType 6 | #[napi] 7 | #[derive(Debug, Clone, PartialEq)] 8 | pub struct List { 9 | pub(crate) inner: Vec, 10 | } 11 | 12 | impl From> for List { 13 | fn from(inner: Vec) -> Self { 14 | Self { inner } 15 | } 16 | } 17 | 18 | impl 
From for Vec { 19 | fn from(list: List) -> Self { 20 | list.inner 21 | } 22 | } 23 | 24 | impl From<&List> for Vec { 25 | fn from(list: &List) -> Self { 26 | list.inner.clone() 27 | } 28 | } 29 | 30 | #[napi] 31 | impl List { 32 | #[napi(constructor, ts_args_type = "values: T[]")] 33 | pub fn new_list(values: Vec) -> List { 34 | let inner = values.into_iter().map(|v| v.to_cql_value()).collect(); 35 | List { inner } 36 | } 37 | 38 | #[napi] 39 | #[allow(clippy::inherent_to_string)] 40 | pub fn to_string(&self) -> String { 41 | format!("{:?}", self.inner) 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/types/map.rs: -------------------------------------------------------------------------------- 1 | use scylla::frame::response::result::CqlValue; 2 | 3 | use crate::helpers::{cql_value_bridge::ParameterWithMapType, to_cql_value::ToCqlValue}; 4 | 5 | /// A map of any CqlType to any CqlType 6 | #[napi] 7 | #[derive(Debug, Clone, PartialEq)] 8 | pub struct Map { 9 | pub(crate) inner: Vec<(CqlValue, CqlValue)>, 10 | } 11 | 12 | impl From> for Map { 13 | fn from(inner: Vec<(CqlValue, CqlValue)>) -> Self { 14 | Self { inner } 15 | } 16 | } 17 | 18 | impl From for Vec<(CqlValue, CqlValue)> { 19 | fn from(map: Map) -> Self { 20 | map.inner 21 | } 22 | } 23 | 24 | impl From<&Map> for Vec<(CqlValue, CqlValue)> { 25 | fn from(map: &Map) -> Self { 26 | map.inner.clone() 27 | } 28 | } 29 | 30 | #[napi] 31 | impl Map { 32 | #[napi(constructor, ts_args_type = "values: Array>")] 33 | pub fn new_map(values: Vec>) -> Map { 34 | Map { 35 | inner: values 36 | .into_iter() 37 | .map(|v| { 38 | let key = v[0].to_cql_value(); 39 | let value = v[1].to_cql_value(); 40 | (key, value) 41 | }) 42 | .collect(), 43 | } 44 | } 45 | 46 | #[napi] 47 | #[allow(clippy::inherent_to_string)] 48 | pub fn to_string(&self) -> String { 49 | format!("{:?}", self.inner) 50 | } 51 | } 52 | 
-------------------------------------------------------------------------------- /src/types/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod decimal; 2 | pub mod double; 3 | pub mod duration; 4 | pub mod float; 5 | pub mod list; 6 | pub mod map; 7 | pub mod set; 8 | pub mod tracing; 9 | pub mod uuid; 10 | pub mod varint; 11 | -------------------------------------------------------------------------------- /src/types/set.rs: -------------------------------------------------------------------------------- 1 | use scylla::frame::response::result::CqlValue; 2 | 3 | use crate::helpers::{cql_value_bridge::ParameterWithMapType, to_cql_value::ToCqlValue}; 4 | 5 | /// A list of any CqlType 6 | #[napi] 7 | #[derive(Debug, Clone, PartialEq)] 8 | pub struct Set { 9 | pub(crate) inner: Vec, 10 | } 11 | 12 | impl From> for Set { 13 | fn from(inner: Vec) -> Self { 14 | Self { inner } 15 | } 16 | } 17 | 18 | impl From for Vec { 19 | fn from(list: Set) -> Self { 20 | list.inner 21 | } 22 | } 23 | 24 | impl From<&Set> for Vec { 25 | fn from(list: &Set) -> Self { 26 | list.inner.clone() 27 | } 28 | } 29 | 30 | #[napi] 31 | impl Set { 32 | #[napi(constructor, ts_args_type = "values: T[]")] 33 | pub fn new_set(values: Vec) -> Set { 34 | let inner = values.into_iter().map(|v| v.to_cql_value()).collect(); 35 | Set { inner } 36 | } 37 | 38 | #[napi] 39 | #[allow(clippy::inherent_to_string)] 40 | pub fn to_string(&self) -> String { 41 | format!("{:?}", self.inner) 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/types/tracing.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::HashMap, net::IpAddr}; 2 | 3 | use napi::Either; 4 | use serde::Serialize; 5 | 6 | use crate::helpers::query_results::WithMapType; 7 | 8 | #[derive(Debug, Clone, PartialEq, Eq)] 9 | pub struct CqlTimestampWrapper(pub 
scylla::frame::value::CqlTimestamp); 10 | #[derive(Debug, Clone, PartialEq, Eq)] 11 | pub struct CqlTimeuuidWrapper(pub scylla::frame::value::CqlTimeuuid); 12 | 13 | impl Serialize for CqlTimestampWrapper { 14 | fn serialize(&self, serializer: S) -> Result 15 | where 16 | S: serde::Serializer, 17 | { 18 | serializer.serialize_i64(self.0 .0) 19 | } 20 | } 21 | 22 | impl Serialize for CqlTimeuuidWrapper { 23 | fn serialize(&self, serializer: S) -> Result 24 | where 25 | S: serde::Serializer, 26 | { 27 | serializer.serialize_str(format!("{}", self.0).as_str()) 28 | } 29 | } 30 | 31 | /// Tracing info retrieved from `system_traces.sessions` 32 | /// with all events from `system_traces.events` 33 | #[derive(Debug, Clone, PartialEq, Eq, Serialize)] 34 | pub struct TracingInfo { 35 | pub client: Option, 36 | pub command: Option, 37 | pub coordinator: Option, 38 | pub duration: Option, 39 | pub parameters: Option>, 40 | pub request: Option, 41 | /// started_at is a timestamp - time since unix epoch 42 | pub started_at: Option, 43 | 44 | pub events: Vec, 45 | } 46 | 47 | /// A single event happening during a traced query 48 | #[derive(Debug, Clone, PartialEq, Eq, Serialize)] 49 | pub struct TracingEvent { 50 | pub event_id: CqlTimeuuidWrapper, 51 | pub activity: Option, 52 | pub source: Option, 53 | pub source_elapsed: Option, 54 | pub thread: Option, 55 | } 56 | 57 | impl From for serde_json::Value { 58 | fn from(info: TracingInfo) -> Self { 59 | serde_json::json!(info) 60 | } 61 | } 62 | 63 | impl From for TracingInfo { 64 | fn from(info: scylla::tracing::TracingInfo) -> Self { 65 | Self { 66 | client: info.client, 67 | command: info.command, 68 | coordinator: info.coordinator, 69 | duration: info.duration, 70 | parameters: info.parameters, 71 | request: info.request, 72 | started_at: info.started_at.map(CqlTimestampWrapper), 73 | events: info.events.into_iter().map(TracingEvent::from).collect(), 74 | } 75 | } 76 | } 77 | 78 | impl From for TracingEvent { 79 | fn 
from(event: scylla::tracing::TracingEvent) -> Self {
    Self {
      event_id: CqlTimeuuidWrapper(event.event_id),
      activity: event.activity,
      source: event.source,
      source_elapsed: event.source_elapsed,
      thread: event.thread,
    }
  }
}

// Result shape returned to JS for traced queries: keyed sections (e.g.
// rows vs. tracing info), each either parsed row maps or raw JSON.
// NOTE(review): generic parameters reconstructed after extraction damage —
// verify against helpers/query_results.rs usage.
pub type TracingReturn =
  HashMap<String, Either<Vec<HashMap<String, WithMapType>>, serde_json::Value>>;
--------------------------------------------------------------------------------
/src/types/uuid.rs:
--------------------------------------------------------------------------------
use napi::Result;
use scylla::frame::value::CqlTimeuuid;

/// JS-visible wrapper around `uuid::Uuid`.
#[napi]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Uuid {
  pub(crate) uuid: uuid::Uuid,
}

impl From<uuid::Uuid> for Uuid {
  fn from(uuid: uuid::Uuid) -> Self {
    Self { uuid }
  }
}

impl From<Uuid> for uuid::Uuid {
  fn from(uuid: Uuid) -> Self {
    uuid.uuid
  }
}

impl From<CqlTimeuuid> for Uuid {
  fn from(uuid: CqlTimeuuid) -> Self {
    Self {
      uuid: *uuid.as_ref(), // NOTE: not sure if this is the best way
    }
  }
}

impl Uuid {
  pub(crate) fn get_inner(&self) -> uuid::Uuid {
    self.uuid
  }
}

#[napi]
impl Uuid {
  /// Generates a random UUID v4.
  #[napi(js_name = "randomV4")]
  pub fn random_v4() -> Self {
    Self {
      uuid: uuid::Uuid::new_v4(),
    }
  }

  /// Parses a UUID from a string. It may fail if the string is not a valid UUID.
  #[napi]
  pub fn from_string(str: String) -> Result<Self> {
    let uuid = uuid::Uuid::parse_str(&str).map_err(|e| {
      napi::Error::new(
        napi::Status::GenericFailure,
        format!("Failed to parse UUID: {}", e),
      )
    })?;

    Ok(Self { uuid })
  }

  /// Returns the string representation of the UUID.
18 | #[napi] 19 | #[derive(Debug, Clone, PartialEq, Eq)] 20 | pub struct Varint { 21 | pub(crate) inner: Vec, 22 | } 23 | 24 | impl From> for Varint { 25 | fn from(inner: Vec) -> Self { 26 | Self { inner } 27 | } 28 | } 29 | 30 | impl From for Vec { 31 | fn from(varint: Varint) -> Self { 32 | varint.inner 33 | } 34 | } 35 | 36 | impl From<&Varint> for CqlVarint { 37 | fn from(varint: &Varint) -> Self { 38 | CqlVarint::from_signed_bytes_be(varint.inner.clone()) 39 | } 40 | } 41 | 42 | #[napi] 43 | impl Varint { 44 | #[napi(constructor)] 45 | pub fn new_varint(inner: Vec) -> Varint { 46 | Varint::from(inner) 47 | } 48 | 49 | #[napi] 50 | #[allow(clippy::inherent_to_string)] 51 | pub fn to_string(&self) -> String { 52 | format!("{:?}", self.inner) 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ESNext", 4 | "strict": true, 5 | "moduleResolution": "node", 6 | "module": "CommonJS", 7 | "noUnusedLocals": true, 8 | "noUnusedParameters": true, 9 | "esModuleInterop": true, 10 | "allowSyntheticDefaultImports": true 11 | }, 12 | "include": ["."], 13 | "exclude": ["node_modules", "benchmark", "__test__"] 14 | } 15 | -------------------------------------------------------------------------------- /wasi-worker-browser.mjs: -------------------------------------------------------------------------------- 1 | import { instantiateNapiModuleSync, MessageHandler, WASI } from '@napi-rs/wasm-runtime' 2 | 3 | const handler = new MessageHandler({ 4 | onLoad({ wasmModule, wasmMemory }) { 5 | const wasi = new WASI({ 6 | print: function () { 7 | // eslint-disable-next-line no-console 8 | console.log.apply(console, arguments) 9 | }, 10 | printErr: function() { 11 | // eslint-disable-next-line no-console 12 | console.error.apply(console, arguments) 13 | }, 14 | }) 15 | return 
instantiateNapiModuleSync(wasmModule, { 16 | childThread: true, 17 | wasi, 18 | overwriteImports(importObject) { 19 | importObject.env = { 20 | ...importObject.env, 21 | ...importObject.napi, 22 | ...importObject.emnapi, 23 | memory: wasmMemory, 24 | } 25 | }, 26 | }) 27 | }, 28 | }) 29 | 30 | globalThis.onmessage = function (e) { 31 | handler.handle(e) 32 | } 33 | -------------------------------------------------------------------------------- /wasi-worker.mjs: -------------------------------------------------------------------------------- 1 | import fs from "node:fs"; 2 | import { createRequire } from "node:module"; 3 | import { parse } from "node:path"; 4 | import { WASI } from "node:wasi"; 5 | import { parentPort, Worker } from "node:worker_threads"; 6 | 7 | const require = createRequire(import.meta.url); 8 | 9 | const { instantiateNapiModuleSync, MessageHandler, getDefaultContext } = require("@napi-rs/wasm-runtime"); 10 | 11 | if (parentPort) { 12 | parentPort.on("message", (data) => { 13 | globalThis.onmessage({ data }); 14 | }); 15 | } 16 | 17 | Object.assign(globalThis, { 18 | self: globalThis, 19 | require, 20 | Worker, 21 | importScripts: function (f) { 22 | ;(0, eval)(fs.readFileSync(f, "utf8") + "//# sourceURL=" + f); 23 | }, 24 | postMessage: function (msg) { 25 | if (parentPort) { 26 | parentPort.postMessage(msg); 27 | } 28 | }, 29 | }); 30 | 31 | const emnapiContext = getDefaultContext(); 32 | 33 | const __rootDir = parse(process.cwd()).root; 34 | 35 | const handler = new MessageHandler({ 36 | onLoad({ wasmModule, wasmMemory }) { 37 | const wasi = new WASI({ 38 | version: 'preview1', 39 | env: process.env, 40 | preopens: { 41 | [__rootDir]: __rootDir, 42 | }, 43 | }); 44 | 45 | return instantiateNapiModuleSync(wasmModule, { 46 | childThread: true, 47 | wasi, 48 | context: emnapiContext, 49 | overwriteImports(importObject) { 50 | importObject.env = { 51 | ...importObject.env, 52 | ...importObject.napi, 53 | ...importObject.emnapi, 54 | memory: 
wasmMemory 55 | }; 56 | }, 57 | }); 58 | }, 59 | }); 60 | 61 | globalThis.onmessage = function (e) { 62 | handler.handle(e); 63 | }; 64 | --------------------------------------------------------------------------------