├── .github
└── workflows
│ ├── ci.yml
│ └── release.yml
├── .gitignore
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Cargo.lock
├── Cargo.toml
├── LICENSE
├── README.md
├── crates
├── base
│ ├── Cargo.toml
│ ├── build.rs
│ ├── src
│ │ ├── context
│ │ │ ├── context.rs
│ │ │ └── mod.rs
│ │ ├── error.rs
│ │ ├── helpers
│ │ │ └── mod.rs
│ │ ├── lib.rs
│ │ ├── manager
│ │ │ ├── mod.rs
│ │ │ ├── task.rs
│ │ │ ├── task_duration.rs
│ │ │ └── tasks
│ │ │ │ ├── mod.rs
│ │ │ │ └── reconcile_task.rs
│ │ ├── pool
│ │ │ ├── mod.rs
│ │ │ └── pool_provider.rs
│ │ ├── runner.rs
│ │ ├── runtime.rs
│ │ ├── snapshot.rs
│ │ └── thread
│ │ │ └── mod.rs
│ └── test_cases
│ │ └── default-db
│ │ ├── CustomSchemaJS.toml
│ │ ├── SchemaJS.toml
│ │ └── public
│ │ └── tables
│ │ ├── products.ts
│ │ └── users.ts
├── cli
│ ├── Cargo.toml
│ └── src
│ │ ├── cmd
│ │ ├── init.rs
│ │ ├── mod.rs
│ │ ├── repl.rs
│ │ └── start.rs
│ │ ├── flags.rs
│ │ └── main.rs
├── config
│ ├── Cargo.toml
│ └── src
│ │ ├── default_config_values.rs
│ │ └── lib.rs
├── core
│ ├── Cargo.toml
│ └── src
│ │ ├── js
│ │ ├── bootstrap.ts
│ │ ├── fieldUtils.ts
│ │ └── global.ts
│ │ ├── lib.rs
│ │ └── transpiler.rs
├── data
│ ├── Cargo.toml
│ ├── src
│ │ ├── commit_log
│ │ │ ├── collection.rs
│ │ │ ├── error.rs
│ │ │ ├── iterator.rs
│ │ │ ├── mod.rs
│ │ │ ├── operations.rs
│ │ │ └── reconciliation_file.rs
│ │ ├── cursor
│ │ │ ├── error.rs
│ │ │ └── mod.rs
│ │ ├── data_handler.rs
│ │ ├── errors.rs
│ │ ├── fdm
│ │ │ ├── file_descriptor.rs
│ │ │ └── mod.rs
│ │ ├── lib.rs
│ │ ├── shard
│ │ │ ├── insert_item.rs
│ │ │ ├── item_type.rs
│ │ │ ├── map_shard.rs
│ │ │ ├── mod.rs
│ │ │ └── shards
│ │ │ │ ├── data_shard
│ │ │ │ ├── config.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── shard.rs
│ │ │ │ └── shard_header.rs
│ │ │ │ ├── kv
│ │ │ │ ├── config.rs
│ │ │ │ ├── mod.rs
│ │ │ │ ├── shard.rs
│ │ │ │ ├── shard_header.rs
│ │ │ │ └── util.rs
│ │ │ │ └── mod.rs
│ │ ├── temp_offset_types.rs
│ │ └── utils
│ │ │ ├── fs.rs
│ │ │ ├── hash.rs
│ │ │ └── mod.rs
│ └── test_cases
│ │ ├── .gitignore
│ │ └── fake-db-folder
│ │ ├── fake-empty-table
│ │ └── data_c222a11d-c80f-4d6e-8c8a-7b83f79f9ef2_0.data
│ │ └── fake-partial-folder
│ │ ├── data_38af2223-d339-4f45-994e-eef41a69fcaa_2.data
│ │ ├── data_9e9d8ad5-6f76-4720-85de-4ca2497a0231_0.data
│ │ └── data_c4cebac4-037c-4af7-9dc3-87829d5f0217_1.data
├── dirs
│ ├── Cargo.toml
│ └── src
│ │ └── lib.rs
├── engine
│ ├── Cargo.toml
│ └── src
│ │ ├── engine.rs
│ │ ├── engine_db.rs
│ │ ├── js
│ │ ├── context.ts
│ │ ├── ops.ts
│ │ └── query.ts
│ │ ├── lib.rs
│ │ ├── ops
│ │ ├── insert.rs
│ │ ├── mod.rs
│ │ └── query.rs
│ │ ├── query_error.rs
│ │ ├── utils
│ │ ├── fs.rs
│ │ └── mod.rs
│ │ └── validation_error.rs
├── grpc
│ ├── Cargo.toml
│ ├── build.rs
│ ├── proto
│ │ ├── connection
│ │ │ └── connection.proto
│ │ ├── query
│ │ │ └── query.proto
│ │ └── shared
│ │ │ ├── data_value.proto
│ │ │ └── row.proto
│ └── src
│ │ ├── interceptors
│ │ ├── auth_interceptor.rs
│ │ └── mod.rs
│ │ ├── lib.rs
│ │ ├── server.rs
│ │ ├── services
│ │ ├── connection.rs
│ │ ├── macros.rs
│ │ ├── mod.rs
│ │ ├── query
│ │ │ ├── custom_query.rs
│ │ │ ├── insert.rs
│ │ │ ├── mod.rs
│ │ │ └── query_data.rs
│ │ └── shared.rs
│ │ └── utils
│ │ ├── common.rs
│ │ ├── json.rs
│ │ └── mod.rs
├── helpers
│ ├── Cargo.toml
│ └── src
│ │ ├── helper.rs
│ │ ├── js
│ │ └── helper.ts
│ │ └── lib.rs
├── index
│ ├── Cargo.toml
│ └── src
│ │ ├── composite_key.rs
│ │ ├── data
│ │ ├── index_data_unit.rs
│ │ ├── index_shard.rs
│ │ └── mod.rs
│ │ ├── errors.rs
│ │ ├── implementations
│ │ ├── hash
│ │ │ ├── hash_index.rs
│ │ │ ├── hash_index_header.rs
│ │ │ └── mod.rs
│ │ └── mod.rs
│ │ ├── index_keys.rs
│ │ ├── index_type.rs
│ │ ├── keys
│ │ ├── index_key_sha256.rs
│ │ ├── mod.rs
│ │ └── string_index.rs
│ │ ├── lib.rs
│ │ ├── types.rs
│ │ ├── utils
│ │ └── mod.rs
│ │ └── vals
│ │ ├── mod.rs
│ │ └── raw_value.rs
├── internal
│ ├── Cargo.toml
│ └── src
│ │ ├── auth
│ │ ├── auth_manager.rs
│ │ ├── mod.rs
│ │ └── types.rs
│ │ ├── lib.rs
│ │ ├── manager.rs
│ │ └── users
│ │ ├── mod.rs
│ │ ├── roles.rs
│ │ └── user.rs
├── module_loader
│ ├── Cargo.toml
│ └── src
│ │ ├── internal
│ │ ├── auth_tokens.rs
│ │ ├── cache_setting.rs
│ │ ├── file_fetcher.rs
│ │ ├── http_util.rs
│ │ ├── mod.rs
│ │ └── versions.rs
│ │ ├── lib.rs
│ │ └── ts_module_loader.rs
├── primitives
│ ├── Cargo.toml
│ └── src
│ │ ├── collection.rs
│ │ ├── column
│ │ ├── mod.rs
│ │ └── types.rs
│ │ ├── database.rs
│ │ ├── index
│ │ └── mod.rs
│ │ ├── js
│ │ ├── column.ts
│ │ ├── dataTypes.ts
│ │ ├── index.ts
│ │ └── table.ts
│ │ ├── lib.rs
│ │ └── table
│ │ ├── metadata.rs
│ │ └── mod.rs
├── query
│ ├── Cargo.toml
│ └── src
│ │ ├── errors.rs
│ │ ├── lib.rs
│ │ ├── managers
│ │ ├── mod.rs
│ │ ├── query_result.rs
│ │ └── single
│ │ │ ├── mod.rs
│ │ │ ├── table_commit_log_collection.rs
│ │ │ └── table_shard.rs
│ │ ├── ops
│ │ ├── mod.rs
│ │ └── query_ops.rs
│ │ ├── row.rs
│ │ ├── row_json.rs
│ │ └── search
│ │ ├── mod.rs
│ │ └── search_manager.rs
├── repl
│ ├── Cargo.toml
│ └── src
│ │ ├── errors.rs
│ │ ├── js
│ │ └── repl.ts
│ │ ├── lib.rs
│ │ └── query_state.rs
└── workers
│ ├── Cargo.toml
│ └── src
│ ├── context.rs
│ └── lib.rs
├── docs
└── logo.png
├── install.ps1
├── install.sh
├── scripts
├── fmt.sh
├── release.sh
└── test.sh
└── style.md
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | pull_request:
5 | push:
6 | branches:
7 | - main
8 | workflow_dispatch:
9 |
10 | env:
11 | CARGO_INCREMENTAL: 0
12 | CARGO_NET_RETRY: 10
13 | CARGO_TERM_COLOR: always
14 | RUSTUP_MAX_RETRIES: 10
15 |
16 | jobs:
17 | cargo-fmt:
18 | name: "cargo fmt"
19 | runs-on: ubuntu-latest
20 | steps:
21 | - uses: actions/checkout@v4
22 | - run: rustup show
23 | - run: ./scripts/fmt.sh
24 |
25 | cargo-test:
26 | name: "cargo test"
27 | runs-on: ${{ matrix.os }}
28 | strategy:
29 | matrix:
30 | include:
31 | - os: ubuntu-latest
32 | - os: windows-latest
33 |
34 | steps:
35 | - name: Remove unwanted software
36 | if: matrix.os == 'ubuntu-latest'
37 | run: |
38 | sudo rm -rf /usr/share/dotnet
39 | sudo rm -rf /usr/local/lib/android
40 | sudo rm -rf /opt/ghc
41 | sudo rm -rf /opt/hostedtoolcache/CodeQL
42 | sudo docker image prune --all --force
43 |
44 | sudo apt-get remove --purge -y man-db
45 | sudo apt-get remove 'clang-13*' 'clang-14*' 'clang-15*' 'llvm-13*' 'llvm-14*' 'llvm-15*' 'lld-13*' 'lld-14*' 'lld-15*'
46 |
47 | - uses: actions/checkout@v4
48 | - run: rustup show
49 | - uses: Swatinem/rust-cache@v2
50 |
51 | - name: Install Protoc
52 | uses: arduino/setup-protoc@v3
53 |
54 | - run: ./scripts/test.sh
55 | if: matrix.os == 'ubuntu-latest'
56 |
57 | - run: cargo test
58 | if: matrix.os == 'windows-latest'
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Publish
2 |
3 | on:
4 | push:
5 | tags:
6 | - '*'
7 |
8 | permissions:
9 | contents: write
10 |
11 | jobs:
12 | build:
13 | name: Publish for ${{ matrix.os }}
14 | runs-on: ${{ matrix.os }}
15 | continue-on-error: true
16 | strategy:
17 | matrix:
18 | include:
19 | - os: ubuntu-latest
20 | target: x86_64-unknown-linux-gnu
21 | cross: false
22 |
23 | - os: windows-latest
24 | target: x86_64-pc-windows-msvc
25 | cross: false
26 |
27 | - os: macos-13
28 | target: x86_64-apple-darwin
29 | cross: false
30 |
31 | - os: macos-latest
32 | target: aarch64-apple-darwin
33 | cross: false
34 |
35 | steps:
36 | - name: Checkout repo
37 | uses: actions/checkout@v4
38 |
39 | - uses: dtolnay/rust-toolchain@stable
40 |
41 | - name: Install Protoc
42 | uses: arduino/setup-protoc@v3
43 |
44 | # actions-rs/cargo is archived and unmaintained; every matrix entry sets
45 | # cross: false, so a plain cargo invocation is equivalent.
46 | - name: Build
47 | run: cargo build --color always --release --locked --target ${{ matrix.target }}
50 |
51 | - name: Get Binary path
52 | id: binary
53 | shell: bash
54 | run: echo "path=target/${{ matrix.target }}/release/schemajs${{ runner.os == 'Windows' && '.exe' || '' }}" >> "$GITHUB_OUTPUT"
54 |
55 | - name: Zip Builds
56 | shell: pwsh
57 | run: Compress-Archive -CompressionLevel Optimal -Force -Path ${{ steps.binary.outputs.path }}, README.md, LICENSE -DestinationPath schemajs-${{ matrix.target }}.zip
58 |
59 | - name: Upload Release Builds
60 | uses: actions/upload-artifact@v4
61 | with:
62 | name: schemajs-${{ matrix.target }}
63 | path: schemajs-${{ matrix.target }}.zip
64 | if-no-files-found: error
65 | retention-days: 1
66 |
67 | publish:
68 | needs: build
69 | runs-on: ubuntu-latest
70 | timeout-minutes: 10
71 |
72 | steps:
73 | - name: Download Builds
74 | uses: actions/download-artifact@v4
75 | with:
76 | path: artifacts
77 | merge-multiple: true
78 |
79 | - run: ls -R artifacts
80 |
81 | - name: Upload binaries to release
82 | uses: svenstaro/upload-release-action@v2
83 | with:
84 | repo_token: ${{ secrets.GITHUB_TOKEN }}
85 | file: artifacts/schemajs-*
86 | tag: ${{ github.ref }}
87 | overwrite: true
88 | file_glob: true
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | ./.idea
2 | ./target
3 | /target
4 | .idea
5 | ./crates/query/test_cases
6 | /crates/query/test_cases
7 | .DS_Store
8 | /**/.DS_Store
9 | /.DS_Store
10 | .DS_Store?
11 | .data
12 | /**/.data
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to making participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, sex characteristics, gender identity and expression,
9 | level of experience, education, socio-economic status, nationality, personal
10 | appearance, race, religion, or sexual identity and orientation.
11 |
12 | ## Our Standards
13 |
14 | Examples of behavior that contributes to creating a positive environment
15 | include:
16 |
17 | - Using welcoming and inclusive language
18 | - Being respectful of differing viewpoints and experiences
19 | - Gracefully accepting constructive criticism
20 | - Focusing on what is best for the community
21 | - Showing empathy towards other community members
22 |
23 | Examples of unacceptable behavior by participants include:
24 |
25 | - The use of sexualized language or imagery and unwelcome sexual attention or
26 | advances
27 | - Trolling, insulting/derogatory comments, and personal or political attacks
28 | - Public or private harassment
29 | - Publishing others' private information, such as a physical or electronic
30 | address, without explicit permission
31 | - Other conduct which could reasonably be considered inappropriate in a
32 | professional setting
33 |
34 | ## Our Responsibilities
35 |
36 | Project maintainers are responsible for clarifying the standards of acceptable
37 | behavior and are expected to take appropriate and fair corrective action in
38 | response to any instances of unacceptable behavior.
39 |
40 | Project maintainers have the right and responsibility to remove, edit, or
41 | reject comments, commits, code, wiki edits, issues, and other contributions
42 | that are not aligned to this Code of Conduct, or to ban temporarily or
43 | permanently any contributor for other behaviors that they deem inappropriate,
44 | threatening, offensive, or harmful.
45 |
46 | ## Scope
47 |
48 | This Code of Conduct applies both within project spaces and in public spaces
49 | when an individual is representing the project or its community. Examples of
50 | representing a project or community include using an official project e-mail
51 | address, posting via an official social media account, or acting as an appointed
52 | representative at an online or offline event. Representation of a project may be
53 | further defined and clarified by project maintainers.
54 |
55 | ## Enforcement
56 |
57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
58 | reported by contacting the project team at andreespirela@outlook.com. All
59 | complaints will be reviewed and investigated and will result in a response that
60 | is deemed necessary and appropriate to the circumstances. The project team is
61 | obligated to maintain confidentiality with regard to the reporter of an incident.
62 | Further details of specific enforcement policies may be posted separately.
63 |
64 | Project maintainers who do not follow or enforce the Code of Conduct in good
65 | faith may face temporary or permanent repercussions as determined by other
66 | members of the project's leadership.
67 |
68 | ## Attribution
69 |
70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
72 |
73 | [homepage]: https://www.contributor-covenant.org
74 |
75 | For answers to common questions about this code of conduct, see
76 | https://www.contributor-covenant.org/faq
77 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | Welcome to SchemaJS! Before you make a contribution, be it a bug report, documentation improvement,
4 | pull request (PR), etc., please read and follow these guidelines.
5 |
6 | ## Start with filing an issue
7 |
8 | More often than not, **start by filing an issue on GitHub**. If you have a bug report or feature
9 | request, open a GitHub issue. Non-trivial PRs will also require a GitHub issue. The issue provides
10 | us with a space to discuss proposed changes with you and the community.
11 |
12 | Having a discussion via GitHub issue upfront is the best way to ensure your contribution lands in
13 | SchemaJS. We don't want you to spend your time making a PR, only to find that we won't accept it on
14 | a design basis. For example, we may find that your proposed feature works better as a third-party
15 | module built on top of or for use with SchemaJS and encourage you to pursue that direction instead.
16 |
17 | **You do not need to file an issue for small fixes.** What counts as a "small" or trivial fix is a
18 | judgment call, so here's a few examples to clarify:
19 | - fixing a typo
20 | - refactoring a bit of code
21 | - most documentation or comment edits
22 |
23 | Still, _sometimes_ we may review your PR and ask you to file an issue if we expect there are larger
24 | design decisions to be made.
25 |
26 | ## Making a PR
27 |
28 | After you've filed an issue, you can make your PR referencing that issue number. Once you open your
29 | PR, it will be labelled _Needs Review_. A maintainer will review your PR as soon as they can. The
30 | reviewer may ask for changes - they will mark the PR as _Changes Requested_ and will give you
31 | details about the requested changes. Feel free to ask lots of questions! The maintainers are there
32 | to help you.
33 |
34 | ### Caveats
35 |
36 | Currently, internal contributions will take priority.
37 |
38 | ### Code of Conduct
39 |
40 | Read our [Code of Conduct](/CODE_OF_CONDUCT.md)
41 |
42 | ## Contact
43 |
44 | If you have any questions, please reach out to [Andres](https://github.com/andreespirela).
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | members = [
3 | "./crates/cli",
4 | "./crates/base",
5 | "./crates/core",
6 | "./crates/primitives",
7 | "./crates/config",
8 | "./crates/workers",
9 | "./crates/engine",
10 | "./crates/module_loader",
11 | "./crates/data",
12 | "./crates/dirs",
13 | "./crates/query",
14 | "./crates/index",
15 | "./crates/internal",
16 | "./crates/grpc",
17 | "./crates/helpers",
18 | "./crates/repl",
19 | ]
20 | resolver = "2"
21 |
22 | [workspace.package]
23 | version = "0.1.1"
24 | authors = ["Andres Pirela", "SchemaJS Authors"]
25 | description = "A flexible, blazingly fast NoSQL database built on V8 with dynamic JavaScript-based schema and query management."
26 | documentation = "https://docs.schemajs.com"
27 | edition = "2021"
28 | license = "MIT"
29 |
30 | [workspace.dependencies]
31 | toml = "0.8.17"
32 | serde = { version = "1.0.204", features = ["derive"] }
33 | deno_ast = "=0.40.0"
34 | deno_core = "=0.299.0"
35 | deno_fetch = "0.188.0"
36 | anyhow = { version = "1.0.57" }
37 | enum-as-inner = "0.6.0"
38 | tokio = { version = "1", features = ["full"] }
39 | walkdir = "2.5.0"
40 | base64 = "0.21.4"
41 | deno_semver = "0.5.7"
42 | eszip = "0.72.2"
43 | import_map = "=0.20.0"
44 | cache_control = "=0.2.0"
45 | chrono = { version = "=0.4.22", default-features = false, features = ["clock"] }
46 | once_cell = { version = "^1.17.1" }
47 | reqwest = "0.12.5"
48 | deno_tls = "=0.150.0"
49 | deno_lockfile = "0.20.0"
50 | deno_fs = "=0.73.0"
51 | deno_graph = "0.80.1"
52 | deno_cache_dir = "=0.10.2"
53 | data-url = { version= "=0.3.0" }
54 | encoding_rs = { version = "=0.8.33" }
55 | deno_web = "=0.194.0"
56 | indexmap = { version = "2.0.0", features = ["serde"] }
57 | thiserror = "1.0.40"
58 | rand = "0.8.5"
59 | fs3 = "0.5.0"
60 | tokio-util = "0.7.4"
61 | percent-encoding = "=2.3.1"
62 | rusqlite = "0.32.1"
63 | glob = "0.3.1"
64 | faster-hex = "0.9.0"
65 | path-clean = "1.0.1"
66 | async-trait = "^0.1.73"
67 | deno_permissions = "=0.23.0"
68 | http = "^1.0.0"
69 | http-body-util = "^0.1.2"
70 | http-body = "1.0.0"
71 | hyper-util = "0.1.6"
72 | tempfile = "3.10.1"
73 | chashmap = "2.2.2"
74 | dirs = "5.0.1"
75 | uuid = { version = "1.10.0", features = ["v4", "serde"] }
76 | serde_json = "1.0.122"
77 | borsh = { version = "1.5.1", features = ["derive", "borsh-derive"] }
78 | memmap2 = "0.9.4"
79 | sha2 = "0.10.8"
80 | ahash = "0.8.11"
81 | flaky_test = "0.2.2"
82 | tonic = "0.12.2"
83 | prost = "0.13.3"
84 | tonic-async-interceptor = "0.12.0"
85 | bcrypt = "0.15.1"
86 | clap = { version = "4.5.18", features = ["cargo", "string", "env", "derive"] }
87 | dashmap = "6.1.0"
88 | tonic-middleware = "0.2.2"
89 | r2d2 = "0.8.10"
90 | scopeguard = "1.2.0"
91 | prost-types = "0.13.3"
92 | paste = "1.0.15"
93 | lru = "0.12.4"
94 | parking_lot = "0.12.3"
95 |
96 | [profile.dind]
97 | inherits = "dev"
98 |
99 | [profile.no-debug-assertions]
100 | inherits = "dev"
101 | debug-assertions = false
102 |
103 | [profile.release]
104 | lto = true
105 |
106 | # NOTE(review): Cargo only honors an `[env]` table in `.cargo/config.toml`,
107 | # not in `Cargo.toml` — move this section there for it to take effect.
108 | [env]
109 | RUST_BACKTRACE = "1"
108 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 andreespirela
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | # `SchemaJS`
10 |
11 | ## Introduction
12 |
13 | SchemaJS is a powerful NoSQL database inspired by [Google Datastore](https://cloud.google.com/products/datastore?hl=en) and [Supabase Edge Runtime](https://github.com/supabase/edge-runtime), designed for developers seeking flexibility and performance. Built on top of the [V8 JavaScript engine](https://github.com/denoland/rusty_v8), SchemaJS offers a unique runtime environment where the entire database schema and query logic are defined in JavaScript.
14 |
15 | With SchemaJS, you can manage your data using familiar JavaScript syntax, enabling seamless integration with modern JavaScript-based applications. Its primary goal is to deliver a highly customizable and dynamic query language while maintaining the simplicity and ease of use that JavaScript developers expect.
16 |
17 | ### _Why JavaScript?_
18 |
19 | > JavaScript has become the backbone of modern web development, with many applications built using JavaScript or its ecosystem. By enabling SchemaJS to run in the same language, developers can now unify their front-end, back-end, and database logic using a single language. This minimizes the context switching between different technologies, making development faster and more intuitive.
20 | Leveraging JavaScript for database operations also unlocks a new level of flexibility. What might traditionally require complex SQL procedures with inherent limitations can be transformed into straightforward JavaScript functions.
21 |
22 | ## Special Thanks
23 | - [Bartek Iwańczuk](https://github.com/bartlomieju) - For the incredible support in V8 related questions.
24 | - [Supabase Team](https://github.com/supabase) - For inspirational work taken from the Edge Runtime
25 |
26 | ## Status of the project
27 |
28 | SchemaJS is still very young and until version 1.0.0, its production usage will not be recommended. Any feedback is welcome.
29 |
30 | ## License
31 | This project is licensed under the [MIT License](./LICENSE)
32 |
--------------------------------------------------------------------------------
/crates/base/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "base"
3 | version.workspace = true
4 | edition.workspace = true
5 |
6 | [dependencies]
7 | deno_core.workspace = true
8 | schemajs_primitives = { version = "0.1.0", path = "../primitives" }
9 | schemajs_helpers = { version = "0.1.0", path = "../helpers" }
10 | schemajs_workers = { version = "0.1.0", path = "../workers" }
11 | schemajs_config = { version = "0.1.0", path = "../config" }
12 | schemajs_engine = { version = "0.1.0", path = "../engine" }
13 | schemajs_core = { version = "0.1.0", path = "../core" }
14 | schemajs_module_loader = { version = "0.1.0", path = "../module_loader" }
15 | schemajs_internal = { version = "0.1.0", path = "../internal" }
16 | schemajs_data = { version = "0.1.0", path = "../data" }
17 | schemajs_repl = { version = "0.1.0", path = "../repl" }
18 | serde.workspace = true
19 | anyhow.workspace = true
20 | tokio.workspace = true
21 | walkdir.workspace = true
22 | deno_ast.workspace = true
23 | uuid = { version = "1.10.0", features = ["v4"] }
24 | tokio-util = { workspace = true, features = ["full"] }
25 | r2d2.workspace = true
26 | thiserror.workspace = true
27 | serde_json.workspace = true
28 | once_cell.workspace = true
29 | scopeguard.workspace = true
30 | dashmap.workspace = true
31 | lru.workspace = true
32 | parking_lot.workspace = true
33 |
34 | [dev-dependencies]
35 | schemajs_query = { version = "0.1.0", path = "../query" }
36 | flaky_test.workspace = true
37 |
38 | [build-dependencies]
39 | schemajs_core = { version = "0.1.0", path = "../core" }
40 | schemajs_primitives = { version = "0.1.0", path = "../primitives" }
41 | schemajs_engine = { version = "0.1.0", path = "../engine" }
42 | schemajs_helpers = { version = "0.1.0", path = "../helpers" }
43 | schemajs_repl = { version = "0.1.0", path = "../repl" }
44 | deno_core.workspace = true
--------------------------------------------------------------------------------
/crates/base/build.rs:
--------------------------------------------------------------------------------
1 | use std::env;
2 | use std::path::PathBuf;
3 |
4 | mod schema_js_snapshot {
5 | use deno_core::snapshot::{create_snapshot, CreateSnapshotOptions};
6 | use deno_core::Extension;
7 | use schemajs_core::transpiler::maybe_transpile_source;
8 | use schemajs_engine::engine::SchemeJsEngine;
9 | use std::cell::RefCell;
10 | use std::io::Write;
11 | use std::path::PathBuf;
12 | use std::rc::Rc;
13 | use std::sync::Arc;
14 |
15 | pub fn create_runtime_snapshot(snapshot_path: PathBuf) {
16 | let extensions: Vec<Extension> = vec![
17 | schemajs_core::sjs_core::init_ops_and_esm(),
18 | schemajs_primitives::sjs_primitives::init_ops_and_esm(),
19 | schemajs_engine::sjs_engine::init_ops_and_esm(),
20 | schemajs_helpers::sjs_helpers::init_ops_and_esm(),
21 | schemajs_repl::sjs_repl::init_ops_and_esm(),
22 | ];
23 | let snapshot = create_snapshot(
24 | CreateSnapshotOptions {
25 | cargo_manifest_dir: env!("CARGO_MANIFEST_DIR"),
26 | startup_snapshot: None,
27 | skip_op_registration: false,
28 | extensions,
29 | extension_transpiler: Some(Rc::new(|specifier, source| {
30 | maybe_transpile_source(specifier, source)
31 | })),
32 | with_runtime_cb: None,
33 | },
34 | None,
35 | );
36 |
37 | let output = snapshot.unwrap();
38 |
39 | let mut snapshot = std::fs::File::create(snapshot_path).unwrap();
40 | snapshot.write_all(&output.output).unwrap();
41 |
42 | for path in output.files_loaded_during_snapshot {
43 | println!("cargo:rerun-if-changed={}", path.display());
44 | }
45 | }
46 | }
47 | fn main() {
48 | println!("cargo:rustc-env=TARGET={}", env::var("TARGET").unwrap());
49 | println!("cargo:rustc-env=PROFILE={}", env::var("PROFILE").unwrap());
50 |
51 | let o = PathBuf::from(env::var_os("OUT_DIR").unwrap());
52 |
53 | // Main snapshot
54 | let runtime_snapshot_path = o.join("RUNTIME_SNAPSHOT.bin");
55 |
56 | schema_js_snapshot::create_runtime_snapshot(runtime_snapshot_path.clone());
57 | }
58 |
--------------------------------------------------------------------------------
/crates/base/src/context/context.rs:
--------------------------------------------------------------------------------
1 | use crate::manager::SchemeJsManager;
2 | use parking_lot::RwLock;
3 | use schemajs_config::SchemeJsConfig;
4 | use schemajs_data::fdm::FileDescriptorManager;
5 | use schemajs_engine::engine::SchemeJsEngine;
6 | use schemajs_helpers::helper::HelperCall;
7 | use schemajs_internal::manager::InternalManager;
8 | use std::path::PathBuf;
9 | use std::sync::atomic::{AtomicBool, Ordering};
10 | use std::sync::Arc;
11 | use tokio::sync::mpsc::Sender;
12 | use uuid::Uuid;
13 |
14 | pub struct SjsContext {
15 | pub config_file: PathBuf,
16 | pub data_path_folder: Option<PathBuf>,
17 | pub current_folder: PathBuf,
18 | pub engine: Arc<RwLock<SchemeJsEngine>>,
19 | pub internal_manager: Arc<InternalManager>,
20 | pub task_manager: Arc<RwLock<SchemeJsManager>>,
21 | pub config: Arc<SchemeJsConfig>,
22 | pub initialized: AtomicBool,
23 | pub fdm: Arc<FileDescriptorManager>,
24 | repl: AtomicBool,
25 | }
26 |
27 | impl SjsContext {
28 | pub fn new(
29 | config_path: PathBuf,
30 | data_path: Option<PathBuf>,
31 | helper_tx: Sender<HelperCall>,
32 | ) -> anyhow::Result<Self> {
33 | // Determine the base path by joining the current directory with the config path
34 | let base_path = std::env::current_dir()?.join(&config_path);
35 |
36 | // Determine the appropriate folder path and config file path
37 | let (folder_path, config_file) = if base_path.is_dir() {
38 | (base_path.clone(), base_path.join("SchemaJS.toml"))
39 | } else {
40 | let folder_path = base_path.parent().map_or_else(
41 | || std::env::current_dir(),
42 | |parent| Ok(parent.to_path_buf()),
43 | )?;
44 | (folder_path.clone(), base_path)
45 | };
46 |
47 | let config = Arc::new(SchemeJsConfig::new(config_file.clone())?);
48 | let file_descriptor_manager = Arc::new(FileDescriptorManager::new(
49 | config.process.max_file_descriptors_in_cache,
50 | ));
51 |
52 | let data_path = if cfg!(test) {
53 | let data_folder = folder_path
54 | .clone()
55 | .join(".data")
56 | .join(Uuid::new_v4().to_string());
57 | if !data_folder.exists() {
58 | println!("Using test path");
59 | let _ = std::fs::create_dir_all(&data_folder);
60 | }
61 | Some(data_path.unwrap_or(data_folder))
62 | } else {
63 | data_path
64 | };
65 |
66 | let mut engine = Arc::new(RwLock::new(SchemeJsEngine::new(
67 | data_path.clone(),
68 | config.clone(),
69 | helper_tx,
70 | file_descriptor_manager.clone(),
71 | )));
72 | let mut internal_manager = Arc::new(InternalManager::new(engine.clone()));
73 | let mut manager = Arc::new(RwLock::new(SchemeJsManager::new(engine.clone())));
74 |
75 | Ok(Self {
76 | config_file,
77 | data_path_folder: data_path.clone(),
78 | current_folder: folder_path,
79 | engine,
80 | internal_manager,
81 | task_manager: manager,
82 | config,
83 | initialized: AtomicBool::new(false),
84 | fdm: file_descriptor_manager,
85 | repl: AtomicBool::new(true),
86 | })
87 | }
88 |
89 | pub fn mark_loaded(&self) {
90 | self.initialized.store(true, Ordering::SeqCst);
91 | }
92 |
93 | // Function to check if the struct is loaded
94 | pub fn is_loaded(&self) -> bool {
95 | self.initialized.load(Ordering::SeqCst)
96 | }
97 |
98 | pub fn mark_repl(&self) {
99 | self.repl.store(true, Ordering::SeqCst);
100 | }
101 |
102 | // Function to check if the struct is loaded
103 | pub fn is_repl(&self) -> bool {
104 | self.repl.load(Ordering::SeqCst)
105 | }
106 | }
107 |
--------------------------------------------------------------------------------
/crates/base/src/context/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod context;
2 |
--------------------------------------------------------------------------------
/crates/base/src/error.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 | use thiserror::Error;
3 | #[derive(Debug, Clone, Serialize, Deserialize, Error)]
4 | pub enum SjsRtError {
5 | #[error("Runtime could not be created")]
6 | UnexpectedRuntimeCreation,
7 |
8 | #[error("Runtime is currently being used")]
9 | BusyRuntime,
10 | }
11 |
--------------------------------------------------------------------------------
/crates/base/src/helpers/mod.rs:
--------------------------------------------------------------------------------
1 | use crate::context::context::SjsContext;
2 | use crate::pool::pool_provider::SjsPoolProvider;
3 | use crate::runtime::SchemeJsRuntime;
4 | use crate::thread::WORKER_RT;
5 | use r2d2::Pool;
6 | use schemajs_helpers::helper::HelperCall;
7 | use std::ops::DerefMut;
8 | use std::sync::Arc;
9 | use tokio::sync::mpsc::Receiver;
10 | use tokio::task::JoinHandle;
11 |
12 | pub struct HelpersManager {
13 | pub ctx: Arc<SjsContext>,
14 | }
15 |
16 | impl HelpersManager {
17 | pub fn new(
18 | sjs_runtime_pool: Arc<Pool<SjsPoolProvider>>,
19 | rx: Receiver<HelperCall>,
20 | ctx: Arc<SjsContext>,
21 | ) -> HelpersManager {
22 | let handler_thread = Self::init(sjs_runtime_pool, rx, ctx.clone());
23 |
24 | Self { ctx }
25 | }
26 |
27 | pub fn init(
28 | sjs_runtime_pool: Arc<Pool<SjsPoolProvider>>,
29 | mut rx: Receiver<HelperCall>,
30 | ctx: Arc<SjsContext>,
31 | ) {
32 | let rt = &WORKER_RT;
33 | rt.spawn_pinned(move || {
34 | tokio::task::spawn_local(async move {
35 | while let Some(cmd) = rx.recv().await {
36 | let permit = SchemeJsRuntime::acquire().await;
37 | match SchemeJsRuntime::new(ctx.clone()).await {
38 | Ok(rt) => {
39 | let lock = rt.acquire_lock(); // TODO: Wait for lock
40 | match lock {
41 | Ok(_) => {
42 | let mut runtime = scopeguard::guard(rt, |mut runtime| unsafe {
43 | runtime.js_runtime.v8_isolate().enter();
44 | runtime.release_lock();
45 | });
46 |
47 | runtime.call_helper(cmd).await;
48 |
49 | unsafe {
50 | runtime.js_runtime.v8_isolate().exit();
51 | }
52 |
53 | drop(permit);
54 | }
55 | Err(_) => {}
56 | }
57 | }
58 | Err(_) => {}
59 | }
60 | }
61 | })
62 | });
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/crates/base/src/lib.rs:
--------------------------------------------------------------------------------
// Crate module layout for `base`.
pub mod context; // Shared runtime context (`SjsContext`).
mod error; // `SjsRtError` runtime/pool errors.
mod helpers; // Background processing of helper calls.
mod manager; // Scheduled internal tasks (e.g. reconciliation).
pub mod pool; // r2d2-backed pool of JS runtimes.
pub mod runner; // High-level entry point (`SjsRunner`).
pub mod runtime; // `SchemeJsRuntime` itself.
pub mod snapshot; // Embedded V8 snapshot bytes.
mod thread; // Shared local worker pool (`WORKER_RT`).
10 |
--------------------------------------------------------------------------------
/crates/base/src/manager/task.rs:
--------------------------------------------------------------------------------
1 | use crate::manager::task_duration::TaskDuration;
2 | use parking_lot::RwLock;
3 | use schemajs_engine::engine::SchemeJsEngine;
4 | use std::fmt::{Debug, Formatter};
5 | use std::sync::Arc;
6 | use tokio_util::sync::CancellationToken;
7 |
8 | pub type TaskSignature =
9 | Box>) -> Result<(), ()> + Send + Sync + 'static>;
10 |
11 | #[derive(Clone)]
12 | pub struct TaskCallback {
13 | pub cb: Arc,
14 | }
15 |
16 | impl Debug for TaskCallback {
17 | fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
18 | write!(f, "Function pointer")
19 | }
20 | }
21 |
22 | #[derive(Clone)]
23 | pub struct Task {
24 | pub id: String,
25 | pub func: TaskCallback,
26 | pub duration: TaskDuration,
27 | pub cancellation_token: Arc,
28 | pub loop_execution: bool,
29 | }
30 |
31 | impl Task {
32 | pub fn new(
33 | id: String,
34 | func: TaskSignature,
35 | task_duration: TaskDuration,
36 | loop_execution: bool,
37 | ) -> Self {
38 | Self {
39 | id,
40 | func: TaskCallback { cb: Arc::new(func) },
41 | duration: task_duration,
42 | cancellation_token: Arc::new(CancellationToken::new()),
43 | loop_execution,
44 | }
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/crates/base/src/manager/task_duration.rs:
--------------------------------------------------------------------------------
1 | use std::time::Duration;
2 |
/// Scheduling cadence for a `Task`.
#[derive(Clone)]
pub enum TaskDuration {
    /// Re-run on a fixed interval.
    Defined(Duration),
    /// Run a single time.
    Once,
}
8 |
--------------------------------------------------------------------------------
/crates/base/src/manager/tasks/mod.rs:
--------------------------------------------------------------------------------
1 | use crate::manager::task::Task;
2 | use crate::manager::tasks::reconcile_task::RECONCILE_DB_TASK;
3 |
4 | mod reconcile_task;
5 |
6 | pub fn get_all_internal_tasks() -> Vec {
7 | vec![(*RECONCILE_DB_TASK).clone()]
8 | }
9 |
--------------------------------------------------------------------------------
/crates/base/src/manager/tasks/reconcile_task.rs:
--------------------------------------------------------------------------------
1 | use crate::manager::task::Task;
2 | use crate::manager::task_duration::TaskDuration;
3 | use std::cell::LazyCell;
4 | use std::time::{Duration, Instant};
5 |
6 | pub const RECONCILE_DB_TASK: LazyCell = LazyCell::new(|| {
7 | Task::new(
8 | "1".to_string(),
9 | Box::new(move |rt| {
10 | let engine = rt.write();
11 | for db in engine.databases.iter() {
12 | let query_manager = &db.query_manager;
13 | for table in query_manager.table_names.read().unwrap().iter() {
14 | let table = query_manager.tables.get(table).unwrap();
15 | let _ = table.temps.reconcile();
16 | }
17 | }
18 | Ok(())
19 | }),
20 | TaskDuration::Defined(Duration::from_millis(250)),
21 | true,
22 | )
23 | });
24 |
--------------------------------------------------------------------------------
/crates/base/src/pool/mod.rs:
--------------------------------------------------------------------------------
1 | use crate::context::context::SjsContext;
2 | use crate::pool::pool_provider::SjsPoolProvider;
3 | use r2d2::Pool;
4 | use std::sync::Arc;
5 |
6 | pub mod pool_provider;
7 |
8 | pub struct SjsRuntimePool {
9 | pub pool: Arc>,
10 | }
11 |
12 | impl SjsRuntimePool {
13 | pub fn new(shared_context: Arc, max_runtimes: u32) -> Self {
14 | let provider = SjsPoolProvider { shared_context };
15 |
16 | let a = "";
17 |
18 | let pool = r2d2::Pool::builder()
19 | .max_size(max_runtimes)
20 | .min_idle(Some(0))
21 | .build(provider)
22 | .unwrap();
23 |
24 | let b = "";
25 |
26 | Self {
27 | pool: Arc::new(pool),
28 | }
29 | }
30 | }
31 |
--------------------------------------------------------------------------------
/crates/base/src/pool/pool_provider.rs:
--------------------------------------------------------------------------------
1 | use crate::context::context::SjsContext;
2 | use crate::error::SjsRtError;
3 | use crate::runtime::SchemeJsRuntime;
4 | use std::sync::Arc;
5 |
6 | pub struct SjsPoolProvider {
7 | pub shared_context: Arc,
8 | }
9 |
10 | impl r2d2::ManageConnection for SjsPoolProvider {
11 | type Connection = SchemeJsRuntime;
12 | type Error = SjsRtError;
13 |
14 | fn connect(&self) -> Result {
15 | let ctx = self.shared_context.clone();
16 | let current = tokio::runtime::Runtime::new().unwrap();
17 |
18 | let rt = current
19 | .block_on(async move { SchemeJsRuntime::new(ctx).await })
20 | .map_err(|e| SjsRtError::UnexpectedRuntimeCreation)?;
21 |
22 | Ok(rt)
23 | }
24 |
25 | fn is_valid(&self, _conn: &mut Self::Connection) -> Result<(), Self::Error> {
26 | Ok(())
27 | }
28 |
29 | fn has_broken(&self, conn: &mut Self::Connection) -> bool {
30 | conn.acquire_lock()
31 | .map_err(|_| Self::Error::BusyRuntime)
32 | .is_ok()
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/crates/base/src/runner.rs:
--------------------------------------------------------------------------------
1 | use crate::context::context::SjsContext;
2 | use crate::helpers::HelpersManager;
3 | use crate::pool::SjsRuntimePool;
4 | use schemajs_helpers::create_helper_channel;
5 | use schemajs_helpers::helper::HelperCall;
6 | use std::path::PathBuf;
7 | use std::sync::Arc;
8 | use tokio::sync::mpsc::Sender;
9 |
10 | pub struct SjsRunner {
11 | pub sjs_context: Arc,
12 | pub rt_pool: Arc,
13 | pub helpers_manager: HelpersManager,
14 | pub helper_tx: Sender,
15 | }
16 |
17 | pub struct SjsRunnerConfig {
18 | pub max_helper_processing_capacity: usize,
19 | pub max_runtimes: u32,
20 | pub config_path: PathBuf,
21 | pub data_path: Option,
22 | }
23 |
24 | impl SjsRunner {
25 | pub fn new(config: SjsRunnerConfig) -> Self {
26 | let (helper_tx, helper_rx) = create_helper_channel(config.max_helper_processing_capacity);
27 | let context = Arc::new(
28 | SjsContext::new(config.config_path, config.data_path, helper_tx.clone()).unwrap(),
29 | );
30 | let rt_pool = Arc::new(SjsRuntimePool::new(context.clone(), config.max_runtimes));
31 | let helpers_manager = HelpersManager::new(rt_pool.pool.clone(), helper_rx, context.clone());
32 |
33 | Self {
34 | helper_tx,
35 | sjs_context: context,
36 | rt_pool,
37 | helpers_manager,
38 | }
39 | }
40 | }
41 |
#[cfg(test)]
mod runner_tests {
    use crate::runner::{SjsRunner, SjsRunnerConfig};
    use schemajs_helpers::helper::{HelperCall, HelperDbContext};
    use serde_json::json;
    use std::path::PathBuf;
    use std::time::Duration;
    use tokio::sync::mpsc::unbounded_channel;

    // Smoke test: boots a runner against the bundled `test_cases/default-db`
    // fixture and submits one custom-query helper call.
    #[tokio::test]
    pub async fn test_runner_with_helpers() {
        println!("Runner created");
        let runner = SjsRunner::new(SjsRunnerConfig {
            max_helper_processing_capacity: 10,
            max_runtimes: 3,
            config_path: PathBuf::from("./test_cases/default-db"),
            data_path: None,
        });

        println!("Before tx created");

        // Response channel; the receiver half is intentionally unused here.
        let resp = unbounded_channel();

        // Invokes the `helloWorld` query defined in public/tables/users.ts.
        runner
            .helper_tx
            .send(HelperCall::CustomQuery {
                db_ctx: HelperDbContext {
                    table: Some("users".to_string()),
                    db: Some("public".to_string()),
                },
                identifier: "helloWorld".to_string(),
                req: json!({
                    "id": 1,
                    "msg": "Hello"
                }),
                response: resp.0,
            })
            .await
            .unwrap();

        println!("After tx created");

        // NOTE(review): a fixed 10 s sleep makes this test slow and
        // timing-based, and nothing asserts the helper's effect.
        tokio::time::sleep(Duration::from_secs(10)).await;

        // runner.helpers_manager.rx_thread.abort();
    }
}
89 |
--------------------------------------------------------------------------------
/crates/base/src/snapshot.rs:
--------------------------------------------------------------------------------
1 | pub static CLI_SNAPSHOT: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/RUNTIME_SNAPSHOT.bin"));
2 |
3 | pub fn snapshot() -> Option<&'static [u8]> {
4 | let data = CLI_SNAPSHOT;
5 | Some(data)
6 | }
7 |
--------------------------------------------------------------------------------
/crates/base/src/thread/mod.rs:
--------------------------------------------------------------------------------
1 | use once_cell::sync::Lazy;
2 | use std::num::NonZeroUsize;
3 |
4 | pub const DEFAULT_USER_WORKER_POOL_SIZE: usize = 1;
5 |
6 | pub static WORKER_RT: Lazy = Lazy::new(|| {
7 | let maybe_pool_size = std::env::var("SJS_WORKER_POOL_SIZE")
8 | .ok()
9 | .and_then(|it| it.parse::().ok())
10 | .map(|it| {
11 | if it < DEFAULT_USER_WORKER_POOL_SIZE {
12 | DEFAULT_USER_WORKER_POOL_SIZE
13 | } else {
14 | it
15 | }
16 | });
17 |
18 | tokio_util::task::LocalPoolHandle::new(if cfg!(debug_assertions) {
19 | maybe_pool_size.unwrap_or(DEFAULT_USER_WORKER_POOL_SIZE)
20 | } else {
21 | maybe_pool_size.unwrap_or(
22 | std::thread::available_parallelism()
23 | .ok()
24 | .map(NonZeroUsize::get)
25 | .unwrap_or(DEFAULT_USER_WORKER_POOL_SIZE),
26 | )
27 | })
28 | });
29 |
--------------------------------------------------------------------------------
/crates/base/test_cases/default-db/CustomSchemaJS.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | databases = [
3 | "./public"
4 | ]
5 |
6 | [default]
--------------------------------------------------------------------------------
/crates/base/test_cases/default-db/SchemaJS.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | databases = [
3 | "./public"
4 | ]
5 |
6 | [default]
--------------------------------------------------------------------------------
/crates/base/test_cases/default-db/public/tables/products.ts:
--------------------------------------------------------------------------------
// Test fixture: minimal `products` table with a single string `id` column.
export default function main() {
    const { Table, Column } = SchemaJS;
    return new Table("products")
        .addColumn(new Column("id").string())
}
--------------------------------------------------------------------------------
/crates/base/test_cases/default-db/public/tables/users.ts:
--------------------------------------------------------------------------------
// Test fixture: `users` table with two custom queries used by runner tests.
export default function main() {
    const { Table, Column, print, QueryBuilder, query } = SchemaJS;
    return new Table("users")
        .addColumn(new Column("id").string())
        .addColumn(new Column("username").string())
        .addColumn(new Column("password").string())
        .addColumn(new Column("enabled").boolean().withDefaultValue(true))
        // Searches for rows where username == "Luis" and prints the result.
        .addQuery("searchRowLuis", async (req) => {
            let q = new QueryBuilder().and((and) => and.where("username", "=", "Luis"));
            let a = await query(q);
            print(JSON.stringify(a));
            return a;
        })
        // Echoes the request payload (used by the helper smoke test).
        .addQuery("helloWorld", (req) => { print(JSON.stringify(req)); })
}
--------------------------------------------------------------------------------
/crates/cli/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "cli"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | authors.workspace = true
7 |
8 | [[bin]]
9 | name = "schemajs"
10 | path = "src/main.rs"
11 |
12 | [dependencies]
13 | base = { version = "0.1.0", path = "../base" }
14 | tokio.workspace = true
15 | anyhow.workspace = true
16 | clap.workspace = true
17 | schemajs_grpc = { version = "0.1.0", path = "../grpc" }
18 | schemajs_internal = { version = "0.1.0", path = "../internal" }
19 | colored = "2.1.0"
20 | rustyline = "14.0.0"
21 | enum-as-inner.workspace = true
22 | serde_json.workspace = true
23 | schemajs_helpers = { version = "0.1.0", path = "../helpers" }
24 | schemajs_repl = { version = "0.1.0", path = "../repl" }
25 | schemajs_core = { version = "0.1.0", path = "../core" }
--------------------------------------------------------------------------------
/crates/cli/src/cmd/init.rs:
--------------------------------------------------------------------------------
1 | use colored::Colorize;
2 | use std::io::Write;
3 | use std::path::PathBuf;
4 |
// Boilerplate `users` table written to `<dir>/public/tables/users.ts` by `init_cmd`.
static USERS_TABLE_FILE_CONTENT: &str = r#"
export default function main() {
    const { Table, Column } = SchemaJS;
    return new Table("users")
        .addColumn(new Column("id").string())
        .addColumn(new Column("username").string())
        .addColumn(new Column("password").string())
        .addColumn(new Column("enabled").boolean().withDefaultValue(true));
}
"#;
15 |
16 | pub(crate) struct InitOpts {
17 | pub(crate) dir: Option,
18 | }
19 | pub(crate) fn init_cmd(opts: InitOpts) {
20 | let InitOpts { dir } = opts;
21 |
22 | let dir_path = dir
23 | .map(|p| PathBuf::from(p))
24 | .unwrap_or_else(|| std::env::current_dir().unwrap());
25 |
26 | if !dir_path.exists() {
27 | if let Err(err) = std::fs::create_dir_all(&dir_path) {
28 | eprintln!(
29 | "[{}] Folder could not be found or created: {:?}. Error: {:?}",
30 | "Error".red(),
31 | dir_path,
32 | err
33 | );
34 | return;
35 | }
36 | }
37 |
38 | println!("[{}] Working on directory {:?}", "Info".yellow(), dir_path);
39 | println!();
40 |
41 | let schema_js_toml = dir_path.join("SchemaJS.toml");
42 | let public_schema_tables = dir_path.join("public/tables");
43 |
44 | if let Err(err) = std::fs::File::create(&schema_js_toml) {
45 | eprintln!(
46 | "[{}] SchemaJS.toml could not be created. Error: {:?}",
47 | "Error".red(),
48 | err
49 | );
50 | return;
51 | }
52 |
53 | if let Err(err) = std::fs::create_dir_all(&public_schema_tables) {
54 | eprintln!(
55 | "[{}] 'public' schema tables could not be created. Error: {:?}",
56 | "Error".red(),
57 | err
58 | );
59 | return;
60 | }
61 |
62 | let users_table = public_schema_tables.join("users.ts");
63 |
64 | if let Err(err) = std::fs::File::create(&users_table)
65 | .and_then(|mut file| file.write_all(USERS_TABLE_FILE_CONTENT.as_bytes()))
66 | {
67 | eprintln!(
68 | "[{}] Default table 'users.ts' could not be created or initialized: {:?}",
69 | "Error".red(),
70 | err
71 | );
72 | return;
73 | }
74 |
75 | println!();
76 | println!("[{}] SchemaJS initialized successfully", "Success".green());
77 | println!();
78 |
79 | println!("To start the server, run:");
80 | println!(" cd {:?} && schemajs start", dir_path);
81 | println!();
82 | println!("Stuck? Join our Discord https://discord.gg/nRzTHygKn5");
83 | println!();
84 | }
85 |
--------------------------------------------------------------------------------
/crates/cli/src/cmd/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod init;
2 | mod repl;
3 | pub mod start;
4 |
--------------------------------------------------------------------------------
/crates/cli/src/cmd/repl.rs:
--------------------------------------------------------------------------------
1 | use base::runner::SjsRunner;
2 | use base::runtime::SchemeJsRuntime;
3 | use colored::Colorize;
4 | use rustyline::error::ReadlineError;
5 | use rustyline::history::DefaultHistory;
6 | use rustyline::{DefaultEditor, Editor};
7 | use schemajs_core::GlobalContext;
8 | use schemajs_repl::errors::{ReplError, ReplErrorResponse};
9 | use schemajs_repl::query_state::ReplQueryState;
10 | use schemajs_repl::{get_current_db_context, get_query_state, run_repl_script};
11 | use serde_json::Value;
12 | use std::sync::Arc;
13 |
/// Outcome of one readline prompt in the REPL.
enum InputResult {
    /// A trimmed line of user input.
    Line(String),
    /// User pressed CTRL+C.
    CtrlC,
    /// User pressed CTRL+D (EOF).
    CtrlD,
    /// Any other readline failure, stringified.
    Error(String),
}
20 |
21 | fn handle_repl_error(val: &Value) -> bool {
22 | if let Some(_) = val.get("REPL_ERR") {
23 | let err = ReplErrorResponse::from(val);
24 | match err.error {
25 | ReplError::AlreadyInContext => {
26 | println!(
27 | "[{}] Already in database context. Exit by calling `exit()` or by doing `use(dbName, tableName)`",
28 | "Info".yellow()
29 | );
30 | }
31 | ReplError::UnexpectedUseArgsLength => {
32 | println!(
33 | "[{}] Method `use` is expecting two arguments.",
34 | "Error".red()
35 | );
36 | }
37 | ReplError::AlreadyInGlobal => {
38 | println!(
39 | "[{}] Already in global context. Press CTRL+C or type `close()` to exit.",
40 | "Info".yellow()
41 | );
42 | }
43 | }
44 | return true;
45 | }
46 |
47 | false
48 | }
49 |
50 | pub(crate) async fn repl(runner: Arc) {
51 | runner.sjs_context.mark_repl();
52 |
53 | let mut rt = SchemeJsRuntime::new(runner.sjs_context.clone())
54 | .await
55 | .unwrap();
56 |
57 | println!("> {}", "REPL is running".yellow());
58 | println!();
59 |
60 | let mut context = GlobalContext::default();
61 | let mut rl = DefaultEditor::new().unwrap();
62 |
63 | loop {
64 | let current_query_state = get_query_state(&context);
65 |
66 | if context.repl_exit {
67 | break;
68 | }
69 |
70 | let cmd_prefix = {
71 | match ¤t_query_state {
72 | ReplQueryState::Global => format!("({}) > ", "global".green()),
73 | ReplQueryState::Database(db) => format!("({}) > ", db.green()),
74 | ReplQueryState::Table(db, tbl) => format!("({}.{}) > ", db.green(), tbl.blue()),
75 | }
76 | };
77 |
78 | let input = get_user_input(cmd_prefix.as_str(), &mut rl);
79 |
80 | match input {
81 | InputResult::Line(input) => {
82 | let result = run_repl_script(&mut rt.js_runtime, input).await;
83 | context = get_current_db_context(&mut rt.js_runtime);
84 |
85 | if let Ok(res) = result {
86 | if let Some(res) = res {
87 | let err = handle_repl_error(&res);
88 | if !err {
89 | if !res.is_null() {
90 | println!("{}", res);
91 | }
92 | }
93 | }
94 | } else {
95 | let err = result.err().unwrap();
96 | println!("[{}] {}", "Error".red(), err);
97 | }
98 | println!();
99 | }
100 | InputResult::CtrlC => {
101 | break;
102 | }
103 | InputResult::CtrlD => {
104 | break;
105 | }
106 | InputResult::Error(e) => {
107 | println!("[Error] {}", e.red());
108 | break;
109 | }
110 | }
111 | }
112 | }
113 |
114 | fn get_user_input(prompt: &str, history: &mut Editor<(), DefaultHistory>) -> InputResult {
115 | match history.readline(prompt) {
116 | Ok(line) => {
117 | let _ = history.add_history_entry(line.as_str());
118 | InputResult::Line(line.trim().to_string()) // Remove leading/trailing spaces
119 | }
120 | Err(ReadlineError::Interrupted) => InputResult::CtrlC,
121 | Err(ReadlineError::Eof) => InputResult::CtrlD,
122 | Err(err) => InputResult::Error(err.to_string()),
123 | }
124 | }
125 |
--------------------------------------------------------------------------------
/crates/cli/src/cmd/start.rs:
--------------------------------------------------------------------------------
1 | use crate::cmd::repl::repl;
2 | use base::runner::{SjsRunner, SjsRunnerConfig};
3 | use base::runtime::SchemeJsRuntime;
4 | use clap::crate_version;
5 | use colored::Colorize;
6 | use schemajs_grpc::server::{GrpcServer, GrpcServerArgs};
7 | use std::path::PathBuf;
8 | use std::sync::Arc;
9 | use std::time::Duration;
10 |
/// Options for `schemajs start`.
pub(crate) struct StartOpts {
    /// Listen address override; falls back to the configured gRPC host.
    pub ip: Option<String>,
    /// Path to SchemaJS.toml (or a directory containing it).
    pub config_file: String,
    /// When true, do NOT launch the REPL (wired from the `--no-repl` flag).
    pub repl: bool,
}
16 |
17 | async fn start_server(ip: Option, runner: Arc) {
18 | let ip = ip.unwrap_or_else(|| runner.sjs_context.config.grpc.host.clone());
19 | let grpc_server = GrpcServer::new(GrpcServerArgs {
20 | db_manager: runner.sjs_context.internal_manager.clone(),
21 | ip: Some(ip.clone()),
22 | });
23 |
24 | println!();
25 | println!("SJS {}", crate_version!());
26 | println!("Exit using ctrl+c");
27 | println!();
28 |
29 | println!("> {}", format!("Starting GRPC at {}", ip).blue());
30 |
31 | grpc_server.start().await.unwrap();
32 | }
33 |
34 | pub(crate) async fn start(opts: StartOpts) {
35 | let StartOpts {
36 | config_file,
37 | ip,
38 | repl: no_repl,
39 | } = opts;
40 |
41 | let runner = SjsRunner::new(SjsRunnerConfig {
42 | max_helper_processing_capacity: 100,
43 | max_runtimes: 10,
44 | config_path: PathBuf::from(config_file),
45 | data_path: None,
46 | });
47 |
48 | let arc_runner = Arc::new(runner);
49 |
50 | {
51 | // Loader runtime
52 | let rt = SchemeJsRuntime::new(arc_runner.sjs_context.clone())
53 | .await
54 | .unwrap();
55 | drop(rt);
56 | }
57 |
58 | let runner = arc_runner.clone();
59 | tokio::spawn(async move { start_server(ip, runner).await });
60 | tokio::time::sleep(Duration::from_secs(1)).await;
61 |
62 | if !no_repl {
63 | let _repl = repl(arc_runner).await;
64 | } else {
65 | loop {}
66 | }
67 | }
68 |
--------------------------------------------------------------------------------
/crates/cli/src/flags.rs:
--------------------------------------------------------------------------------
1 | use clap::{arg, crate_version, ArgAction, Command};
2 |
/// Builds the top-level `schemajs` CLI with its `start` and `init` subcommands.
pub(super) fn get_cli() -> Command {
    Command::new(env!("CARGO_BIN_NAME"))
        .about(env!("CARGO_PKG_DESCRIPTION"))
        .version(format!("SJS {}", crate_version!()))
        .subcommand(get_start_command())
        .subcommand(get_init_command())
}
10 |
11 | fn get_start_command() -> Command {
12 | Command::new("start")
13 | .about("Start a new SJS server")
14 | .arg(
15 | arg!(-i --ip )
16 | .help("Host IP address to listen on")
17 | .default_value("[::1]:34244")
18 | .env("SJS_HOST"),
19 | )
20 | .arg(
21 | arg!(-c --config )
22 | .help("Path to SchemaJS.toml or directory containing it")
23 | .default_value("./")
24 | .env("SJS_CONFIG"),
25 | )
26 | .arg(
27 | arg!(--"no-repl")
28 | .help("Whether it should initialize the REPL when running")
29 | .action(ArgAction::SetTrue),
30 | )
31 | }
32 |
33 | fn get_init_command() -> Command {
34 | Command::new("init")
35 | .about("Initializes boilerplate files for SJS to run in the specified directory (Default to running dir)")
36 | .arg(
37 | arg!(-d --directory )
38 | .help("The directory where boilerplate files will be initialized")
39 | .required(false)
40 | )
41 | }
42 |
--------------------------------------------------------------------------------
/crates/cli/src/main.rs:
--------------------------------------------------------------------------------
1 | mod cmd;
2 | mod flags;
3 |
4 | use crate::cmd::init::{init_cmd, InitOpts};
5 | use crate::cmd::start::{start, StartOpts};
6 | use crate::flags::get_cli;
7 | use clap::crate_version;
8 | use colored::Colorize;
9 |
10 | #[tokio::main]
11 | async fn main() {
12 | let cli_matches = get_cli().get_matches();
13 |
14 | match cli_matches.subcommand() {
15 | Some(("start", sub_matches)) => {
16 | let ip = sub_matches.get_one::("ip").cloned();
17 | let config_file = sub_matches.get_one::("config").cloned().unwrap();
18 | let no_repl = sub_matches.get_one::("no-repl").cloned();
19 |
20 | let _ = start(StartOpts {
21 | ip,
22 | config_file,
23 | repl: no_repl.unwrap_or(false),
24 | })
25 | .await;
26 | }
27 | Some(("init", sub_matches)) => {
28 | let dir = sub_matches.get_one::("directory").cloned();
29 | init_cmd(InitOpts { dir });
30 | }
31 | _ => {
32 | println!();
33 | println!("SJS {}", crate_version!());
34 | println!();
35 | println!("Run '{}' for help.", "schemajs --help".blue());
36 | println!();
37 | println!("Stuck? Join our Discord https://discord.gg/nRzTHygKn5");
38 | println!();
39 | }
40 | };
41 | }
42 |
--------------------------------------------------------------------------------
/crates/config/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "schemajs_config"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | authors.workspace = true
7 | resolver = "2"
8 |
9 | [dependencies]
10 | toml.workspace = true
11 | serde.workspace = true
12 | anyhow.workspace = true
13 | paste.workspace = true
--------------------------------------------------------------------------------
/crates/config/src/default_config_values.rs:
--------------------------------------------------------------------------------
// Generates a private const plus public accessor functions for each entry.
//
// NOTE(review): the generated function names inside `paste::paste!` appear to
// have been lost in extraction — the empty `[]` should contain a `[<...>]`
// identifier concatenation (e.g. `[<default_ $name:lower>]`). Restore from
// version control before building; reconstructing the exact names here would
// risk breaking callers.
macro_rules! config_constants {
    (
        $(const $name:ident: $type:ty = $value:expr;)*
    ) => {
        $(
            const $name: $type = $value;

            paste::paste! {
                pub fn []() -> $type {
                    $name
                }

                pub fn []() -> String {
                    $name.to_string()
                }
            }
        )*
    }
}
20 |
config_constants! {
    // Sharding limits.
    const MAX_TEMPORARY_SHARDS: u64 = 5;
    const MAX_ROWS_PER_TEMP_SHARD: u64 = 1000;
    const MAX_ROWS_PER_SHARD: u64 = 2_500_000;
    const MAX_RECORDS_PER_HASH_INDEX_SHARD: u64 = 10_000_000;
    const DEFAULT_SCHEME_NAME: &'static str = "public";

    // Default credentials for the root user.
    const DEFAULT_ROOT_USER: &'static str = "admin";
    const DEFAULT_ROOT_PWD: &'static str = "admin";

    // Networking / query defaults.
    const DEFAULT_GRPC_HOST: &'static str = "[::1]:34244";
    const DEFAULT_CUSTOM_QUERY_TIMEOUT: u64 = 30;

    // File-descriptor manager cap.
    const DEFAULT_MAX_FILE_DESCRIPTORS: usize = 2500;
}
36 |
--------------------------------------------------------------------------------
/crates/core/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "schemajs_core"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | authors.workspace = true
7 | resolver = "2"
8 |
9 | [dependencies]
10 | deno_core.workspace = true
11 | deno_ast = { workspace = true, features = ["transpiling", "bundler", "cjs", "codegen", "proposal", "react", "sourcemap", "transforms", "typescript", "view", "visit"] }
12 | once_cell.workspace = true
13 |
14 | data-url.workspace = true
15 | base64 = "0.21.7"
16 | tokio.workspace = true
17 | indexmap.workspace = true
18 | thiserror.workspace = true
19 | libc = "0.2.155"
20 | rand.workspace = true
21 | percent-encoding.workspace = true
22 | serde.workspace = true
--------------------------------------------------------------------------------
/crates/core/src/js/bootstrap.ts:
--------------------------------------------------------------------------------
1 | import { addImmutableGlobal } from "ext:sjs_core/src/js/fieldUtils.ts";
2 | import { SJSGlobal } from "ext:sjs_core/src/js/global.ts";
3 | import { initializeDbContext } from "ext:sjs_engine/src/js/context.ts";
4 | import { use, exit, close } from "ext:sjs_repl/src/js/repl.ts";
5 |
6 | interface BootstrapParams {
7 | repl: boolean
8 | }
9 |
10 | globalThis.bootstrap = (params: BootstrapParams) => {
11 |
12 | // We should delete this after initialization,
13 | // Deleting it during bootstrapping can backfire
14 | delete globalThis.__bootstrap;
15 | delete globalThis.bootstrap;
16 |
17 | addImmutableGlobal("SchemaJS", SJSGlobal.SchemaJS);
18 |
19 | if(params.repl) {
20 | addImmutableGlobal("SJS_REPL", true);
21 | addImmutableGlobal("use", use);
22 | addImmutableGlobal("exit", exit);
23 | addImmutableGlobal("close", close);
24 | }
25 |
26 | globalThis.initializeDbContext({
27 | tblName: undefined,
28 | dbName: undefined,
29 | REPL_EXIT: false
30 | })
31 |
32 |
33 | delete globalThis.bootstrap;
34 | }
35 |
// Seeds/updates the JS-side db context. Outside the REPL this hook is
// one-shot: it removes itself after the first call.
globalThis.initializeDbContext = (params) => {
    initializeDbContext(params);
    if(!globalThis.SJS_REPL) {
        delete globalThis.initializeDbContext;
    }
}
--------------------------------------------------------------------------------
/crates/core/src/js/fieldUtils.ts:
--------------------------------------------------------------------------------
1 | export function addImmutableGlobal(name: string, value: any): void {
2 | Object.defineProperty(globalThis, name, {
3 | value: value,
4 | writable: false,
5 | configurable: globalThis.SJS_REPL || false,
6 | enumerable: true,
7 | });
8 | }
--------------------------------------------------------------------------------
/crates/core/src/js/global.ts:
--------------------------------------------------------------------------------
1 | import * as SJsPrimitives from "ext:sjs_primitives/src/js/index.ts"
2 | import { insertRow, searchRows } from "ext:sjs_engine/src/js/ops.ts";
3 | import { QueryBuilder } from "ext:sjs_engine/src/js/query.ts";
4 | const core = globalThis.Deno.core;
// Facade exposed to user scripts as the global `SchemaJS` object.
class SchemaJS {

    // Re-exported table/column primitives.
    static get Table() {
        return SJsPrimitives.Table;
    }

    static get Column() {
        return SJsPrimitives.Column;
    }

    static get DataTypes() {
        return SJsPrimitives.DataTypes;
    }

    static get QueryBuilder() {
        return QueryBuilder;
    }

    // Low-level insert: caller supplies db and table explicitly.
    static get rawInsert() {
        return insertRow;
    }

    // Context-aware insert: resolves db/table from the SJS_CONTEXT global.
    // Accepts (row) or (tableName, row); the two-arg form overrides the table.
    static get insert() {
        return (...data) => {
            if(!globalThis.SJS_CONTEXT) {
                throw new Error("SJS_CONTEXT is necessary when using a `insert`. Consider using `rawInsert` otherwise.");
            } else {
                let { dbName, tblName } = globalThis.SJS_CONTEXT;

                tblName = data.length === 2 ? data[0] : tblName;

                if(!dbName) {
                    throw new Error("SchemaJS.insert requires a database");
                } else if(!tblName) {
                    throw new Error("SchemaJS.insert requires a table. `SchemaJS.insert(table_name, row)`");
                }

                return insertRow(dbName, tblName, data.length === 2 ? data[1] : data[0]);
            }
        }
    }

    // Executes a QueryBuilder-built search against its bound db/table.
    static get query() {
        return (q: QueryBuilder) => {
            if(!(q instanceof QueryBuilder)) {
                throw new Error("Queries must be performed with SchemaJS.QueryBuilder");
            } else {
                return searchRows(q.dbName, q.tableName, q.build())
            }
        }
    }

    // Prints via the host-side sjs_op_print op.
    static print(msg: string) {
        core.ops.sjs_op_print(msg);
    }

}

// Bundle consumed by bootstrap.ts when installing globals.
export const SJSGlobal = {
    SchemaJS
}
--------------------------------------------------------------------------------
/crates/core/src/lib.rs:
--------------------------------------------------------------------------------
1 | use serde::{Deserialize, Serialize};
2 |
3 | pub mod transpiler;
4 |
// Registers the `sjs_core` deno extension; bootstrap.ts is the ESM entry
// point and the listed modules are bundled into the snapshot.
deno_core::extension!(
    sjs_core,
    esm_entry_point = "ext:sjs_core/src/js/bootstrap.ts",
    esm = [
        "src/js/fieldUtils.ts",
        "src/js/global.ts",
        "src/js/bootstrap.ts",
    ]
);
14 |
15 | #[derive(Serialize, Deserialize, Default)]
16 | pub struct GlobalContext {
17 | #[serde(rename = "tblName")]
18 | pub table_name: Option,
19 | #[serde(rename = "dbName")]
20 | pub database_name: Option,
21 | #[serde(rename = "REPL_EXIT")]
22 | pub repl_exit: bool,
23 | }
24 |
--------------------------------------------------------------------------------
/crates/core/src/transpiler.rs:
--------------------------------------------------------------------------------
1 | use deno_ast::{MediaType, ParseParams, SourceMapOption, SourceTextInfo};
2 | use deno_core::error::AnyError;
3 | use deno_core::{ModuleCodeString, ModuleName, SourceMapData};
4 | use std::path::Path;
5 | use std::sync::Arc;
6 |
7 | /// Obtained from: https://github.com/denoland/deno/blob/cd59fc53a528603112addfe8b10fe4e30d04e7f0/runtime/shared.rs#L67
8 | pub fn maybe_transpile_source(
9 | name: ModuleName,
10 | source: ModuleCodeString,
11 | ) -> Result<(ModuleCodeString, Option), AnyError> {
12 | // Always transpile `node:` built-in modules, since they might be TypeScript.
13 | let media_type = if name.starts_with("node:") {
14 | MediaType::TypeScript
15 | } else {
16 | MediaType::from_path(Path::new(&name))
17 | };
18 |
19 | match media_type {
20 | MediaType::TypeScript => {}
21 | MediaType::JavaScript => return Ok((source, None)),
22 | MediaType::Mjs => return Ok((source, None)),
23 | _ => panic!(
24 | "Unsupported media type for snapshotting {media_type:?} for file {}",
25 | name
26 | ),
27 | }
28 |
29 | let parsed = deno_ast::parse_module(ParseParams {
30 | specifier: deno_core::url::Url::parse(&name).unwrap(),
31 | text: source.into(),
32 | media_type,
33 | capture_tokens: false,
34 | scope_analysis: false,
35 | maybe_syntax: None,
36 | })?;
37 |
38 | let transpiled_source = parsed
39 | .transpile(
40 | &deno_ast::TranspileOptions {
41 | imports_not_used_as_values: deno_ast::ImportsNotUsedAsValues::Remove,
42 | ..Default::default()
43 | },
44 | &deno_ast::EmitOptions {
45 | source_map: if cfg!(debug_assertions) {
46 | SourceMapOption::Separate
47 | } else {
48 | SourceMapOption::None
49 | },
50 | ..Default::default()
51 | },
52 | )?
53 | .into_source();
54 |
55 | let maybe_source_map: Option = transpiled_source.source_map.map(|sm| sm.into());
56 | let source_text = String::from_utf8(transpiled_source.source)?;
57 |
58 | Ok((source_text.into(), maybe_source_map))
59 | }
60 |
--------------------------------------------------------------------------------
/crates/data/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "schemajs_data"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | authors.workspace = true
7 | resolver = "2"
8 |
9 | [dependencies]
10 | serde.workspace = true
11 | tokio.workspace = true
12 | tempfile.workspace = true
13 | enum-as-inner.workspace = true
14 | uuid.workspace = true
15 | borsh.workspace = true
16 | memmap2.workspace = true
17 | sha2.workspace = true
18 | rand.workspace = true
19 | indexmap.workspace = true
20 | thiserror.workspace = true
21 | parking_lot.workspace = true
22 | lru.workspace = true
23 | flaky_test.workspace = true
--------------------------------------------------------------------------------
/crates/data/src/commit_log/collection.rs:
--------------------------------------------------------------------------------
1 | use crate::commit_log::error::CommitLogError;
2 | use crate::commit_log::operations::CommitLogEntry;
3 | use crate::commit_log::CommitLog;
4 | use crate::utils::fs::list_files_with_prefix;
5 | use parking_lot::RwLock;
6 | use std::path::PathBuf;
7 | use std::sync::atomic::{AtomicBool, Ordering};
8 | use uuid::Uuid;
9 |
10 | #[derive(Debug)]
11 | pub struct CommitLogCollection {
12 | max_logs: usize,
13 | log_size: usize,
14 | pub logs: RwLock>,
15 | folder: PathBuf,
16 | prefix: String,
17 | pub waiting_for_reconciliation: AtomicBool,
18 | }
19 |
20 | impl CommitLogCollection {
21 | pub fn new(folder: PathBuf, prefix: &str, max_logs: usize, log_size: usize) -> Self {
22 | let mut logs = vec![];
23 | let mut log_files = list_files_with_prefix(&folder, prefix).unwrap();
24 | // Sort the files by their file names
25 | log_files.sort_by_key(|path| path.file_name().map(|name| name.to_os_string()));
26 |
27 | for log_file in log_files {
28 | logs.push(CommitLog::new(log_file, log_size));
29 | }
30 |
31 | if logs.is_empty() {
32 | let log = Self::new_log(&folder, prefix, log_size);
33 | logs.push(log);
34 | }
35 |
36 | let locked_count = logs.iter().filter(|e| e.is_locked()).count();
37 | let waiting_for_reconciliation = if locked_count >= max_logs {
38 | true
39 | } else {
40 | false
41 | };
42 |
43 | Self {
44 | max_logs,
45 | log_size,
46 | logs: RwLock::new(logs),
47 | folder,
48 | prefix: prefix.to_string(),
49 | waiting_for_reconciliation: AtomicBool::new(waiting_for_reconciliation),
50 | }
51 | }
52 |
53 | pub fn reset(&self) {
54 | let mut logs = self.logs.write();
55 |
56 | {
57 | // Clean paths
58 | let paths: Vec<&PathBuf> = logs.iter().map(|e| &e.path).collect();
59 | for path in paths {
60 | let _ = std::fs::remove_file(path);
61 | }
62 | }
63 |
64 | // Clear
65 | logs.clear();
66 |
67 | let initial_log = Self::new_log(&self.folder, &self.prefix, self.log_size);
68 | logs.push(initial_log);
69 | }
70 |
71 | fn mark_as_waiting(&self, waiting: bool) {
72 | self.waiting_for_reconciliation
73 | .store(waiting, Ordering::Release);
74 | }
75 |
76 | fn new_log(folder: &PathBuf, prefix: &str, log_size: usize) -> CommitLog {
77 | let log_path = folder.join(format!("{}_{}", prefix, Uuid::new_v4().to_string()));
78 |
79 | CommitLog::new(log_path, log_size)
80 | }
81 |
82 | fn add_log(&self) -> Result {
83 | let mut logs = self.logs.write();
84 |
85 | if logs.len() >= self.max_logs {
86 | self.mark_as_waiting(true);
87 | return Err(CommitLogError::MaxLogsReached);
88 | }
89 |
90 | let new_log = Self::new_log(&self.folder, &self.prefix, self.log_size);
91 | logs.push(new_log);
92 |
93 | // Return the index of the newly added log
94 | Ok(logs.len() - 1)
95 | }
96 |
97 | pub fn write(&self, data: &CommitLogEntry) -> Result {
98 | // Acquire a read lock to check existing logs
99 | {
100 | let logs = self.logs.read();
101 |
102 | // Attempt to write to an available log
103 | for (i, log) in logs.iter().enumerate() {
104 | if !log.is_locked() {
105 | return log.write(data).map(|_| i);
106 | }
107 | }
108 | }
109 |
110 | // If no log is available, add a new one and write to it
111 | let new_log_index = self.add_log()?; // Add a log and get its index
112 | let logs = self.logs.read(); // Acquire a read lock to access the new log
113 | logs[new_log_index].write(data)?;
114 |
115 | Ok(new_log_index)
116 | }
117 | }
118 |
--------------------------------------------------------------------------------
/crates/data/src/commit_log/error.rs:
--------------------------------------------------------------------------------
1 | use enum_as_inner::EnumAsInner;
2 | use serde::{Deserialize, Serialize};
3 | use thiserror::Error;
4 |
5 | #[derive(Debug, Clone, EnumAsInner, Serialize, Deserialize, Error, PartialEq)]
6 | pub enum CommitLogError {
7 | #[error("Broken Record")]
8 | BrokenRecord,
9 | #[error("Unkown Start Delimiter")]
10 | UnknownStartDelimiter,
11 | #[error("Out of Memory")]
12 | OutOfMemory,
13 | #[error("Can't write in the current range")]
14 | InvalidRange,
15 | #[error("Failed Flushing")]
16 | FailedFlushing,
17 | #[error("EOF")]
18 | Eof,
19 | #[error("Commit Log is locked")]
20 | LogLocked,
21 | #[error("No more locks available in collection")]
22 | MaxLogsReached,
23 | #[error("Broken Record Known Valid Point (KVP)")]
24 | BrokenRecordKvp(usize),
25 | }
26 |
--------------------------------------------------------------------------------
/crates/data/src/cursor/error.rs:
--------------------------------------------------------------------------------
1 | use enum_as_inner::EnumAsInner;
2 | use serde::{Deserialize, Serialize};
3 | use thiserror::Error;
4 |
/// Errors produced by `Cursor` read operations.
#[derive(Debug, Clone, EnumAsInner, Serialize, Deserialize, Error)]
pub enum CursorError {
    /// Requested more bytes than remain between the current position and the
    /// end of the underlying buffer.
    #[error("Not enough bytes")]
    InvalidRange,
}
10 |
--------------------------------------------------------------------------------
/crates/data/src/cursor/mod.rs:
--------------------------------------------------------------------------------
1 | use crate::cursor::error::CursorError;
2 | use memmap2::{Mmap, MmapMut};
3 | use std::ops::Range;
4 |
5 | mod error;
6 |
/// Backing storage a `Cursor` can read from: a plain byte slice or a
/// (possibly mutable) memory map. All variants are read-only from the
/// cursor's point of view.
#[derive(Debug)]
pub enum CursorData<'a> {
    Raw(&'a [u8]),
    Mmap(&'a Mmap),
    MmapMut(&'a MmapMut),
}
13 |
14 | #[derive(Debug)]
15 | pub struct Cursor<'a> {
16 | data: CursorData<'a>,
17 | pub position: usize,
18 | pub last_consumed_size: usize,
19 | pub len: usize,
20 | pub starting_pos: Option,
21 | }
22 |
23 | impl<'a> Cursor<'a> {
24 | pub fn raw(data: &'a [u8]) -> Self {
25 | Cursor {
26 | data: CursorData::Raw(data),
27 | position: 0,
28 | len: data.len(),
29 | starting_pos: None,
30 | last_consumed_size: 0,
31 | }
32 | }
33 |
34 | pub fn mmap(data: &'a Mmap) -> Self {
35 | Cursor {
36 | data: CursorData::Mmap(data),
37 | position: 0,
38 | len: data.len(),
39 | starting_pos: None,
40 | last_consumed_size: 0,
41 | }
42 | }
43 |
44 | pub fn mmap_mut(data: &'a MmapMut) -> Self {
45 | Cursor {
46 | data: CursorData::MmapMut(data),
47 | position: 0,
48 | len: data.len(),
49 | starting_pos: None,
50 | last_consumed_size: 0,
51 | }
52 | }
53 |
54 | pub fn set_starting_pos(mut self, pos: usize) -> Self {
55 | self.starting_pos = Some(pos);
56 | self.position = pos;
57 | self
58 | }
59 |
60 | pub fn new(data: &'a [u8]) -> Self {
61 | Self::raw(data)
62 | }
63 |
64 | pub fn get_range(&self, range: Range) -> &'a [u8] {
65 | let data = match self.data {
66 | CursorData::Raw(data) => &data[range],
67 | CursorData::Mmap(data) => &data[range],
68 | CursorData::MmapMut(data) => &data[range],
69 | };
70 |
71 | data
72 | }
73 |
74 | pub fn peek(&self, size: usize) -> Result<&'a [u8], CursorError> {
75 | if self.position + size > self.len {
76 | return Err(CursorError::InvalidRange);
77 | }
78 |
79 | let range = self.position..(self.position + size);
80 |
81 | Ok(self.get_range(range))
82 | }
83 |
84 | pub fn consume(&mut self, size: usize) -> Result<&'a [u8], CursorError> {
85 | let data = self.peek(size)?;
86 | self.position += size;
87 | self.last_consumed_size = size;
88 |
89 | Ok(data)
90 | }
91 |
92 | pub fn set_back(&mut self, steps: usize) {
93 | self.position = self.position - steps;
94 | }
95 |
96 | pub fn forward(&mut self, steps: usize) {
97 | self.position = self.position + steps;
98 | }
99 |
100 | pub fn move_to(&mut self, pos: usize) {
101 | self.position = pos;
102 | }
103 |
104 | pub fn reset(&mut self) {
105 | self.last_consumed_size = 0;
106 | self.position = self.starting_pos.unwrap_or(0);
107 | }
108 |
109 | pub fn is_eof(&self) -> bool {
110 | self.position >= self.len
111 | }
112 | }
113 |
#[cfg(test)]
mod cursor_tests {
    use crate::cursor::Cursor;

    /// Walks a raw byte buffer with `consume`, checking both the yielded
    /// slices and the error once the buffer is exhausted.
    #[test]
    pub fn test() {
        let input = b"Hello World";
        let mut cur = Cursor::new(input);

        assert_eq!(cur.consume(5).unwrap(), b"Hello");
        assert_eq!(cur.consume(6).unwrap(), b" World");

        let past_end = cur.consume(1);
        assert!(past_end.err().unwrap().is_invalid_range());
    }
}
130 |
--------------------------------------------------------------------------------
/crates/data/src/data_handler.rs:
--------------------------------------------------------------------------------
1 | use crate::fdm::FileDescriptorManager;
2 | use memmap2::Mmap;
3 | use parking_lot::RwLock;
4 | use std::fs::File;
5 | use std::io::{Error, ErrorKind, Write};
6 | use std::path::{Path, PathBuf};
7 | use std::sync::Arc;
8 |
9 | #[derive(Debug)]
10 | pub struct DataHandler {
11 | pub path: PathBuf,
12 | fdm: Arc,
13 | mmap: Mmap,
14 | }
15 |
16 | impl DataHandler {
17 | unsafe fn new_from_path + Clone>(
18 | path: P,
19 | fdm: Arc,
20 | ) -> std::io::Result {
21 | if let Some(descriptor) = fdm.pop_insert(&path) {
22 | let file = descriptor.file.read();
23 | Ok(Self {
24 | path: path.as_ref().to_path_buf(),
25 | fdm,
26 | mmap: Self::mmap(&file)?,
27 | })
28 | } else {
29 | Err(Error::new(ErrorKind::Other, "Too many files open in FDM"))
30 | }
31 | }
32 |
33 | unsafe fn mmap(file: &File) -> std::io::Result {
34 | Mmap::map(file)
35 | }
36 |
37 | #[cfg(test)]
38 | pub unsafe fn access_map(&self) -> &Mmap {
39 | &self.mmap
40 | }
41 |
42 | pub unsafe fn new + Clone>(
43 | path: P,
44 | fdm: Arc,
45 | ) -> std::io::Result> {
46 | Ok(RwLock::new(Self::new_from_path(path, fdm)?))
47 | }
48 |
49 | pub fn len(&self) -> usize {
50 | self.mmap.len()
51 | }
52 |
53 | pub fn is_empty(&self) -> bool {
54 | self.len() == 0
55 | }
56 |
57 | pub fn get_bytes(&self, from: usize, to: usize) -> Option<&[u8]> {
58 | self.mmap.get(from..to)
59 | }
60 |
61 | pub fn read_pointer(&self, start: u64, max_bytes: usize) -> Option> {
62 | self.get_bytes(start as usize, start as usize + max_bytes)
63 | .map(|i| i.to_vec())
64 | }
65 |
66 | pub fn operate(&mut self, callback: F) -> std::io::Result
67 | where
68 | F: FnOnce(&mut File) -> std::io::Result,
69 | {
70 | let fdm = self.fdm.clone();
71 | if let Some(fd) = fdm.get(&self.path) {
72 | let mut writer = fd.file.write();
73 | let cb = callback(&mut writer)?;
74 | let new_mmap = unsafe { Self::mmap(&writer) };
75 | self.mmap = new_mmap?;
76 |
77 | Ok(cb)
78 | } else {
79 | Err(Error::new(ErrorKind::Other, "Too many files open in FDM"))
80 | }
81 | }
82 | }
83 |
#[cfg(test)]
mod data_handler_tests {
    use memmap2::MmapOptions;
    use std::fs::{File, OpenOptions};
    use std::io::Write;
    use std::path::PathBuf;
    use tempfile::tempdir;
    use uuid::Uuid;

    // Opens (creating if needed) a read/write file suitable for mmapping.
    fn read_mmap(path: PathBuf) -> File {
        OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .open(path)
            .unwrap()
    }

    // Verifies that a byte edited through a mutable mmap and flushed is
    // visible to a later, independent mapping of the same file.
    #[tokio::test]
    pub async fn test_mmap() {
        let fake_partial_folder_path = std::env::current_dir()
            .unwrap()
            .join("./test_cases/mmap.bin".to_string());

        {
            let mut file = read_mmap(fake_partial_folder_path.clone());

            file.write_all(b"Hello World").unwrap();
            // Create a mutable memory-mapped buffer.
            let mut mmap = unsafe { MmapOptions::new().map_mut(&file).unwrap() };
            // Overwrite the first byte ('H' -> 'X') through the mapping.
            mmap[0] = b"X".get(0).unwrap().clone();

            mmap.flush().unwrap();
        }

        // Re-open and re-map: the flushed change must be visible.
        let file = read_mmap(fake_partial_folder_path.clone());
        let mut mmap = unsafe { MmapOptions::new().map_mut(&file).unwrap() };
        assert_eq!(mmap.to_vec(), b"Xello World".to_vec());
        let _ = std::fs::remove_file(fake_partial_folder_path);
    }

    // Same round-trip as above, but pre-sizing the file with `set_len` and
    // editing a multi-byte range through the mapping.
    #[tokio::test]
    pub async fn test_mmap_2() {
        let fake_partial_folder_path = std::env::current_dir()
            .unwrap()
            .join("test_cases/mmap2.bin");

        // Ensure the test directory exists
        std::fs::create_dir_all(fake_partial_folder_path.parent().unwrap()).unwrap();

        {
            let mut file = read_mmap(fake_partial_folder_path.clone());

            // Resize the file to ensure it is at least 11 bytes long
            file.set_len(11).unwrap();

            // Write initial content to the file
            file.write_all(b"Hello").unwrap();

            // Create a mutable memory-mapped buffer
            let mut mmap = unsafe { MmapOptions::new().map_mut(&file).unwrap() };

            // Modify content via mmap
            mmap[0] = b'X';
            mmap[5..11].copy_from_slice(b"123456");

            // Flush changes to disk
            mmap.flush().unwrap();
        }

        // Verify the file content after modifications
        let file = read_mmap(fake_partial_folder_path.clone());
        let mmap = unsafe { MmapOptions::new().map_mut(&file).unwrap() };
        assert_eq!(mmap.to_vec(), b"Xello123456".to_vec());

        // Clean up the test file
        let _ = std::fs::remove_file(fake_partial_folder_path);
    }
}
163 |
--------------------------------------------------------------------------------
/crates/data/src/errors.rs:
--------------------------------------------------------------------------------
1 | use enum_as_inner::EnumAsInner;
2 | use serde::{Deserialize, Serialize};
3 | use thiserror::Error;
4 |
/// Errors surfaced by shard read, write and maintenance operations.
#[derive(Debug, Clone, EnumAsInner, Serialize, Deserialize, Error)]
pub enum ShardErrors {
    #[error("No more positions available")]
    OutOfPositions,
    #[error("Offset is not in file")]
    UnknownOffset,
    #[error("Could not flush")]
    FlushingError,
    #[error("Unknown byte range")]
    ErrorReadingByteRange,
    #[error("Invalid Header")]
    ErrorAddingHeaderOffset,
    #[error("Unknown entry")]
    UnknownEntry,
    #[error("Error adding entry")]
    ErrorAddingEntry,
    #[error("Unknown breaking point")]
    UnknownBreakingPoint,
    #[error("Out of range")]
    OutOfRange,
    #[error("Shard does not exist")]
    UnknownShard,
    #[error("Invalid locking detected")]
    InvalidLocking,
    #[error("Invalid Deletion")]
    FailedDeletion,
    #[error("Invalid Update")]
    FailedUpdate,
}
34 |
--------------------------------------------------------------------------------
/crates/data/src/fdm/file_descriptor.rs:
--------------------------------------------------------------------------------
1 | use parking_lot::RwLock;
2 | use std::fs::{File, OpenOptions};
3 | use std::path::Path;
4 | use std::sync::Arc;
5 |
6 | pub struct FileDescriptor {
7 | pub file: Arc>,
8 | }
9 |
10 | impl FileDescriptor {
11 | pub fn new_from_path + Clone>(path: P) -> std::io::Result {
12 | let load_file = OpenOptions::new()
13 | .create(true)
14 | .read(true)
15 | .write(true)
16 | .open(path.clone())?;
17 |
18 | Ok(Self {
19 | file: Arc::new(RwLock::new(load_file)),
20 | })
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/crates/data/src/fdm/mod.rs:
--------------------------------------------------------------------------------
1 | mod file_descriptor;
2 |
3 | use crate::fdm::file_descriptor::FileDescriptor;
4 | use lru::LruCache;
5 | use parking_lot::RwLock;
6 | use std::num::NonZeroUsize;
7 | use std::path::{Path, PathBuf};
8 | use std::sync::Arc;
9 |
10 | #[derive(Debug)]
11 | pub struct FileDescriptorManager {
12 | cache: Arc>>>,
13 | max_size: usize,
14 | }
15 |
16 | impl FileDescriptorManager {
17 | pub fn new(max_size: usize) -> Self {
18 | Self {
19 | cache: Arc::new(RwLock::new(LruCache::new(
20 | NonZeroUsize::new(max_size).unwrap(),
21 | ))),
22 | max_size,
23 | }
24 | }
25 |
26 | pub fn get(&self, path: &PathBuf) -> Option> {
27 | let reader = self.cache.read();
28 | let file_descriptor = reader.peek(path);
29 | if let Some(descriptor) = file_descriptor {
30 | return Some(descriptor.clone());
31 | }
32 |
33 | None
34 | }
35 |
36 | // Insert a new file descriptor, using try_write to avoid blocking
37 | fn insert + Clone>(
38 | &self,
39 | path: P,
40 | cache: &mut LruCache>,
41 | ) -> Option> {
42 | let path_buf = path.as_ref().to_path_buf();
43 | let descriptor = Arc::new(FileDescriptor::new_from_path(path).ok()?);
44 | cache.push(path_buf, descriptor.clone());
45 | Some(descriptor)
46 | }
47 |
48 | // Pop a file descriptor if it's available (i.e., not busy)
49 | pub fn pop_if_available(&self, cache: &mut LruCache>) -> bool {
50 | let mut candidate = None;
51 | {
52 | for (id, descriptor) in cache.iter() {
53 | if !descriptor.file.is_locked_exclusive() {
54 | candidate = Some(id.clone());
55 | break;
56 | }
57 | }
58 | }
59 |
60 | // If we found a non-busy descriptor, remove it from the cache
61 | if let Some(key_to_pop) = candidate {
62 | cache.pop(&key_to_pop); // Actually remove the descriptor from the cache
63 | return true;
64 | }
65 |
66 | false
67 | }
68 |
69 | pub fn pop_insert + Clone>(&self, path: P) -> Option> {
70 | let mut cache = self.cache.write();
71 | if cache.len() < self.max_size {
72 | self.insert(path, &mut cache)
73 | } else {
74 | let succeeded = self.pop_if_available(&mut cache);
75 | if succeeded {
76 | self.insert(path, &mut cache)
77 | } else {
78 | None
79 | }
80 | }
81 | }
82 |
83 | pub fn remove_paths(&self, paths: Vec) {
84 | let fdm = self.cache.clone();
85 | if !paths.is_empty() && self.max_size >= { fdm.read().len() } {
86 | tokio::spawn(async move {
87 | let mut writer = fdm.write();
88 | for path in paths.iter() {
89 | writer.pop(path);
90 | }
91 | });
92 | }
93 | }
94 | }
95 |
#[cfg(test)]
mod fdm_tests {
    use crate::fdm::FileDescriptorManager;
    use std::sync::Arc;
    use std::time::Duration;
    use tempfile::tempdir;

    // Fills a 3-slot FDM, holds one descriptor's write lock from another
    // thread so it cannot be evicted, then inserts a fourth file and checks
    // that a non-busy entry was evicted instead of the locked one.
    #[tokio::test]
    async fn test_fdm() {
        let fdm = Arc::new(FileDescriptorManager::new(3));
        let temp_dir = tempdir().unwrap();
        let temp_dir_path = temp_dir.into_path();

        let fs_1 = temp_dir_path.join("1.data");
        let fs_2 = temp_dir_path.join("2.data");
        let fs_3 = temp_dir_path.join("3.data");
        let fs_4 = temp_dir_path.join("4.data");

        assert!(fdm.pop_insert(&fs_1).is_some());
        assert!(fdm.pop_insert(&fs_2).is_some());
        assert!(fdm.pop_insert(&fs_3).is_some());
        assert_eq!(fdm.cache.read().len(), 3);

        // Use fs_2: hold its exclusive file lock on a background thread for 5s.
        let fdm_2 = fdm.clone();
        let get_val = fdm_2.get(&fs_2.clone()).unwrap();
        let handle = std::thread::spawn(move || {
            let _file = get_val.file.write();
            std::thread::sleep(Duration::from_secs(5));
        });
        // Give the thread time to actually take the lock before asserting.
        tokio::time::sleep(Duration::from_secs(1)).await;
        let get_fdm_2 = fdm.get(&fs_2);
        assert!(get_fdm_2.unwrap().file.is_locked_exclusive());

        // Cache is full; inserting fs_4 must evict one of the unlocked
        // entries (fs_1 or fs_3) — never the locked fs_2.
        assert!(fdm.pop_insert(&fs_4).is_some());
        let mut bools = [fdm.get(&fs_1).is_some(), fdm.get(&fs_3).is_some()];
        bools.sort_by(|a, b| b.cmp(a));
        assert_eq!(bools[0], true);
        assert_eq!(bools[1], false);
        assert!(fdm.get(&fs_4).is_some());
        // After the background thread releases the lock, fs_2 is unlocked again.
        tokio::time::sleep(Duration::from_secs(6)).await;
        let get_fdm_2 = fdm.get(&fs_2);
        assert!(get_fdm_2.is_some());
        assert!(!get_fdm_2.unwrap().file.is_locked_exclusive());
    }
}
142 |
--------------------------------------------------------------------------------
/crates/data/src/lib.rs:
--------------------------------------------------------------------------------
1 | extern crate core;
2 |
3 | pub mod commit_log;
4 | pub mod data_handler;
5 | pub mod errors;
6 | pub mod fdm;
7 | pub mod shard;
8 | pub mod temp_offset_types;
9 | pub mod utils;
10 |
11 | pub mod cursor;
12 |
// https://doc.rust-lang.org/std/mem/fn.size_of.html
// NOTE: the turbofish arguments were stripped in an earlier extraction;
// restored from the constant names (u64 / i64 — both 8 bytes).
pub const U64_SIZE: usize = size_of::<u64>();
pub const I64_SIZE: usize = size_of::<i64>();
16 |
--------------------------------------------------------------------------------
/crates/data/src/shard/insert_item.rs:
--------------------------------------------------------------------------------
1 | use uuid::Uuid;
2 |
3 | pub struct InsertItem<'a> {
4 | pub data: &'a [u8],
5 | pub uuid: Uuid,
6 | }
7 |
8 | impl<'a> InsertItem<'a> {
9 | pub fn new(data: &'a [u8], uuid: Uuid) -> Self {
10 | Self { data, uuid }
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/crates/data/src/shard/mod.rs:
--------------------------------------------------------------------------------
1 | use crate::errors::ShardErrors;
2 | use crate::fdm::FileDescriptorManager;
3 | use crate::shard::insert_item::InsertItem;
4 | use crate::shard::item_type::ShardItem;
5 | use crate::shard::map_shard::MapShard;
6 | use std::path::PathBuf;
7 | use std::sync::Arc;
8 | use uuid::Uuid;
9 |
10 | pub mod insert_item;
11 | pub mod item_type;
12 | pub mod map_shard;
13 | pub mod shards;
14 |
/// Marker trait for per-shard configuration types.
pub trait ShardConfig: Clone {}

/// How much room a shard has left for new items.
pub enum AvailableSpace {
    /// Exactly this many more items fit.
    Fixed(usize),
    /// No practical limit on the number of items.
    Unlimited,
}
21 |
22 | pub trait Shard {
23 | fn new(path: PathBuf, opts: Opts, uuid: Option, fdm: Arc) -> Self;
24 |
25 | fn has_space(&self) -> bool;
26 |
27 | /// Determines the breaking point for this shard within the sharding mechanism.
28 | ///
29 | /// The "breaking point" is a critical threshold in the context of sharding,
30 | /// where the current shard has accumulated enough items to warrant splitting
31 | /// into multiple smaller shards. This method helps identify when such a
32 | /// condition is met and whether the shard is due for splitting.
33 | ///
34 | /// # Returns
35 | ///
36 | /// - `Some(u64)` - If the shard has a known finite breaking point, this method
37 | /// returns `Some` containing the `u64` value that represents the threshold
38 | /// number of items. When this number of items is reached or exceeded, the
39 | /// shard is allowed to split into smaller pieces.
40 | /// - `None` - If the breaking point is considered infinite (i.e., there is no
41 | /// practical limit to the number of items the shard can contain without
42 | /// needing to split), the method returns `None`. This typically indicates
43 | /// that the shard does not have a predefined threshold or the threshold is
44 | /// so large that it is effectively infinite.
45 | ///
46 | /// # Context
47 | ///
48 | /// This method is part of the `Shard` trait, which is used in systems that
49 | /// employ sharding as a strategy to manage large datasets by dividing them
50 | /// into smaller, more manageable pieces. The `breaking_point` method plays a
51 | /// crucial role in determining when a shard should be split to maintain
52 | /// performance, balance load, or adhere to system constraints.
53 | ///
54 | /// # Examples
55 | ///
56 | /// ```
57 | /// # trait Shard {
58 | /// # fn breaking_point(&self) -> Option;
59 | /// # }
60 | /// # struct MyShard {
61 | /// # breaking_point_value: Option,
62 | /// # }
63 | /// # impl Shard for MyShard {
64 | /// # fn breaking_point(&self) -> Option {
65 | /// # self.breaking_point_value
66 | /// # }
67 | /// # }
68 | /// let shard = MyShard { breaking_point_value: Some(1000) };
69 | ///
70 | /// // This shard is allowed to split when it reaches 1000 items.
71 | /// assert_eq!(shard.breaking_point(), Some(1000));
72 | ///
73 | /// let infinite_shard = MyShard { breaking_point_value: None };
74 | ///
75 | /// // This shard has no practical limit and won't split.
76 | /// assert_eq!(infinite_shard.breaking_point(), None);
77 | /// ```
78 | ///
79 | /// # Note
80 | ///
81 | /// The breaking point is a pivotal aspect of the sharding strategy. Understanding
82 | /// when and why a shard should split is essential for maintaining the efficiency
83 | /// and scalability of the system. The exact threshold value and its implications
84 | /// should be well-defined and aligned with the overall design and requirements
85 | /// of the system.
86 | fn breaking_point(&self) -> Option;
87 |
88 | fn get_path(&self) -> PathBuf;
89 |
90 | fn get_last_index(&self) -> i64;
91 |
92 | fn read_item_from_index(&self, index: usize) -> Result;
93 |
94 | fn available_space(&self) -> AvailableSpace;
95 |
96 | fn insert_item(&self, data: &[InsertItem]) -> Result;
97 |
98 | fn update_items(
99 | &self,
100 | data: Vec<(u64, &[u8])>,
101 | map_shard: &mut MapShard,
102 | ) -> Result<(), ShardErrors>
103 | where
104 | Self: Sized;
105 |
106 | fn remove_items(&self, offsets: &[u64]) -> Result<(), ShardErrors>;
107 |
108 | fn get_id(&self) -> String;
109 | }
110 |
111 | pub trait TempShardConfig: Clone {
112 | fn to_config(&self) -> Opts;
113 | }
114 |
--------------------------------------------------------------------------------
/crates/data/src/shard/shards/data_shard/config.rs:
--------------------------------------------------------------------------------
1 | use crate::shard::{ShardConfig, TempShardConfig};
2 | use crate::temp_offset_types::TempOffsetTypes;
3 |
4 | #[derive(Clone, Debug)]
5 | pub struct DataShardConfig {
6 | pub max_offsets: Option,
7 | }
8 |
9 | impl ShardConfig for DataShardConfig {}
10 |
11 | #[derive(Debug, Clone)]
12 | pub struct TempDataShardConfig {
13 | pub max_offsets: TempOffsetTypes,
14 | }
15 |
16 | impl TempShardConfig for TempDataShardConfig {
17 | fn to_config(&self) -> DataShardConfig {
18 | DataShardConfig {
19 | max_offsets: self.max_offsets.get_real_offset(),
20 | }
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/crates/data/src/shard/shards/data_shard/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod config;
2 | pub mod shard;
3 | pub mod shard_header;
4 |
--------------------------------------------------------------------------------
/crates/data/src/shard/shards/kv/config.rs:
--------------------------------------------------------------------------------
1 | use crate::shard::ShardConfig;
2 |
3 | #[derive(Debug, Clone)]
4 | pub struct KvShardConfig {
5 | pub value_size: usize,
6 | pub max_capacity: Option,
7 | }
8 |
9 | impl ShardConfig for KvShardConfig {}
10 |
--------------------------------------------------------------------------------
/crates/data/src/shard/shards/kv/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod config;
2 | pub mod shard;
3 | mod shard_header;
4 | mod util;
5 |
--------------------------------------------------------------------------------
/crates/data/src/shard/shards/kv/shard_header.rs:
--------------------------------------------------------------------------------
1 | use crate::data_handler::DataHandler;
2 | use crate::shard::shards::UUID_BYTE_LEN;
3 | use crate::utils::fs::write_at;
4 | use crate::U64_SIZE;
5 | use parking_lot::RwLock;
6 | use std::fs::File;
7 | use std::io::{Seek, SeekFrom, Write};
8 | use std::sync::Arc;
9 | use uuid::Uuid;
10 |
11 | #[derive(Debug)]
12 | pub struct KvShardHeader {
13 | pub max_capacity: Option,
14 | pub items_len: u64,
15 | pub value_size: u64,
16 | pub id: Uuid,
17 | data: Arc>,
18 | }
19 |
20 | impl KvShardHeader {
21 | pub fn new(
22 | items_len: u64,
23 | max_capacity: Option,
24 | value_size: u64,
25 | uuid: Option,
26 | data: Arc>,
27 | ) -> Self {
28 | Self {
29 | items_len,
30 | max_capacity,
31 | data,
32 | id: uuid.unwrap_or_else(|| Uuid::new_v4()),
33 | value_size,
34 | }
35 | }
36 |
37 | pub fn new_from_file(
38 | file: Arc>,
39 | uuid: Option,
40 | items_len: Option,
41 | max_capacity: Option,
42 | value_size: u64,
43 | ) -> Self {
44 | let mut header = KvShardHeader::new(
45 | items_len.unwrap_or(0),
46 | max_capacity,
47 | value_size,
48 | uuid,
49 | file.clone(),
50 | );
51 |
52 | let file_len = file.read().len();
53 |
54 | if file_len == 0 {
55 | header.initialize_empty_file();
56 | } else {
57 | header.read_header();
58 | }
59 |
60 | header
61 | }
62 |
63 | pub fn header_size() -> usize {
64 | let max_capacity_size = U64_SIZE;
65 | let items_len_size = U64_SIZE;
66 | let value_size = U64_SIZE;
67 | let id_len = UUID_BYTE_LEN as usize;
68 | max_capacity_size + items_len_size + value_size + id_len
69 | }
70 |
71 | fn initialize_empty_file(&mut self) {
72 | self.data
73 | .write()
74 | .operate(|file| {
75 | file.seek(SeekFrom::Start(0))
76 | .expect("Failed to seek to start of file");
77 |
78 | // Create a buffer for the header
79 | let mut buffer = Vec::with_capacity(Self::header_size());
80 |
81 | {
82 | // Write max_offsets to the buffer
83 | let max_capacity_bytes = (self.max_capacity).unwrap_or(0).to_le_bytes();
84 | buffer.extend_from_slice(&max_capacity_bytes);
85 | }
86 |
87 | {
88 | // Write max_offsets to the buffer
89 | let items_len_bytes = (self.items_len).to_le_bytes();
90 | buffer.extend_from_slice(&items_len_bytes);
91 | }
92 |
93 | {
94 | // Write value_size to the buffer
95 | let value_size_bytes = (self.value_size).to_le_bytes();
96 | buffer.extend_from_slice(&value_size_bytes);
97 | }
98 |
99 | {
100 | // Write shard id
101 | let id_bytes = self.id.to_bytes_le();
102 | buffer.extend_from_slice(&id_bytes);
103 | }
104 |
105 | // Write the buffer to the file
106 | file.write_all(&buffer)
107 | .expect("Failed to write Index header");
108 |
109 | Ok(())
110 | })
111 | .unwrap();
112 | }
113 |
114 | fn read_header(&mut self) {
115 | let reader = self.data.read();
116 | {
117 | let max_capacity_bytes = reader.get_bytes(0, U64_SIZE).unwrap();
118 | let max_capacity_bytes: [u8; 8] = max_capacity_bytes.try_into().unwrap();
119 | self.max_capacity = Some(u64::from_le_bytes(max_capacity_bytes));
120 | }
121 |
122 | {
123 | let items_len_bytes = reader.read_pointer(U64_SIZE as u64, U64_SIZE).unwrap();
124 | let items_len_bytes: [u8; 8] = items_len_bytes.try_into().unwrap();
125 | self.items_len = u64::from_le_bytes(items_len_bytes);
126 | }
127 |
128 | {
129 | let value_size_bytes = reader
130 | .read_pointer(U64_SIZE as u64 + U64_SIZE as u64, U64_SIZE)
131 | .unwrap();
132 | let value_size_bytes: [u8; 8] = value_size_bytes.try_into().unwrap();
133 | self.value_size = u64::from_le_bytes(value_size_bytes);
134 | }
135 |
136 | {
137 | let id_bytes = reader
138 | .read_pointer(
139 | (U64_SIZE + U64_SIZE + U64_SIZE) as u64,
140 | UUID_BYTE_LEN as usize,
141 | )
142 | .unwrap();
143 | let id_bytes = id_bytes.try_into().unwrap();
144 | self.id = Uuid::from_bytes_le(id_bytes);
145 | }
146 | }
147 |
148 | pub fn increment_len(&mut self, len: Option, file: &mut File) -> u64 {
149 | self.items_len += len.unwrap_or(1);
150 | write_at(file, &self.items_len.to_le_bytes(), U64_SIZE as u64).unwrap();
151 |
152 | self.items_len
153 | }
154 | }
155 |
--------------------------------------------------------------------------------
/crates/data/src/shard/shards/kv/util.rs:
--------------------------------------------------------------------------------
1 | use crate::shard::shards::kv::shard_header::KvShardHeader;
2 |
3 | pub fn get_element_offset(index: usize, value_size: usize) -> usize {
4 | let index_header_size = KvShardHeader::header_size();
5 | index_header_size + (index * value_size)
6 | }
7 |
--------------------------------------------------------------------------------
/crates/data/src/shard/shards/mod.rs:
--------------------------------------------------------------------------------
pub mod data_shard;
pub mod kv;

// Size in bytes of a serialized UUID as stored in shard headers.
pub const UUID_BYTE_LEN: u64 = 16;
5 |
--------------------------------------------------------------------------------
/crates/data/src/temp_offset_types.rs:
--------------------------------------------------------------------------------
1 | use enum_as_inner::EnumAsInner;
2 | use serde::{Deserialize, Serialize};
3 |
4 | #[derive(Debug, Clone, EnumAsInner, Serialize, Deserialize)]
5 | pub enum TempOffsetTypes {
6 | WALBased,
7 | Custom(Option),
8 | }
9 |
10 | impl TempOffsetTypes {
11 | pub fn get_real_offset(&self) -> Option {
12 | match self {
13 | TempOffsetTypes::WALBased => Some(1),
14 | TempOffsetTypes::Custom(val) => *val,
15 | }
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/crates/data/src/utils/fs.rs:
--------------------------------------------------------------------------------
1 | use std::fs::File;
2 | use std::io;
3 | use std::io::{Seek, SeekFrom, Write};
4 | #[cfg(target_family = "unix")]
5 | use std::os::unix::fs::FileExt;
6 | use std::path::{Path, PathBuf};
7 |
/// Returns the paths of all entries directly inside `directory` whose file
/// name starts with `prefix`.
///
/// Entries that cannot be read, or whose names are not valid UTF-8, are
/// silently skipped. The scan is non-recursive.
///
/// # Errors
/// Returns the underlying I/O error if `directory` cannot be read at all.
// Restored the stripped generics; the previous unused `Clone` bound was
// dropped (removing a bound is backward-compatible for all callers).
pub fn list_files_with_prefix<P: AsRef<Path>>(
    directory: P,
    prefix: &str,
) -> io::Result<Vec<PathBuf>> {
    Ok(std::fs::read_dir(directory)?
        .filter_map(Result::ok)
        .filter_map(|entry| {
            let path = entry.path();
            // Non-UTF-8 names cannot be prefix-matched against `&str`; skip.
            let file_name = path.file_name()?.to_str()?;
            if file_name.starts_with(prefix) {
                Some(path)
            } else {
                None
            }
        })
        .collect::<Vec<PathBuf>>())
}
25 |
/// Writes `buf` into `file` at the absolute byte `offset`, returning the
/// number of bytes written.
///
/// On Unix this uses positional `pwrite`-style I/O (`FileExt::write_at`),
/// which does not move the file cursor. On Windows it seeks to `offset`
/// first, so the cursor IS moved as a side effect.
///
/// # Errors
/// Propagates any underlying I/O error.
pub fn write_at(file: &mut File, buf: &[u8], offset: u64) -> io::Result<usize> {
    #[cfg(target_family = "unix")]
    {
        // NOTE(review): `write_at` may perform a short write; callers that
        // require the full buffer on disk should check the returned count.
        file.write_at(buf, offset)
    }

    #[cfg(target_family = "windows")]
    {
        let _ = file.seek(SeekFrom::Start(offset))?;
        // `write_all` guarantees the full buffer, so report its length.
        file.write_all(buf).map(|_| buf.len())
    }
}
38 |
--------------------------------------------------------------------------------
/crates/data/src/utils/hash.rs:
--------------------------------------------------------------------------------
1 | use sha2::{Digest, Sha256};
2 |
3 | pub fn to_sha256(vec: Vec) -> [u8; 32] {
4 | let mut hasher = Sha256::new();
5 | hasher.update(vec);
6 |
7 | // Finalize the hash and return it as a 32-byte array
8 | let result = hasher.finalize();
9 | let mut hash = [0u8; 32];
10 | hash.copy_from_slice(&result);
11 | hash
12 | }
13 |
/// Renders a digest (or any byte sequence) as a lowercase hex string,
/// two characters per byte (e.g. `[0xab, 0x01]` -> `"ab01"`).
pub fn sha256_to_string(bytes: Vec<u8>) -> String {
    bytes
        .iter()
        .map(|byte| format!("{:02x}", byte))
        .collect::<String>()
}
20 |
--------------------------------------------------------------------------------
/crates/data/src/utils/mod.rs:
--------------------------------------------------------------------------------
1 | use std::marker::PhantomData;
2 |
3 | pub mod fs;
4 | pub mod hash;
5 |
/// Concatenates the input slices into a single buffer, optionally passing
/// each slice through `process_cb` first.
///
/// With `process_cb == None` the slices are copied verbatim; with `Some(cb)`
/// the callback's output is appended instead of the raw slice.
// Restored the stripped generics; the dead `PhantomData` marker was removed
// (callers still pin `F` via turbofish/closure inference), and the callback
// match is hoisted out of the loop so it is evaluated once.
pub(crate) fn flatten_with_callback<'a, F>(
    input: &'a [&'a [u8]],
    process_cb: Option<F>,
) -> Vec<u8>
where
    F: Fn(&'a [u8]) -> Vec<u8>,
{
    let mut buffer = Vec::new();

    match process_cb {
        Some(cb) => {
            for &slice in input {
                buffer.extend_from_slice(&cb(slice));
            }
        }
        None => {
            for &slice in input {
                buffer.extend_from_slice(slice);
            }
        }
    }

    buffer
}
27 |
/// Concatenates a list of byte slices into one contiguous `Vec<u8>`,
/// pre-allocating the exact total length to avoid re-growth.
pub(crate) fn flatten(input: Vec<&[u8]>) -> Vec<u8> {
    let total_len: usize = input.iter().map(|slice| slice.len()).sum();
    let mut buffer = Vec::with_capacity(total_len);

    for slice in input {
        buffer.extend_from_slice(slice);
    }

    buffer
}
38 |
/// Returns `true` when every byte in `buf` is zero (an empty buffer counts).
///
/// Uses `align_to` to reinterpret the aligned middle of the buffer as wider
/// words so large buffers are checked a word at a time instead of per byte;
/// the unaligned prefix/suffix are checked as raw bytes.
// NOTE(review): the stripped word type was reconstructed as `u128`; any
// unsigned integer type yields the same result here.
pub fn is_zero_aligned(buf: &[u8]) -> bool {
    // SAFETY: reinterpreting initialized bytes as `u128` is sound — every bit
    // pattern is a valid `u128`, and `align_to` guarantees correctly aligned,
    // in-bounds sub-slices.
    let (prefix, aligned, suffix) = unsafe { buf.align_to::<u128>() };

    prefix.iter().all(|&x| x == 0)
        && suffix.iter().all(|&x| x == 0)
        && aligned.iter().all(|&x| x == 0)
}
46 |
--------------------------------------------------------------------------------
/crates/data/test_cases/.gitignore:
--------------------------------------------------------------------------------
1 | commit-log-*.bin
2 | reconcile*.bin
3 | fake-db-folder
4 | reconcile-finish.data
5 | reconcile.data
--------------------------------------------------------------------------------
/crates/data/test_cases/fake-db-folder/fake-empty-table/data_c222a11d-c80f-4d6e-8c8a-7b83f79f9ef2_0.data:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Schema-JS/schema-js/8791c63c62a7eba23e788d6e725bccdcd9bf8c68/crates/data/test_cases/fake-db-folder/fake-empty-table/data_c222a11d-c80f-4d6e-8c8a-7b83f79f9ef2_0.data
--------------------------------------------------------------------------------
/crates/data/test_cases/fake-db-folder/fake-partial-folder/data_38af2223-d339-4f45-994e-eef41a69fcaa_2.data:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Schema-JS/schema-js/8791c63c62a7eba23e788d6e725bccdcd9bf8c68/crates/data/test_cases/fake-db-folder/fake-partial-folder/data_38af2223-d339-4f45-994e-eef41a69fcaa_2.data
--------------------------------------------------------------------------------
/crates/data/test_cases/fake-db-folder/fake-partial-folder/data_9e9d8ad5-6f76-4720-85de-4ca2497a0231_0.data:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Schema-JS/schema-js/8791c63c62a7eba23e788d6e725bccdcd9bf8c68/crates/data/test_cases/fake-db-folder/fake-partial-folder/data_9e9d8ad5-6f76-4720-85de-4ca2497a0231_0.data
--------------------------------------------------------------------------------
/crates/data/test_cases/fake-db-folder/fake-partial-folder/data_c4cebac4-037c-4af7-9dc3-87829d5f0217_1.data:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Schema-JS/schema-js/8791c63c62a7eba23e788d6e725bccdcd9bf8c68/crates/data/test_cases/fake-db-folder/fake-partial-folder/data_c4cebac4-037c-4af7-9dc3-87829d5f0217_1.data
--------------------------------------------------------------------------------
/crates/dirs/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "schemajs_dirs"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | authors.workspace = true
7 | resolver = "2"
8 |
9 | [dependencies]
10 | dirs.workspace = true
11 |
--------------------------------------------------------------------------------
/crates/dirs/src/lib.rs:
--------------------------------------------------------------------------------
1 | use std::path::PathBuf;
2 |
3 | pub const BASE_SCHEME_JS_FOLDER: &str = ".schema-js";
4 |
5 | pub fn get_base_path(base_path: Option) -> PathBuf {
6 | base_path.unwrap_or_else(|| dirs::data_dir().unwrap().join(BASE_SCHEME_JS_FOLDER))
7 | }
8 | pub fn create_scheme_js_folder(base_path: Option) {
9 | let paths = [
10 | get_base_path(base_path.clone()),
11 | get_base_path(base_path).join("dbs"),
12 | ]
13 | .into_iter();
14 |
15 | for path in paths {
16 | if !path.exists() {
17 | let _ = std::fs::create_dir(&path);
18 | }
19 | }
20 | }
21 |
22 | pub fn create_scheme_js_db(base_path: Option, db_name: &str) -> PathBuf {
23 | let path = get_base_path(base_path).join("dbs").join(db_name);
24 |
25 | if !path.exists() {
26 | let _ = std::fs::create_dir(path.clone());
27 | }
28 |
29 | path
30 | }
31 |
32 | pub fn create_schema_js_table(
33 | base_path: Option,
34 | db_name: &str,
35 | table_name: &str,
36 | ) -> PathBuf {
37 | let path = get_base_path(base_path)
38 | .join("dbs")
39 | .join(db_name)
40 | .join(table_name);
41 |
42 | if !path.exists() {
43 | let _ = std::fs::create_dir(path.clone());
44 | }
45 |
46 | path
47 | }
48 |
49 | pub fn create_indx_folder(base_path: Option, db_name: &str, table_name: &str) -> PathBuf {
50 | let path = get_base_path(base_path)
51 | .join("dbs")
52 | .join(db_name)
53 | .join(table_name)
54 | .join("indxs");
55 |
56 | if !path.exists() {
57 | let _ = std::fs::create_dir(path.clone());
58 | }
59 |
60 | path
61 | }
62 |
--------------------------------------------------------------------------------
/crates/engine/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "schemajs_engine"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | authors.workspace = true
7 | resolver = "2"
8 |
9 | [dependencies]
10 | deno_core.workspace = true
11 | schemajs_primitives = { version = "0.1.0", path = "../primitives" }
12 | serde.workspace = true
13 | anyhow.workspace = true
14 | chashmap.workspace = true
15 | schemajs_data = { version = "0.1.0", path = "../data" }
16 | schemajs_dirs = { version = "0.1.0", path = "../dirs" }
17 | tokio.workspace = true
18 | uuid.workspace = true
19 | serde_json.workspace = true
20 | borsh.workspace = true
21 | enum-as-inner.workspace = true
22 | thiserror.workspace = true
23 | walkdir.workspace = true
24 | schemajs_query = { version = "0.1.0", path = "../query" }
25 | schemajs_config = { version = "0.1.0", path = "../config" }
26 | schemajs_helpers = { version = "0.1.0", path = "../helpers" }
27 | parking_lot.workspace = true
28 |
29 | [dev-dependencies]
30 | flaky_test.workspace = true
--------------------------------------------------------------------------------
/crates/engine/src/engine_db.rs:
--------------------------------------------------------------------------------
1 | use schemajs_config::DatabaseConfig;
2 | use schemajs_data::fdm::FileDescriptorManager;
3 | use schemajs_dirs::create_scheme_js_db;
4 | use schemajs_helpers::helper::HelperCall;
5 | use schemajs_primitives::table::Table;
6 | use schemajs_query::managers::single::SingleQueryManager;
7 | use schemajs_query::row_json::RowJson;
8 | use std::path::PathBuf;
9 | use std::sync::Arc;
10 | use tokio::sync::mpsc::error::SendError;
11 | use tokio::sync::mpsc::Sender;
12 |
13 | #[derive(Debug)]
14 | pub struct EngineDb {
15 | pub db_folder: PathBuf,
16 | pub query_manager: Arc>,
17 | pub name: String,
18 | pub db_config: Arc,
19 | helper_tx: Sender,
20 | }
21 |
22 | impl EngineDb {
23 | pub fn new(
24 | base_path: Option,
25 | name: &str,
26 | helper_tx: Sender,
27 | db_config: DatabaseConfig,
28 | file_descriptor_manager: Arc,
29 | ) -> Self {
30 | let db_folder = create_scheme_js_db(base_path.clone(), name);
31 | let db_config = Arc::new(db_config);
32 |
33 | let mut query_manager = SingleQueryManager::new(
34 | name.to_string(),
35 | helper_tx.clone(),
36 | db_config.clone(),
37 | file_descriptor_manager.clone(),
38 | );
39 | query_manager.data_path = base_path.clone();
40 |
41 | EngineDb {
42 | name: name.to_string(),
43 | db_folder,
44 | db_config,
45 | query_manager: Arc::new(query_manager),
46 | helper_tx,
47 | }
48 | }
49 |
50 | pub async fn call_helper(&self, call: HelperCall) -> Result<(), SendError> {
51 | self.helper_tx.send(call).await
52 | }
53 |
54 | pub fn add_table(&self, table: Table) {
55 | self.query_manager.register_table(table);
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/crates/engine/src/js/context.ts:
--------------------------------------------------------------------------------
import { addImmutableGlobal } from "ext:sjs_core/src/js/fieldUtils.ts";
// Identifiers for the database/table this runtime is bound to.
interface Params {
    dbName: string,
    tblName: string,
}
// Merges `params` into the immutable global SJS_CONTEXT so user scripts can
// read which database/table they are running against. Any existing context
// keys are preserved unless overwritten by `params`.
export const initializeDbContext = (params: Params) => {
    addImmutableGlobal("SJS_CONTEXT", {
        ...(globalThis.SJS_CONTEXT || {}),
        ...params
    });
}
--------------------------------------------------------------------------------
/crates/engine/src/js/ops.ts:
--------------------------------------------------------------------------------
const core = globalThis.Deno.core;
// Inserts `data` as a row into `dbName.tableName` via the engine op
// `op_engine_insert_row`; resolves with the op's result.
export const insertRow = async (dbName: string, tableName: string, data: any) => {
    return await core.ops.op_engine_insert_row(
        dbName,
        tableName,
        data
    );
}

// Runs a query (`data` is a QueryOps tree) against `dbName.tableName` via
// `op_engine_search_rows`; resolves with the matching rows.
export const searchRows = async (dbName: string, tableName: string, data: any) => {
    return await core.ops.op_engine_search_rows(dbName, tableName, data);
}
--------------------------------------------------------------------------------
/crates/engine/src/js/query.ts:
--------------------------------------------------------------------------------
// Serialized value variants understood by the engine; "Null" is a bare
// string tag, the other variants are single-key objects.
type DataValue = {
    String: string,
} | { Uuid: Uuid } | "Null" | {
    Boolean: boolean,
} | {
    Number: number
}

// A single comparison: `key <filter_type> value`.
interface QueryVal {
    key: string;
    filter_type: string;
    value: DataValue;
}

// Leaf node of the query tree.
interface Condition {
    Condition: QueryVal;
}

// All nested sub-queries must match.
interface And {
    And: QueryOps[];
}

// Any nested sub-query may match.
interface Or {
    Or: QueryOps[];
}

// A query is a tree of conditions combined with And/Or nodes.
type QueryOps = Condition | And | Or;
28 |
29 | export class Uuid {
30 | private value: string;
31 | constructor(value: string) {
32 | this.value = value;
33 | }
34 | }
35 |
36 | const parseType = (val: any): DataValue => {
37 | if(typeof val === 'string') {
38 | return {
39 | String: val
40 | }
41 | } else if(typeof val === 'number') {
42 | return {
43 | Number: val
44 | }
45 | } else if (typeof val === 'boolean') {
46 | return {
47 | Boolean: val
48 | }
49 | } else if(typeof val === 'object' && val instanceof Uuid) {
50 | return {
51 | Uuid: val
52 | }
53 | } else if(val === null) {
54 | return "Null"
55 | } else {
56 | throw new Error("Invalid Data Type")
57 | }
58 | }
59 |
// Fluent builder that assembles a QueryOps tree for a given db/table.
// When names are omitted they are taken from the global SJS_CONTEXT.
export class QueryBuilder {
    private query: QueryOps[] = [];
    public readonly dbName: string
    public readonly tableName: string;

    constructor(dbName?: string, tableName?: string) {
        let ctx = globalThis.SJS_CONTEXT;

        this.dbName = dbName || ctx?.dbName;
        this.tableName = tableName || ctx?.tblName;
    }

    // Static methods for convenience
    static where(dbName: string, tableName: string, key: string, filter_type: string, value: any) {
        const builder = new QueryBuilder(dbName, tableName);
        return builder.where(key, filter_type, value);
    }

    static and(dbName: string, tableName: string, callback: (builder: QueryBuilder) => void) {
        const builder = new QueryBuilder(dbName, tableName);
        return builder.and(callback);
    }

    static or(dbName: string, tableName: string, callback: (builder: QueryBuilder) => void) {
        const builder = new QueryBuilder(dbName, tableName);
        return builder.or(callback);
    }

    // Method to add a basic condition; `value` is converted to the engine's
    // DataValue encoding (throws on unsupported types).
    where(key: string, filter_type: string, value: any) {
        this.query.push({
            Condition: {
                key,
                filter_type,
                value: parseType(value)
            }
        });
        return this;
    }

    // Method to add an AND condition built from a nested builder.
    and(callback: (builder: QueryBuilder) => void) {
        const builder = new QueryBuilder(this.dbName, this.tableName);
        callback(builder);
        this.query.push({
            // build(false) returns the nested builder's full ops array.
            And: builder.build(false)
        });
        return this;
    }

    // Method to add an OR condition built from a nested builder.
    or(callback: (builder: QueryBuilder) => void) {
        const builder = new QueryBuilder(this.dbName, this.tableName);
        callback(builder);
        this.query.push({
            Or: builder.build(false)
        });
        return this;
    }

    // Build the final query structure.
    // NOTE(review): the flag reads inverted — `build(false)` returns the
    // WHOLE ops array (used for nesting), while `build()` or any other value
    // returns only the first op. Callers rely on this exact behavior.
    build(notFinal?: boolean) {
        const query = notFinal === false ? this.query : this.query[0];
        return query;
    }
}
--------------------------------------------------------------------------------
/crates/engine/src/lib.rs:
--------------------------------------------------------------------------------
1 | use crate::ops::insert::op_engine_insert_row;
2 | use crate::ops::query::op_engine_search_rows;
3 | use deno_core::error::AnyError;
4 | use deno_core::{op2, OpState};
5 |
6 | pub mod engine;
7 | pub mod engine_db;
8 | mod ops;
9 | mod query_error;
10 | pub mod utils;
11 | pub mod validation_error;
12 |
/// Op exposed to JS as `sjs_op_print`: writes `msg` to stdout with a trailing
/// newline. The `OpState` parameter is required by the op signature but is
/// unused here.
#[op2(fast)]
pub fn sjs_op_print(state: &mut OpState, #[string] msg: &str) -> Result<(), AnyError> {
    println!("{}", msg);

    Ok(())
}
19 |
// Declares the `sjs_engine` deno_core extension: registers the insert/search
// ops plus `sjs_op_print`, and ships the ESM modules that provide the
// JS-side API (ops, context, query builder).
deno_core::extension!(
    sjs_engine,
    ops = [op_engine_insert_row, op_engine_search_rows, sjs_op_print],
    esm = ["src/js/ops.ts", "src/js/context.ts", "src/js/query.ts",]
);
25 |
--------------------------------------------------------------------------------
/crates/engine/src/ops/insert.rs:
--------------------------------------------------------------------------------
1 | use crate::engine::SchemeJsEngine;
2 | use deno_core::{op2, serde_json, OpState};
3 | use parking_lot::RwLock;
4 | use schemajs_query::errors::QueryError;
5 | use schemajs_query::managers::query_result::QueryResult;
6 | use schemajs_query::row::Row;
7 | use schemajs_query::row_json::{RowData, RowJson};
8 | use std::cell::RefCell;
9 | use std::rc::Rc;
10 | use std::sync::Arc;
11 | use uuid::Uuid;
12 |
13 | #[op2(async)]
14 | #[serde]
15 | pub async fn op_engine_insert_row(
16 | state: Rc>,
17 | #[string] db_name: String,
18 | #[string] table_name: String,
19 | #[serde] mut row: serde_json::Value,
20 | ) -> Result {
21 | let mut mut_state = state.borrow_mut();
22 | let state = mut_state
23 | .borrow_mut::>>()
24 | .clone();
25 |
26 | let query_manager = {
27 | let read_engine = state.read();
28 | let db = read_engine.find_by_name_ref(db_name.as_str()).unwrap();
29 | db.query_manager.clone()
30 | };
31 |
32 | let table = query_manager.get_table(&table_name);
33 | if let Some(table) = table {
34 | return query_manager.insert(
35 | RowJson::from_json(row, table, 0).map_err(|_| QueryError::InvalidSerialization)?,
36 | );
37 | }
38 |
39 | return Err(QueryError::InvalidInsertion);
40 | }
41 |
--------------------------------------------------------------------------------
/crates/engine/src/ops/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod insert;
2 | pub mod query;
3 |
--------------------------------------------------------------------------------
/crates/engine/src/ops/query.rs:
--------------------------------------------------------------------------------
1 | use crate::engine::SchemeJsEngine;
2 | use deno_core::{op2, OpState};
3 | use parking_lot::RwLock;
4 | use schemajs_query::errors::QueryError;
5 | use schemajs_query::ops::query_ops::QueryOps;
6 | use schemajs_query::row::Row;
7 | use serde_json::Value;
8 | use std::cell::RefCell;
9 | use std::rc::Rc;
10 | use std::sync::Arc;
11 |
12 | #[op2(async)]
13 | #[serde]
14 | pub async fn op_engine_search_rows(
15 | state: Rc>,
16 | #[string] db_name: String,
17 | #[string] table_name: String,
18 | #[serde] args: QueryOps,
19 | ) -> Result, QueryError> {
20 | let mut mut_state = state.borrow_mut();
21 | let state = mut_state
22 | .borrow_mut::>>()
23 | .clone();
24 |
25 | let query_manager = {
26 | let read_engine = state.read();
27 | let db = read_engine.find_by_name_ref(db_name.as_str()).unwrap();
28 | db.query_manager.clone()
29 | };
30 |
31 | let table = query_manager.get_table(&table_name);
32 | if let Some(_) = table {
33 | let s = query_manager
34 | .search_manager
35 | .search(&table_name, &args)
36 | .map_err(|_| QueryError::InvalidQuerySearch(table_name.clone()))?;
37 | let vals: Vec = s.iter().filter_map(|row| row.to_json().ok()).collect();
38 | return Ok(vals);
39 | }
40 |
41 | Err(QueryError::InvalidQuerySearch(table_name))
42 | }
43 |
--------------------------------------------------------------------------------
/crates/engine/src/query_error.rs:
--------------------------------------------------------------------------------
1 | use crate::validation_error::ValidationError;
2 | use enum_as_inner::EnumAsInner;
3 | use schemajs_query::RowSerializationError;
4 | use serde::{Deserialize, Serialize};
5 | use thiserror::Error;
6 |
/// Failure modes when inserting a row.
#[derive(Debug, EnumAsInner, Error, Serialize, Deserialize)]
pub enum InsertionError {
    /// The row failed column validation (wrong or missing values).
    #[error("Invalid Row Values")]
    ValidationError(#[from] ValidationError),
    /// The row could not be serialized for storage.
    #[error("Row could not be serialized")]
    SerializationError(#[from] RowSerializationError),
    /// Catch-all with a human-readable description.
    #[error("Insertion Error '{0}'")]
    Generic(String),
}
16 |
/// Top-level query failure for this crate; currently only wraps insertion
/// errors.
#[derive(Debug, EnumAsInner, Error, Serialize, Deserialize)]
pub enum QueryError {
    #[error("Insertion error: {0}")]
    InvalidInsertion(#[from] InsertionError),
}
22 |
--------------------------------------------------------------------------------
/crates/engine/src/utils/fs.rs:
--------------------------------------------------------------------------------
1 | use walkdir::DirEntry;
2 |
3 | pub fn is_js_or_ts(entry: &DirEntry) -> bool {
4 | entry
5 | .path()
6 | .extension()
7 | .map_or(false, |ext| ext == "js" || ext == "ts")
8 | }
9 |
--------------------------------------------------------------------------------
/crates/engine/src/utils/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod fs;
2 |
--------------------------------------------------------------------------------
/crates/engine/src/validation_error.rs:
--------------------------------------------------------------------------------
1 | use enum_as_inner::EnumAsInner;
2 | use serde::{Deserialize, Serialize};
3 | use thiserror::Error;
4 |
/// Column-level validation failures raised when checking a row against its
/// table schema; each variant carries the offending column name.
#[derive(Debug, EnumAsInner, Error, Serialize, Deserialize)]
pub enum ValidationError {
    #[error("Expected a string for column '{0}'")]
    ExpectedString(String),
    #[error("Expected a boolean for column '{0}'")]
    ExpectedBoolean(String),
    #[error("Expected an integer for column '{0}'")]
    ExpectedInteger(String),
    #[error("Expected a float for column '{0}'")]
    ExpectedFloat(String),
    #[error("Missing column '{0}'")]
    MissingColumn(String),
}
18 |
--------------------------------------------------------------------------------
/crates/grpc/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "schemajs_grpc"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | authors.workspace = true
7 | resolver = "2"
8 |
9 | [dependencies]
10 | tonic.workspace = true
11 | prost.workspace = true
12 | tokio.workspace = true
13 | tonic-async-interceptor.workspace = true
14 | schemajs_internal = { path = "../internal" }
15 | schemajs_query = { path = "../query" }
16 | schemajs_engine = { path = "../engine" }
17 | schemajs_primitives = { path = "../primitives" }
18 | tonic-middleware.workspace = true
19 | serde.workspace = true
20 | uuid = { version = "1.10.0", features = ["v4"] }
21 | serde_json.workspace = true
22 | prost-types.workspace = true
23 | schemajs_helpers = { version = "0.1.0", path = "../helpers" }
24 |
25 | [build-dependencies]
26 | tonic-build = "0.12.2"
--------------------------------------------------------------------------------
/crates/grpc/build.rs:
--------------------------------------------------------------------------------
1 | fn main() {
2 | let protos = [
3 | "proto/connection/connection.proto",
4 | "proto/shared/data_value.proto",
5 | "proto/shared/row.proto",
6 | "proto/query/query.proto",
7 | ];
8 |
9 | tonic_build::configure()
10 | .emit_rerun_if_changed(true)
11 | .compile(&protos, &["./proto"])
12 | .unwrap();
13 | }
14 |
--------------------------------------------------------------------------------
/crates/grpc/proto/connection/connection.proto:
--------------------------------------------------------------------------------
syntax = "proto3";
package sjs.connection;

// Authentication/handshake service: validates credentials and hands back a
// session token for subsequent authenticated RPCs.
service ProtoConnectionService {
  rpc CheckConnection(CheckConnectionRequest) returns (CheckConnectionResponse) {}
}

// Request message for checking the connection
message CheckConnectionRequest {
  // The database name or identifier (required)
  string database = 1;

  // The username for authentication (required)
  string username = 2;

  // The password for authentication (required)
  string password = 3;
}

// Response: `is_connected` reports whether authentication succeeded; `token`
// is only set on success.
message CheckConnectionResponse {
  bool is_connected = 1;
  optional string token = 2;
}
--------------------------------------------------------------------------------
/crates/grpc/proto/query/query.proto:
--------------------------------------------------------------------------------
syntax = "proto3";
import "shared/data_value.proto";
import "google/protobuf/struct.proto";

package sjs.query;

// A single row to insert; `row_values` maps column name -> value.
// NOTE(review): the map types in this file lost their angle-bracket
// parameters during extraction; they were reconstructed as
// map<string, sjs.shared.DataValue> from the imported DataValue type.
message RowInsert {
  string table_name = 1;
  optional string id = 2;
  map<string, sjs.shared.DataValue> row_values = 3;
}

// Define the request message that contains a list of RowInsert.
message InsertRowsRequest {
  repeated RowInsert rows = 1;
}

// Define the response message that can return a success or failure status.
message InsertRowsResponse {
  bool success = 1;
  string message = 2; // Optional message to provide more info (like an error description).
}

// Define the gRPC service.
service ProtoRowInsertService {
  // RPC for inserting rows into a table.
  rpc InsertRows (InsertRowsRequest) returns (InsertRowsResponse);
}

// Define the QueryVal struct
message QueryVal {
  string key = 1;
  string filter_type = 2;
  sjs.shared.DataValue value = 3;
}

// Define QueryOps enum as a message with a oneof for the different variants
message QueryOps {
  oneof operation {
    AndOp and_op = 1;
    OrOp or_op = 2;
    QueryVal condition = 3;
  }
}

// Message for And operation (repeated QueryOps for nested operations)
message AndOp {
  repeated QueryOps ops = 1;
}

// Message for Or operation (repeated QueryOps for nested operations)
message OrOp {
  repeated QueryOps ops = 1;
}

message QueryDataRequest {
  string table_name = 1;
  QueryOps query = 2;
}

// One result row: column name -> value.
message DataMap {
  map<string, sjs.shared.DataValue> values = 1;
}

message QueryResponse {
  repeated DataMap values = 1;
}

service ProtoQueryService {
  // RPC for running a query against a table.
  rpc QueryRows (QueryDataRequest) returns (QueryResponse);
}

// Free-form request addressed to a named custom handler; `req` is an opaque
// payload interpreted by that handler.
message CustomQueryRequest {
  string table_name = 1;
  string identifier = 2;
  string req = 3;
}

message CustomQueryResponse {
  google.protobuf.Value value = 1;
}

service ProtoCustomQueryService {
  rpc CustomQuery(CustomQueryRequest) returns (CustomQueryResponse);
}
--------------------------------------------------------------------------------
/crates/grpc/proto/shared/data_value.proto:
--------------------------------------------------------------------------------
// NOTE(review): the `syntax` declaration was missing, making protoc fall
// back to proto2 while every sibling file in this package is proto3.
syntax = "proto3";
package sjs.shared;

// Wire encoding of a single engine value; exactly one variant is set.
message DataValue {
  oneof value_type {
    // `true` marks an explicit null (a bool is the smallest carrier).
    bool null_value = 1;
    string uuid_value = 2;
    string string_value = 3;
    bool bool_value = 4;
    float number_value = 5;
  }
}
--------------------------------------------------------------------------------
/crates/grpc/proto/shared/row.proto:
--------------------------------------------------------------------------------
syntax = "proto3";
package sjs.shared;
import "shared/data_value.proto";

message RowId {
  message Metadata {
    string table = 1;

    // The type of ID.
    oneof id_type {
      // The auto-allocated ID of the entity.
      // Never equal to zero. Values less than zero are discouraged and may not
      // be supported in the future.
      string sjs_uid = 2;

      // The name of the entity.
      // A name matching regex `__.*__` is reserved/read-only.
      // A name must not be more than 1500 bytes when UTF-8 encoded.
      // Cannot be `""`.
      string id = 3;
    }
  }
}

// A stored row: its identifier plus column name -> value properties.
message Row {
  RowId key = 1;

  // NOTE(review): the map's type parameters were lost in extraction and
  // reconstructed as DataValue (this file imports shared/data_value.proto
  // solely for this field) — confirm.
  map<string, sjs.shared.DataValue> properties = 3;
}
--------------------------------------------------------------------------------
/crates/grpc/src/interceptors/auth_interceptor.rs:
--------------------------------------------------------------------------------
1 | use schemajs_internal::manager::InternalManager;
2 | use std::sync::Arc;
3 | use tonic::body::BoxBody;
4 | use tonic::{async_trait, Request, Status};
5 | use tonic_middleware::RequestInterceptor;
6 |
7 | #[derive(Clone)]
8 | pub struct AuthInterceptor {
9 | pub(crate) engine: Arc,
10 | }
11 |
12 | #[async_trait]
13 | impl RequestInterceptor for AuthInterceptor {
14 | async fn intercept(
15 | &self,
16 | mut req: tonic::codegen::http::Request,
17 | ) -> Result, Status> {
18 | match req.headers().get("x-sjs-auth") {
19 | None => Err(Status::unauthenticated("Unknown Authentication")),
20 | Some(val) => {
21 | let ctx = self
22 | .engine
23 | .auth_manager()
24 | .check_token(val.to_str().unwrap());
25 | if let Ok(user_ctx) = ctx {
26 | req.extensions_mut().insert(user_ctx);
27 | Ok(req)
28 | } else {
29 | Err(Status::unauthenticated("Unknown Authentication"))
30 | }
31 | }
32 | }
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/crates/grpc/src/interceptors/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod auth_interceptor;
2 |
--------------------------------------------------------------------------------
/crates/grpc/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod interceptors;
2 | pub mod server;
3 | mod services;
4 | pub mod utils;
5 |
6 | pub type GrpcResponse = Result, tonic::Status>;
7 |
--------------------------------------------------------------------------------
/crates/grpc/src/server.rs:
--------------------------------------------------------------------------------
1 | use crate::interceptors::auth_interceptor::AuthInterceptor;
2 | use crate::services::connection::connection_service::proto_connection_service_server::ProtoConnectionServiceServer;
3 | use crate::services::connection::ConnectionService;
4 | use crate::services::query::custom_query::custom_query_service::proto_custom_query_service_server::ProtoCustomQueryServiceServer;
5 | use crate::services::query::custom_query::CustomQueryService;
6 | use crate::services::query::insert::insert_service::proto_row_insert_service_server::ProtoRowInsertServiceServer;
7 | use crate::services::query::insert::InsertService;
8 | use crate::services::query::query_data::query_service::proto_query_service_server::ProtoQueryServiceServer;
9 | use crate::services::query::query_data::QueryService;
10 | use schemajs_internal::manager::InternalManager;
11 | use std::net::{IpAddr, SocketAddr};
12 | use std::sync::Arc;
13 | use tonic::transport::Server;
14 | use tonic_middleware::InterceptorFor;
15 |
16 | pub struct GrpcServer {
17 | db_manager: Arc,
18 | ip: SocketAddr,
19 | }
20 |
21 | pub struct GrpcServerArgs {
22 | pub db_manager: Arc,
23 | pub ip: Option,
24 | }
25 |
26 | impl GrpcServer {
27 | pub fn new(args: GrpcServerArgs) -> Self {
28 | let default_ip = args.db_manager.get_config().grpc.host.clone();
29 | Self {
30 | db_manager: args.db_manager,
31 | ip: args.ip.unwrap_or_else(|| default_ip).parse().unwrap(),
32 | }
33 | }
34 |
35 | pub async fn start(&self) -> Result<(), Box> {
36 | let curr_db = self.db_manager.clone();
37 |
38 | let connection_service =
39 | ProtoConnectionServiceServer::new(ConnectionService::new(curr_db.clone()));
40 |
41 | let insert_service = ProtoRowInsertServiceServer::new(InsertService::new(curr_db.clone()));
42 |
43 | let query_service = ProtoQueryServiceServer::new(QueryService::new(curr_db.clone()));
44 |
45 | let custom_query_service =
46 | ProtoCustomQueryServiceServer::new(CustomQueryService::new(curr_db.clone()));
47 |
48 | let _ = Server::builder()
49 | .add_service(InterceptorFor::new(
50 | insert_service,
51 | AuthInterceptor {
52 | engine: curr_db.clone(),
53 | },
54 | ))
55 | .add_service(InterceptorFor::new(
56 | query_service,
57 | AuthInterceptor {
58 | engine: curr_db.clone(),
59 | },
60 | ))
61 | .add_service(InterceptorFor::new(
62 | custom_query_service,
63 | AuthInterceptor {
64 | engine: curr_db.clone(),
65 | },
66 | ))
67 | .add_service(connection_service)
68 | .serve(self.ip.clone())
69 | .await?;
70 |
71 | Ok(())
72 | }
73 | }
74 |
--------------------------------------------------------------------------------
/crates/grpc/src/services/connection.rs:
--------------------------------------------------------------------------------
// Generated protobuf types for the `sjs.connection` package.
pub mod connection_service {
    tonic::include_proto!("sjs.connection");
}

use crate::{define_sjs_grpc_service, GrpcResponse};
use connection_service::{CheckConnectionRequest, CheckConnectionResponse};
use schemajs_internal::auth::types::VerifyUserArgs;
use tonic::Response;

// Declares `ConnectionService` with the shared `db_manager` field/constructor.
define_sjs_grpc_service!(ConnectionService);

#[tonic::async_trait]
impl connection_service::proto_connection_service_server::ProtoConnectionService
    for ConnectionService
{
    // Authenticates the supplied credentials against the target database.
    //
    // Always replies `Ok`: authentication failure is expressed through
    // `is_connected: false` with no token rather than a gRPC error status.
    // NOTE(review): generic parameters on `tonic::Request`/`GrpcResponse`
    // were stripped in this dump — confirm against the generated types.
    async fn check_connection(
        &self,
        request: tonic::Request,
    ) -> GrpcResponse {
        let inner_req = request.into_inner();
        let auth_manager = self.db_manager.auth_manager();
        let valid_user = auth_manager.authenticate(VerifyUserArgs {
            scheme_name: inner_req.database,
            identifier: inner_req.username,
            password: inner_req.password,
        });

        if let Ok(token) = valid_user {
            Ok(Response::new(CheckConnectionResponse {
                is_connected: true,
                // The session token the client must present on later calls.
                token: Some(token.to_string()),
            }))
        } else {
            Ok(Response::new(CheckConnectionResponse {
                is_connected: false,
                token: None,
            }))
        }
    }
}
41 |
--------------------------------------------------------------------------------
/crates/grpc/src/services/macros.rs:
--------------------------------------------------------------------------------
// Declares a gRPC service struct that carries the shared database manager.
// Two arms: with extra inherent methods, and bare.
// NOTE(review): the generic parameter on `std::sync::Arc` (presumably the
// internal manager type) was stripped in this dump — confirm against the
// original source.
#[macro_export]
macro_rules! define_sjs_grpc_service {
    // Pattern to accept service name and additional methods
    ($service_name:ident, { $($methods:item)* }) => {
        pub struct $service_name {
            db_manager: std::sync::Arc,
        }

        impl $service_name {
            pub fn new(
                db_manager: std::sync::Arc,
            ) -> Self {
                Self { db_manager }
            }

            // Insert the custom methods provided by the user
            $($methods)*
        }
    };
    // Pattern to accept only service name without additional methods
    ($service_name:ident) => {
        pub struct $service_name {
            db_manager: std::sync::Arc,
        }

        impl $service_name {
            pub fn new(
                db_manager: std::sync::Arc,
            ) -> Self {
                Self { db_manager }
            }
        }
    };
}
35 |
--------------------------------------------------------------------------------
/crates/grpc/src/services/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod connection;
2 | pub mod macros;
3 | pub mod query;
4 | pub mod shared;
5 |
--------------------------------------------------------------------------------
/crates/grpc/src/services/query/custom_query.rs:
--------------------------------------------------------------------------------
1 | use crate::define_sjs_grpc_service;
2 | use crate::services::query::custom_query::custom_query_service::{
3 | CustomQueryRequest, CustomQueryResponse,
4 | };
5 | use crate::services::shared::shared;
6 | use crate::utils::common::find_database;
7 | use crate::utils::json::{serde_json_to_prost, to_prost_struct};
8 | use prost_types::Any;
9 | use schemajs_helpers::helper::{HelperCall, HelperDbContext};
10 | use schemajs_internal::auth::types::UserContext;
11 | use serde_json::Value;
12 | use std::sync::Arc;
13 | use std::time::Duration;
14 | use tokio::select;
15 | use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
16 | use tonic::{Request, Response, Status};
17 |
// Generated protobuf types for the `sjs.query` package.
pub mod custom_query_service {
    tonic::include_proto!("sjs.query");
}

define_sjs_grpc_service!(CustomQueryService, {
    // Dispatches a user-defined query helper inside the caller's database and
    // waits (bounded by `custom_query_timeout` seconds) for its response.
    //
    // Errors:
    // - `internal` when the payload is not valid JSON or the helper call fails;
    // - `aborted` when the helper does not answer before the timeout.
    pub async fn execute_custom_query(
        &self,
        user_context: Arc,
        req: CustomQueryRequest,
    ) -> Result {
        let db = find_database(&self.db_manager, user_context)?;
        // One-shot style channel: the helper sends its JSON result back here.
        let (helper_response_tx, mut helper_response_rx) = self.create_response_handlers();
        let result = db
            .call_helper(HelperCall::CustomQuery {
                db_ctx: HelperDbContext {
                    db: Some(db.name.clone()),
                    table: Some(req.table_name),
                },
                identifier: req.identifier,
                req: serde_json::from_str(&req.req)
                    .map_err(|_| Status::internal("Invalid Payload"))?,
                response: helper_response_tx,
            })
            .await;

        if result.is_err() {
            return Err(Status::internal("Error executing custom query call"));
        }

        // Timeout future; duration is configured per-database, in seconds.
        let timeout = {
            let timeout_duration = Duration::from_secs(db.db_config.custom_query_timeout);
            tokio::time::sleep(timeout_duration)
        };

        // Race the helper's reply against the timeout. A closed channel
        // (sender dropped without replying) yields JSON `null`.
        let resp = select! {
            msg = helper_response_rx.recv() => {
                match msg {
                    None => Ok(Value::Null),
                    Some(val) => Ok(val)
                }
            }
            _ = timeout => Err(())
        };

        resp.map_err(|_| Status::aborted("Custom query timed out"))
    }

    // Builds the (sender, receiver) pair used to collect the helper's reply.
    fn create_response_handlers(&self) -> (UnboundedSender, UnboundedReceiver) {
        unbounded_channel()
    }
});
69 |
#[tonic::async_trait]
impl custom_query_service::proto_custom_query_service_server::ProtoCustomQueryService
    for CustomQueryService
{
    // gRPC entrypoint: requires an authenticated `UserContext` (installed by
    // the auth interceptor as a request extension), runs the helper, and
    // serializes the JSON result into a protobuf `Value`.
    async fn custom_query(
        &self,
        request: Request,
    ) -> Result, Status> {
        let ctx = match request.extensions().get::>() {
            Some(ctx) => ctx,
            None => return Err(Status::unauthenticated("Invalid session")),
        };

        let process_custom_query = self
            .execute_custom_query(ctx.clone(), request.into_inner())
            .await;
        match process_custom_query {
            Ok(val) => {
                // Convert serde_json::Value -> prost_types::Value for the wire.
                let prost_val = serde_json_to_prost(val).map_err(|_| {
                    Status::internal("Could not serialize response from custom query")
                })?;
                Ok(Response::new(CustomQueryResponse {
                    value: Some(prost_val),
                }))
            }
            Err(s) => Err(s),
        }
    }
}
99 |
--------------------------------------------------------------------------------
/crates/grpc/src/services/query/insert.rs:
--------------------------------------------------------------------------------
1 | use crate::define_sjs_grpc_service;
2 | use crate::services::query::insert::insert_service::{
3 | InsertRowsRequest, InsertRowsResponse, RowInsert,
4 | };
5 | use crate::services::shared::shared;
6 | use crate::services::shared::shared::data_value::ValueType;
7 | use crate::utils::common::convert_to_data_value;
8 | use schemajs_internal::auth::types::UserContext;
9 | use schemajs_primitives::column::types::DataValue;
10 | use serde::{Deserialize, Serialize};
11 | use std::collections::HashMap;
12 | use std::str::FromStr;
13 | use std::sync::Arc;
14 | use tonic::{Request, Response, Status};
15 | use uuid::Uuid;
16 |
// Generated protobuf types for the `sjs.query` package.
pub mod insert_service {
    tonic::include_proto!("sjs.query");
}

define_sjs_grpc_service!(InsertService, {
    // Converts the wire-format rows into the engine's value maps and inserts
    // them into the caller's database in a single batch.
    //
    // Returns `Ok(true)` on success and `Ok(false)` when the batch insert
    // failed; the underlying error detail is discarded here.
    pub fn insert_rows_into_db(
        &self,
        user_context: Arc,
        rows: Vec,
    ) -> Result {
        let engine = self.db_manager.engine();
        let db_manager = engine.read();
        let user = user_context.get_user();
        // The target database is the scheme the user authenticated against.
        let db = match db_manager.find_by_name_ref(&user.scheme) {
            Some(db) => db,
            None => return Err(Status::not_found("Database not found")),
        };

        // (table_name, column -> value) per incoming row.
        let new_rows: Vec<(String, HashMap)> = rows
            .into_iter()
            .map(|row| {
                let hrow: HashMap = row
                    .row_values
                    .into_iter()
                    .map(|(col_name, col_val)| {
                        let value = match col_val.value_type {
                            Some(vt) => convert_to_data_value(vt),
                            None => DataValue::Null, // Handle the case where value_type is None
                        };
                        (col_name, value)
                    })
                    .collect();
                (row.table_name, hrow)
            })
            .collect();

        let insert = db.query_manager.insert_from_value_map(new_rows, false);

        Ok(insert.is_ok())
    }
});
58 |
#[tonic::async_trait]
impl insert_service::proto_row_insert_service_server::ProtoRowInsertService for InsertService {
    // gRPC entrypoint: requires an authenticated `UserContext` (installed by
    // the auth interceptor as a request extension), then batch-inserts.
    async fn insert_rows(
        &self,
        request: Request,
    ) -> Result, Status> {
        let ctx = match request.extensions().get::>() {
            Some(ctx) => ctx,
            None => return Err(Status::unauthenticated("Invalid session")),
        };

        let inserted = self.insert_rows_into_db(ctx.clone(), request.into_inner().rows)?;

        // `false` means the engine reported a failed batch (detail dropped
        // inside `insert_rows_into_db`).
        if !inserted {
            Err(Status::aborted("There was an issue inserting rows"))
        } else {
            Ok(Response::new(InsertRowsResponse {
                success: true,
                message: String::from("success"),
            }))
        }
    }
}
82 |
--------------------------------------------------------------------------------
/crates/grpc/src/services/query/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod custom_query;
2 | pub mod insert;
3 | pub mod query_data;
4 |
--------------------------------------------------------------------------------
/crates/grpc/src/services/query/query_data.rs:
--------------------------------------------------------------------------------
1 | use crate::define_sjs_grpc_service;
2 | use crate::services::query::query_data::query_service::{
3 | DataMap, QueryDataRequest, QueryOps as GrpcQueryOps, QueryResponse,
4 | };
5 | use crate::services::shared::shared;
6 | use crate::services::shared::shared::data_value::ValueType;
7 | use crate::services::shared::shared::DataValue as GrpcDataValue;
8 | use crate::utils::common::{convert_to_grpc_value, find_database, from_grpc_ops_to_sjs_ops};
9 | use schemajs_internal::auth::types::UserContext;
10 | use schemajs_primitives::column::types::DataValue;
11 | use schemajs_query::row::Row;
12 | use std::collections::HashMap;
13 | use std::sync::Arc;
14 | use tonic::{Request, Response, Status};
15 |
// Generated protobuf types for the `sjs.query` package.
pub mod query_service {
    tonic::include_proto!("sjs.query");
}

define_sjs_grpc_service!(QueryService, {
    // Runs an (optional) filter operation against `table_name` in the
    // caller's database and converts matching rows into wire-format maps.
    //
    // Rows that fail to deserialize are silently skipped; a missing or
    // unconvertible operation yields an empty result set.
    pub fn query_rows_from_db(
        &self,
        user_context: Arc,
        table_name: String,
        operation: Option,
    ) -> Result, Status> {
        let db = find_database(&self.db_manager, user_context)?;
        if let Some(op) = operation {
            let query_ops = from_grpc_ops_to_sjs_ops(op);
            if let Ok(qops) = query_ops {
                let rows = db
                    .query_manager
                    .search_manager
                    .search(&table_name, &qops)
                    .map_err(|e| Status::internal("Query could not be completed"))?;
                // Refactor closure to handle errors
                let map_rows: Vec> = rows
                    .into_iter()
                    .filter_map(|r| {
                        match r.to_map() {
                            Ok(val) => Some(
                                val.iter()
                                    .map(|(col, val)| {
                                        let grpc_val = convert_to_grpc_value(val);

                                        let data_val = GrpcDataValue {
                                            value_type: Some(grpc_val),
                                        };

                                        (col.clone(), data_val)
                                    })
                                    .collect::>(),
                            ),
                            Err(_) => {
                                // Skip this row if it couldn't be deserialized
                                None
                            }
                        }
                    })
                    .collect();

                // Wrap each column map in the protobuf `DataMap` message.
                let map_rows = map_rows
                    .into_iter()
                    .map(|r| DataMap { values: r })
                    .collect();

                return Ok(map_rows);
            }
        }

        Ok(vec![])
    }
});
74 |
#[tonic::async_trait]
impl query_service::proto_query_service_server::ProtoQueryService for QueryService {
    // gRPC entrypoint: requires an authenticated `UserContext` (installed by
    // the auth interceptor as a request extension) before querying rows.
    async fn query_rows(
        &self,
        request: Request,
    ) -> Result, Status> {
        // Cloned so the extension borrow ends before `into_inner` consumes
        // the request below.
        let ctx = (match request.extensions().get::>() {
            Some(ctx) => ctx,
            None => return Err(Status::unauthenticated("Invalid session")),
        })
        .clone();

        let inner = request.into_inner();

        let rows = self.query_rows_from_db(ctx, inner.table_name, inner.query)?;

        Ok(Response::new(QueryResponse { values: rows }))
    }
}
94 |
--------------------------------------------------------------------------------
/crates/grpc/src/services/shared.rs:
--------------------------------------------------------------------------------
1 | pub mod shared {
2 | tonic::include_proto!("sjs.shared");
3 | }
4 |
--------------------------------------------------------------------------------
/crates/grpc/src/utils/common.rs:
--------------------------------------------------------------------------------
1 | use crate::services::query::query_data::query_service::query_ops::Operation;
2 | use crate::services::query::query_data::query_service::{
3 | QueryOps as GrpcQueryOps, QueryVal as GrpcQueryVal,
4 | };
5 | use crate::services::shared::shared::data_value::ValueType;
6 | use schemajs_engine::engine_db::EngineDb;
7 | use schemajs_internal::auth::types::UserContext;
8 | use schemajs_internal::manager::InternalManager;
9 | use schemajs_primitives::column::types::DataValue;
10 | use schemajs_query::ops::query_ops::{QueryOps, QueryVal};
11 | use std::str::FromStr;
12 | use std::sync::Arc;
13 | use tonic::Status;
14 | use uuid::Uuid;
15 |
16 | pub fn convert_to_data_value(val: ValueType) -> DataValue {
17 | match val {
18 | ValueType::NullValue(_) => DataValue::Null,
19 | ValueType::UuidValue(u) => DataValue::Uuid(Uuid::from_str(&u).unwrap_or(Uuid::nil())),
20 | ValueType::StringValue(s) => DataValue::String(s),
21 | ValueType::BoolValue(b) => DataValue::Boolean(b),
22 | ValueType::NumberValue(n) => {
23 | DataValue::Number(serde_json::value::Number::from_f64(n as f64).unwrap())
24 | }
25 | }
26 | }
27 |
28 | pub fn convert_to_grpc_value(val: &DataValue) -> ValueType {
29 | match &val {
30 | &DataValue::Null => ValueType::NullValue(true),
31 | &DataValue::Uuid(u) => ValueType::UuidValue(u.to_string()),
32 | &DataValue::String(s) => ValueType::StringValue(s.clone()),
33 | &DataValue::Boolean(b) => ValueType::BoolValue(b.clone()),
34 | &DataValue::Number(n) => ValueType::NumberValue(n.as_f64().unwrap() as f32),
35 | }
36 | }
37 |
38 | pub fn find_database(
39 | internal_manager: &Arc,
40 | user_context: Arc,
41 | ) -> Result, Status> {
42 | let engine = internal_manager.clone().engine();
43 | let db_manager = engine.read();
44 | let user = user_context.get_user();
45 | match db_manager.find_by_name_ref(&user.scheme) {
46 | Some(db) => Ok(db.clone()),
47 | None => return Err(Status::not_found("Database not found")),
48 | }
49 | }
50 |
51 | pub fn grpc_query_val_to_sjs_value(val: GrpcQueryVal) -> QueryVal {
52 | QueryVal {
53 | key: val.key,
54 | filter_type: val.filter_type,
55 | value: convert_to_data_value(
56 | val.value
57 | .map(|i| i.value_type.unwrap_or_else(|| ValueType::NullValue(true)))
58 | .unwrap(),
59 | ),
60 | }
61 | }
62 |
// Recursively converts a gRPC query-operation tree into the engine's
// `QueryOps` AST. Any node with a missing inner operation makes the whole
// conversion fail with `Err(())`.
pub fn grpc_operation_to_sjs_op(operation: Operation) -> Result {
    match operation {
        Operation::AndOp(val) => Ok(QueryOps::And(
            val.ops
                .into_iter()
                .map(|e| grpc_operation_to_sjs_op(e.operation.ok_or(())?))
                .collect::, ()>>()?,
        )),
        Operation::OrOp(val) => Ok(QueryOps::Or(
            val.ops
                .into_iter()
                .map(|e| grpc_operation_to_sjs_op(e.operation.ok_or(())?))
                .collect::, ()>>()?,
        )),
        // Leaf node: a single column/value condition.
        Operation::Condition(val) => Ok(QueryOps::Condition(grpc_query_val_to_sjs_value(val))),
    }
}
80 |
81 | pub fn from_grpc_ops_to_sjs_ops(query_ops: GrpcQueryOps) -> Result {
82 | match query_ops.operation {
83 | None => Err(()),
84 | Some(op) => grpc_operation_to_sjs_op(op),
85 | }
86 | }
87 |
--------------------------------------------------------------------------------
/crates/grpc/src/utils/json.rs:
--------------------------------------------------------------------------------
// Converts a JSON object into a protobuf `Struct`, failing (`Err(())`) if any
// nested value cannot be represented (e.g. a non-f64 number).
// NOTE(review): generic parameters in this signature were stripped in this
// dump — confirm against the original source.
pub fn to_prost_struct(
    json: serde_json::Map,
) -> Result {
    let fields: Result<_, ()> = json
        .into_iter()
        .map(|(k, v)| serde_json_to_prost(v).map(|v| (k, v)))
        .collect();

    fields.map(|fields| prost_types::Struct { fields })
}
11 |
// Recursively converts a `serde_json::Value` into a `prost_types::Value`.
// Fails only for numbers that cannot be represented as f64.
pub fn serde_json_to_prost(json: serde_json::Value) -> Result {
    use prost_types::value::Kind::*;
    use serde_json::Value::*;

    let kind = match json {
        Null => Ok(NullValue(0)),
        Bool(v) => Ok(BoolValue(v)),
        Number(n) => n.as_f64().map(NumberValue).ok_or(()), // Return an error if the number can't be represented as f64
        String(s) => Ok(StringValue(s)),
        Array(v) => {
            // Short-circuits on the first element that fails to convert.
            let values: Result<_, ()> = v.into_iter().map(serde_json_to_prost).collect();
            values.map(|v| ListValue(prost_types::ListValue { values: v }))
        }
        Object(v) => to_prost_struct(v).map(StructValue),
    };

    kind.map(|k| prost_types::Value { kind: Some(k) })
}
30 |
// Inverse of `serde_json_to_prost`: converts a protobuf `Value` back into a
// `serde_json::Value`. Fails for NaN/infinite numbers, which JSON cannot hold.
pub fn prost_to_serde_json(x: prost_types::Value) -> Result {
    use prost_types::value::Kind::*;
    use serde_json::Value::*;

    match x.kind {
        Some(x) => match x {
            NullValue(_) => Ok(Null),
            BoolValue(v) => Ok(Bool(v)),
            NumberValue(n) => serde_json::Number::from_f64(n).map(Number).ok_or(()), // Return an error if `from_f64` returns None
            StringValue(s) => Ok(String(s)),
            ListValue(lst) => {
                // Short-circuits on the first element that fails to convert.
                let values: Result<_, ()> =
                    lst.values.into_iter().map(prost_to_serde_json).collect();
                values.map(Array)
            }
            StructValue(v) => {
                let fields: Result<_, ()> = v
                    .fields
                    .into_iter()
                    .map(|(k, v)| prost_to_serde_json(v).map(|v| (k, v)))
                    .collect();
                fields.map(Object)
            }
        },
        // An unset kind is treated as JSON null.
        None => Ok(Null),
    }
}
58 |
--------------------------------------------------------------------------------
/crates/grpc/src/utils/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod common;
2 | pub mod json;
3 |
--------------------------------------------------------------------------------
/crates/helpers/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "schemajs_helpers"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | authors.workspace = true
7 | resolver = "2"
8 |
9 | [dependencies]
10 | deno_core.workspace = true
11 | serde.workspace = true
12 | serde_json.workspace = true
13 | enum-as-inner.workspace = true
14 | uuid.workspace = true
15 | thiserror.workspace = true
16 | tokio.workspace = true
17 | r2d2.workspace = true
18 | dashmap.workspace = true
19 | schemajs_primitives = { version = "0.1.0", path = "../primitives" }
--------------------------------------------------------------------------------
/crates/helpers/src/helper.rs:
--------------------------------------------------------------------------------
1 | use dashmap::DashMap;
2 | use enum_as_inner::EnumAsInner;
3 | use serde::{Deserialize, Serialize};
4 | use serde_json::Value;
5 | use std::collections::HashMap;
6 | use std::sync::Arc;
7 | use tokio::sync::mpsc::UnboundedSender;
8 |
// Kinds of helpers user code can register.
#[derive(Serialize, Deserialize, EnumAsInner, Debug, Clone)]
pub enum HelperType {
    CustomQuery,
    InsertHook,
}

// A registered helper: a named JS function plus its kind.
// NOTE(review): the `v8::Global` payload type (presumably `v8::Function`)
// was stripped in this dump — confirm against the original source.
#[derive(Debug)]
pub struct Helper {
    pub identifier: String,
    pub internal_type: HelperType,
    pub func: deno_core::v8::Global,
}

// Shared helpers registered for one table.
#[derive(Debug, Default)]
pub struct SjsHelpersContainer(pub Vec>);

impl SjsHelpersContainer {
    // Wraps an existing list of helpers.
    pub fn new(data: Vec>) -> Self {
        Self(data)
    }
}
30 |
31 | /// DashMap>
32 | pub struct SjsTableHelpers(pub DashMap>);
33 |
34 | impl SjsTableHelpers {
35 | pub fn find_custom_query_helper(
36 | &self,
37 | db_name: &str,
38 | table: &str,
39 | identifier: &str,
40 | ) -> Option> {
41 | match self.0.get(db_name) {
42 | None => None,
43 | Some(val) => {
44 | let helper = val.get(table).map(|e| {
45 | e.0.iter()
46 | .find(|e| e.identifier == identifier)
47 | .map(|e| e.clone())
48 | });
49 |
50 | helper.unwrap_or_else(|| None)
51 | }
52 | }
53 | }
54 |
55 | pub fn find_hook_helper(
56 | &self,
57 | db_name: &str,
58 | table: &str,
59 | hook: HelperType,
60 | ) -> Option>> {
61 | match self.0.get(db_name) {
62 | None => None,
63 | Some(val) => match hook {
64 | HelperType::InsertHook => {
65 | let helper: Option>> = val.get(table).map(|e| {
66 | e.0.iter()
67 | .filter(|e| e.internal_type.is_insert_hook())
68 | .map(|e| e.clone())
69 | .collect()
70 | });
71 |
72 | helper
73 | }
74 | _ => None,
75 | },
76 | }
77 | }
78 | }
79 |
// Database/table scope a helper call executes against.
#[derive(Debug, Clone)]
pub struct HelperDbContext {
    pub db: Option,
    pub table: Option,
}

// Messages sent to the helper runtime.
// NOTE(review): generic parameters were stripped in this dump (the sender
// and `rows` payloads presumably carry `serde_json::Value`); confirm before
// relying on this documentation.
#[derive(EnumAsInner, Debug, Clone)]
pub enum HelperCall {
    // Invoke a registered custom-query helper; the JSON result is sent back
    // through `response`.
    CustomQuery {
        db_ctx: HelperDbContext,
        identifier: String,
        req: Value,
        response: UnboundedSender,
    },
    // Fire insert hooks with the rows that were just written.
    InsertHook {
        db_ctx: HelperDbContext,
        rows: Vec,
    },
}
99 |
--------------------------------------------------------------------------------
/crates/helpers/src/js/helper.ts:
--------------------------------------------------------------------------------
1 | export enum HelperType {
2 | CustomQuery = "CustomQuery",
3 | InsertHook = "InsertHook"
4 | }
5 |
6 | export type HelperCbType = (...args: any[]) => any;
7 |
8 | export class Helper {
9 |
10 | public identifier: string = "";
11 | public internalType: HelperType;
12 | public cb: HelperCbType;
13 |
14 | constructor(identifier: string, internalType: HelperType, cb: HelperCbType) {
15 | if(identifier) {
16 | this.identifier = identifier;
17 | }
18 | this.internalType = internalType;
19 | this.cb = cb;
20 | }
21 |
22 | }
--------------------------------------------------------------------------------
/crates/helpers/src/lib.rs:
--------------------------------------------------------------------------------
1 | use crate::helper::{HelperCall, SjsHelpersContainer};
2 | use tokio::sync::mpsc;
3 | use tokio::sync::mpsc::{Receiver, Sender};
4 |
5 | pub mod helper;
6 |
// Deno extension that ships the TS helper shim and seeds an empty helper
// container into the op state.
deno_core::extension!(
    sjs_helpers,
    esm = ["src/js/helper.ts",],
    state = |state| {
        state.put(SjsHelpersContainer(vec![]));
    }
);

// Builds the bounded channel used to submit helper calls to the runtime.
// NOTE(review): generic parameters were stripped in this dump — the channel
// presumably carries `HelperCall`; confirm against the original source.
pub fn create_helper_channel(
    max_helper_processing_capacity: usize,
) -> (Sender, Receiver) {
    mpsc::channel::(max_helper_processing_capacity)
}
20 |
--------------------------------------------------------------------------------
/crates/index/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "schemajs_index"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | authors.workspace = true
7 | resolver = "2"
8 |
9 | [dependencies]
10 | serde.workspace = true
11 | anyhow.workspace = true
12 | enum-as-inner.workspace = true
13 | schemajs_data = { version = "0.1.0", path = "../data" }
14 | sha2.workspace = true
15 | tokio.workspace = true
16 | tempfile.workspace = true
17 | uuid.workspace = true
18 | rand.workspace = true
19 | parking_lot.workspace = true
--------------------------------------------------------------------------------
/crates/index/src/composite_key.rs:
--------------------------------------------------------------------------------
// Ordered (column, value) string pairs identifying a row; hashed together to
// form an index key (see `IndexKeySha256::from(CompositeKey)`).
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct CompositeKey(pub Vec<(String, String)>);
3 |
--------------------------------------------------------------------------------
/crates/index/src/data/index_data_unit.rs:
--------------------------------------------------------------------------------
1 | use crate::errors::IndexError;
2 | use schemajs_data::data_handler::DataHandler;
3 | use schemajs_data::U64_SIZE;
4 |
// Length-prefixed byte payload as stored inside an index shard.
pub struct IndexDataUnit {
    // Number of bytes in `data` (serialized as a little-endian u64 header).
    pub item_size: u64,
    // The raw payload bytes.
    pub data: Vec,
}
9 |
10 | impl IndexDataUnit {
11 | pub fn new(data: Vec) -> Self {
12 | Self {
13 | item_size: data.len() as u64,
14 | data,
15 | }
16 | }
17 |
18 | pub fn from_data_handler(offset: u64, data_handler: &DataHandler) -> Option {
19 | let mut index = IndexDataUnit::new(vec![]);
20 | let item_size = data_handler.read_pointer(offset, U64_SIZE);
21 |
22 | if let Some(get_item_size) = item_size {
23 | let item_size_bytes: [u8; 8] = get_item_size.try_into().unwrap();
24 | index.item_size = u64::from_le_bytes(item_size_bytes);
25 |
26 | let read_data = {
27 | data_handler
28 | .read_pointer(offset + U64_SIZE as u64, index.item_size as usize)
29 | .unwrap()
30 | };
31 |
32 | index.data = read_data;
33 |
34 | return Some(index);
35 | }
36 |
37 | None
38 | }
39 |
40 | pub fn header_size() -> usize {
41 | U64_SIZE // item_size
42 | }
43 | }
44 |
impl TryFrom<&[u8]> for IndexDataUnit {
    type Error = IndexError;

    // Parses a unit from a byte slice: u64 LE size header followed by that
    // many payload bytes. Fails when the slice is too short for either part.
    fn try_from(data: &[u8]) -> Result {
        let mut unit = IndexDataUnit::new(vec![]);

        let item_size = data.get(0..U64_SIZE);
        return match item_size {
            None => Err(Self::Error::UnrecognizedItemSize),
            Some(size) => {
                // The slice is exactly U64_SIZE bytes here, so the conversion
                // to [u8; 8] cannot fail.
                let item_size_bytes: [u8; 8] = size.try_into().unwrap();
                unit.item_size = u64::from_le_bytes(item_size_bytes);

                if let Some(data) = data.get(U64_SIZE..(U64_SIZE + unit.item_size as usize)) {
                    unit.data = data.to_vec();
                    Ok(unit)
                } else {
                    Err(Self::Error::InvalidItem)
                }
            }
        };
    }
}
68 |
69 | impl Into> for IndexDataUnit {
70 | fn into(self) -> Vec {
71 | let mut entry = vec![];
72 |
73 | entry.extend(self.item_size.to_le_bytes());
74 | entry.extend(self.data.as_slice());
75 |
76 | entry
77 | }
78 | }
79 |
--------------------------------------------------------------------------------
/crates/index/src/data/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod index_data_unit;
2 | pub mod index_shard;
3 |
--------------------------------------------------------------------------------
/crates/index/src/errors.rs:
--------------------------------------------------------------------------------
use enum_as_inner::EnumAsInner;

// Failure modes when decoding an `IndexDataUnit` from raw bytes.
#[derive(Debug, Clone, EnumAsInner)]
pub enum IndexError {
    // The byte slice was too short to contain the u64 size header.
    UnrecognizedItemSize,
    // The declared payload length exceeded the available bytes.
    InvalidItem,
}
8 |
--------------------------------------------------------------------------------
/crates/index/src/implementations/hash/hash_index_header.rs:
--------------------------------------------------------------------------------
// Fixed on-disk layout of one hash-index entry: a 64-byte key slot followed
// by an 8-byte (u64) row-position value.
pub const HASH_INDEX_KEY_SIZE: usize = 64;
pub const HASH_INDEX_VALUE_SIZE: usize = 8;
pub const HASH_INDEX_TOTAL_ENTRY_SIZE: usize = HASH_INDEX_KEY_SIZE + HASH_INDEX_VALUE_SIZE;
4 |
--------------------------------------------------------------------------------
/crates/index/src/implementations/hash/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod hash_index;
2 | mod hash_index_header;
3 |
--------------------------------------------------------------------------------
/crates/index/src/implementations/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod hash;
2 |
--------------------------------------------------------------------------------
/crates/index/src/index_keys.rs:
--------------------------------------------------------------------------------
use crate::keys::index_key_sha256::IndexKeySha256;
use crate::keys::string_index::StringIndexKey;
use enum_as_inner::EnumAsInner;

// Closed set of key encodings an index can store.
#[derive(Debug, Clone, EnumAsInner)]
pub enum IndexKeyType {
    Sha256(IndexKeySha256),
    String(StringIndexKey),
}
10 |
--------------------------------------------------------------------------------
/crates/index/src/index_type.rs:
--------------------------------------------------------------------------------
use crate::implementations::hash::hash_index::HashIndex;
use crate::types::{Index, IndexKey};
use enum_as_inner::EnumAsInner;
use serde::{Deserialize, Serialize};

// Serializable tag naming which index implementation a column uses.
#[derive(Debug, EnumAsInner, Clone, PartialEq, Serialize, Deserialize)]
pub enum IndexType {
    Hash,
}

// A concrete index instance.
#[derive(Debug)]
pub enum IndexTypeValue {
    Hash(HashIndex),
}

impl IndexTypeValue {
    // Borrows the underlying implementation as a trait object.
    // NOTE(review): `Box<&dyn Index>` is a double indirection; a plain
    // `&dyn Index` would likely suffice — changing it would alter the public
    // return type, so it is only flagged here.
    pub fn as_index(&self) -> Box<&dyn Index> {
        match self {
            IndexTypeValue::Hash(indx) => Box::new(indx),
        }
    }
}
23 |
--------------------------------------------------------------------------------
/crates/index/src/keys/index_key_sha256.rs:
--------------------------------------------------------------------------------
1 | use crate::composite_key::CompositeKey;
2 | use crate::data::index_data_unit::IndexDataUnit;
3 | use crate::types::IndexKey;
4 | use schemajs_data::utils::hash::{sha256_to_string, to_sha256};
5 | use sha2::{Digest, Sha256};
6 | use std::cmp::Ordering;
7 |
// A key stored as the hex string of a SHA-256 digest.
#[derive(Debug)]
pub struct IndexKeySha256 {
    hash: String,
}

impl IndexKeySha256 {
    // Returns the hex digest.
    // NOTE(review): this inherent `to_string` shadows the `Display`-based
    // one; consider implementing `Display` instead.
    pub fn to_string(&self) -> String {
        self.hash.clone()
    }
}

// Decodes a key previously serialized into an index data unit.
// NOTE(review): generic parameters on these conversion impls were stripped
// in this dump — confirm against the original source.
impl From for IndexKeySha256 {
    fn from(value: IndexDataUnit) -> Self {
        IndexKeySha256 {
            hash: String::from_utf8(value.data).unwrap(),
        }
    }
}

// Hashes all (column, value) pairs of a composite key into one digest.
impl From for IndexKeySha256 {
    fn from(value: CompositeKey) -> Self {
        let mut vec = Vec::new();

        for (key, val) in value.0 {
            vec.extend(key.into_bytes());
            vec.extend(val.into_bytes());
        }

        let hash = to_sha256(vec);

        IndexKeySha256 {
            hash: sha256_to_string(hash.to_vec()),
        }
    }
}

// Interprets raw bytes as material to hash into a digest string.
impl From> for IndexKeySha256 {
    fn from(value: Vec) -> Self {
        IndexKeySha256 {
            hash: sha256_to_string(value),
        }
    }
}

// Serializes the key as its hex-string bytes.
impl Into> for IndexKeySha256 {
    fn into(self) -> Vec {
        self.hash.into_bytes()
    }
}
57 |
58 | impl Ord for IndexKeySha256 {
59 | fn cmp(&self, other: &Self) -> Ordering {
60 | self.hash.cmp(&other.hash)
61 | }
62 | }
63 |
64 | impl Eq for IndexKeySha256 {}
65 |
66 | impl PartialEq for IndexKeySha256 {
67 | fn eq(&self, other: &Self) -> bool {
68 | self.hash.eq(&other.hash)
69 | }
70 | }
71 |
72 | impl PartialOrd for IndexKeySha256 {
73 | fn partial_cmp(&self, other: &Self) -> Option {
74 | self.hash.partial_cmp(&other.hash)
75 | }
76 | }
77 |
78 | impl Clone for IndexKeySha256 {
79 | fn clone(&self) -> Self {
80 | IndexKeySha256 {
81 | hash: self.hash.clone(),
82 | }
83 | }
84 | }
85 |
86 | impl Into for IndexKeySha256 {
87 | fn into(self) -> String {
88 | self.hash
89 | }
90 | }
91 |
92 | impl IndexKey for IndexKeySha256 {}
93 |
--------------------------------------------------------------------------------
/crates/index/src/keys/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod index_key_sha256;
2 | pub mod string_index;
3 |
--------------------------------------------------------------------------------
/crates/index/src/keys/string_index.rs:
--------------------------------------------------------------------------------
1 | use crate::data::index_data_unit::IndexDataUnit;
2 | use crate::types::IndexKey;
3 | use std::cmp::Ordering;
4 |
// A plain UTF-8 string index key.
#[derive(Debug)]
pub struct StringIndexKey(pub String);

// Decodes raw bytes as UTF-8 (panics on invalid UTF-8).
// NOTE(review): generic parameters on these conversion impls were stripped
// in this dump — confirm against the original source.
impl From> for StringIndexKey {
    fn from(value: Vec) -> Self {
        StringIndexKey(String::from_utf8(value).unwrap())
    }
}

// Serializes the key as its UTF-8 bytes.
impl Into> for StringIndexKey {
    fn into(self) -> Vec {
        self.0.into_bytes()
    }
}
19 |
20 | impl Ord for StringIndexKey {
21 | fn cmp(&self, other: &Self) -> Ordering {
22 | self.0.cmp(&other.0)
23 | }
24 | }
25 |
26 | impl Eq for StringIndexKey {}
27 |
28 | impl PartialEq for StringIndexKey {
29 | fn eq(&self, other: &Self) -> bool {
30 | self.0.eq(&other.0)
31 | }
32 | }
33 |
34 | impl PartialOrd for StringIndexKey {
35 | fn partial_cmp(&self, other: &Self) -> Option {
36 | self.0.partial_cmp(&other.0)
37 | }
38 | }
39 |
40 | impl Clone for StringIndexKey {
41 | fn clone(&self) -> Self {
42 | StringIndexKey(self.0.clone())
43 | }
44 | }
45 |
46 | impl Into for StringIndexKey {
47 | fn into(self) -> String {
48 | self.0
49 | }
50 | }
51 |
52 | impl From for StringIndexKey {
53 | fn from(value: IndexDataUnit) -> Self {
54 | StringIndexKey(String::from_utf8(value.data).unwrap())
55 | }
56 | }
57 |
58 | impl IndexKey for StringIndexKey {}
59 |
--------------------------------------------------------------------------------
/crates/index/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod composite_key;
2 | pub mod data;
3 | pub mod errors;
4 | pub mod implementations;
5 | pub mod index_keys;
6 | pub mod index_type;
7 | pub mod keys;
8 | pub mod types;
9 | pub mod utils;
10 | pub mod vals;
11 |
--------------------------------------------------------------------------------
/crates/index/src/types.rs:
--------------------------------------------------------------------------------
1 | use crate::composite_key::CompositeKey;
2 | use crate::data::index_data_unit::IndexDataUnit;
3 | use crate::index_keys::IndexKeyType;
4 | use std::fmt::Debug;
5 |
6 | pub trait IndexKey:
7 | From> + Into> + Ord + Clone + Into + From
8 | {
9 | }
10 |
11 | pub trait IndexValue: From> + Into> + Clone + From {}
12 |
13 | pub trait Index: Debug {
14 | fn to_key(&self, key: CompositeKey) -> IndexKeyType;
15 |
16 | fn bulk_insert(&self, data: Vec<(IndexKeyType, u64)>);
17 |
18 | fn insert(&self, key: IndexKeyType, row_position: u64);
19 |
20 | fn get(&self, key: &IndexKeyType) -> Option;
21 |
22 | fn remove(&mut self, key: &IndexKeyType) -> Option;
23 |
24 | fn supported_search_operators(&self) -> Vec;
25 | }
26 |
--------------------------------------------------------------------------------
/crates/index/src/utils/mod.rs:
--------------------------------------------------------------------------------
1 | use crate::data::index_data_unit::IndexDataUnit;
2 |
3 | pub fn get_entry_size(key_size: usize, value_size: usize) -> usize {
4 | let entry_data_size = {
5 | // The data of an entry is made of 2 IndexDataUnit (Key, Value)
6 | // + The value of the key and the size.
7 | let key_size = IndexDataUnit::header_size() + key_size;
8 | let value_size = IndexDataUnit::header_size() + value_size;
9 | key_size + value_size
10 | };
11 | IndexDataUnit::header_size() + entry_data_size
12 | }
13 |
--------------------------------------------------------------------------------
/crates/index/src/vals/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod raw_value;
2 |
--------------------------------------------------------------------------------
/crates/index/src/vals/raw_value.rs:
--------------------------------------------------------------------------------
1 | use crate::data::index_data_unit::IndexDataUnit;
2 | use crate::types::IndexValue;
3 |
4 | #[derive(Debug)]
5 | pub struct RawIndexValue(pub Vec);
6 |
7 | impl From> for RawIndexValue {
8 | fn from(value: Vec) -> Self {
9 | RawIndexValue(value)
10 | }
11 | }
12 |
13 | impl Into> for RawIndexValue {
14 | fn into(self) -> Vec {
15 | self.0
16 | }
17 | }
18 |
19 | impl Clone for RawIndexValue {
20 | fn clone(&self) -> Self {
21 | RawIndexValue(self.0.clone())
22 | }
23 | }
24 |
25 | impl From for RawIndexValue {
26 | fn from(value: IndexDataUnit) -> Self {
27 | RawIndexValue(value.data)
28 | }
29 | }
30 |
31 | impl IndexValue for RawIndexValue {}
32 |
--------------------------------------------------------------------------------
/crates/internal/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "schemajs_internal"
3 | version.workspace = true
4 | edition.workspace = true
5 | license.workspace = true
6 | authors.workspace = true
7 | resolver = "2"
8 |
9 | [dependencies]
10 | schemajs_primitives = { version = "0.1.0", path = "../primitives" }
11 | serde.workspace = true
12 | schemajs_engine = { path = "../engine" }
13 | schemajs_query = { path = "../query" }
14 | schemajs_config = { path = "../config" }
15 | serde_json.workspace = true
16 | bcrypt.workspace = true
17 | uuid.workspace = true
18 | dashmap.workspace = true
19 | parking_lot.workspace = true
--------------------------------------------------------------------------------
/crates/internal/src/auth/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod auth_manager;
2 | pub mod types;
3 |
--------------------------------------------------------------------------------
/crates/internal/src/auth/types.rs:
--------------------------------------------------------------------------------
1 | use crate::users::user::User;
2 | use serde::{Deserialize, Serialize};
3 | use std::time::SystemTime;
4 |
5 | #[derive(Serialize, Deserialize)]
6 | pub struct VerifyUserArgs {
7 | pub scheme_name: String,
8 | pub identifier: String,
9 | pub password: String,
10 | }
11 |
12 | pub struct UserContext {
13 | user: User,
14 | authenticated_at: SystemTime,
15 | last_query_at: Option,
16 | }
17 |
18 | impl UserContext {
19 | pub fn new(user: User) -> Self {
20 | Self {
21 | user,
22 | authenticated_at: SystemTime::now(),
23 | last_query_at: None,
24 | }
25 | }
26 |
27 | pub fn get_user(&self) -> &User {
28 | &self.user
29 | }
30 |
31 | pub fn get_authenticated_at(&self) -> &SystemTime {
32 | &self.authenticated_at
33 | }
34 |
35 | pub fn get_last_query_at(&self) -> &Option {
36 | &self.last_query_at
37 | }
38 |
39 | pub fn log_query(&mut self) -> SystemTime {
40 | let time = SystemTime::now();
41 | self.last_query_at = Some(time.clone());
42 |
43 | time
44 | }
45 | }
46 |
--------------------------------------------------------------------------------
/crates/internal/src/lib.rs:
--------------------------------------------------------------------------------
1 | use schemajs_primitives::table::Table;
2 |
3 | pub mod auth;
4 | pub mod manager;
5 | pub mod users;
6 |
7 | pub fn get_internal_tables() -> Vec {
8 | vec![(&*users::user::INTERNAL_USER_TABLE).clone()]
9 | }
10 |
--------------------------------------------------------------------------------
/crates/internal/src/manager.rs:
--------------------------------------------------------------------------------
1 | use crate::auth::auth_manager::AuthManager;
2 | use crate::get_internal_tables;
3 | use parking_lot::RwLock;
4 | use schemajs_config::SchemeJsConfig;
5 | use schemajs_engine::engine::SchemeJsEngine;
6 | use std::sync::Arc;
7 |
8 | pub struct InternalManager {
9 | _engine: Arc>,
10 | auth_manager: Arc,
11 | }
12 |
13 | impl InternalManager {
14 | pub fn new(engine: Arc