├── .github ├── FUNDING.yml ├── dependabot.yml └── workflows │ ├── dependabot.yaml │ └── release-on-push.yaml ├── LICENSE ├── README.md ├── example-go-app ├── fun.go └── fun_test.go ├── go.mod ├── go.sum ├── internal └── unquote.go ├── key.go ├── key_test.go ├── kv ├── bench_test.go ├── cmd │ └── kv │ │ ├── delete-history.go │ │ ├── diff.go │ │ ├── main.go │ │ ├── merge.go │ │ ├── remove-tombstones.go │ │ ├── set.go │ │ ├── show.go │ │ ├── tombstone.go │ │ └── trace-history.go ├── crdt │ └── value.go ├── crypto.go ├── crypto_test.go ├── encode_gob.go ├── example_test.go ├── internal │ └── crdt │ │ ├── crdt.go │ │ └── crdt_test.go ├── kv.go └── kv_test.go ├── open.go ├── proto ├── buf.gen.yaml ├── buf.yaml ├── generate.sh └── v1 │ ├── node.pb.go │ └── node.proto ├── release.sh ├── row.go ├── sql ├── colval │ ├── colval.go │ └── colval_test.go ├── expr.go ├── parse.go ├── parse │ └── parse.go └── types │ └── types.go ├── sqlite ├── Makefile ├── s3db_changes.go ├── s3db_changes_test.go ├── s3db_conn.go ├── s3db_refresh.go ├── s3db_version.go ├── sharedlib │ └── ext.go ├── sqlite-autoload-extension │ └── auto.go ├── staticlib │ └── ext.go ├── vacuum.go ├── vtable.go └── vtable_test.go ├── test └── vtable.go ├── vtable_common.go ├── vtable_common_test.go └── writetime └── context.go /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | 2 | github: [jrhy] 3 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "gomod" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "weekly" 12 | - package-ecosystem: "github-actions" 13 | directory: "/" 14 | schedule: 15 | interval: "weekly" 16 | -------------------------------------------------------------------------------- /.github/workflows/dependabot.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | pull_request: 3 | branches: [ main ] 4 | 5 | permissions: 6 | contents: write 7 | pull-requests: write 8 | 9 | jobs: 10 | release: 11 | runs-on: ubuntu-24.04 12 | name: Auto-approve dependabot PRs for minor or test-only dependency upgrades 13 | steps: 14 | - name: Install sqlite headers and cross-compilation packages 15 | run: sudo apt-get install -y libsqlite3-dev qemu-user libc6-armhf-cross libc6-arm64-cross libc6-amd64-cross 16 | - uses: actions/checkout@v4 17 | - uses: actions/setup-go@v5 18 | with: 19 | go-version: ^1.22.0 20 | - uses: goto-bus-stop/setup-zig@v2 21 | - run: ./release.sh 22 | - uses: actions/upload-artifact@v4 23 | with: 24 | name: release 25 | path: release/* 26 | - name: Dependabot metadata 27 | if: ${{ github.event.pull_request.user.login == 'dependabot[bot]' }} 28 | id: dependabot-metadata 29 | uses: dependabot/fetch-metadata@v2.4.0 30 | - name: Enable auto-merge for Dependabot PRs 31 | if: ${{ github.event.pull_request.user.login == 'dependabot[bot]' }} 32 | run: gh pr merge --auto --rebase "$PR_URL" 33 | env: 34 | PR_URL: ${{github.event.pull_request.html_url}} 35 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} 36 | - name: Approve patch and minor updates for Dependabot PRs 37 | if: ${{ (github.event.pull_request.user.login == 'dependabot[bot]') && 38 | 
((steps.dependabot-metadata.outputs.update-type == 'version-update:semver-patch') || (steps.dependabot-metadata.outputs.update-type == 'version-update:semver-minor')) }} 39 | run: gh pr review $PR_URL --approve -b "I'm **approving** this pull request because **it includes a patch or minor update**" 40 | env: 41 | PR_URL: ${{github.event.pull_request.html_url}} 42 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} 43 | - name: Approve major updates of development dependencies for Dependabot PRs 44 | if: ${{ (github.event.pull_request.user.login == 'dependabot[bot]') && 45 | (steps.dependabot-metadata.outputs.update-type == 'version-update:semver-major') && (steps.dependabot-metadata.outputs.dependency-type == 'direct:development') }} 46 | run: gh pr review $PR_URL --approve -b "I'm **approving** this pull request because **it includes a major update of a dependency used only in development**" 47 | env: 48 | PR_URL: ${{github.event.pull_request.html_url}} 49 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} 50 | - name: Comment on major updates of non-development dependencies for Dependabot PRs 51 | if: ${{ (github.event.pull_request.user.login == 'dependabot[bot]') && 52 | (steps.dependabot-metadata.outputs.update-type == 'version-update:semver-major') && (steps.dependabot-metadata.outputs.dependency-type == 'direct:production') }} 53 | run: | 54 | gh pr comment $PR_URL --body "I'm **not approving** this PR because **it includes a major update of a dependency**" 55 | gh pr edit $PR_URL --add-label "requires-manual-qa" 56 | env: 57 | PR_URL: ${{github.event.pull_request.html_url}} 58 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} 59 | 60 | -------------------------------------------------------------------------------- /.github/workflows/release-on-push.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [ main ] 4 | 5 | jobs: 6 | release: 7 | runs-on: ubuntu-24.04 8 | name: Release 9 | steps: 10 | - name: Install sqlite 
headers and cross-compilation packages 11 | run: sudo apt-get install -y libsqlite3-dev qemu-user libc6-armhf-cross libc6-arm64-cross libc6-amd64-cross 12 | - uses: actions/checkout@v4 13 | - uses: actions/setup-go@v5 14 | with: 15 | go-version: ^1.22.0 16 | - uses: goto-bus-stop/setup-zig@v2 17 | - run: ./release.sh 18 | - uses: actions/upload-artifact@v4 19 | with: 20 | name: release 21 | path: release/* 22 | - name: Tag new releases 23 | id: tag 24 | uses: anothrNick/github-tag-action@1.73.0 # Don't use @master unless you're happy to test the latest version 25 | env: 26 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 27 | DEFAULT_BUMP: patch 28 | WITH_V: true 29 | - name: add version number to release files 30 | run: | 31 | mv release/s3db-linux-amd64-glibc.sqlite-ext.so.gz release/s3db-${{ steps.tag.outputs.tag }}-linux-amd64-glibc.sqlite-ext.so.gz 32 | mv release/s3db-linux-arm-glibc.sqlite-ext.so.gz release/s3db-${{ steps.tag.outputs.tag }}-linux-arm-glibc.sqlite-ext.so.gz 33 | mv release/s3db-linux-arm64-glibc.sqlite-ext.so.gz release/s3db-${{ steps.tag.outputs.tag }}-linux-arm64-glibc.sqlite-ext.so.gz 34 | - name: Create Release 35 | uses: "marvinpinto/action-automatic-releases@v1.2.1" 36 | with: 37 | repo_token: "${{ secrets.GITHUB_TOKEN }}" 38 | automatic_release_tag: ${{ steps.tag.outputs.tag }} 39 | prerelease: false 40 | files: | 41 | release/s3db-${{ steps.tag.outputs.tag }}-linux-amd64-glibc.sqlite-ext.so.gz 42 | release/s3db-${{ steps.tag.outputs.tag }}-linux-arm-glibc.sqlite-ext.so.gz 43 | release/s3db-${{ steps.tag.outputs.tag }}-linux-arm64-glibc.sqlite-ext.so.gz 44 | 45 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 jrhy 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to 
deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | s3db is a SQLite extension that stores tables in an S3-compatible object store. 2 | 3 | What's New? 4 | =========== 5 | 6 | v0.1.35 adds the `s3db_conn` table for per-connection `deadline` and `write_time` attributes. 7 | 8 | v0.1.33 adds s3db_changes() and s3db_version() 9 | 10 | Getting Started 11 | =============== 12 | 13 | Check that your sqlite can load extensions. 14 | ``` 15 | $ sqlite3 16 | SQLite version 3.49.1 2025-02-18 13:38:58 17 | Enter ".help" for usage hints. 18 | Connected to a transient in-memory database. 19 | Use ".open FILENAME" to reopen on a persistent database. 20 | sqlite> .dbconfig load_extension 21 | load_extension on 22 | ``` 23 | If you see `load_extension off`, build yourself 24 | [sqlite with default configuration](https://github.com/sqlite/sqlite). 
25 | 26 | ``` 27 | sqlite> .open mydb.sqlite 28 | sqlite> .load ./s3db 29 | sqlite> create virtual table mytable using s3db ( 30 | ``` 31 | Specify columns with constraints like you would a regular CREATE 32 | TABLE. Note that, as yet, s3db only uses the column names and 33 | PRIMARY KEY; type affinity could be added later. 34 | ``` 35 | columns='id PRIMARY KEY, name, email', 36 | ... 37 | ``` 38 | Specify the S3 parameters. Omit the `s3_endpoint` if using AWS, or 39 | the `s3_prefix` if you don't plan to distinguish multiple tables 40 | in the same bucket. Omit everything if you just want to fiddle with 41 | a temporary bucket. 42 | ``` 43 | ... 44 | s3_bucket='mybucket', 45 | s3_endpoint='https://my-s3-server-if-not-using-aws', 46 | s3_prefix='mydb', 47 | ... 48 | ``` 49 | We are willing to cache 1000 nodes in memory. 50 | ``` 51 | ... 52 | node_cache_entries=1000); 53 | ``` 54 | Once created, tables remain part of the database and don't need to be recreated. 55 | Of course, they can be mixed with regular tables. 56 | 57 | Add some data: 58 | ``` 59 | sqlite> insert into mytable values (1,'jeff','old_address@example.org'); 60 | ``` 61 | 62 | ``` 63 | $ sqlite3 mydb.sqlite -cmd '.load ./s3db' 64 | SQLite version 3.49.1 2025-02-18 13:38:58 65 | Enter ".help" for usage hints. 
66 | sqlite> select * from mytable; 67 | ┌────┬──────┬─────────────────────────┐ 68 | │ id │ name │ email │ 69 | ├────┼──────┼─────────────────────────┤ 70 | │ 1 │ jeff │ old_address@example.org │ 71 | └────┴──────┴─────────────────────────┘ 72 | ``` 73 | 74 | Tracking Changes 75 | ================ 76 | 77 | Continuing the example from above, use `s3db_changes` to see what new rows were 78 | added between two versions: 79 | 80 | ``` 81 | sqlite> select s3db_version('mytable'); 82 | ["1R2q4B_YS3HgUbGS9ycQWgP"] 83 | sqlite> insert into mytable values (2,'joe','joe@example.org'); 84 | sqlite> update mytable set email='new_address@example.org' where id=1; 85 | sqlite> select s3db_version('mytable'); 86 | ["1R2q4B_N9x6HReYysstSo9I"] 87 | sqlite> create virtual table additions using s3db_changes (table='mytable', from='["1R2q4B_YS3HgUbGS9ycQWgP"]', to='["1R2q4B_N9x6HReYysstSo9I"]'); 88 | sqlite> select * from additions; 89 | ┌────┬──────┬─────────────────────────┐ 90 | │ id │ name │ email │ 91 | ├────┼──────┼─────────────────────────┤ 92 | │ 1 │ jeff │ new_address@example.org │ 93 | │ 2 │ joe │ joe@example.org │ 94 | └────┴──────┴─────────────────────────┘ 95 | sqlite> drop table additions; 96 | ``` 97 | Flip the `from` and `to` to see what rows were removed between the two versions: 98 | ``` 99 | sqlite> create virtual table removals using s3db_changes (table='mytable', from='["1R2q4B_N9x6HReYysstSo9I"]', to='["1R2q4B_YS3HgUbGS9ycQWgP"]'); 100 | sqlite> select * from removals; 101 | ┌────┬──────┬─────────────────────────┐ 102 | │ id │ name │ email │ 103 | ├────┼──────┼─────────────────────────┤ 104 | │ 1 │ jeff │ old_address@example.org │ 105 | └────┴──────┴─────────────────────────┘ 106 | sqlite> drop table removals; 107 | ``` 108 | 109 | Performance 110 | =========== 111 | Use transactions (BEGIN TRANSACTION, INSERT..., COMMIT) to include 112 | multiple rows per table version. 
113 | 114 | Multiple Writers 115 | ================ 116 | Multiple writers can commit modifications from the same version. 117 | The next reader will automatically merge both new versions. If there 118 | are any conflicting rows, the winner is chosen by "last write wins", 119 | on a per-column basis. Additionally, for any row, a DELETE supersedes 120 | all UPDATEs with a later row modification time, until another INSERT 121 | for the same key. In order to facilitate this, deletions consume 122 | space until vacuumed. 123 | 124 | Each writer can set its `write_time` corresponding to the ideal 125 | time that the column values are to be affected, which can be useful 126 | for idempotence. For example, `write_time` could rewind to the 127 | original request time to avoid undoing later updates in retries. 128 | 129 | Building from Source 130 | ==================== 131 | Requires Go >=1.22. 132 | ``` 133 | go vet ./... 134 | go generate ./sqlite/sharedlib 135 | go test ./... 136 | ``` 137 | produces the extension as `sqlite/sharedlib/s3db.so` (or 138 | `sqlite/sharedlib/s3db.dylib` on macOS). 139 | 140 | Caveats 141 | ======= 142 | * Each transaction may make a new version. Use the `s3db_vacuum()` 143 | function to free up space from versions older than a certain time, e.g.: 144 | ```select * from s3db_vacuum('mytable', datetime('now','-7 days'))``` 145 | * Using `s3db` inside Go programs is best done using `github.com/jrhy/s3db/sqlite/mod`. 146 | See [example-go-app/](example-go-app/) for an example. 147 | 148 | Function Reference 149 | ================== 150 | `s3db_refresh(`*tablename*`)` reopens a table to show updates from 151 | other writers. 152 | 153 | `s3db_version(`*tablename*`)` returns a list of the versions merged to 154 | form the current table. 155 | 156 | Table-Valued Function Reference 157 | =============================== 158 | `s3db_vacuum(`*tablename*`,`*before-timestamp*`)` removes versions older 159 | than *before-timestamp*, e.g. 
160 | `select * from s3db_vacuum('mytable', datetime('now','-7 days'))`. 161 | 162 | Virtual Table Reference 163 | ======================= 164 | CREATE VIRTUAL TABLE *tablename* USING s3db() arguments: 165 | * `columns='<colname> [primary key], ...',` columns and constraints 166 | * (optional) `entries_per_node=<N>,` the number of rows to store per S3 object (defaults to 4096) 167 | * (optional) `node_cache_entries=<N>,` number of nodes to cache in memory (defaults to 0) 168 | * (optional) `readonly,` don't write to S3 169 | * (optional) `s3_bucket='mybucket',` defaults to in-memory bucket 170 | * (optional) `s3_endpoint='https://minio.example.com',` S3 endpoint, if not using AWS 171 | * (optional) `s3_prefix='/prefix',` separate tables within a bucket 172 | 173 | CREATE VIRTUAL TABLE *tablename* USING s3db_changes() arguments: 174 | * `table=`*tablename*, the s3db table to show changes of. Must be loaded already. 175 | * `from=`*from-version*, the version to show changes since. Should be a previous result from `s3db_version()`. 176 | * (optional) `to=`*to-version*, the version to show changes until. Defaults to the current version. 177 | 178 | `s3db_conn` sets per-connection attributes with the following columns: 179 | * `deadline` - the timestamp after which network operations will be cancelled (defaults to forever), e.g. 
180 | `update s3db_conn set deadline=datetime('now','+3 seconds')` 181 | * `write_time` - value modification timestamp, for resolving updates to the same column in a row with 182 | "last-write wins" strategy idempotently 183 | 184 | -------------------------------------------------------------------------------- /example-go-app/fun.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "database/sql" 5 | "errors" 6 | "fmt" 7 | 8 | // register s3db extension with riyazali.net/sqlite 9 | _ "github.com/jrhy/s3db/sqlite" 10 | // autoload riyazali.net/sqlite-registered extensions in sqlite 11 | _ "github.com/jrhy/s3db/sqlite/sqlite-autoload-extension" 12 | // mattn's awesome sql driver for sqlite 13 | _ "github.com/mattn/go-sqlite3" 14 | ) 15 | 16 | func main() { 17 | if err := run(); err != nil { 18 | panic(err) 19 | } 20 | } 21 | 22 | func run() error { 23 | var db *sql.DB 24 | var err error 25 | if db, err = sql.Open("sqlite3", ""); err != nil { 26 | return err 27 | } 28 | defer db.Close() 29 | if err = db.Ping(); err != nil { 30 | return err 31 | } 32 | _, err = db.Exec("create virtual table f using s3db (columns='a primary key, b')") 33 | if err != nil { 34 | return err 35 | } 36 | _, err = db.Exec("insert into f values(1,1)") 37 | if err != nil { 38 | return err 39 | } 40 | r, err := db.Query("select * from f") 41 | if err != nil { 42 | return err 43 | } 44 | if !r.Next() { 45 | return errors.New("something went wrong! where did the first row go?") 46 | } 47 | if cols, err := r.Columns(); err != nil { 48 | return err 49 | } else { 50 | fmt.Printf("columns: %+v\n", cols) 51 | } 52 | var a, b int64 53 | err = r.Scan(&a, &b) 54 | if err != nil { 55 | return err 56 | } 57 | if a != int64(1) || b != int64(1) { 58 | return fmt.Errorf("something went wrong! 
the first row should be (1,1) but is (%d,%d)!", a, b) 59 | } 60 | fmt.Printf("the first row is: [%d %d]\n", a, b) 61 | return nil 62 | } 63 | -------------------------------------------------------------------------------- /example-go-app/fun_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | func TestLoad(t *testing.T) { 10 | err := run() 11 | require.NoError(t, err) 12 | } 13 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/jrhy/s3db 2 | 3 | go 1.23.0 4 | 5 | toolchain go1.24.1 6 | 7 | require ( 8 | github.com/aws/aws-sdk-go v1.55.7 9 | github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 10 | github.com/johannesboyne/gofakes3 v0.0.0-20250402064820-d479899d8cbe 11 | github.com/johncgriffin/overflow v0.0.0-20211019200055-46fa312c352c 12 | github.com/jrhy/mast v1.2.33 13 | github.com/mattn/go-sqlite3 v1.14.28 14 | github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 15 | github.com/segmentio/ksuid v1.0.4 16 | github.com/stretchr/testify v1.10.0 17 | go.riyazali.net/sqlite v0.0.0-20250204091031-8aa392720bb1 18 | golang.org/x/crypto v0.38.0 19 | google.golang.org/protobuf v1.36.6 20 | ) 21 | 22 | require ( 23 | github.com/davecgh/go-spew v1.1.1 // indirect 24 | github.com/hashicorp/golang-lru v1.0.2 // indirect 25 | github.com/jmespath/go-jmespath v0.4.0 // indirect 26 | github.com/kr/pretty v0.3.1 // indirect 27 | github.com/mattn/go-pointer v0.0.1 // indirect 28 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 29 | github.com/rogpeppe/go-internal v1.10.0 // indirect 30 | github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect 31 | go.shabbyrobe.org/gocovmerge v0.0.0-20230507111327-fa4f82cfbf4d // 
indirect 32 | golang.org/x/sys v0.33.0 // indirect 33 | golang.org/x/tools v0.32.0 // indirect 34 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect 35 | gopkg.in/yaml.v2 v2.4.0 // indirect 36 | gopkg.in/yaml.v3 v3.0.1 // indirect 37 | ) 38 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/aws/aws-sdk-go v1.44.256/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= 2 | github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE= 3 | github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= 4 | github.com/cevatbarisyilmaz/ara v0.0.4 h1:SGH10hXpBJhhTlObuZzTuFn1rrdmjQImITXnZVPSodc= 5 | github.com/cevatbarisyilmaz/ara v0.0.4/go.mod h1:BfFOxnUd6Mj6xmcvRxHN3Sr21Z1T3U2MYkYOmoQe4Ts= 6 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 7 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 8 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 9 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 10 | github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ= 11 | github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= 12 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 13 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 14 | github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= 15 | github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= 16 | github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= 17 | github.com/jmespath/go-jmespath 
v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= 18 | github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= 19 | github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= 20 | github.com/johannesboyne/gofakes3 v0.0.0-20250402064820-d479899d8cbe h1:oc+3AXUeNlN53brf1JS91kMicMkLHPLHu7K9jSKlewU= 21 | github.com/johannesboyne/gofakes3 v0.0.0-20250402064820-d479899d8cbe/go.mod h1:t6osVdP++3g4v2awHz4+HFccij23BbdT1rX3W7IijqQ= 22 | github.com/johncgriffin/overflow v0.0.0-20211019200055-46fa312c352c h1:2n/HCxBM7oa5PNCPKIhV26EtJkaPXFfcVojPAT3ujTU= 23 | github.com/johncgriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:B9OPZOhZ3FIi6bu54lAgCMzXLh11Z7ilr3rOr/ClP+E= 24 | github.com/jrhy/mast v1.2.33 h1:jjEIvbIL3Ik/5XOi2Xjalt1WWF5hHZzKecbGQfYBd2c= 25 | github.com/jrhy/mast v1.2.33/go.mod h1:6v9QI2+vESNUjcsk4jLnl/cH13ktU7If0GwBF+IetVM= 26 | github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 27 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 28 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 29 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 30 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 31 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 32 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 33 | github.com/leanovate/gopter v0.2.11 h1:vRjThO1EKPb/1NsDXuDrzldR28RLkBflWYcU9CvzWu4= 34 | github.com/leanovate/gopter v0.2.11/go.mod h1:aK3tzZP/C+p1m3SPRE4SYZFGP7jjkuSI4f7Xvpt0S9c= 35 | github.com/mattn/go-pointer v0.0.1 h1:n+XhsuGeVO6MEAp7xyEukFINEa+Quek5psIR/ylA6o0= 36 | github.com/mattn/go-pointer v0.0.1/go.mod h1:2zXcozF6qYGgmsG+SeTZz3oAbFLdD3OWqnUbNvJZAlc= 37 | github.com/mattn/go-sqlite3 v1.14.6/go.mod 
h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= 38 | github.com/mattn/go-sqlite3 v1.14.28 h1:ThEiQrnbtumT+QMknw63Befp/ce/nUPgBPMlRFEum7A= 39 | github.com/mattn/go-sqlite3 v1.14.28/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= 40 | github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= 41 | github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= 42 | github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= 43 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 44 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 45 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 46 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 47 | github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= 48 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 49 | github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= 50 | github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= 51 | github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8= 52 | github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= 53 | github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= 54 | github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= 55 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 56 | github.com/stretchr/testify v1.5.1/go.mod 
h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 57 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 58 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 59 | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 60 | go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= 61 | go.riyazali.net/sqlite v0.0.0-20250204091031-8aa392720bb1 h1:F+hyRKGcUzY46Bcl2lWBZr8nqNSfsW/j6PYQoU6TpwE= 62 | go.riyazali.net/sqlite v0.0.0-20250204091031-8aa392720bb1/go.mod h1:UVocl0mLwS0QKUKa5mI6lppmBjvQnUEkFjFfoWqFWQU= 63 | go.shabbyrobe.org/gocovmerge v0.0.0-20230507111327-fa4f82cfbf4d h1:Ns9kd1Rwzw7t0BR8XMphenji4SmIoNZPn8zhYmaVKP8= 64 | go.shabbyrobe.org/gocovmerge v0.0.0-20230507111327-fa4f82cfbf4d/go.mod h1:92Uoe3l++MlthCm+koNi0tcUCX3anayogF0Pa/sp24k= 65 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 66 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 67 | golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= 68 | golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= 69 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= 70 | golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 71 | golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 72 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 73 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 74 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 75 | golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= 76 | 
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 77 | golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= 78 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 79 | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 80 | golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 81 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 82 | golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 83 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 84 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 85 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 86 | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 87 | golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 88 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 89 | golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 90 | golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= 91 | golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 92 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 93 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 94 | golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 95 | golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= 96 | golang.org/x/term v0.7.0/go.mod 
h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= 97 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 98 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 99 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 100 | golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 101 | golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 102 | golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= 103 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 104 | golang.org/x/tools v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 105 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 106 | golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 107 | golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= 108 | golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= 109 | golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= 110 | golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= 111 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 112 | google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= 113 | google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= 114 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 115 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 116 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 117 | gopkg.in/mgo.v2 
v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= 118 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 119 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 120 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 121 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 122 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 123 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 124 | -------------------------------------------------------------------------------- /internal/unquote.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "github.com/jrhy/s3db/sql" 5 | "github.com/jrhy/s3db/sql/colval" 6 | "github.com/jrhy/s3db/sql/parse" 7 | ) 8 | 9 | func UnquoteAll(s string) string { 10 | if len(s) == 0 { 11 | return "" 12 | } 13 | p := &parse.Parser{ 14 | Remaining: s, 15 | } 16 | var cv colval.ColumnValue 17 | var res string 18 | for { 19 | if ok := sql.ColumnValueParser(&cv)(p); !ok { 20 | // dbg("skipping unquote; using: %s\n", s) 21 | return s 22 | } 23 | res += cv.String() 24 | if len(p.Remaining) == 0 { 25 | break 26 | } 27 | } 28 | // dbg("unquoted to: %s\n", res) 29 | return res 30 | } 31 | -------------------------------------------------------------------------------- /key.go: -------------------------------------------------------------------------------- 1 | package s3db 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "strconv" 8 | 9 | "github.com/jrhy/mast" 10 | v1proto "github.com/jrhy/s3db/proto/v1" 11 | ) 12 | 13 | type Key struct { 14 | *v1proto.SQLiteValue 15 | } 16 | 17 | var _ mast.Key = &Key{} 18 | 19 | func NewKey(i interface{}) *Key { 20 | switch x := i.(type) { 21 | case int: 22 | var v int64 = int64(x) 23 | return &Key{&v1proto.SQLiteValue{Type: 
v1proto.Type_INT, Int: v}} 24 | case int32: 25 | var v int64 = int64(x) 26 | return &Key{&v1proto.SQLiteValue{Type: v1proto.Type_INT, Int: v}} 27 | case int16: 28 | var v int64 = int64(x) 29 | return &Key{&v1proto.SQLiteValue{Type: v1proto.Type_INT, Int: v}} 30 | case int8: 31 | var v int64 = int64(x) 32 | return &Key{&v1proto.SQLiteValue{Type: v1proto.Type_INT, Int: v}} 33 | case int64: 34 | var v int64 = int64(x) 35 | return &Key{&v1proto.SQLiteValue{Type: v1proto.Type_INT, Int: v}} 36 | case uint: 37 | var v int64 = int64(x) 38 | return &Key{&v1proto.SQLiteValue{Type: v1proto.Type_INT, Int: v}} 39 | case uint8: 40 | var v int64 = int64(x) 41 | return &Key{&v1proto.SQLiteValue{Type: v1proto.Type_INT, Int: v}} 42 | case uint16: 43 | var v int64 = int64(x) 44 | return &Key{&v1proto.SQLiteValue{Type: v1proto.Type_INT, Int: v}} 45 | case uint32: 46 | var v int64 = int64(x) 47 | return &Key{&v1proto.SQLiteValue{Type: v1proto.Type_INT, Int: v}} 48 | case float64: 49 | return &Key{&v1proto.SQLiteValue{Type: v1proto.Type_REAL, Real: x}} 50 | case string: 51 | return &Key{&v1proto.SQLiteValue{Type: v1proto.Type_TEXT, Text: x}} 52 | case []byte: 53 | return &Key{&v1proto.SQLiteValue{Type: v1proto.Type_BLOB, Blob: x}} 54 | case nil: 55 | return &Key{&v1proto.SQLiteValue{Type: v1proto.Type_NULL}} 56 | default: 57 | panic(fmt.Errorf("unhandled Key type %T", x)) 58 | } 59 | } 60 | 61 | var defaultLayer = mast.DefaultLayer(nil) 62 | 63 | func (k *Key) Layer(branchFactor uint) uint8 { 64 | var layer uint8 65 | var err error 66 | v := k.SQLiteValue 67 | switch v.Type { 68 | case v1proto.Type_INT: 69 | layer, err = defaultLayer(v.Int, branchFactor) 70 | case v1proto.Type_REAL: 71 | layer, err = defaultLayer(strconv.FormatFloat(v.Real, 'b', -1, 64), branchFactor) 72 | case v1proto.Type_TEXT: 73 | layer, err = defaultLayer(v.Text, branchFactor) 74 | case v1proto.Type_BLOB: 75 | layer, err = defaultLayer(v.Blob, branchFactor) 76 | case v1proto.Type_NULL: 77 | layer = 0 78 | default: 
79 | panic("unhandled Key type") 80 | } 81 | if err != nil { 82 | panic(err) 83 | } 84 | return layer 85 | } 86 | 87 | func (k *Key) IsNull() bool { 88 | return k.Type == v1proto.Type_NULL 89 | } 90 | 91 | func (k *Key) Order(o2 mast.Key) int { 92 | if o2 == nil { 93 | return 1 94 | } 95 | k2 := o2.(*Key) 96 | v := k.SQLiteValue 97 | v2 := k2.SQLiteValue 98 | var flip bool 99 | v, v2, flip = orderType(v, v2) 100 | if v.Type == v1proto.Type_INT { 101 | if v2.Type == v1proto.Type_INT { 102 | if v.Int < v2.Int { 103 | return order(flip, -1) 104 | } else if v.Int > v2.Int { 105 | return order(flip, 1) 106 | } 107 | return 0 108 | } 109 | if v2.Type == v1proto.Type_REAL { 110 | if float64(v.Int) < v2.Real { 111 | return order(flip, -1) 112 | } else if float64(v.Int) > v2.Real { 113 | return order(flip, 1) 114 | } 115 | return 0 116 | } 117 | return order(flip, -1) 118 | } 119 | if v.Type == v1proto.Type_REAL { 120 | if v2.Type == v1proto.Type_REAL { 121 | if v.Real < v2.Real { 122 | return order(flip, -1) 123 | } else if v.Real > v2.Real { 124 | return order(flip, 1) 125 | } 126 | return 0 127 | } 128 | return order(flip, -1) 129 | } 130 | if v.Type == v1proto.Type_TEXT { 131 | if v2.Type == v1proto.Type_TEXT { 132 | if v.Text < v2.Text { 133 | return order(flip, -1) 134 | } else if v.Text > v2.Text { 135 | return order(flip, 1) 136 | } 137 | return 0 138 | } 139 | return order(flip, -1) 140 | } 141 | if v.Type == v1proto.Type_BLOB { 142 | if v2.Type == v1proto.Type_BLOB { 143 | return order(flip, bytes.Compare(v.Blob, v2.Blob)) 144 | } 145 | } 146 | panic(fmt.Errorf("key comparison %T, %T in unexpected order", 147 | k.Value(), k2.Value())) 148 | } 149 | 150 | func orderType(v, v2 *v1proto.SQLiteValue) (*v1proto.SQLiteValue, *v1proto.SQLiteValue, bool) { 151 | if typeIndex(v) <= typeIndex(v2) { 152 | return v, v2, false 153 | } 154 | return v2, v, true 155 | } 156 | 157 | func typeIndex(v *v1proto.SQLiteValue) int { 158 | if v.Type == v1proto.Type_INT { 159 | return 0 
160 | } 161 | if v.Type == v1proto.Type_REAL { 162 | return 1 163 | } 164 | if v.Type == v1proto.Type_TEXT { 165 | return 2 166 | } 167 | if v.Type == v1proto.Type_BLOB { 168 | return 3 169 | } 170 | panic("unhandled key type") 171 | } 172 | 173 | func order(flip bool, cmp int) int { 174 | if cmp == 0 || !flip { 175 | return cmp 176 | } 177 | return -1 * cmp 178 | } 179 | 180 | func (k *Key) Value() interface{} { 181 | switch k.Type { 182 | case v1proto.Type_INT: 183 | return k.Int 184 | case v1proto.Type_TEXT: 185 | return k.Text 186 | case v1proto.Type_REAL: 187 | return k.Real 188 | case v1proto.Type_BLOB: 189 | return k.Blob 190 | } 191 | return nil 192 | } 193 | 194 | func (k *Key) String() string { 195 | return mustJSON(k) 196 | } 197 | 198 | func mustJSON(i interface{}) string { 199 | var b []byte 200 | var err error 201 | b, err = json.Marshal(i) 202 | if err != nil { 203 | panic(err) 204 | } 205 | if len(b) > 60 { 206 | b, err = json.MarshalIndent(i, " ", " ") 207 | if err != nil { 208 | panic(err) 209 | } 210 | } 211 | return string(b) 212 | } 213 | -------------------------------------------------------------------------------- /key_test.go: -------------------------------------------------------------------------------- 1 | package s3db 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | func TestSortOrder(t *testing.T) { 10 | require.Equal(t, 1, NewKey(4).Order(NewKey(3.14))) 11 | require.Equal(t, -1, NewKey(3.14).Order(NewKey(4))) 12 | require.Equal(t, 1, NewKey("string").Order(NewKey(4))) 13 | require.Equal(t, -1, NewKey(4).Order(NewKey("string"))) 14 | require.Equal(t, 1, NewKey([]byte("blob")).Order(NewKey("string"))) 15 | require.Equal(t, -1, NewKey("string").Order(NewKey([]byte("blob")))) 16 | } 17 | -------------------------------------------------------------------------------- /kv/bench_test.go: -------------------------------------------------------------------------------- 1 | package kv_test 2 | 3 | 
import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/aws/aws-sdk-go/aws/session" 9 | "github.com/aws/aws-sdk-go/service/s3" 10 | 11 | "github.com/jrhy/s3db/kv" 12 | ) 13 | 14 | func BenchmarkSet1(b *testing.B) { 15 | ctx := context.Background() 16 | t, close := newTestTree(0, 0) 17 | defer close() 18 | for n := 0; n < b.N; n++ { 19 | t.Set(ctx, time.Time{}, 0, 0) 20 | } 21 | t.Cancel() 22 | } 23 | 24 | func BenchmarkSetN(b *testing.B) { 25 | ctx := context.Background() 26 | t, close := newTestTree(0, 0) 27 | defer close() 28 | for n := 0; n < b.N; n++ { 29 | t.Set(ctx, time.Time{}, n, n) 30 | } 31 | t.Cancel() 32 | } 33 | 34 | func BenchmarkSetNCommit(b *testing.B) { 35 | ctx := context.Background() 36 | t, close := newTestTree(0, 0) 37 | defer close() 38 | for n := 0; n < b.N; n++ { 39 | t.Set(ctx, time.Time{}, n, n) 40 | } 41 | t.Commit(ctx) 42 | } 43 | 44 | func BenchmarkGet1(b *testing.B) { 45 | ctx := context.Background() 46 | t, close := newTestTree(0, 0) 47 | defer close() 48 | t.Set(ctx, time.Time{}, 0, 0) 49 | var v int 50 | for n := 0; n < b.N; n++ { 51 | t.Get(ctx, n, &v) 52 | } 53 | t.Cancel() 54 | } 55 | 56 | func BenchmarkGetNMemory(b *testing.B) { 57 | ctx := context.Background() 58 | t, close := newTestTree(0, 0) 59 | defer close() 60 | for n := 0; n < b.N; n++ { 61 | t.Set(ctx, time.Time{}, n, n) 62 | } 63 | t.Cancel() 64 | var v int 65 | for n := 0; n < b.N; n++ { 66 | t.Get(ctx, n, &v) 67 | } 68 | } 69 | 70 | func BenchmarkGetNStored(b *testing.B) { 71 | ctx := context.Background() 72 | t, close := newTestTree(0, 0) 73 | defer close() 74 | for n := 0; n < b.N; n++ { 75 | t.Set(ctx, time.Time{}, n, n) 76 | } 77 | t.Commit(ctx) 78 | var v int 79 | for n := 0; n < b.N; n++ { 80 | t.Get(ctx, n, &v) 81 | } 82 | } 83 | 84 | func newTestTree(zeroKey, zeroValue interface{}) (*kv.DB, func()) { 85 | ctx := context.Background() 86 | s3cfg, close := setupS3("bucket") 87 | 88 | c := s3.New(session.New(s3cfg)) 89 | 90 | cfg := kv.Config{ 91 | 
Storage: &kv.S3BucketInfo{ 92 | EndpointURL: c.Endpoint, 93 | BucketName: "bucket", 94 | Prefix: "/my-awesome-database", 95 | }, 96 | KeysLike: "key", 97 | ValuesLike: 1234, 98 | } 99 | s, err := kv.Open(ctx, c, cfg, kv.OpenOptions{}, time.Now()) 100 | if err != nil { 101 | panic(err) 102 | } 103 | return s, close 104 | } 105 | -------------------------------------------------------------------------------- /kv/cmd/kv/delete-history.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/jrhy/s3db/kv" 8 | ) 9 | 10 | func init() { 11 | cmd := "delete-history" 12 | subcommandUsage[cmd] = "delete-history --older-than=" 13 | subcommandDesc[cmd] = "Removes versions and associated nodes older than a certain age." 14 | subcommandFuncs[cmd] = func(sa *subcommandArgs) int { 15 | var d time.Duration 16 | err := parseDuration(&sa.SubcommandOptions, "--older-than", &d) 17 | if err != nil { 18 | err = fmt.Errorf("delete-history: %w", err) 19 | fmt.Fprintln(sa.Stderr, err) 20 | return 1 21 | } 22 | db := open(sa.Ctx, nil, sa) 23 | err = kv.DeleteHistoricVersions(sa.Ctx, db, time.Now().Add(-d)) 24 | if err != nil { 25 | err = fmt.Errorf("delete-history: %w", err) 26 | fmt.Fprintln(sa.Stderr, err) 27 | return 1 28 | } 29 | sa.Result.suppressCommit = true 30 | return 0 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /kv/cmd/kv/diff.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/jrhy/s3db/kv" 7 | ) 8 | 9 | func init() { 10 | cmd := "diff" 11 | usage := `diff []` 12 | desc := `Shows differences between two versions.` 13 | subcommandUsage[cmd] = usage 14 | subcommandDesc[cmd] = desc 15 | subcommandFuncs[cmd] = func(sa *subcommandArgs) int { 16 | al := len(sa.Arg) 17 | if al == 0 || al > 2 { 18 | fmt.Fprintln(sa.Stderr, usage) 19 | 
return 1 20 | } 21 | fromVersion := sa.Arg[0] 22 | from := open(sa.Ctx, &kv.OpenOptions{ 23 | OnlyVersions: []string{fromVersion}, 24 | ReadOnly: true, 25 | }, sa) 26 | var toVersion string 27 | if al > 1 { 28 | toVersion = sa.Arg[1] 29 | } 30 | to := open(sa.Ctx, &kv.OpenOptions{ 31 | OnlyVersions: []string{toVersion}, 32 | ReadOnly: true, 33 | }, sa) 34 | err := to.Diff(sa.Ctx, from, sa.diff()) 35 | if err != nil { 36 | fmt.Fprintln(sa.Stderr, err) 37 | return 1 38 | } 39 | sa.Result.suppressCommit = true 40 | return 0 41 | } 42 | } 43 | 44 | func (sa *subcommandArgs) diff() func(interface{}, interface{}, interface{}) (bool, error) { 45 | return func(key, myValue, fromValue interface{}) (keepGoing bool, err error) { 46 | if fromValue != nil { 47 | fmt.Fprintf(sa.Stdout, "-%v: %v\n", key, fromValue) 48 | } 49 | if myValue != nil { 50 | fmt.Fprintf(sa.Stdout, "+%v: %v\n", key, myValue) 51 | } 52 | return true, nil 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /kv/cmd/kv/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "io/ioutil" 9 | "os" 10 | "sort" 11 | "strings" 12 | "time" 13 | 14 | "github.com/aws/aws-sdk-go/aws" 15 | "github.com/aws/aws-sdk-go/aws/session" 16 | "github.com/aws/aws-sdk-go/service/s3" 17 | "github.com/docopt/docopt-go" 18 | "github.com/jrhy/s3db/kv" 19 | ) 20 | 21 | const version = "0.1" 22 | 23 | var ( 24 | subcommandFuncs = map[string]func(*subcommandArgs) int{} 25 | subcommandUsage = map[string]string{} 26 | subcommandDesc = map[string]string{} 27 | ) 28 | 29 | type subcommandArgs struct { 30 | // inputs 31 | Bucket string 32 | Prefix string `docopt:"-p,--prefix"` 33 | MasterKeyFile string `docopt:"-k,--master-key-file"` 34 | Quiet bool `docopt:"-q,--quiet"` 35 | Verbose bool `docopt:"-v,--verbose"` 36 | Subcommand string `docopt:""` 37 | Arg []string `docopt:""` 38 
| Ctx context.Context 39 | Stdout io.Writer 40 | Stderr io.Writer 41 | 42 | // derived 43 | encryptor kv.Encryptor 44 | SubcommandOptions docopt.Opts 45 | 46 | // outputs 47 | db *kv.DB 48 | s3opts *kv.OpenOptions 49 | Result struct { 50 | suppressCommit bool 51 | } 52 | } 53 | 54 | func main() { 55 | s := subcommandArgs{ 56 | Stdout: os.Stdout, 57 | Stderr: os.Stderr, 58 | Ctx: context.Background(), 59 | } 60 | os.Exit(int(s.run(os.Args[1:]))) 61 | } 62 | 63 | func parseArgs(s *subcommandArgs, args []string) { 64 | usage := `kv v` + version + ` 65 | 66 | Usage: 67 | kv --bucket= [--master-key-file=] [--prefix=] 68 | [-qv] [...] 69 | kv -h 70 | 71 | Options: 72 | -b, --bucket= S3 bucket to put the database in 73 | -h, --help Print detailed help, including subcommands. 74 | -k, --master-key-file= 75 | path to master key material bytes 76 | -p, --prefix= S3 object name prefix 77 | -q, --quiet suppress warnings 78 | -v, --verbose always say what happened 79 | 80 | Environment: 81 | S3_ENDPOINT= override S3 endpoint, if not using AWS S3 82 | (e.g. 
minio, Wasabi) 83 | AWS SDK S3 client per 84 | docs.aws.amazon.com/cli/latest/reference/configure 85 | 86 | Commands: 87 | ` 88 | cmds := []string{} 89 | for cmd := range subcommandUsage { 90 | cmds = append(cmds, cmd) 91 | } 92 | sort.Strings(cmds) 93 | for _, cmd := range cmds { 94 | usage += fmt.Sprintf(" %s\n", subcommandUsage[cmd]) 95 | usage += fmt.Sprintf(" %s\n", subcommandDesc[cmd]) 96 | } 97 | p := docopt.Parser{ 98 | OptionsFirst: true, 99 | } 100 | opts, err := p.ParseArgs(usage, args, version) 101 | if err != nil { 102 | panic(err) 103 | } 104 | err = opts.Bind(s) 105 | if err != nil { 106 | panic(err) 107 | } 108 | } 109 | 110 | func (s *subcommandArgs) run(args []string) int { 111 | parseArgs(s, args) 112 | if s.MasterKeyFile != "" { 113 | keyBytes, err := ioutil.ReadFile(s.MasterKeyFile) 114 | if err != nil { 115 | fmt.Fprintln(s.Stderr, err) 116 | return 1 117 | } 118 | s.encryptor = kv.V1NodeEncryptor(keyBytes) 119 | } 120 | if f, ok := subcommandFuncs[s.Subcommand]; ok { 121 | su := subcommandUsage[s.Subcommand] 122 | var r int 123 | r = parseSubcommandArgs(su, s) 124 | if r != 0 { 125 | return r 126 | } 127 | r = f(s) 128 | if r != 0 { 129 | return r 130 | } 131 | } else { 132 | fmt.Fprintf(s.Stderr, "unknown command: %s", s.Subcommand) 133 | fmt.Fprintf(s.Stderr, "arg: %v\n", s.Arg) 134 | return 1 135 | } 136 | if s.db == nil || 137 | s.s3opts == nil || 138 | s.s3opts.ReadOnly || 139 | s.Result.suppressCommit { 140 | return 0 141 | } 142 | if !s.db.IsDirty() { 143 | if s.Verbose { 144 | fmt.Fprintf(s.Stdout, "no change\n") 145 | } 146 | return 0 147 | } 148 | hash, err := s.db.Commit(s.Ctx) 149 | if err != nil { 150 | fmt.Fprintln(s.Stderr, err) 151 | return 1 152 | } 153 | if s.Verbose { 154 | if hash != nil { 155 | fmt.Fprintf(s.Stdout, "committed %s\n", *hash) 156 | } else { 157 | fmt.Fprintf(s.Stdout, "committed empty tree\n") 158 | } 159 | } 160 | return 0 161 | } 162 | 163 | func open(ctx context.Context, opts *kv.OpenOptions, args 
*subcommandArgs) *kv.DB { 164 | if args.Bucket == "" { 165 | fmt.Fprintf(args.Stderr, "--bucket not set\n") 166 | os.Exit(1) 167 | } 168 | s := getS3() 169 | if s.Endpoint == "" { 170 | fmt.Fprintf(args.Stderr, "No S3 endpoint configured. Ensure AWS SDK is configured or set S3_ENDPOINT explicitly.\n") 171 | os.Exit(1) 172 | } 173 | 174 | cfg := kv.Config{ 175 | Storage: &kv.S3BucketInfo{ 176 | EndpointURL: s.Endpoint, 177 | BucketName: args.Bucket, 178 | Prefix: args.Prefix, 179 | }, 180 | KeysLike: "stringy", 181 | ValuesLike: "stringy", 182 | NodeEncryptor: args.encryptor, 183 | } 184 | var so kv.OpenOptions 185 | if opts != nil { 186 | so = *opts 187 | } 188 | db, err := kv.Open(ctx, s, cfg, so, time.Now()) 189 | if err != nil { 190 | err = fmt.Errorf("open: %w", err) 191 | fmt.Fprintln(args.Stderr, err) 192 | os.Exit(1) 193 | } 194 | 195 | args.db = db 196 | args.s3opts = &so 197 | return db 198 | } 199 | 200 | func getS3() *s3.S3 { 201 | config := aws.Config{} 202 | endpoint := os.Getenv("S3_ENDPOINT") 203 | if endpoint != "" { 204 | config.Endpoint = &endpoint 205 | config.S3ForcePathStyle = aws.Bool(true) 206 | } 207 | 208 | sess, err := session.NewSession(&config) 209 | if err != nil { 210 | err = fmt.Errorf("session: %w", err) 211 | fmt.Fprintln(os.Stderr, err) 212 | os.Exit(1) 213 | } 214 | 215 | return s3.New(sess) 216 | } 217 | 218 | func parseSubcommandArgs(usage string, s *subcommandArgs) int { 219 | p := docopt.Parser{ 220 | SkipHelpFlags: true, 221 | } 222 | opts, err := p.ParseArgs( 223 | "Usage: "+strings.Split(usage, "\n")[0], 224 | s.Arg, "") 225 | if err != nil { 226 | fmt.Fprintln(s.Stderr, err) 227 | return 1 228 | } 229 | s.SubcommandOptions = opts 230 | return 0 231 | } 232 | 233 | func parseDuration(o *docopt.Opts, name string, d *time.Duration) error { 234 | durstr, err := o.String(name) 235 | if err != nil { 236 | return fmt.Errorf("option: %w", err) 237 | } 238 | if durstr == "" { 239 | return errors.New("empty duration") 240 | } 241 | 
*d, err = time.ParseDuration(durstr) 242 | if err != nil { 243 | return fmt.Errorf("duration: %w", err) 244 | } 245 | return nil 246 | } 247 | -------------------------------------------------------------------------------- /kv/cmd/kv/merge.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | ) 7 | 8 | func init() { 9 | cmd := "merge" 10 | usage := "merge" 11 | desc := "Prints version name resulting from merging current versions." 12 | subcommandUsage[cmd] = usage 13 | subcommandDesc[cmd] = desc 14 | subcommandFuncs[cmd] = func(sa *subcommandArgs) int { 15 | db := open(sa.Ctx, nil, sa) 16 | name, err := db.Commit(sa.Ctx) 17 | if err != nil { 18 | fmt.Fprintln(os.Stderr, err) 19 | return 1 20 | } 21 | if name != nil { 22 | fmt.Fprintf(sa.Stdout, "%s\n", *name) 23 | } else { 24 | fmt.Fprintf(sa.Stdout, "\n") 25 | } 26 | sa.Result.suppressCommit = true 27 | return 0 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /kv/cmd/kv/remove-tombstones.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | ) 7 | 8 | func init() { 9 | cmd := "remove-tombstones" 10 | usage := "remove-tombstones --older-than=" 11 | desc := "Removes tombstone entries older than a certain age." 
12 | subcommandUsage[cmd] = usage 13 | subcommandDesc[cmd] = desc 14 | subcommandFuncs[cmd] = func(sa *subcommandArgs) int { 15 | var d time.Duration 16 | err := parseDuration(&sa.SubcommandOptions, "--older-than", &d) 17 | if err != nil { 18 | err = fmt.Errorf("remove-tombstones: %w", err) 19 | fmt.Fprintln(sa.Stderr, err) 20 | return 1 21 | } 22 | db := open(sa.Ctx, nil, sa) 23 | err = db.RemoveTombstones(sa.Ctx, time.Now().Add(-d)) 24 | if err != nil { 25 | err = fmt.Errorf("remove-tombstones: %w", err) 26 | fmt.Fprintln(sa.Stderr, err) 27 | return 1 28 | } 29 | return 0 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /kv/cmd/kv/set.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | "time" 7 | ) 8 | 9 | func init() { 10 | cmd := "set" 11 | usage := `set = ...` 12 | desc := `Sets values for entries.` 13 | subcommandUsage[cmd] = usage 14 | subcommandDesc[cmd] = desc 15 | subcommandFuncs[cmd] = func(sa *subcommandArgs) int { 16 | if len(sa.Arg) == 0 { 17 | fmt.Fprintln(sa.Stderr, usage) 18 | return 1 19 | } 20 | db := open(sa.Ctx, nil, sa) 21 | keys := []string{} 22 | values := []interface{}{} 23 | for _, arg := range sa.Arg { 24 | a := strings.Split(arg, "=") 25 | if len(a) < 2 { 26 | fmt.Fprintln(sa.Stderr, usage) 27 | return 1 28 | } 29 | keys = append(keys, a[0]) 30 | values = append(values, strings.Join(a[1:], "=")) 31 | } 32 | for i := range keys { 33 | var cur interface{} 34 | var err error 35 | var tombstoned, ok bool 36 | if tombstoned, err = db.IsTombstoned(sa.Ctx, keys[i]); err == nil && tombstoned { 37 | if !sa.Quiet { 38 | fmt.Fprintf(sa.Stderr, "warning: set of '%s' is ineffective while tombstoned\n", keys[i]) 39 | } 40 | continue 41 | } else if ok, err = db.Get(sa.Ctx, keys[i], &cur); ok { 42 | if !sa.Quiet && cur == values[i] { 43 | fmt.Fprintf(sa.Stderr, "warning: '%s' already had requested value; 
updating time only\n", keys[i]) 44 | } 45 | } 46 | if err == nil { 47 | err = db.Set(sa.Ctx, time.Now(), keys[i], values[i]) 48 | } 49 | if err != nil { 50 | err = fmt.Errorf("set '%s': %w", keys[i], err) 51 | fmt.Fprintln(sa.Stderr, err) 52 | return 1 53 | } 54 | } 55 | return 0 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /kv/cmd/kv/show.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/jrhy/s3db/kv" 7 | ) 8 | 9 | func init() { 10 | cmd := "show" 11 | usage := cmd 12 | desc := `Prints entries to stdout.` 13 | subcommandUsage[cmd] = usage 14 | subcommandDesc[cmd] = desc 15 | subcommandFuncs[cmd] = func(sa *subcommandArgs) int { 16 | db := open(sa.Ctx, &kv.OpenOptions{ReadOnly: true}, sa) 17 | err := db.Diff(sa.Ctx, nil, sa.dump()) 18 | if err != nil { 19 | fmt.Fprintln(sa.Stderr, err) 20 | return 1 21 | } 22 | return 0 23 | } 24 | } 25 | 26 | func (sa *subcommandArgs) dump() func(interface{}, interface{}, interface{}) (bool, error) { 27 | return func(key, myValue, fromValue interface{}) (keepGoing bool, err error) { 28 | switch x := myValue.(type) { 29 | case []byte: 30 | fmt.Fprintf(sa.Stdout, "%v: %s\n", key, string(x)) 31 | default: 32 | fmt.Fprintf(sa.Stdout, "%v: %v\n", key, x) 33 | } 34 | return true, nil 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /kv/cmd/kv/tombstone.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "time" 7 | ) 8 | 9 | func init() { 10 | cmd := "tombstone" 11 | usage := `tombstone ...` 12 | desc := `Marks entry as deleted, un-settable.` 13 | subcommandUsage[cmd] = usage 14 | subcommandDesc[cmd] = desc 15 | subcommandFuncs[cmd] = func(sa *subcommandArgs) int { 16 | db := open(sa.Ctx, nil, sa) 17 | keys := []string{} 18 | if len(sa.Arg) == 0 { 
19 | fmt.Fprintln(sa.Stderr, usage) 20 | return 1 21 | } 22 | for _, key := range sa.Arg { 23 | var err error 24 | var tombstoned bool 25 | if tombstoned, err = db.IsTombstoned(sa.Ctx, key); tombstoned { 26 | if !sa.Quiet { 27 | fmt.Fprintf(os.Stderr, "warning: '%s' already tombstoned\n", key) 28 | } 29 | continue 30 | } 31 | if err != nil { 32 | err = fmt.Errorf("tombstone '%s': %w", keys, err) 33 | fmt.Fprintln(sa.Stderr, err) 34 | return 1 35 | } 36 | var value string 37 | var ok bool 38 | if ok, err = db.Get(sa.Ctx, key, &value); !ok { 39 | if !sa.Quiet { 40 | fmt.Fprintf(os.Stderr, "warning: '%s' not previously set; adding tombstone anyway\n", key) 41 | } 42 | } 43 | if err != nil { 44 | err = fmt.Errorf("tombstone '%s': %w", keys, err) 45 | fmt.Fprintln(sa.Stderr, err) 46 | return 1 47 | } 48 | err = db.Tombstone(sa.Ctx, time.Now(), key) 49 | if err != nil { 50 | err = fmt.Errorf("tombstone '%s': %w", keys, err) 51 | fmt.Fprintln(sa.Stderr, err) 52 | return 1 53 | } 54 | } 55 | return 0 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /kv/cmd/kv/trace-history.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/jrhy/s3db/kv" 8 | ) 9 | 10 | func init() { 11 | cmd := "trace-history" 12 | usage := "trace-history [--newer-than=] ..." 13 | desc := "Shows historic values of keys." 
14 | subcommandUsage[cmd] = usage 15 | subcommandDesc[cmd] = desc 16 | subcommandFuncs[cmd] = func(sa *subcommandArgs) int { 17 | var d time.Duration 18 | afterTime := time.Time{} 19 | if sa.SubcommandOptions["--newer-than"] != nil { 20 | err := parseDuration(&sa.SubcommandOptions, "--newer-than", &d) 21 | if err != nil { 22 | fmt.Fprintf(sa.Stderr, "trace-history: %v\n", err) 23 | return 1 24 | } 25 | afterTime = time.Now().Add(-d) 26 | } 27 | db := open(sa.Ctx, &kv.OpenOptions{ReadOnly: true}, sa) 28 | keys := []string{} 29 | if len(sa.Arg) == 0 { 30 | fmt.Fprintln(sa.Stderr, usage) 31 | return 1 32 | } 33 | for _, key := range sa.Arg { 34 | var err error 35 | err = db.TraceHistory(sa.Ctx, key, afterTime, func(when time.Time, value interface{}) (keepGoing bool, err error) { 36 | if value != nil { 37 | fmt.Fprintf(sa.Stdout, "%v %s: %s\n", when, key, value) 38 | } else { 39 | fmt.Fprintf(sa.Stdout, "%v %s tombstoned\n", when, key) 40 | } 41 | return true, nil 42 | }) 43 | if err != nil { 44 | err = fmt.Errorf("traceHistory '%s': %w", keys, err) 45 | fmt.Fprintln(sa.Stderr, err) 46 | return 1 47 | } 48 | } 49 | return 0 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /kv/crdt/value.go: -------------------------------------------------------------------------------- 1 | package crdt 2 | 3 | type Value struct { 4 | ModEpochNanos int64 `json:"m"` 5 | PreviousRoot string `json:"p,omitempty"` 6 | TombstoneSinceEpochNanos int64 `json:"d,omitempty"` 7 | Value interface{} `json:"v"` 8 | } 9 | 10 | func (v Value) Tombstoned() bool { 11 | return v.TombstoneSinceEpochNanos != 0 12 | } 13 | 14 | func LastWriteWins(newValue, oldValue *Value) *Value { 15 | if newValue.Tombstoned() || oldValue.Tombstoned() { 16 | return firstTombstoneWins(newValue, oldValue) 17 | } 18 | if newValue.ModEpochNanos >= oldValue.ModEpochNanos { 19 | return newValue 20 | } 21 | return oldValue 22 | } 23 | 24 | func firstTombstoneWins(newValue, 
oldValue *Value) *Value { 25 | if !newValue.Tombstoned() { 26 | return oldValue 27 | } 28 | if !oldValue.Tombstoned() { 29 | return newValue 30 | } 31 | if newValue.TombstoneSinceEpochNanos < oldValue.TombstoneSinceEpochNanos { 32 | return newValue 33 | } 34 | return oldValue 35 | } 36 | -------------------------------------------------------------------------------- /kv/crypto.go: -------------------------------------------------------------------------------- 1 | package kv 2 | 3 | import ( 4 | "encoding/base64" 5 | "errors" 6 | "fmt" 7 | 8 | "golang.org/x/crypto/argon2" 9 | "golang.org/x/crypto/blake2b" 10 | "golang.org/x/crypto/nacl/secretbox" 11 | "golang.org/x/crypto/poly1305" 12 | "golang.org/x/crypto/salsa20" 13 | "golang.org/x/crypto/salsa20/salsa" 14 | ) 15 | 16 | const ( 17 | macLen = 16 18 | encryptNonceLen = 24 19 | deriveKeySaltLen = 16 20 | keyLen = 32 21 | crypto_secretbox_zerobytes = 16 + macLen 22 | ) 23 | 24 | var ErrMACVerificationFailure = errors.New("MAC verification failure") 25 | 26 | func encrypt(key *[32]byte, message []byte) ([]byte, error) { 27 | combined := make([]byte, 0, len(message)+len(key)) 28 | combined = append(combined, message...) 29 | combined = append(combined, key[:]...) 
30 | n, err := nonce(combined, encryptNonceLen) 31 | if err != nil { 32 | return nil, err 33 | } 34 | var nonce [encryptNonceLen]byte 35 | copy(nonce[:], n[:encryptNonceLen]) 36 | c := secretbox.Seal(nil, message, &nonce, key) 37 | return append(nonce[:], c...), nil 38 | } 39 | 40 | func decrypt(key *[32]byte, c []byte) ([]byte, error) { 41 | if len(c) < encryptNonceLen { 42 | return nil, fmt.Errorf("message too short, no nonce") 43 | } 44 | 45 | var nonce [encryptNonceLen]byte 46 | copy(nonce[:], c[:encryptNonceLen]) 47 | m, ok := secretbox.Open(nil, c[encryptNonceLen:], &nonce, key) 48 | if !ok { 49 | // fallback to the old implementation 50 | return crypto_secretbox_open_easy(c[24:], c[0:24], key) 51 | } 52 | return m, nil 53 | } 54 | 55 | func nonce(message []byte, nonce_len int) ([]byte, error) { 56 | hasher, err := blake2b.New(nonce_len, nil) 57 | if err != nil { 58 | return nil, err 59 | } 60 | _, err = hasher.Write(message) 61 | if err != nil { 62 | return nil, err 63 | } 64 | return hasher.Sum(nil), nil 65 | } 66 | 67 | // XXX: This DIY version has some discrepancy with the last 8 bytes of the 68 | // nonce, compared to libsodium, and is only provided for compatibility 69 | // decrypting existing data. 
70 | func crypto_secretbox_open_detached( 71 | m []byte, 72 | c []byte, 73 | mac []byte, 74 | n []byte, 75 | k *[32]byte) error { 76 | var subkey [32]byte 77 | var nonce16 [16]byte 78 | copy(nonce16[:], n[0:16]) 79 | salsa.HSalsa20(&subkey, &nonce16, k, &salsa.Sigma) 80 | block0 := make([]byte, 64) 81 | salsa20.XORKeyStream(block0[0:32], block0[0:32], n[16:24], &subkey) 82 | var lmac [16]byte 83 | copy(lmac[:], mac[:]) 84 | var block0key [32]byte 85 | copy(block0key[:], block0[:]) 86 | if !poly1305.Verify(&lmac, c, &block0key) { 87 | return ErrMACVerificationFailure 88 | } 89 | mlen0 := len(m) 90 | if mlen0 > 64-crypto_secretbox_zerobytes { 91 | mlen0 = 64 - crypto_secretbox_zerobytes 92 | } 93 | for i := 0; i < mlen0; i++ { 94 | block0[i+crypto_secretbox_zerobytes] = c[i] 95 | } 96 | blen := mlen0 + crypto_secretbox_zerobytes 97 | salsa20.XORKeyStream(block0[:blen], block0[:blen], n[16:24], &subkey) 98 | for i := 0; i < mlen0; i++ { 99 | m[i] = block0[crypto_secretbox_zerobytes+i] 100 | } 101 | if len(c) > mlen0 { 102 | salsa20.XORKeyStream(m[mlen0:], c[mlen0:], n[16:24], &subkey) 103 | } 104 | return nil 105 | } 106 | 107 | // XXX: This DIY version has some discrepancy with the last 8 bytes of the 108 | // nonce, compared to libsodium, and is only provided for compatibility 109 | // decrypting existing data. 110 | func crypto_secretbox_open_easy(c []byte, n []byte, k *[32]byte) ([]byte, error) { 111 | if len(c) < macLen { 112 | return nil, fmt.Errorf("too short for MAC") 113 | } 114 | m := make([]byte, len(c)-16) 115 | err := crypto_secretbox_open_detached(m, c[16:], c[0:16], n, k) 116 | if err != nil { 117 | return nil, err 118 | } 119 | return m, nil 120 | } 121 | 122 | // XXX: Use secretbox package instead. This DIY version has some discrepancy 123 | // with the last 8 bytes of the nonce, compared to libsodium, and should not be 124 | // used. 
125 | func crypto_secretbox_easy(m []byte, n []byte, k *[32]byte) ([]byte, error) { 126 | c := make([]byte, len(m)+macLen) 127 | err := crypto_secretbox_detached(c[16:], c[0:16], m, n, k) 128 | if err != nil { 129 | return nil, err 130 | } 131 | return c, nil 132 | } 133 | 134 | // XXX: Use secretbox package instead. This DIY version has some discrepancy 135 | // with the last 8 bytes of the nonce, compared to libsodium, and should not be 136 | // used. 137 | func crypto_secretbox_detached(c []byte, mac []byte, m []byte, n []byte, 138 | k *[32]byte) error { 139 | var subkey [32]byte 140 | var nonce16 [16]byte 141 | copy(nonce16[:], n[0:16]) 142 | salsa.HSalsa20(&subkey, &nonce16, k, &salsa.Sigma) 143 | if len(c) != len(m) { 144 | return fmt.Errorf("ciphertext buffer must be same size as message") 145 | } 146 | mlen0 := len(m) 147 | if mlen0 > 64-crypto_secretbox_zerobytes { 148 | mlen0 = 64 - crypto_secretbox_zerobytes 149 | } 150 | block0 := make([]byte, 64) 151 | for i := 0; i < mlen0; i++ { 152 | block0[i+crypto_secretbox_zerobytes] = m[i] 153 | } 154 | blen := mlen0 + crypto_secretbox_zerobytes 155 | salsa20.XORKeyStream(block0[:blen], block0[:blen], n[16:24], &subkey) 156 | for i := 0; i < mlen0; i++ { 157 | c[i] = block0[crypto_secretbox_zerobytes+i] 158 | } 159 | if len(m) > mlen0 { 160 | salsa20.XORKeyStream(c[mlen0:], m[mlen0:], n[16:24], &subkey) 161 | } 162 | 163 | var lmac [16]byte 164 | var block0key [32]byte 165 | copy(block0key[:], block0[:]) 166 | poly1305.Sum(&lmac, c, &block0key) 167 | copy(mac[0:16], lmac[:]) 168 | return nil 169 | } 170 | 171 | func V1NodeEncryptor(passphrase []byte) Encryptor { 172 | var key [32]byte 173 | copy(key[:], deriveKey(passphrase, nil)) 174 | return &jencryptor{key} 175 | } 176 | 177 | type jencryptor struct { 178 | key [32]byte 179 | } 180 | 181 | func (j *jencryptor) Encrypt(path string, value []byte) ([]byte, error) { 182 | return encrypt(&j.key, value) 183 | } 184 | func (j *jencryptor) Decrypt(path string, value 
[]byte) ([]byte, error) { 185 | return decrypt(&j.key, value) 186 | } 187 | 188 | func deriveKey(master, context []byte) []byte { 189 | combined := make([]byte, 0, len(context)+len(master)) 190 | combined = append(combined, context...) 191 | combined = append(combined, master...) 192 | salt, _ := nonce(combined, deriveKeySaltLen) 193 | // base64-encode passphrase for compatibility with libsodium-based impls in which pwhash requires NUL-terminated source 194 | return argon2.IDKey([]byte(base64.StdEncoding.EncodeToString(combined)), 195 | salt, 1, 8, 1, keyLen) 196 | } 197 | -------------------------------------------------------------------------------- /kv/crypto_test.go: -------------------------------------------------------------------------------- 1 | package kv 2 | 3 | import ( 4 | "encoding/base64" 5 | 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "github.com/stretchr/testify/require" 10 | "golang.org/x/crypto/nacl/secretbox" 11 | ) 12 | 13 | func TestNonce(t *testing.T) { 14 | testKey, err := base64.StdEncoding.DecodeString("UdHBz8klP8ze+cl+qP2zcFBOW952mo8DUc/tn59h6Rw=") 15 | require.NoError(t, err) 16 | input1 := []byte("asdf") 17 | input := append(input1, testKey...) 
18 | hash, err := nonce(input, 24) 19 | require.NoError(t, err) 20 | require.Equal(t, "DuO9oCKfeLUrcIImvVH88Y67un3CFnRw", base64.StdEncoding.EncodeToString(hash)) 21 | } 22 | 23 | func TestDeriveKey(t *testing.T) { 24 | testKey, err := base64.StdEncoding.DecodeString("UdHBz8klP8ze+cl+qP2zcFBOW952mo8DUc/tn59h6Rw=") 25 | require.NoError(t, err) 26 | derived := deriveKey(testKey, []byte("foo")) 27 | require.Equal(t, "7a0p0qOL3IqOBMPwjUlGokjz8FNDQDedZRXom5ii/Ls=", base64.StdEncoding.EncodeToString(derived)) 28 | } 29 | 30 | func TestEncrypt(t *testing.T) { 31 | testKey_slice, err := base64.StdEncoding.DecodeString("UdHBz8klP8ze+cl+qP2zcFBOW952mo8DUc/tn59h6Rw=") 32 | require.NoError(t, err) 33 | var testKey [32]byte 34 | copy(testKey[:], testKey_slice[:32]) 35 | encrypted, err := encrypt(&testKey, []byte("asdf")) 36 | require.NoError(t, err) 37 | require.Equal(t, "DuO9oCKfeLUrcIImvVH88Y67un3CFnRwhZOvsmKMKFjTuKYsiLv0bwSBbjo=", 38 | base64.StdEncoding.EncodeToString(encrypted)) 39 | } 40 | 41 | func TestSecretBoxCompat(t *testing.T) { 42 | testKey_slice, err := base64.StdEncoding.DecodeString("UdHBz8klP8ze+cl+qP2zcFBOW952mo8DUc/tn59h6Rw=") 43 | require.NoError(t, err) 44 | var testKey [32]byte 45 | copy(testKey[:], testKey_slice[:32]) 46 | nonceBytes, err := nonce([]byte("asdf"), 24) 47 | require.NoError(t, err) 48 | var nonce [24]byte 49 | copy(nonce[:], nonceBytes[:24]) 50 | 51 | encryptedLocal, err := crypto_secretbox_easy([]byte("asdf"), nonceBytes, &testKey) 52 | assert.Equal(t, "1VyPASCeHN/X2MVLxdYEUmMVjTQ=", 53 | base64.StdEncoding.EncodeToString(encryptedLocal)) 54 | 55 | var encrypted []byte 56 | encrypted = secretbox.Seal(encrypted, []byte("asdf"), &nonce, &testKey) 57 | assert.Equal(t, "1VyPASCeHN/X2MVLxdYEUmMVjTQ=", 58 | base64.StdEncoding.EncodeToString(encrypted)) 59 | } 60 | 61 | func TestDecrypt(t *testing.T) { 62 | testKey_slice, err := base64.StdEncoding.DecodeString("UdHBz8klP8ze+cl+qP2zcFBOW952mo8DUc/tn59h6Rw=") 63 | require.NoError(t, err) 64 | var 
testKey [32]byte 65 | copy(testKey[:], testKey_slice[:32]) 66 | 67 | decoded := make([]byte, 100) 68 | decodedLen, err := base64.StdEncoding.Decode( 69 | decoded, 70 | []byte("DuO9oCKfeLUrcIImvVH88Y67un3CFnRwhZOvsmKMKFjTuKYsiLv0bwSBbjo=")) 71 | require.NoError(t, err) 72 | 73 | decrypted, err := decrypt(&testKey, decoded[0:decodedLen]) 74 | require.NoError(t, err) 75 | require.Equal(t, "asdf", string(decrypted)) 76 | } 77 | -------------------------------------------------------------------------------- /kv/encode_gob.go: -------------------------------------------------------------------------------- 1 | package kv 2 | 3 | import ( 4 | "bytes" 5 | "encoding/gob" 6 | "fmt" 7 | ) 8 | 9 | func marshalGob(thing interface{}) ([]byte, error) { 10 | var network bytes.Buffer 11 | enc := gob.NewEncoder(&network) 12 | err := enc.Encode(thing) 13 | if err != nil { 14 | return nil, fmt.Errorf("encode gob: %w", err) 15 | } 16 | return network.Bytes(), nil 17 | } 18 | 19 | func unmarshalGob(input []byte, thing interface{}) error { 20 | dec := gob.NewDecoder(bytes.NewBuffer(input)) 21 | err := dec.Decode(thing) 22 | if err != nil { 23 | return fmt.Errorf("decode gob: %w", err) 24 | } 25 | return nil 26 | } 27 | -------------------------------------------------------------------------------- /kv/example_test.go: -------------------------------------------------------------------------------- 1 | package kv_test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http/httptest" 7 | "time" 8 | 9 | "github.com/aws/aws-sdk-go/aws" 10 | "github.com/aws/aws-sdk-go/aws/credentials" 11 | "github.com/aws/aws-sdk-go/aws/session" 12 | "github.com/aws/aws-sdk-go/service/s3" 13 | "github.com/johannesboyne/gofakes3" 14 | "github.com/johannesboyne/gofakes3/backend/s3mem" 15 | "github.com/jrhy/mast" 16 | "github.com/jrhy/s3db/kv" 17 | ) 18 | 19 | func s3Config(endpoint string) *aws.Config { 20 | return &aws.Config{ 21 | Credentials: credentials.NewStaticCredentials( 22 | "TEST-ACCESSKEYID", 23 | 
"TEST-SECRETACCESSKEY", 24 | "", 25 | ), 26 | Region: aws.String("ca-west-1"), 27 | Endpoint: &endpoint, 28 | S3ForcePathStyle: aws.Bool(true), 29 | } 30 | } 31 | 32 | func setupS3(bucketName string) (*aws.Config, func()) { 33 | backend := s3mem.New() 34 | faker := gofakes3.New(backend) 35 | ts := httptest.NewServer(faker.Server()) 36 | s3cfg, closer := s3Config(ts.URL), ts.Close 37 | 38 | c := s3.New(session.New(s3cfg)) 39 | _, err := c.CreateBucket(&s3.CreateBucketInput{ 40 | Bucket: &bucketName, 41 | }) 42 | if err != nil { 43 | panic(err) 44 | } 45 | 46 | return s3cfg, closer 47 | } 48 | 49 | func Example() { 50 | ctx := context.Background() 51 | s3cfg, close := setupS3("bucket") 52 | defer close() 53 | 54 | c := s3.New(session.New(s3cfg)) 55 | 56 | cfg := kv.Config{ 57 | Storage: &kv.S3BucketInfo{ 58 | EndpointURL: c.Endpoint, 59 | BucketName: "bucket", 60 | Prefix: "/my-awesome-database", 61 | }, 62 | KeysLike: "key", 63 | ValuesLike: 1234, 64 | NodeCache: mast.NewNodeCache(1024), 65 | NodeEncryptor: kv.V1NodeEncryptor([]byte("This is a secret passphrase if ever there was one.")), 66 | } 67 | s, err := kv.Open(ctx, c, cfg, kv.OpenOptions{}, time.Now()) 68 | if err != nil { 69 | panic(err) 70 | } 71 | 72 | // setting a value 73 | err = s.Set(ctx, time.Now(), "hello", 5) 74 | if err != nil { 75 | panic(err) 76 | } 77 | 78 | // getting a value 79 | var v int 80 | ok, err := s.Get(ctx, "hello", &v) 81 | if err != nil { 82 | panic(err) 83 | } 84 | if !ok { 85 | panic("how is that not OK?") 86 | } 87 | fmt.Printf("hello=%d\n", v) 88 | 89 | _, err = s.Commit(ctx) 90 | if err != nil { 91 | panic(err) 92 | } 93 | 94 | fmt.Printf("size %d\n", s.Size()) 95 | // Output: 96 | // hello=5 97 | // size 1 98 | } 99 | -------------------------------------------------------------------------------- /kv/internal/crdt/crdt.go: -------------------------------------------------------------------------------- 1 | package crdt 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | 
"errors" 7 | "fmt" 8 | "reflect" 9 | "time" 10 | 11 | "github.com/jrhy/mast" 12 | "github.com/jrhy/s3db/kv/crdt" 13 | ) 14 | 15 | type Tree struct { 16 | Config Config 17 | Mast *mast.Mast 18 | Created *time.Time 19 | Source *string 20 | MergeSources []string 21 | MergeMode int 22 | } 23 | 24 | // Root identifies a tree, a persisted form, that links the mast and 25 | // ancestors. 26 | type Root struct { 27 | mast.Root 28 | Created *time.Time `json:"cr,omitempty"` 29 | MergeSources []string `json:"p,omitempty"` 30 | MergeMode int `json:"mm,omitempty"` 31 | KVVersion int `json:"kv_version,omitempty"` 32 | } 33 | 34 | const ( 35 | MergeModeLWW = iota 36 | MergeModeCustom 37 | MergeModeCustomLWW 38 | ) 39 | 40 | func mergeTrees(ctx context.Context, mergeFunc MergeFunc, conflictCB OnConflictMerged, primary *mast.Mast, grafts ...*mast.Mast) (*mast.Mast, error) { 41 | if len(grafts) == 0 { 42 | return primary, nil 43 | } 44 | newTree, err := primary.Clone(ctx) 45 | if err != nil { 46 | return nil, fmt.Errorf("clone: %w", err) 47 | } 48 | 49 | for _, graft := range grafts { 50 | err = newTree.DiffIter(ctx, graft, mergeFunc.ToDiffFunc(ctx, &newTree, conflictCB)) 51 | if err != nil { 52 | return nil, err 53 | } 54 | } 55 | return &newTree, nil 56 | } 57 | 58 | type MergeFunc func(context.Context, *mast.Mast, bool, bool, interface{}, interface{}, interface{}, 59 | OnConflictMerged) (bool, error) 60 | 61 | type MergeError error 62 | 63 | func (mf MergeFunc) ToDiffFunc(ctx context.Context, m *mast.Mast, conflictCB OnConflictMerged) func(added, removed bool, 64 | key, addedValue, removedValue interface{}, 65 | ) (bool, error) { 66 | return func(added, removed bool, key, addedValue, removedValue interface{}) (bool, error) { 67 | ok, err := mf(ctx, m, added, removed, key, addedValue, removedValue, conflictCB) 68 | if err != nil { 69 | err = MergeError(err) 70 | } 71 | return ok, err 72 | } 73 | } 74 | 75 | var LWW MergeFunc = MergeFunc( 76 | func(ctx context.Context, newTree 
*mast.Mast, /*, conflicts *uint64*/ 77 | added, removed bool, key, addedValue, removedValue interface{}, 78 | onConflictMerged OnConflictMerged) (bool, error) { 79 | var newValue crdt.Value 80 | if !added && !removed { // changed 81 | av := addedValue.(crdt.Value) 82 | rv := removedValue.(crdt.Value) 83 | newValue = *crdt.LastWriteWins(&av, &rv) 84 | if onConflictMerged != nil && !av.Tombstoned() && !rv.Tombstoned() && 85 | !reflect.DeepEqual(av.Value, rv.Value) { 86 | err := onConflictMerged(key, av.Value, rv.Value) 87 | if err != nil { 88 | return false, fmt.Errorf("OnConflictMerged: %w", err) 89 | } 90 | } 91 | } else if added { 92 | // already present 93 | return true, nil 94 | } else if removed { 95 | newValue = removedValue.(crdt.Value) 96 | } else { 97 | return false, fmt.Errorf("no added/removed value") 98 | } 99 | err := newTree.Insert(ctx, key, newValue) 100 | if err != nil { 101 | return false, fmt.Errorf("insert: %w", err) 102 | } 103 | return true, nil 104 | }) 105 | 106 | type Config struct { 107 | KeysLike interface{} 108 | ValuesLike interface{} 109 | StoreImmutablePartsWith mast.Persist 110 | NodeCache mast.NodeCache 111 | Marshal func(interface{}) ([]byte, error) 112 | Unmarshal func([]byte, interface{}) error 113 | UnmarshalerUsesRegisteredTypes bool 114 | CustomMerge func(key interface{}, v1, v2 crdt.Value) crdt.Value 115 | OnConflictMerged 116 | MastNodeFormat string 117 | } 118 | 119 | type OnConflictMerged func(key, v1, v2 interface{}) error 120 | 121 | func NewRoot(when time.Time, branchFactor uint) Root { 122 | return Root{ 123 | Root: *mast.NewRoot(&mast.CreateRemoteOptions{ 124 | BranchFactor: branchFactor, 125 | }), 126 | Created: &when, 127 | } 128 | } 129 | 130 | func emptyValue(cfg Config) crdt.Value { 131 | if cfg.ValuesLike == nil { 132 | return crdt.Value{} 133 | } 134 | aType := reflect.TypeOf(cfg.ValuesLike) 135 | aCopy := reflect.New(aType) 136 | return crdt.Value{Value: aCopy} 137 | } 138 | 139 | func unmarshal(bytes []byte, i 
interface{}, cfg Config) error { 140 | ucb := cfg.Unmarshal 141 | if ucb == nil { 142 | ucb = json.Unmarshal 143 | } 144 | if cfg.UnmarshalerUsesRegisteredTypes { 145 | return cfg.Unmarshal(bytes, i) 146 | } 147 | cv, ok := i.(*crdt.Value) 148 | if !ok { 149 | return ucb(bytes, i) 150 | } 151 | var jv struct { 152 | ModEpochNanos int64 `json:"m"` 153 | TombstoneSinceEpochNanos int64 `json:"d,omitempty"` 154 | Value json.RawMessage `json:"v,omitempty"` 155 | PreviousRoot string `json:"p,omitempty"` 156 | } 157 | err := ucb(bytes, &jv) 158 | if err != nil { 159 | return fmt.Errorf("unmarshal crdtValue message: %w", err) 160 | } 161 | 162 | if len(jv.Value) != 0 { 163 | aType := reflect.TypeOf(cfg.ValuesLike) 164 | aCopy := reflect.New(aType) 165 | err = json.Unmarshal(jv.Value, aCopy.Interface()) 166 | if err != nil { 167 | return fmt.Errorf("unmarshal crdtValue: %w", err) 168 | } 169 | cv.Value = aCopy.Elem().Interface() 170 | } else if jv.TombstoneSinceEpochNanos == 0 { 171 | return fmt.Errorf("nil value for nondeleted entry") 172 | } 173 | 174 | cv.ModEpochNanos = jv.ModEpochNanos 175 | cv.TombstoneSinceEpochNanos = jv.TombstoneSinceEpochNanos 176 | cv.PreviousRoot = jv.PreviousRoot 177 | return nil 178 | } 179 | 180 | func Load(ctx context.Context, cfg Config, rootName *string, root Root) (*Tree, error) { 181 | if !cfg.UnmarshalerUsesRegisteredTypes && cfg.ValuesLike == nil { 182 | return nil, errors.New("must set cfg.{Keys,Values}Like or use a marshaler which registers types") 183 | } 184 | mastCfg := mast.RemoteConfig{ 185 | KeysLike: cfg.KeysLike, 186 | ValuesLike: crdt.Value{ 187 | Value: cfg.ValuesLike, 188 | }, 189 | StoreImmutablePartsWith: cfg.StoreImmutablePartsWith, 190 | NodeCache: cfg.NodeCache, 191 | Marshal: cfg.Marshal, 192 | Unmarshal: func(bytes []byte, i interface{}) error { 193 | return unmarshal(bytes, i, cfg) 194 | }, 195 | UnmarshalerUsesRegisteredTypes: cfg.UnmarshalerUsesRegisteredTypes, 196 | } 197 | m, err := root.Root.LoadMast(ctx, 
&mastCfg) 198 | if err != nil { 199 | return nil, fmt.Errorf("load new root: %w", err) 200 | } 201 | switch root.MergeMode { 202 | case MergeModeLWW: 203 | if cfg.CustomMerge != nil { 204 | return nil, errors.New("config.CustomMergeValueOnly conflicts with MergeModeLWW") 205 | } 206 | if cfg.OnConflictMerged != nil { 207 | return nil, errors.New("config.OnConflictMerged handler conflicts with MergeModeLWW") 208 | } 209 | case MergeModeCustom: 210 | if cfg.CustomMerge == nil { 211 | return nil, errors.New("MergeModeCustom requires config.CustomMergeValueOnly") 212 | } 213 | if cfg.OnConflictMerged != nil { 214 | return nil, errors.New("config.OnConflictMerged handler conflicts with MergeModeCustom") 215 | } 216 | case MergeModeCustomLWW: 217 | if cfg.OnConflictMerged == nil { 218 | return nil, errors.New("MergeModeCustomLWW requires config.OnConflictMerged") 219 | } 220 | if cfg.CustomMerge != nil { 221 | return nil, errors.New("config.CustomMergeValueOnly handler conflicts with MergeModeCustomLWW") 222 | } 223 | } 224 | return &Tree{ 225 | cfg, 226 | m, 227 | root.Created, 228 | rootName, 229 | root.MergeSources, 230 | root.MergeMode, 231 | }, nil 232 | } 233 | 234 | func (c *Tree) MakeRoot(ctx context.Context) (*Root, error) { 235 | mastRoot, err := c.Mast.MakeRoot(ctx) 236 | if err != nil { 237 | return nil, err 238 | } 239 | crdtRoot := Root{ 240 | Root: *mastRoot, 241 | Created: c.Created, 242 | MergeSources: c.MergeSources, 243 | MergeMode: c.MergeMode, 244 | } 245 | return &crdtRoot, nil 246 | } 247 | 248 | func convertMergeFunc(cb func(key interface{}, v1, v2 crdt.Value) crdt.Value) MergeFunc { 249 | return MergeFunc(func(ctx context.Context, newTree *mast.Mast, 250 | added, removed bool, key, addedValue, removedValue interface{}, 251 | onConflictMerged OnConflictMerged) (bool, error) { 252 | var newValue crdt.Value 253 | if !added && !removed { // changed 254 | av := addedValue.(crdt.Value) 255 | rv := removedValue.(crdt.Value) 256 | newValue = av 257 | 
newValue = cb(key, av, rv) 258 | if onConflictMerged != nil && !av.Tombstoned() && !rv.Tombstoned() && 259 | !reflect.DeepEqual(av.Value, rv.Value) { 260 | err := onConflictMerged(key, av.Value, rv.Value) 261 | if err != nil { 262 | return false, fmt.Errorf("OnConflictMerged: %w", err) 263 | } 264 | } 265 | } else if added { 266 | // already present 267 | return true, nil 268 | } else if removed { 269 | newValue = removedValue.(crdt.Value) 270 | } else { 271 | return false, fmt.Errorf("no added/removed value") 272 | } 273 | err := newTree.Insert(ctx, key, newValue) 274 | if err != nil { 275 | return false, fmt.Errorf("insert: %w", err) 276 | } 277 | return true, nil 278 | }) 279 | 280 | } 281 | 282 | func (c *Tree) Merge(ctx context.Context, other *Tree) error { 283 | if c.MergeMode != other.MergeMode { 284 | return fmt.Errorf("incoming graft has different MergeMode %d than local %d", other.MergeMode, c.MergeMode) 285 | } 286 | var mergeFunc MergeFunc 287 | if c.MergeMode == MergeModeCustom { 288 | mergeFunc = convertMergeFunc(c.Config.CustomMerge) 289 | } else { 290 | mergeFunc = LWW 291 | } 292 | m, err := mergeTrees(ctx, mergeFunc, c.Config.OnConflictMerged, c.Mast, other.Mast) 293 | if err != nil { 294 | return err 295 | } 296 | c.Mast = m 297 | if other.Source != nil { 298 | if c.MergeSources == nil { 299 | c.MergeSources = []string{*other.Source} 300 | } else { 301 | c.MergeSources = append(c.MergeSources, *other.Source) 302 | } 303 | } 304 | return nil 305 | } 306 | 307 | func (c *Tree) Tombstone(ctx context.Context, when time.Time, key interface{}) error { 308 | n := when.UnixNano() 309 | return c.update(ctx, when, key, 310 | crdt.Value{ 311 | ModEpochNanos: n, 312 | TombstoneSinceEpochNanos: n, 313 | }, 314 | ) 315 | } 316 | 317 | func (c *Tree) IsTombstoned(ctx context.Context, key interface{}) (bool, error) { 318 | cv := emptyValue(c.Config) 319 | contains, err := c.Mast.Get(ctx, key, &cv) 320 | if err != nil || !contains { 321 | return false, err 322 | 
} 323 | return cv.TombstoneSinceEpochNanos != 0, nil 324 | } 325 | 326 | func (c *Tree) Set(ctx context.Context, when time.Time, key, value interface{}) error { 327 | return c.update(ctx, when, key, 328 | crdt.Value{ 329 | ModEpochNanos: when.UnixNano(), 330 | Value: value, 331 | }, 332 | ) 333 | } 334 | 335 | func (c *Tree) update(ctx context.Context, when time.Time, key interface{}, cv crdt.Value) error { 336 | existing := emptyValue(c.Config) 337 | contains, err := c.Mast.Get(ctx, key, &existing) 338 | if err != nil { 339 | return fmt.Errorf("get existing: %w", err) 340 | } 341 | if contains { 342 | wa := crdt.LastWriteWins(&cv, &existing) 343 | winner := *wa 344 | if wa != &existing { 345 | if c.Source != nil { 346 | winner.PreviousRoot = *c.Source 347 | } else { 348 | winner.PreviousRoot = "" 349 | } 350 | } 351 | err = c.Mast.Insert(ctx, key, winner) 352 | } else { 353 | err = c.Mast.Insert(ctx, key, cv) 354 | } 355 | if err != nil { 356 | return fmt.Errorf("insert: %w", err) 357 | } 358 | return nil 359 | } 360 | 361 | func (c *Tree) Get(ctx context.Context, key interface{}, value interface{}) (bool, error) { 362 | cv := emptyValue(c.Config) 363 | contains, err := c.Mast.Get(ctx, key, &cv) 364 | if err != nil || !contains { 365 | return false, err 366 | } 367 | if cv.TombstoneSinceEpochNanos > 0 { 368 | return false, nil 369 | } 370 | if cvp, ok := value.(*crdt.Value); ok { 371 | *cvp = cv 372 | return true, nil 373 | } 374 | reflect.ValueOf(value).Elem().Set(reflect.ValueOf(cv.Value)) 375 | return true, nil 376 | } 377 | 378 | func (c *Tree) Size() uint64 { 379 | return c.Mast.Size() 380 | } 381 | 382 | func (c Tree) Clone(ctx context.Context) (*Tree, error) { 383 | clone := c 384 | clonedMast, err := c.Mast.Clone(ctx) 385 | if err != nil { 386 | return nil, err 387 | } 388 | clone.Mast = &clonedMast 389 | return &clone, nil 390 | } 391 | 392 | func (c Tree) IsDirty() bool { 393 | return c.Mast.IsDirty() 394 | } 395 | 
-------------------------------------------------------------------------------- /kv/internal/crdt/crdt_test.go: -------------------------------------------------------------------------------- 1 | package crdt 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/gob" 7 | "fmt" 8 | "io/ioutil" 9 | "testing" 10 | "time" 11 | 12 | "github.com/stretchr/testify/assert" 13 | "github.com/stretchr/testify/require" 14 | 15 | "github.com/jrhy/mast" 16 | "github.com/jrhy/mast/persist/file" 17 | ) 18 | 19 | var ctx = context.Background() 20 | 21 | func TestHappyCase(t *testing.T) { 22 | t.Parallel() 23 | forEachMarshaler(t, func(t *testing.T, cfg Config) { 24 | store := mast.NewInMemoryStore() 25 | emptyRoot := NewRoot(time.Now(), mast.DefaultBranchFactor) 26 | cfg.KeysLike = 1234 27 | cfg.ValuesLike = "hi" 28 | cfg.StoreImmutablePartsWith = store 29 | 30 | c1, err := Load(ctx, cfg, nil, emptyRoot) 31 | require.NoError(t, err) 32 | err = c1.Set(ctx, time.Now(), 0, "tree 1 key") 33 | require.NoError(t, err) 34 | err = c1.Set(ctx, time.Now(), 1, "first write loses") 35 | require.NoError(t, err) 36 | c1Root, err := c1.MakeRoot(ctx) 37 | require.NoError(t, err) 38 | 39 | c2, err := Load(ctx, cfg, nil, emptyRoot) 40 | require.NoError(t, err) 41 | err = c2.Set(ctx, time.Now(), 1, "last write wins") 42 | require.NoError(t, err) 43 | err = c2.Set(ctx, time.Now(), 2, "tree 2 key") 44 | require.NoError(t, err) 45 | c2Root, err := c2.MakeRoot(ctx) 46 | require.NoError(t, err) 47 | 48 | c3, err := Load(ctx, cfg, nil, emptyRoot) 49 | require.NoError(t, err) 50 | err = c3.Merge(ctx, c1) 51 | require.NoError(t, err) 52 | err = c3.Merge(ctx, c2) 53 | require.NoError(t, err) 54 | c3Root, err := c3.MakeRoot(ctx) 55 | require.NoError(t, err) 56 | 57 | c4, err := Load(ctx, cfg, nil, emptyRoot) 58 | require.NoError(t, err) 59 | c2, err = Load(ctx, cfg, c2Root.Link, *c2Root) 60 | require.NoError(t, err) 61 | err = c4.Merge(ctx, c2) 62 | require.NoError(t, err) 63 | c1, err = Load(ctx, cfg, 
c1Root.Link, *c1Root) 64 | require.NoError(t, err) 65 | err = c4.Merge(ctx, c1) 66 | require.NoError(t, err) 67 | c4Root, err := c4.MakeRoot(ctx) 68 | require.NoError(t, err) 69 | 70 | require.Equal(t, *c3Root.Link, *c4Root.Link) 71 | 72 | var v string 73 | contains, err := c4.Get(ctx, 1, &v) 74 | require.NoError(t, err) 75 | require.True(t, contains) 76 | require.Equal(t, "last write wins", v) 77 | 78 | contains, err = c4.Get(ctx, 999, &v) 79 | require.NoError(t, err) 80 | require.False(t, contains) 81 | 82 | contains, err = c4.Get(ctx, 0, &v) 83 | require.NoError(t, err) 84 | require.True(t, contains) 85 | require.Equal(t, "tree 1 key", v) 86 | 87 | contains, err = c4.Get(ctx, 2, &v) 88 | require.NoError(t, err) 89 | require.True(t, contains) 90 | require.Equal(t, "tree 2 key", v) 91 | }) 92 | } 93 | 94 | func forEachMarshaler(t *testing.T, f func(*testing.T, Config)) { 95 | t.Run("json", func(t *testing.T) { 96 | f(t, Config{}) 97 | }) 98 | t.Run("gob", func(t *testing.T) { 99 | f(t, Config{ 100 | Marshal: marshalGob, 101 | Unmarshal: unmarshalGob, 102 | UnmarshalerUsesRegisteredTypes: true, 103 | }) 104 | }) 105 | } 106 | 107 | func TestStructValues(t *testing.T) { 108 | t.Parallel() 109 | type asdf struct { 110 | I int64 111 | F float64 112 | } 113 | type foo struct { 114 | Asdf asdf 115 | Jk bool 116 | } 117 | gob.Register(foo{}) 118 | forEachMarshaler(t, func(t *testing.T, cfg Config) { 119 | store := mast.NewInMemoryStore() 120 | emptyRoot := NewRoot(time.Now(), mast.DefaultBranchFactor) 121 | cfg.KeysLike = 1234 122 | cfg.ValuesLike = foo{asdf{1, 3.14}, true} 123 | cfg.StoreImmutablePartsWith = store 124 | 125 | c1, err := Load(ctx, cfg, nil, emptyRoot) 126 | require.NoError(t, err) 127 | err = c1.Set(ctx, time.Now(), 0, foo{asdf{1, 3.14}, true}) 128 | require.NoError(t, err) 129 | c1Root, err := c1.MakeRoot(ctx) 130 | require.NoError(t, err) 131 | c1, err = Load(ctx, cfg, c1Root.Link, *c1Root) 132 | require.NoError(t, err) 133 | var f foo 134 | contains, 
err := c1.Get(ctx, 0, &f) 135 | require.NoError(t, err) 136 | require.True(t, contains) 137 | require.Equal(t, foo{asdf{1, 3.14}, true}, f) 138 | }) 139 | } 140 | 141 | func TestFile(t *testing.T) { 142 | t.Parallel() 143 | 144 | type MyObject struct { 145 | A string 146 | } 147 | gob.Register(MyObject{}) 148 | 149 | forEachMarshaler(t, func(t *testing.T, cfg Config) { 150 | dir, err := ioutil.TempDir("", "kvtest") 151 | require.NoError(t, err) 152 | 153 | persist := file.NewPersistForPath(dir) 154 | 155 | empty := NewRoot(time.Now(), mast.DefaultBranchFactor) 156 | cfg.KeysLike = "hi" 157 | cfg.ValuesLike = MyObject{} 158 | cfg.StoreImmutablePartsWith = persist 159 | 160 | s1, err := Load(ctx, cfg, nil, empty) 161 | require.NoError(t, err) 162 | err = s1.Set(ctx, time.Now(), "user1", MyObject{A: "a"}) 163 | require.NoError(t, err) 164 | _, err = s1.MakeRoot(ctx) 165 | require.NoError(t, err) 166 | var v MyObject 167 | found, err := s1.Get(ctx, "user1", &v) 168 | require.NoError(t, err) 169 | assert.True(t, found) 170 | assert.Equal(t, MyObject{"a"}, v) 171 | 172 | s2, err := Load(ctx, cfg, nil, empty) 173 | require.NoError(t, err) 174 | err = s2.Set(ctx, time.Now(), "user1", MyObject{A: "b"}) 175 | require.NoError(t, err) 176 | _, err = s2.MakeRoot(ctx) 177 | require.NoError(t, err) 178 | 179 | err = s1.Merge(ctx, s2) 180 | require.NoError(t, err) 181 | assert.Equal(t, uint64(1), s1.Size()) 182 | assert.Equal(t, uint64(1), s2.Size()) 183 | found, err = s1.Get(ctx, "user1", &v) 184 | require.NoError(t, err) 185 | assert.True(t, found) 186 | assert.Equal(t, MyObject{"b"}, v) 187 | }) 188 | } 189 | 190 | func marshalGob(thing interface{}) ([]byte, error) { 191 | var network bytes.Buffer 192 | enc := gob.NewEncoder(&network) 193 | err := enc.Encode(thing) 194 | if err != nil { 195 | return nil, fmt.Errorf("encode: %w", err) 196 | } 197 | return network.Bytes(), nil 198 | } 199 | 200 | func unmarshalGob(input []byte, thing interface{}) error { 201 | dec := 
gob.NewDecoder(bytes.NewBuffer(input)) 202 | err := dec.Decode(thing) 203 | if err != nil { 204 | return fmt.Errorf("decode: %w", err) 205 | } 206 | return nil 207 | } 208 | -------------------------------------------------------------------------------- /open.go: -------------------------------------------------------------------------------- 1 | package s3db 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | "sync" 8 | "time" 9 | 10 | "github.com/aws/aws-sdk-go/aws" 11 | "github.com/aws/aws-sdk-go/aws/session" 12 | "github.com/aws/aws-sdk-go/service/s3" 13 | "github.com/jrhy/mast" 14 | "github.com/jrhy/mast/persist/s3test" 15 | "github.com/jrhy/s3db/kv" 16 | v1proto "github.com/jrhy/s3db/proto/v1" 17 | ) 18 | 19 | type KV struct { 20 | Root *kv.DB 21 | Closer func() 22 | } 23 | 24 | var ( 25 | inMemoryS3Lock sync.Mutex 26 | inMemoryS3 *s3.S3 27 | inMemoryBucket string 28 | ) 29 | 30 | func OpenKV(ctx context.Context, s3opts S3Options, subdir string) (*KV, error) { 31 | var err error 32 | var c kv.S3Interface 33 | if s3opts.Bucket == "" { 34 | if s3opts.Endpoint != "" { 35 | return nil, fmt.Errorf("s3_endpoint specified without s3_bucket") 36 | } 37 | inMemoryS3Lock.Lock() 38 | defer inMemoryS3Lock.Unlock() 39 | if inMemoryS3 == nil { 40 | inMemoryS3, inMemoryBucket, _ = s3test.Client() 41 | } 42 | s3opts.Endpoint = inMemoryS3.Endpoint 43 | s3opts.Bucket = inMemoryBucket 44 | c = inMemoryS3 45 | } else { 46 | c, err = getS3(s3opts.Endpoint) 47 | if err != nil { 48 | return nil, fmt.Errorf("s3 client: %w", err) 49 | } 50 | } 51 | path := strings.TrimPrefix(strings.TrimPrefix(strings.TrimSuffix(s3opts.Prefix, "/"), "/")+"/"+strings.TrimPrefix(subdir, "/"), "/") 52 | 53 | cfg := kv.Config{ 54 | Storage: &kv.S3BucketInfo{ 55 | EndpointURL: s3opts.Endpoint, 56 | BucketName: s3opts.Bucket, 57 | Prefix: path, 58 | }, 59 | KeysLike: &Key{}, 60 | ValuesLike: &v1proto.Row{}, 61 | CustomMerge: mergeValues, 62 | CustomMarshal: marshalProto, 63 | CustomUnmarshal: 
unmarshalProto, 64 | MastNodeFormat: string(mast.V1Marshaler), 65 | UnmarshalUsesRegisteredTypes: true, 66 | } 67 | if s3opts.NodeCacheEntries > 0 { 68 | cfg.NodeCache = mast.NewNodeCache(s3opts.NodeCacheEntries) 69 | } 70 | if s3opts.EntriesPerNode > 0 { 71 | cfg.BranchFactor = uint(s3opts.EntriesPerNode) 72 | } 73 | openOpts := kv.OpenOptions{ 74 | ReadOnly: s3opts.ReadOnly, 75 | OnlyVersions: s3opts.OnlyVersions, 76 | } 77 | s, err := kv.Open(ctx, c, cfg, openOpts, time.Now()) 78 | if err != nil { 79 | return nil, fmt.Errorf("open: %w", err) 80 | } 81 | dbg("%s size:%d\n", subdir, s.Size()) 82 | return &KV{ 83 | Root: s, 84 | }, nil 85 | } 86 | 87 | type S3Options struct { 88 | Bucket string 89 | Endpoint string 90 | Prefix string 91 | 92 | EntriesPerNode int 93 | NodeCacheEntries int 94 | ReadOnly bool 95 | OnlyVersions []string 96 | } 97 | 98 | func getS3(endpoint string) (*s3.S3, error) { 99 | config := aws.Config{} 100 | if endpoint != "" { 101 | config.Endpoint = &endpoint 102 | config.S3ForcePathStyle = aws.Bool(true) 103 | } 104 | 105 | sess, err := session.NewSession(&config) 106 | if err != nil { 107 | return nil, fmt.Errorf("session: %w", err) 108 | } 109 | 110 | return s3.New(sess), nil 111 | } 112 | -------------------------------------------------------------------------------- /proto/buf.gen.yaml: -------------------------------------------------------------------------------- 1 | version: v1 2 | plugins: 3 | - name: go 4 | out: . 5 | opt: paths=source_relative 6 | - name: go-grpc 7 | out: . 
8 | opt: paths=source_relative,require_unimplemented_servers=false 9 | 10 | 11 | -------------------------------------------------------------------------------- /proto/buf.yaml: -------------------------------------------------------------------------------- 1 | version: v1 2 | breaking: 3 | use: 4 | - FILE 5 | lint: 6 | use: 7 | - DEFAULT 8 | -------------------------------------------------------------------------------- /proto/generate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -euo pipefail 4 | 5 | export GOPATH=/tmp/s3db/proto-gotools 6 | export PATH="$(go env GOPATH)/bin:$PATH" 7 | 8 | go install github.com/bufbuild/buf/cmd/buf@v1.50.0 9 | go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.31.0 10 | buf generate 11 | 12 | -------------------------------------------------------------------------------- /proto/v1/node.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package jrhy.s3db.v1; 4 | 5 | import "google/protobuf/any.proto"; 6 | import "google/protobuf/duration.proto"; 7 | 8 | option go_package = "github.com/jrhy/s3db/proto/v1"; 9 | 10 | message Node { 11 | repeated SQLiteValue key = 1; 12 | repeated CRDTValue value = 2; 13 | repeated string link = 3; 14 | } 15 | 16 | enum Type { 17 | NULL = 0; 18 | INT = 1; 19 | REAL = 2; 20 | TEXT = 3; 21 | BLOB = 4; 22 | } 23 | 24 | message SQLiteValue { 25 | Type Type = 1; 26 | int64 Int = 2; 27 | double Real = 3; 28 | string Text = 4; 29 | bytes Blob = 5; 30 | } 31 | 32 | message CRDTValue { 33 | int64 ModEpochNanos = 1; 34 | string PreviousRoot = 2; 35 | int64 TombstoneSinceEpochNanos = 3; 36 | Row Value = 4; 37 | } 38 | 39 | message Row { 40 | map ColumnValues = 1; 41 | bool Deleted = 2; 42 | google.protobuf.Duration DeleteUpdateOffset = 3; 43 | } 44 | 45 | message ColumnValue { 46 | google.protobuf.Duration UpdateOffset = 1; 47 | SQLiteValue Value = 2; 48 | } 49 | 
-------------------------------------------------------------------------------- /release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | SQLITE3_VERSION=3490100 4 | 5 | set -o errexit 6 | set -o pipefail 7 | set -o nounset 8 | set -x 9 | 10 | mkdir release 11 | 12 | go version 13 | 14 | go get -t ./... 15 | go vet ./... 16 | go test -race ./... 17 | go mod tidy 18 | git diff --name-only --exit-code go.mod || (echo "Please run 'go mod tidy'."; exit 1) 19 | 20 | # install zig and qemu-user 21 | which zig || ( cd /tmp && curl -LO https://ziglang.org/download/0.14.0/zig-linux-aarch64-0.14.0.tar.xz && ( xzcat zig*xz | tar xf - ) && cd zig*/ && ln -s `pwd`/zig /usr/bin/zig ) 22 | which qemu-arm || ( sudo apt-get -y update && sudo apt-get -y install qemu-user libc6-armhf-cross libc6-arm64-cross libc6-amd64-cross ) 23 | 24 | # cross-compile extensions 25 | cd sqlite/sharedlib 26 | CGO_ENABLED=1 GOOS=linux GOARCH=arm CC="zig cc -target arm-linux-gnueabihf" go generate && mv s3db.so ../../release/s3db-linux-arm-glibc.sqlite-ext.so 27 | CGO_ENABLED=1 GOOS=linux GOARCH=arm64 CC="zig cc -target aarch64-linux-gnu" go generate && mv s3db.so ../../release/s3db-linux-arm64-glibc.sqlite-ext.so 28 | CGO_ENABLED=1 GOOS=linux GOARCH=amd64 CC="zig cc -target x86_64-linux-gnu" go generate && mv s3db.so ../../release/s3db-linux-amd64-glibc.sqlite-ext.so 29 | #segfaults CGO_ENABLED=1 GOOS=linux GOARCH=386 GO386=sse2 CC="zig cc -target x86-linux-gnu" go generate && mv s3db.so ../../release/s3db-linux-386-glibc.sqlite-ext.so 30 | #ld.lld: error: .cache/zig/o/e51b22516508da4ed5a02967b5ed4c8c/_cgo_export.o is incompatible with elf32_x86_64 31 | #CGO_ENABLED=1 GOOS=linux GOARCH=386 CC="zig cc -target x86_64-linux-gnux32" go generate && mv s3db.so ../../release/s3db-linux-386-glibc.sqlite-ext.so 32 | cd ../.. 33 | 34 | # cross-compile sqlite 35 | if ! 
[ -f /tmp/sqlite-amalgamation-$SQLITE3_VERSION/sqlite-arm ] ; then 36 | pushd /tmp 37 | curl -LO https://www.sqlite.org/2025/sqlite-amalgamation-$SQLITE3_VERSION.zip 38 | unzip sqlite-amalgamation-$SQLITE3_VERSION.zip 39 | cd sqlite-amalgamation-$SQLITE3_VERSION 40 | zig cc -target arm-linux-gnueabihf -o sqlite-arm *.c 41 | zig cc -target aarch64-linux-gnu -o sqlite-arm64 *.c 42 | zig cc -target x86_64-linux-gnu -o sqlite-amd64 *.c 43 | popd 44 | fi 45 | 46 | # verify each target can load the extension 47 | set +o pipefail 48 | ( qemu-arm -L /usr/arm-linux-gnueabihf/ /tmp/sqlite-amalgamation-$SQLITE3_VERSION/sqlite-arm -bail -cmd ".load release/s3db-linux-arm-glibc.sqlite-ext.so" -cmd "create virtual table f using s3db" 2>&1 | grep 'columns and constraints' ) || ( echo failed to load s3db extension for arm ; exit 1 ) 49 | ( qemu-aarch64 -L /usr/aarch64-linux-gnu/ /tmp/sqlite-amalgamation-$SQLITE3_VERSION/sqlite-arm64 -bail -cmd ".load release/s3db-linux-arm64-glibc.sqlite-ext.so" -cmd "create virtual table f using s3db" 2>&1 | grep 'columns and constraints' ) || ( echo failed to load s3db extension for arm64 ; exit 1 ) 50 | ( qemu-x86_64 -L /usr/x86_64-linux-gnu/ /tmp/sqlite-amalgamation-$SQLITE3_VERSION/sqlite-amd64 -bail -cmd ".load release/s3db-linux-amd64-glibc.sqlite-ext.so" -cmd "create virtual table f using s3db" 2>&1 | grep 'columns and constraints' ) || ( echo failed to load s3db extension for amd64 ; exit 1 ) 51 | 52 | cd release 53 | gzip -9 * 54 | 55 | -------------------------------------------------------------------------------- /row.go: -------------------------------------------------------------------------------- 1 | package s3db 2 | 3 | import ( 4 | "time" 5 | 6 | v1proto "github.com/jrhy/s3db/proto/v1" 7 | "google.golang.org/protobuf/types/known/durationpb" 8 | ) 9 | 10 | func DeleteUpdateTime(baseTime time.Time, offset *durationpb.Duration) time.Time { 11 | return baseTime.Add(offset.AsDuration()) 12 | } 13 | func UpdateTime(baseTime 
// ColumnValue is a SQL column value: TEXT, REAL, INT, BLOB, or NULL.
type ColumnValue interface {
	String() string
	ToBool() *bool
}

type Text string
type Real float64
type Int int64
type Blob []byte
type Null struct{}

func (v Text) String() string { return string(v) }

// String renders a REAL so that it reads back as a REAL: when the shortest
// representation has neither a decimal point nor an exponent (e.g. "3"),
// a trailing ".0" is appended to distinguish it from an INT literal.
//
// Fix: previously ".0" was appended whenever no "." was present, which
// corrupted exponent forms ("1e+21" became the invalid literal "1e+21.0").
func (v Real) String() string {
	res := strconv.FormatFloat(float64(v), 'g', -1, 64)
	if !strings.ContainsAny(res, ".eE") {
		res += ".0"
	}
	return res
}

func (v Int) String() string  { return strconv.FormatInt(int64(v), 10) }
func (v Blob) String() string { return strconv.Quote(string(v)) }
func (v Null) String() string { return "NULL" }

// ToGo unwraps a ColumnValue into the corresponding native Go value
// ([]byte, int64, nil, float64, or string). It panics on an unknown
// implementation, which would indicate a programming error.
func ToGo(cv ColumnValue) interface{} {
	switch x := cv.(type) {
	case Blob:
		return []byte(x)
	case Int:
		return int64(x)
	case Null:
		return nil
	case Real:
		return float64(x)
	case Text:
		return string(x)
	default:
		panic(fmt.Errorf("unhandled colval %T", cv))
	}
}

// SQL three-valued logic: numbers are true when nonzero, NULL is unknown
// (nil), and TEXT/BLOB are always false.
func (v Int) ToBool() *bool  { b := v != 0; return &b }
func (v Real) ToBool() *bool { b := v != 0.0; return &b }
func (v Null) ToBool() *bool { return nil }
func (v Text) ToBool() *bool { b := false; return &b }
func (v Blob) ToBool() *bool { b := false; return &b }

// ToBool converts a ColumnValue to SQL three-valued logic. It delegates to
// the value's own ToBool method; previously the switch here duplicated the
// method implementations, risking divergence. A nil interface is treated
// as false, matching the old switch's fall-through behavior.
func ToBool(cv ColumnValue) *bool {
	if cv == nil {
		b := false
		return &b
	}
	return cv.ToBool()
}
-------------------------------------------------------------------------------- /sql/colval/colval_test.go: -------------------------------------------------------------------------------- 1 | package colval_test 2 | 3 | import ( 4 | "github.com/jrhy/s3db/sql/colval" 5 | ) 6 | 7 | var _ colval.ColumnValue = colval.Text("test") 8 | var _ colval.ColumnValue = colval.Blob([]byte("test")) 9 | var _ colval.ColumnValue = colval.Real(3.14) 10 | var _ colval.ColumnValue = colval.Int(123) 11 | var _ colval.ColumnValue = colval.Null{} 12 | -------------------------------------------------------------------------------- /sql/expr.go: -------------------------------------------------------------------------------- 1 | package sql 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "math" 7 | "strconv" 8 | 9 | "github.com/johncgriffin/overflow" 10 | 11 | "github.com/jrhy/s3db/sql/colval" 12 | p "github.com/jrhy/s3db/sql/parse" 13 | "github.com/jrhy/s3db/sql/types" 14 | ) 15 | 16 | var ( 17 | operatorPrecedence = make(map[string]int) 18 | operatorMatchOrder []string 19 | precedence int 20 | ) 21 | 22 | func init() { 23 | nextPrecedence("or") 24 | nextPrecedence("and") 25 | nextPrecedence("not") 26 | nextPrecedence("||") 27 | nextPrecedence("=", "!=", "==", "<>") 28 | nextPrecedence("<", "<=", ">", ">=") 29 | nextPrecedence("+", "-") 30 | nextPrecedence("*", "/", "%") 31 | nextPrecedence("<<", ">>") 32 | nextPrecedence("|") 33 | nextPrecedence("&") 34 | assignOperatorMatchOrder() 35 | } 36 | 37 | func nextPrecedence(s ...string) { 38 | precedence = precedence + 1 39 | for i := range s { 40 | operatorPrecedence[s[i]] = precedence 41 | } 42 | } 43 | 44 | func assignOperatorMatchOrder() { 45 | var maxLength int 46 | for k := range operatorPrecedence { 47 | if len(k) > maxLength { 48 | maxLength = len(k) 49 | } 50 | } 51 | for l := maxLength; l > 0; l-- { 52 | for k := range operatorPrecedence { 53 | if len(k) != l { 54 | continue 55 | } 56 | operatorMatchOrder = append(operatorMatchOrder, 
k) 57 | } 58 | } 59 | } 60 | 61 | func Expression(res **types.Evaluator) p.Func { 62 | return binaryExpr(res) 63 | } 64 | 65 | func toInt(cv colval.ColumnValue) *int64 { 66 | switch v := (cv).(type) { 67 | case colval.Int: 68 | i := int64(v) 69 | return &i 70 | case colval.Real: 71 | i := int64(float64(v)) 72 | return &i 73 | case colval.Text: 74 | i, _ := strconv.ParseInt(string(v), 0, 64) 75 | return &i 76 | } 77 | return nil 78 | } 79 | 80 | func toReal(cv colval.ColumnValue) *float64 { 81 | switch v := (cv).(type) { 82 | case colval.Int: 83 | f := float64(v) 84 | return &f 85 | case colval.Real: 86 | f := float64(v) 87 | return &f 88 | case colval.Text: 89 | matches := RealValueRE.FindStringSubmatch(string(v)) 90 | var f float64 91 | if len(matches) > 0 { 92 | var err error 93 | f, err = strconv.ParseFloat(matches[1], 64) 94 | if err != nil { 95 | fmt.Printf("ERRRRRRRRR %v\n", err) 96 | } 97 | } 98 | return &f 99 | } 100 | return nil 101 | } 102 | 103 | func binaryExpr(res **types.Evaluator) p.Func { 104 | var cv colval.ColumnValue 105 | var name string 106 | cvParser := ColumnValueParser(&cv) 107 | colRefParser := SQLName(&name) 108 | return func(e *p.Parser) bool { 109 | var valStack []types.Evaluator 110 | var opStack []string 111 | var precStack []int 112 | var minPrecedence = 1 113 | for { 114 | name = "" 115 | e.SkipWS() 116 | unaryMinus := false 117 | for { 118 | if e.Exact("-") { 119 | unaryMinus = !unaryMinus 120 | } else if e.Exact("+") { 121 | 122 | } else { 123 | break 124 | } 125 | e.SkipWS() 126 | } 127 | if e.Exact("(") { 128 | var ev *types.Evaluator 129 | subExpressionParser := binaryExpr(&ev) 130 | if e.Match(subExpressionParser) && e.SkipWS() && e.Exact(")") { 131 | valStack = append(valStack, *ev) 132 | } else { 133 | return false 134 | } 135 | } else if e.Match(cvParser) { 136 | fmt.Printf("got cv: %v\n", cv) 137 | cv := cv 138 | valStack = append(valStack, types.Evaluator{Func: func(_ map[string]colval.ColumnValue) colval.ColumnValue { 
return cv }}) 139 | } else if e.Match(colRefParser) { 140 | name := name 141 | valStack = append(valStack, types.Evaluator{ 142 | Func: func(inputs map[string]colval.ColumnValue) colval.ColumnValue { 143 | fmt.Printf("deref! %s -> %v\n", name, inputs[name]) 144 | res, ok := inputs[name] 145 | if !ok { 146 | panic(fmt.Errorf("column reference missing in inputs: %s", name)) 147 | } 148 | return res 149 | }, 150 | Inputs: map[string]struct{}{name: {}}, 151 | }) 152 | } else { 153 | fmt.Printf("NO EXPR MATCH\n") 154 | return false 155 | } 156 | if unaryMinus { 157 | vals := []types.Evaluator{ 158 | { 159 | Func: func(_ map[string]colval.ColumnValue) colval.ColumnValue { 160 | return colval.Int(0) 161 | }, 162 | }, 163 | valStack[len(valStack)-1], 164 | } 165 | 166 | valStack = append(valStack[:len(valStack)-1], 167 | binaryArithmetic(vals, 168 | overflow.Sub64, func(a, b float64) float64 { return a - b })) 169 | unaryMinus = false 170 | } 171 | 172 | e.SkipWS() 173 | for { 174 | fmt.Printf("input: %s\n", e.Remaining) 175 | if false { 176 | fmt.Printf("valStack: ") 177 | for i := range valStack { 178 | if len(valStack[i].Inputs) == 0 { 179 | fmt.Printf("%v ", valStack[i].Func(nil)) 180 | } else { 181 | fmt.Printf("%v ", valStack[i]) 182 | } 183 | } 184 | } 185 | fmt.Printf("\nopStack: ") 186 | for i := range opStack { 187 | fmt.Printf("%s ", opStack[i]) 188 | } 189 | fmt.Printf("\nprecStack: ") 190 | for i := range precStack { 191 | fmt.Printf("%d ", precStack[i]) 192 | } 193 | fmt.Printf("\n") 194 | 195 | matchWithPrecedence := func() bool { 196 | for _, op := range operatorMatchOrder { 197 | opPrecedence := operatorPrecedence[op] 198 | if minPrecedence > opPrecedence { 199 | continue 200 | } 201 | if !e.Exact(op) { 202 | continue 203 | } 204 | fmt.Printf("pushing %s\n", op) 205 | opStack = append(opStack, op) 206 | precStack = append(precStack, minPrecedence) 207 | if opPrecedence > minPrecedence { 208 | fmt.Printf("upshift!\n") 209 | } 210 | minPrecedence = 
opPrecedence 211 | return true 212 | } 213 | return false 214 | } 215 | if matchWithPrecedence() { 216 | break 217 | } else if len(valStack) >= 2 { 218 | fmt.Printf("downshift!\n") 219 | op := opStack[len(opStack)-1] 220 | vals := valStack[len(valStack)-2:] 221 | valStack = valStack[:len(valStack)-len(vals)] 222 | minPrecedence = precStack[len(precStack)-1] 223 | precStack = precStack[:len(precStack)-1] 224 | opStack = opStack[:len(opStack)-1] 225 | fmt.Printf("vals: %v, op %s\n", vals, op) 226 | switch op { 227 | case "or": 228 | valStack = append(valStack, or(vals)) 229 | case "and": 230 | valStack = append(valStack, and(vals)) 231 | case "-": 232 | valStack = append(valStack, 233 | binaryArithmetic(vals, 234 | overflow.Sub64, func(a, b float64) float64 { return a - b })) 235 | case "+": 236 | valStack = append(valStack, 237 | binaryArithmetic(vals, 238 | overflow.Add64, func(a, b float64) float64 { return a + b })) 239 | case "*": 240 | valStack = append(valStack, 241 | binaryArithmetic(vals, 242 | overflow.Mul64, func(a, b float64) float64 { return a * b })) 243 | case "/": 244 | valStack = append(valStack, 245 | binaryArithmetic(vals, 246 | overflow.Div64, func(a, b float64) float64 { return a / b })) 247 | case "%": 248 | valStack = append(valStack, 249 | binaryArithmetic(vals, 250 | func(a, b int64) (int64, bool) { return a % b, true }, 251 | func(a, b float64) float64 { return math.Remainder(a, b) + b })) 252 | case "!=", "<>": 253 | valStack = append(valStack, 254 | binaryComparison(vals, 255 | func(a, b int64) bool { return a != b }, 256 | func(a, b float64) bool { return a != b })) 257 | case "<": 258 | valStack = append(valStack, 259 | binaryComparison(vals, 260 | func(a, b int64) bool { return a < b }, 261 | func(a, b float64) bool { return a < b })) 262 | case "<=": 263 | valStack = append(valStack, 264 | binaryComparison(vals, 265 | func(a, b int64) bool { return a <= b }, 266 | func(a, b float64) bool { return a <= b })) 267 | case ">": 268 | 
valStack = append(valStack, 269 | binaryComparison(vals, 270 | func(a, b int64) bool { return a > b }, 271 | func(a, b float64) bool { return a > b })) 272 | case ">=": 273 | valStack = append(valStack, 274 | binaryComparison(vals, 275 | func(a, b int64) bool { return a >= b }, 276 | func(a, b float64) bool { return a >= b })) 277 | case "=": 278 | valStack = append(valStack, equal(vals)) 279 | case "||": 280 | valStack = append(valStack, concat(vals)) 281 | default: 282 | panic(op) 283 | } 284 | continue 285 | } else if len(valStack) == 1 { 286 | fmt.Printf("DONE\n") 287 | v := valStack[0] 288 | *res = &v 289 | return true 290 | } 291 | break 292 | } 293 | } 294 | } 295 | } 296 | 297 | func requireDimensions(x, y int, cv [][]colval.ColumnValue) error { 298 | if len(cv) != y || y > 0 && len(cv[0]) != x { 299 | return fmt.Errorf("require %dx%d dimensions", x, y) 300 | } 301 | return nil 302 | } 303 | 304 | func requireSingle(cv [][]colval.ColumnValue) error { return requireDimensions(1, 1, cv) } 305 | 306 | func combineInputs(evaluators []types.Evaluator) map[string]struct{} { 307 | combined := make(map[string]struct{}, len(evaluators)*2) 308 | for i := range evaluators { 309 | for k := range evaluators[i].Inputs { 310 | combined[k] = struct{}{} 311 | } 312 | } 313 | return combined 314 | } 315 | 316 | func equal(inputs []types.Evaluator) types.Evaluator { 317 | capture := []types.Evaluator{inputs[0], inputs[1]} 318 | return types.Evaluator{ 319 | Inputs: combineInputs(capture), 320 | Func: func(inputs map[string]colval.ColumnValue) colval.ColumnValue { 321 | col := []colval.ColumnValue{capture[0].Func(inputs), capture[1].Func(inputs)} 322 | if isNull(col[0]) || isNull(col[1]) { 323 | return colval.Null{} 324 | } 325 | if isText(col[0]) && isText(col[1]) { 326 | return boolCV(col[0].(colval.Text) == col[1].(colval.Text)) 327 | } 328 | if isBlob(col[0]) && isBlob(col[1]) { 329 | return boolCV(bytes.Equal(col[0].(colval.Blob), col[1].(colval.Blob))) 330 | } 331 | if 
isInt(col[0]) { 332 | if isInt(col[1]) { 333 | return boolCV(col[0].(colval.Int) == col[1].(colval.Int)) 334 | } 335 | if isReal(col[1]) { 336 | return boolCV(float64(col[0].(colval.Int)) == float64(col[1].(colval.Real))) 337 | } 338 | } 339 | if isReal(col[0]) { 340 | if isInt(col[1]) { 341 | return boolCV(float64(col[0].(colval.Real)) == float64(col[1].(colval.Int))) 342 | } 343 | if isReal(col[1]) { 344 | return boolCV(col[0].(colval.Real) == col[1].(colval.Real)) 345 | } 346 | } 347 | return boolCV(false) 348 | }} 349 | } 350 | func boolCV(b bool) colval.ColumnValue { 351 | if b { 352 | return colval.Int(1) 353 | } else { 354 | return colval.Int(0) 355 | } 356 | } 357 | 358 | func isNull(cv colval.ColumnValue) bool { 359 | _, isNull := cv.(colval.Null) 360 | return isNull 361 | } 362 | func isInt(cv colval.ColumnValue) bool { 363 | _, isInt := cv.(colval.Int) 364 | return isInt 365 | } 366 | func isIntText(cv colval.ColumnValue) bool { 367 | s, isText := cv.(colval.Text) 368 | return isText && IntValueRE.MatchString(string(s)) 369 | } 370 | func intTextValue(cv colval.ColumnValue, res *int64) bool { 371 | s, isText := cv.(colval.Text) 372 | if !isText { 373 | return false 374 | } 375 | i, err := strconv.ParseInt(string(s), 0, 64) 376 | if err != nil { 377 | return false 378 | } 379 | *res = i 380 | return true 381 | } 382 | func isReal(cv colval.ColumnValue) bool { 383 | _, isReal := cv.(colval.Real) 384 | return isReal 385 | } 386 | func isRealText(cv colval.ColumnValue) bool { 387 | s, isText := cv.(colval.Text) 388 | return isText && RealValueRE.MatchString(string(s)) 389 | } 390 | func realTextValue(cv colval.ColumnValue, res *float64) bool { 391 | s, isText := cv.(colval.Text) 392 | if !isText { 393 | return false 394 | } 395 | f, err := strconv.ParseFloat(string(s), 64) 396 | if err != nil { 397 | return false 398 | } 399 | *res = f 400 | return true 401 | } 402 | func isText(cv colval.ColumnValue) bool { 403 | _, isText := cv.(colval.Text) 404 | return 
isText 405 | } 406 | func isBlob(cv colval.ColumnValue) bool { 407 | _, isBlob := cv.(colval.Blob) 408 | return isBlob 409 | } 410 | 411 | func or(inputs []types.Evaluator) types.Evaluator { 412 | capture := []types.Evaluator{inputs[0], inputs[1]} 413 | return types.Evaluator{ 414 | Inputs: combineInputs(capture), 415 | Func: func(inputs map[string]colval.ColumnValue) colval.ColumnValue { 416 | col := []colval.ColumnValue{capture[0].Func(inputs), capture[1].Func(inputs)} 417 | left := col[0].ToBool() 418 | if left != nil && *left { 419 | return colval.Int(1) 420 | } 421 | right := col[1].ToBool() 422 | if right != nil && *right { 423 | return colval.Int(1) 424 | } 425 | if left == nil || right == nil { 426 | return colval.Null{} 427 | } 428 | return colval.Int(0) 429 | }} 430 | } 431 | 432 | func and(inputs []types.Evaluator) types.Evaluator { 433 | capture := []types.Evaluator{inputs[0], inputs[1]} 434 | return types.Evaluator{ 435 | Inputs: combineInputs(capture), 436 | Func: func(inputs map[string]colval.ColumnValue) colval.ColumnValue { 437 | col := []colval.ColumnValue{capture[0].Func(inputs), capture[1].Func(inputs)} 438 | left := col[0].ToBool() 439 | right := col[1].ToBool() 440 | if left != nil && right != nil { 441 | return boolCV(*left && *right) 442 | } 443 | if left != nil && !*left || right != nil && !*right { 444 | return colval.Int(0) 445 | } 446 | return colval.Null{} 447 | }} 448 | } 449 | 450 | func binaryArithmetic( 451 | inputs []types.Evaluator, 452 | intFunc func(int64, int64) (int64, bool), 453 | realFunc func(float64, float64) float64, 454 | ) types.Evaluator { 455 | capture := []types.Evaluator{inputs[0], inputs[1]} 456 | return types.Evaluator{ 457 | Inputs: combineInputs(capture), 458 | Func: func(inputs map[string]colval.ColumnValue) colval.ColumnValue { 459 | col := []colval.ColumnValue{capture[0].Func(inputs), capture[1].Func(inputs)} 460 | if isNull(col[0]) || isNull(col[1]) { 461 | return colval.Null{} 462 | } 463 | if 
isReal(col[0]) || isRealText(col[0]) || isReal(col[1]) || isRealText(col[1]) { 464 | return colval.Real(realFunc(*toReal(col[0]), *toReal(col[1]))) 465 | } 466 | left := *toInt(col[0]) 467 | right := *toInt(col[1]) 468 | res, ok := intFunc(left, right) 469 | if !ok { 470 | return colval.Real(realFunc(*toReal(col[0]), *toReal(col[1]))) 471 | } 472 | return colval.Int(res) 473 | }} 474 | } 475 | 476 | /* 477 | func binaryComparison( 478 | 479 | inputs []types.Evaluator, 480 | intFunc func(int64, int64) (bool, bool), 481 | realFunc func(float64, float64) bool, 482 | 483 | ) types.Evaluator { 484 | arithmeticEvaluator := binaryArithmetic(inputs, 485 | func(a, b int64) (int64, bool) { 486 | if res, _ := intFunc(a,b); res { return 1, true } 487 | return 0, true 488 | }, func(a, b float64) float64 { 489 | if realFunc(a,b) { return 1.0 } 490 | return 0.0 491 | }) 492 | inner := arithmeticEvaluator.Func 493 | arithmeticEvaluator.Func = 494 | func(inputs map[string]colval.ColumnValue) colval.ColumnValue { 495 | cv := inner(inputs) 496 | if r, isReal := cv.(colval.Real) { 497 | return colval.Int(int64(r)) 498 | } 499 | } 500 | 501 | return arithmeticEvaluator 502 | } 503 | */ 504 | func binaryComparison( 505 | inputs []types.Evaluator, 506 | intFunc func(int64, int64) bool, 507 | realFunc func(float64, float64) bool, 508 | ) types.Evaluator { 509 | capture := []types.Evaluator{inputs[0], inputs[1]} 510 | return types.Evaluator{ 511 | Inputs: combineInputs(capture), 512 | Func: func(inputs map[string]colval.ColumnValue) colval.ColumnValue { 513 | col := []colval.ColumnValue{capture[0].Func(inputs), capture[1].Func(inputs)} 514 | if isNull(col[0]) || isNull(col[1]) { 515 | return colval.Null{} 516 | } 517 | if isReal(col[0]) || isRealText(col[0]) || isReal(col[1]) || isRealText(col[1]) { 518 | return boolCV(realFunc(*toReal(col[0]), *toReal(col[1]))) 519 | } 520 | return boolCV(intFunc(*toInt(col[0]), *toInt(col[1]))) 521 | }} 522 | } 523 | 524 | func concat( 525 | inputs 
[]types.Evaluator, 526 | ) types.Evaluator { 527 | capture := []types.Evaluator{inputs[0], inputs[1]} 528 | return types.Evaluator{ 529 | Inputs: combineInputs(capture), 530 | Func: func(inputs map[string]colval.ColumnValue) colval.ColumnValue { 531 | col := []colval.ColumnValue{capture[0].Func(inputs), capture[1].Func(inputs)} 532 | if isNull(col[0]) || isNull(col[1]) { 533 | return colval.Null{} 534 | } 535 | return colval.Text(col[0].String() + col[1].String()) 536 | }} 537 | } 538 | 539 | func ColumnValueLess(a, b colval.ColumnValue) bool { 540 | if isNull(a) || isNull(b) { 541 | return true 542 | } 543 | if isReal(a) || isRealText(a) || isReal(b) || isRealText(b) { 544 | return *toReal(a) < *toReal(b) 545 | } 546 | return *toInt(a) < *toInt(b) 547 | 548 | } 549 | -------------------------------------------------------------------------------- /sql/parse.go: -------------------------------------------------------------------------------- 1 | package sql 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "regexp" 7 | "strconv" 8 | "strings" 9 | 10 | "github.com/jrhy/s3db/sql/colval" 11 | "github.com/jrhy/s3db/sql/parse" 12 | "github.com/jrhy/s3db/sql/types" 13 | ) 14 | 15 | var StringValueRE = regexp.MustCompile(`^('(([^']|'')*)'|"([^"]*)")`) 16 | var IntValueRE = regexp.MustCompile(`^\d+`) 17 | var RealValueRE = regexp.MustCompile(`^(((\+|-)?([0-9]+)(\.[0-9]+)?)|((\+|-)?\.?[0-9]+))([Ee]\d+)?`) 18 | 19 | func ColumnValueParser(cv *colval.ColumnValue) parse.Func { 20 | return parse.OneOf( 21 | parse.RE(StringValueRE, func(s []string) bool { 22 | if len(s[2]) > 0 { 23 | *cv = colval.Text(strings.ReplaceAll(s[2], `''`, `'`)) 24 | } else { 25 | *cv = colval.Text(s[4]) 26 | } 27 | return true 28 | }), 29 | parse.RE(RealValueRE, func(s []string) bool { 30 | if !strings.ContainsAny(s[0], ".eE") { 31 | fmt.Printf("parsing as int...\n") 32 | i, err := strconv.ParseInt(s[0], 0, 64) 33 | fmt.Printf("the int: %d, conversion error: %v\n", i, err) 34 | if err == nil { 35 | *cv = 
colval.Int(i) 36 | return true 37 | } 38 | // too big or whatever, try again as real 39 | } 40 | f, err := strconv.ParseFloat(s[0], 64) 41 | if err != nil { 42 | return false 43 | } 44 | *cv = colval.Real(f) 45 | return true 46 | }), 47 | parse.CI("null").Action(func() { 48 | *cv = colval.Null{} 49 | }), 50 | ) 51 | } 52 | 53 | func Schema(s *types.Schema, errs *[]error) parse.Func { 54 | return func(b *parse.Parser) bool { 55 | var col, coltype, name string 56 | return b.Match( 57 | parse.Delimited( 58 | parse.OneOf( 59 | parse.SeqWS( 60 | parse.CI("primary"), parse.CI("key"), 61 | parse.Exact("("). 62 | Action(func() { 63 | if len(s.PrimaryKey) > 0 { 64 | *errs = append(*errs, errors.New("PRIMARY KEY specified multiple times")) 65 | } 66 | }), 67 | parse.Delimited( 68 | parse.SeqWS( 69 | SQLName(&col). 70 | Action(func() { 71 | s.PrimaryKey = append(s.PrimaryKey, col) 72 | })), 73 | parse.Exact(",")), 74 | parse.Exact(")")), 75 | parse.SeqWS( 76 | SQLName(&col). 77 | Action(func() { s.Columns = append(s.Columns, types.SchemaColumn{Name: col}) }), 78 | parse.Optional(ColumnType(&coltype)).Action(func() { 79 | s.Columns[len(s.Columns)-1].DefaultType = 80 | strings.ToLower(coltype) 81 | }), 82 | parse.Multiple( 83 | parse.OneOf( 84 | parse.SeqWS(parse.CI("primary"), parse.CI("key")). 
85 | Action(func() { 86 | if len(s.PrimaryKey) > 0 { 87 | *errs = append(*errs, errors.New("PRIMARY KEY already specified")) 88 | } else { 89 | s.PrimaryKey = []string{col} 90 | } 91 | }), 92 | parse.CI("unique").Action(func() { 93 | s.Columns[len(s.Columns)-1].Unique = true 94 | *errs = append(*errs, errors.New("UNIQUE is not supported yet")) 95 | }), 96 | parse.SeqWS(parse.CI("not"), parse.CI("null")).Action(func() { 97 | s.Columns[len(s.Columns)-1].NotNull = true 98 | }), 99 | )))), 100 | parse.Exact(",")).Action(func() { s.Name = name })) 101 | } 102 | } 103 | 104 | func debugHere(s string) parse.Func { 105 | return func(p *parse.Parser) bool { 106 | fmt.Printf("DBG debugHere %s, remaining: %s\n", s, p.Remaining) 107 | return true 108 | } 109 | } 110 | 111 | func limit(res **int64, errors *[]error) parse.Func { 112 | return parse.SeqWS(parse.CI("limit"), parseExpressionToInt(res, "limit", errors)) 113 | } 114 | func offset(res **int64, errors *[]error) parse.Func { 115 | return parse.SeqWS(parse.CI("offset"), parseExpressionToInt(res, "offset", errors)) 116 | } 117 | func parseExpressionToInt(res **int64, name string, errors *[]error) parse.Func { 118 | var e *types.Evaluator 119 | return func(b *parse.Parser) bool { 120 | return b.Match( 121 | Expression(&e).Action(func() { 122 | if len(e.Inputs) > 0 { 123 | fmt.Printf("inputs: %+v\n", e.Inputs) 124 | *errors = append(*errors, fmt.Errorf("%s: int expression cannot reference", name)) 125 | return 126 | } 127 | v := e.Func(nil) 128 | switch x := v.(type) { 129 | case colval.Int: 130 | i := int64(x) 131 | *res = &i 132 | default: 133 | *errors = append(*errors, fmt.Errorf("%s: got %T, expected int expression", name, x)) 134 | } 135 | }), 136 | ) 137 | } 138 | } 139 | 140 | func ResolveColumnRef(s *types.Schema, name string, schema **types.Schema, columnRes **types.SchemaColumn) error { 141 | var resolvedSchema *types.Schema 142 | parts := strings.Split(name, ".") 143 | if len(parts) > 2 { 144 | return 
fmt.Errorf("nested schema reference unimplemented: %s", name) 145 | } 146 | var column *types.SchemaColumn 147 | if len(parts) == 2 { 148 | var found bool 149 | s, found = s.Sources[parts[0]] 150 | if !found { 151 | return fmt.Errorf("schema not found: %s", name) 152 | } 153 | column = findColumn(s, parts[1]) 154 | } else { 155 | resolvedSchema, column = resolveUnqualifiedColumnReference(s, parts[0]) 156 | if resolvedSchema != nil { 157 | s = resolvedSchema 158 | } 159 | } 160 | if column == nil { 161 | return fmt.Errorf("not found: column %s, in %s", name, s.Name) 162 | } 163 | if schema != nil { 164 | *schema = s 165 | } 166 | if columnRes != nil { 167 | *columnRes = column 168 | } 169 | return nil 170 | } 171 | 172 | func findColumn(s *types.Schema, name string) *types.SchemaColumn { 173 | for _, c := range s.Columns { 174 | if strings.EqualFold(c.Name, name) { 175 | return &c 176 | } 177 | } 178 | return nil 179 | } 180 | func FindColumnIndex(s *types.Schema, name string) *int { 181 | for i, c := range s.Columns { 182 | if strings.EqualFold(c.Name, name) { 183 | fmt.Printf("returning MATCH column %d, c.Name=%s, name=%s\n", i, c.Name, name) 184 | return &i 185 | } 186 | } 187 | return nil 188 | } 189 | 190 | func resolveUnqualifiedColumnReference(s *types.Schema, name string) (*types.Schema, *types.SchemaColumn) { 191 | for _, c := range s.Sources { 192 | if res := findColumn(c, name); res != nil { 193 | return c, res 194 | } 195 | } 196 | return nil, nil 197 | } 198 | 199 | func where(evaluator **types.Evaluator) parse.Func { 200 | return func(b *parse.Parser) bool { 201 | return b.Match(parse.SeqWS( 202 | parse.CI("where"), 203 | Expression(evaluator), 204 | )) 205 | } 206 | } 207 | 208 | func name(res *string) parse.Func { 209 | return func(b *parse.Parser) bool { 210 | var name string 211 | return b.Match(parse.SeqWS( 212 | SQLName(&name), 213 | ).Action(func() { *res = name })) 214 | } 215 | } 216 | 217 | var sqlNameRE = 
regexp.MustCompile(`^([a-zA-Z_][a-zA-Z_0-9-\.]*)|^('(([^']|'')*)'|"([^"]*)")`) 218 | 219 | func SQLName(res *string) parse.Func { 220 | return parse.RE(sqlNameRE, func(s []string) bool { 221 | if len(s[1]) > 0 { 222 | *res = s[1] 223 | } else if len(s[3]) > 0 { 224 | *res = strings.ReplaceAll(s[3], `''`, `'`) 225 | } else { 226 | *res = s[5] 227 | } 228 | return true 229 | }) 230 | } 231 | 232 | var typeRE = regexp.MustCompile(`^(?i:text|varchar|integer|number|real)`) 233 | 234 | func ColumnType(res *string) parse.Func { 235 | return parse.RE(typeRE, func(s []string) bool { 236 | *res = s[0] 237 | return true 238 | }) 239 | } 240 | 241 | func As(as *string) parse.Func { 242 | return parse.SeqWS( 243 | parse.Optional(parse.CI("as")), 244 | SQLName(as)) 245 | } 246 | 247 | func FromItemAs(as *string) parse.Func { 248 | return parse.SeqWS( 249 | parse.Exact("").Action(func() { fmt.Printf("DBG starting fromAs") }), 250 | parse.Optional(parse.CI("as")), 251 | FromItemAlias(as)) 252 | } 253 | 254 | var sqlKeywordRE = regexp.MustCompile(`^(?i:select|from|where|join|outer|inner|left|on|using|union|except|group|all|distinct|order|limit|offset)`) 255 | var matchKeyword = parse.RE(sqlKeywordRE, func(_ []string) bool { return true }) 256 | 257 | func FromItemAlias(res *string) parse.Func { 258 | return func(p *parse.Parser) bool { 259 | if matchKeyword(p) { 260 | return false 261 | } 262 | return SQLName(res)(p) 263 | } 264 | } 265 | 266 | // TODO: consider if the constraint on 'insert or replace' or 'insert or ignore' for tables with keys may be an intuitive way to convey how coordinatorless tables will work. 
// Parser holds the scanner state: the unconsumed input, and the shortest
// Remaining observed at a rejection (useful for error reporting).
type Parser struct {
	Remaining  string
	LastReject string
}

// Copy returns an independent snapshot of the parser state, used for
// backtracking.
func (b *Parser) Copy() *Parser {
	ne := *b
	return &ne
}

// SkipWS consumes leading whitespace. It always reports true so it can be
// chained inside boolean expressions.
func (b *Parser) SkipWS() bool {
	b.Remaining = strings.TrimLeft(b.Remaining, " \t\r\n")
	return true
}

// Exact consumes prefix verbatim, reporting whether it was present.
func (b *Parser) Exact(prefix string) bool {
	if strings.HasPrefix(b.Remaining, prefix) {
		b.Remaining = b.Remaining[len(prefix):]
		return true
	}
	return false
}

// CI consumes prefix case-insensitively, reporting whether it was present.
//
// Fix: fold only the first len(prefix) bytes rather than lowercasing all
// of Remaining on every probe. The old form did O(len(input)) work per
// keyword test and, for unusual Unicode input where ToLower changes byte
// length, could misalign the prefix check with the slice that consumes it.
func (b *Parser) CI(prefix string) bool {
	if len(b.Remaining) < len(prefix) ||
		!strings.EqualFold(b.Remaining[:len(prefix)], prefix) {
		return false
	}
	b.Remaining = b.Remaining[len(prefix):]
	return true
}

// String renders the parser state as indented JSON (debugging aid).
func (b *Parser) String() string {
	bs, err := json.MarshalIndent(*b, "", " ")
	if err != nil {
		panic(err)
	}
	return string(bs)
}

// Func is a parser combinator: on success it consumes input from the
// Parser and reports true; on failure it reports false.
type Func func(*Parser) bool

// SeqWS matches fns in sequence, skipping whitespace around each element.
// State is committed only if the whole sequence matches; on failure,
// LastReject records the shortest remaining input reached.
func SeqWS(fns ...Func) Func {
	return func(b *Parser) bool {
		e := b.Copy()
		for _, f := range fns {
			e.SkipWS()
			if !f(e) {
				if len(e.Remaining) < len(b.LastReject) {
					b.LastReject = e.Remaining
				}
				return false
			}
			e.SkipWS()
		}
		*b = *e
		return true
	}
}

// CI lifts Parser.CI to a combinator.
func CI(s string) Func {
	return func(b *Parser) bool {
		return b.CI(s)
	}
}

// Exact lifts Parser.Exact to a combinator.
func Exact(s string) Func {
	return func(b *Parser) bool {
		return b.Exact(s)
	}
}

// Match applies f to the parser.
func (b *Parser) Match(f func(*Parser) bool) bool {
	return f(b)
}

// Action runs then after m matches.
func (m Func) Action(then func()) Func {
	return func(b *Parser) bool {
		if m(b) {
			then()
			return true
		}
		return false
	}
}

// Or tries m first, falling back to other.
func (m Func) Or(other Func) Func {
	return func(b *Parser) bool {
		if m(b) {
			return true
		}
		return other(b)
	}
}

// OneOf tries each alternative in order, succeeding on the first match.
func OneOf(fns ...func(*Parser) bool) Func {
	return func(b *Parser) bool {
		for _, f := range fns {
			if f(b) {
				return true
			}
		}
		return false
	}
}

// Optional matches f if possible and succeeds either way.
func Optional(f Func) Func {
	return func(b *Parser) bool {
		e := b.Copy()
		if e.Match(f) {
			*b = *e
		}
		return true
	}
}

// AtLeastOne matches one or more consecutive occurrences of f.
func AtLeastOne(f Func) Func {
	return func(b *Parser) bool {
		e := b.Copy()
		if !e.Match(f) {
			return false
		}
		for e.Match(f) {
		}
		*b = *e
		return true
	}
}

// Multiple matches zero or more consecutive occurrences of f; it always
// succeeds.
func Multiple(f Func) Func {
	return func(b *Parser) bool {
		e := b.Copy()
		for e.Match(f) {
		}
		*b = *e
		return true
	}
}

// RE matches re at the start of the input and passes the submatches to
// submatchcb, which may veto the match. The pattern must be anchored with
// ^ (and every alternative must match at the beginning) so the consumed
// length equals the match length; both invariants are enforced by panics,
// since a violation is a programming error in the grammar.
func RE(re *regexp.Regexp, submatchcb func([]string) bool) Func {
	if !strings.HasPrefix(re.String(), "^") {
		panic("regexp missing ^ restriction: " + re.String())
	}
	return func(b *Parser) bool {
		s := re.FindStringSubmatch(b.Remaining)
		if s != nil && submatchcb != nil && submatchcb(s) {
			if !strings.HasPrefix(b.Remaining, s[0]) {
				panic("pattern must restrict all alternatives to match at beginning: " + re.String())
			}
			b.Remaining = b.Remaining[len(s[0]):]
			return true
		}
		return false
	}
}

// Delimited matches one or more terms separated by delimiter.
// NOTE(review): a trailing delimiter with no following term is still
// consumed — confirm callers reject dangling delimiters elsewhere.
func Delimited(
	term Func, delimiter Func) Func {
	return func(e *Parser) bool {
		terms := 0
		for {
			if !term(e) {
				break
			}
			terms++
			if !delimiter(e) {
				break
			}
		}
		return terms > 0
	}
}

// End matches only when all input has been consumed.
func End() Func {
	return func(e *Parser) bool {
		return e.Remaining == ""
	}
}
`json:",omitempty"`
	Name        string             `json:",omitempty"`
	DefaultType string             `json:",omitempty"`
	NotNull     bool               `json:",omitempty"`
	Unique      bool               `json:",omitempty"`
	Default     colval.ColumnValue `json:",omitempty"`
}

// Select represents a SELECT statement or subquery.
type Select struct {
	Create      *Create            `json:",omitempty"`
	With        []With             `json:",omitempty"` // TODO generalize With,Values,Tables to FromItems
	Expressions []OutputExpression `json:",omitempty"`
	FromItems   []FromItem         `json:",omitempty"`
	Values      *Values            `json:",omitempty"`
	Where       *Evaluator         `json:",omitempty"`
	Join        []Join             `json:",omitempty"`
	SetOp       *SetOp             `json:",omitempty"`
	OrderBy     []OrderBy          `json:",omitempty"`
	Limit       *int64             `json:",omitempty"`
	Offset      *int64             `json:",omitempty"`
	Schema      Schema             `json:",omitempty"`
	Errors      []error            // schema errors
}

// JoinType distinguishes the supported JOIN variants.
type JoinType int

const (
	InnerJoin = JoinType(iota)
	LeftJoin
	OuterJoin
	RightJoin
)

// Join is one JOIN clause attached to a Select.
type Join struct {
	JoinType JoinType   `json:",omitempty"`
	Right    FromItem   `json:",omitempty"`
	Alias    string     `json:",omitempty"` // TODO: consolidate in Right.Alias
	On       *Evaluator `json:",omitempty"`
	Using    []string   `json:",omitempty"`
}

// Values is a literal VALUES(...) rowset.
type Values struct {
	Name   string
	Rows   []Row  `json:",omitempty"`
	Schema Schema `json:",omitempty"`
	Errors []error
}

// Row is one tuple of column values.
type Row []colval.ColumnValue

// String renders the row's values separated by single spaces.
func (r Row) String() string {
	res := ""
	for i := range r {
		if i > 0 {
			res += " "
		}
		res += r[i].String()
	}
	return res
}

// OutputExpression is one item of the SELECT list, optionally aliased.
type OutputExpression struct {
	Expression SelectExpression `json:",omitempty"`
	Alias      string           `json:",omitempty"`
}

// SelectExpression is either a column reference or a function call.
type SelectExpression struct {
	Column *Column `json:",omitempty"`
	Func   *Func   `json:",omitempty"`
}

// Column is a column reference; All is set for the `*` wildcard.
type Column struct {
	//Family Family
	Term string `json:",omitempty"`
	All  bool   `json:",omitempty"`
}

// Func is a (possibly aggregate) function invocation.
type Func struct {
	Aggregate bool
	Name      string
	RowFunc   func(Row) colval.ColumnValue
	//Expression OutputExpression //TODO: should maybe SelectExpression+*
}

// FromItem is one entry of a FROM list: a table reference or a subquery.
type FromItem struct {
	TableRef *TableRef `json:",omitempty"`
	Subquery *Select   `json:",omitempty"`
	Alias    string    `json:",omitempty"`
}

// TableRef names a table, optionally schema-qualified.
type TableRef struct {
	Schema string `json:",omitempty"`
	Table  string `json:",omitempty"`
}

// With is one common-table-expression binding.
type With struct {
	Name   string `json:",omitempty"`
	Schema Schema
	Select Select `json:",omitempty"`
	Errors []error
}

// SetOpType distinguishes UNION/INTERSECT/EXCEPT.
type SetOpType int

const (
	Union = SetOpType(iota)
	Intersect
	Except
)

// SetOp combines the enclosing Select with Right via Op.
type SetOp struct {
	Op    SetOpType `json:",omitempty"`
	All   bool      `json:",omitempty"`
	Right *Select   `json:",omitempty"`
}

// Evaluator is a compiled scalar expression over named inputs.
type Evaluator struct {
	// TODO want functions that can return ([][]colval.ColumnValue,error)
	Func   func(map[string]colval.ColumnValue) colval.ColumnValue
	Inputs map[string]struct{}
}

// Create represents CREATE TABLE/INDEX/VIEW.
type Create struct {
	Schema Schema
	Query  *Select
	Index  *Index
	View   View //TODO pointerize
	Errors []error
}

// Index describes a CREATE INDEX target and expression.
type Index struct {
	Table string
	Expr  *Evaluator
}

// View describes a CREATE VIEW.
type View struct {
	Columns         Schema
	ReplaceIfExists bool
}

// Insert represents INSERT ... VALUES or INSERT ... SELECT.
type Insert struct {
	Schema Schema
	Query  *Select
	Values *Values
	Errors []error
}

// Source supplies rows and schema for a relation at execution time.
type Source struct {
	RowIterator func() RowIterator
	Schema      func() Schema
}

// RowIterator yields rows one at a time.
type RowIterator interface {
	Next() (*Row, error)
	Schema() *Schema
}

// Drop represents DROP TABLE.
type Drop struct {
	IfExists bool
	TableRef *TableRef `json:",omitempty"`
	Errors   []error
}

// OrderBy describes one ORDER BY term.
type OrderBy struct {
	Expression   *Evaluator
190 | OutputColumn int 191 | Desc bool 192 | NullsFirst bool 193 | } 194 | -------------------------------------------------------------------------------- /sqlite/Makefile: -------------------------------------------------------------------------------- 1 | 2 | all: 3 | cd sharedlib && CGO_ENABLED=1 go generate 4 | 5 | start-minio: 6 | if ! which minio ; then \ 7 | go install -v github.com/minio/minio@latest ; \ 8 | go install -v github.com/minio/mc@latest ; \ 9 | fi 10 | if ! pgrep minio ; then \ 11 | ( MINIO_ROOT_USER=minioadmin \ 12 | MINIO_ROOT_PASSWORD=miniopassword \ 13 | minio server \ 14 | --address 127.0.0.1:9091 \ 15 | --console-address 127.0.0.1:9092 \ 16 | $$(mktemp -d) & ) ; \ 17 | sleep 3 ; \ 18 | mc alias set mys3db http://127.0.0.1:9091 minioadmin miniopassword ; \ 19 | mc ls mys3db/mybucket || mc mb mys3db/mybucket ; \ 20 | fi 21 | 22 | MINIO_ENV=\ 23 | AWS_ACCESS_KEY_ID=minioadmin \ 24 | AWS_SECRET_ACCESS_KEY=miniopassword \ 25 | AWS_REGION=dummy \ 26 | S3_ENDPOINT='http://127.0.0.1:9091' 27 | 28 | run: all scratch.sqlite 29 | export ${MINIO_ENV} ; sqlite3 scratch.sqlite \ 30 | -cmd '.load sharedlib/s3db' 31 | 32 | scratch.sqlite: 33 | export ${MINIO_ENV} ; sqlite3 scratch.sqlite \ 34 | -cmd '.load sharedlib/s3db' \ 35 | "create virtual table if not exists mytable using s3db \ 36 | (node_cache_entries=1000, \ 37 | s3_bucket='mybucket', \ 38 | s3_endpoint='$$S3_ENDPOINT', \ 39 | s3_prefix='mytable', \ 40 | columns='id PRIMARY KEY, name, email')" 41 | -------------------------------------------------------------------------------- /sqlite/s3db_changes.go: -------------------------------------------------------------------------------- 1 | package mod 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "strings" 9 | 10 | "go.riyazali.net/sqlite" 11 | 12 | "github.com/jrhy/mast" 13 | "github.com/jrhy/s3db" 14 | "github.com/jrhy/s3db/internal" 15 | "github.com/jrhy/s3db/kv" 16 | v1proto "github.com/jrhy/s3db/proto/v1" 17 | ) 
18 | 19 | type ChangesModule struct { 20 | sc *S3DBConn 21 | } 22 | 23 | func (c *ChangesModule) Connect(conn *sqlite.Conn, args []string, 24 | declare func(string) error) (sqlite.VirtualTable, error) { 25 | 26 | var err error 27 | res := &ChangesTable{ 28 | module: c, 29 | } 30 | args = args[3:] 31 | 32 | if len(args) == 0 { 33 | return nil, errors.New(` 34 | usage: 35 | from='["version1"]', starting version for getting changes, from s3db_version() 36 | table='...', table to get changes for, must already be loaded 37 | [to='["version2"]'] ending version for getting changes, from s3db_version()`) 38 | } 39 | 40 | seen := map[string]struct{}{} 41 | var fromVer, tableName, toVer string 42 | for i := range args { 43 | s := strings.SplitN(args[i], "=", 2) 44 | if _, ok := seen[s[0]]; ok { 45 | return nil, fmt.Errorf("duplicated: %s", s[0]) 46 | } 47 | seen[s[0]] = struct{}{} 48 | switch s[0] { 49 | case "from": 50 | fromVer = internal.UnquoteAll(s[1]) 51 | case "table": 52 | tableName = internal.UnquoteAll(s[1]) 53 | case "to": 54 | toVer = internal.UnquoteAll(s[1]) 55 | } 56 | } 57 | if tableName == "" { 58 | return nil, errors.New("no table specified") 59 | } 60 | tableName = internal.UnquoteAll(tableName) 61 | res.table = s3db.GetTable(tableName) 62 | if res.table == nil { 63 | return nil, fmt.Errorf("table not found: %s", tableName) 64 | } 65 | 66 | err = parseVersions(fromVer, &res.fromVer) 67 | if err != nil { 68 | return nil, fmt.Errorf("from: %w", err) 69 | } 70 | err = parseVersions(toVer, &res.toVer) 71 | if err != nil { 72 | return nil, fmt.Errorf("to: %w", err) 73 | } 74 | // Tables aren't loaded on CREATE because we don't want to get in the 75 | // way of DROP TABLE if there is some kind of problem. 
76 | 77 | err = declare(res.table.SchemaString) 78 | if err != nil { 79 | return nil, fmt.Errorf("declare: %w", err) 80 | } 81 | 82 | return res, nil 83 | } 84 | 85 | func parseVersions(s string, res *[]string) error { 86 | if s == "" { 87 | return nil 88 | } 89 | return json.Unmarshal([]byte(s), res) 90 | } 91 | 92 | func (c *ChangesModule) Create(conn *sqlite.Conn, args []string, declare func(string) error) (sqlite.VirtualTable, error) { 93 | // fmt.Printf("CREATE\n") 94 | return c.Connect(conn, args, declare) 95 | } 96 | 97 | func (c *ChangesTable) BestIndex(input *sqlite.IndexInfoInput) (*sqlite.IndexInfoOutput, error) { 98 | used := make([]*sqlite.ConstraintUsage, len(input.Constraints)) 99 | return &sqlite.IndexInfoOutput{ 100 | EstimatedCost: 1, 101 | ConstraintUsage: used, 102 | }, nil 103 | } 104 | 105 | type ChangesTable struct { 106 | table *s3db.VirtualTable 107 | module *ChangesModule 108 | 109 | fromVer []string 110 | toVer []string 111 | } 112 | 113 | func loadForDiffing(ctx context.Context, baseOptions s3db.S3Options, versions []string) (*s3db.KV, error) { 114 | options := baseOptions 115 | options.ReadOnly = true 116 | options.OnlyVersions = versions 117 | return s3db.OpenKV(ctx, options, "s3db-rows") 118 | } 119 | 120 | func (c *ChangesTable) Open() (sqlite.VirtualCursor, error) { 121 | ctx := c.module.sc.ctx 122 | var from *s3db.KV 123 | var err error 124 | if c.fromVer == nil { 125 | from = c.table.Tree 126 | } else { 127 | from, err = loadForDiffing(ctx, c.table.S3Options, c.fromVer) 128 | if err != nil { 129 | return nil, fmt.Errorf("from: %w", err) 130 | } 131 | } 132 | to, err := loadForDiffing(ctx, c.table.S3Options, c.toVer) 133 | if err != nil { 134 | return nil, fmt.Errorf("to: %w", err) 135 | } 136 | 137 | dc, err := to.Root.StartDiff(ctx, from.Root) 138 | if err != nil { 139 | return nil, toSqlite(err) 140 | } 141 | return &ChangesCursor{ 142 | module: c.module, 143 | t: c.table, 144 | diffCursor: dc, 145 | }, nil 146 | } 147 | 148 | 
func (c *ChangesTable) Disconnect() error { 149 | return nil 150 | } 151 | 152 | func (c *ChangesTable) Destroy() error { 153 | return nil 154 | } 155 | 156 | type ChangesCursor struct { 157 | module *ChangesModule 158 | t *s3db.VirtualTable 159 | currentKey *s3db.Key 160 | currentRow *v1proto.Row 161 | diffCursor *kv.DiffCursor 162 | eof bool 163 | } 164 | 165 | func (c *ChangesCursor) Next() error { 166 | if c.eof { 167 | return nil 168 | } 169 | for { 170 | de, err := c.diffCursor.NextEntry(c.module.sc.ctx) 171 | if err == mast.ErrNoMoreDiffs { 172 | c.eof = true 173 | return nil 174 | } 175 | if de.NewValue != nil { 176 | c.currentRow = de.NewValue.(*v1proto.Row) 177 | c.currentKey = de.Key.(*s3db.Key) 178 | return nil 179 | } 180 | } 181 | } 182 | 183 | func (c *ChangesCursor) Column(ctx *sqlite.VirtualTableContext, i int) error { 184 | if c.currentRow.Deleted { 185 | return fmt.Errorf("accessing deleted row") 186 | } 187 | var res interface{} 188 | if i == c.t.KeyCol { 189 | res = c.currentKey.Value() 190 | } else if cv, ok := c.currentRow.ColumnValues[c.t.ColumnNameByIndex[i]]; ok { 191 | res = s3db.FromSQLiteValue(cv.Value) 192 | } 193 | setContextResult(ctx, res, i) 194 | return nil 195 | } 196 | 197 | func (c *ChangesCursor) Filter(_ int, idxStr string, values ...sqlite.Value) error { 198 | return toSqlite(c.Next()) 199 | } 200 | func (c *ChangesCursor) Rowid() (int64, error) { 201 | return 0, errors.New("rowid: not expecting to be called, source table should be WITHOUT ROWID") 202 | } 203 | func (c *ChangesCursor) Eof() bool { return c.eof } 204 | func (c *ChangesCursor) Close() error { return nil } 205 | -------------------------------------------------------------------------------- /sqlite/s3db_changes_test.go: -------------------------------------------------------------------------------- 1 | package mod_test 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/require" 9 | ) 10 | 11 | func 
TestChanges_HappyCase(t *testing.T) { 12 | db, s3Bucket, s3Endpoint := openDB() 13 | defer db.Close() 14 | 15 | _, err := db.Exec(fmt.Sprintf(`create virtual table data using s3db ( 16 | s3_prefix='changes_happy', 17 | s3_bucket='%s', 18 | s3_endpoint='%s', 19 | columns='a, b')`, 20 | s3Bucket, s3Endpoint)) 21 | require.NoError(t, err) 22 | 23 | _, err = db.Exec(`insert into data values($1,$2)`, 1, 1) 24 | require.NoError(t, err) 25 | 26 | v1 := mustQueryVersion(db, "data") 27 | 28 | _, err = db.Exec(`insert into data values($1,$2)`, 2, 2) 29 | require.NoError(t, err) 30 | 31 | v2 := mustQueryVersion(db, "data") 32 | 33 | _, err = db.Exec(fmt.Sprintf(`create virtual table changes using s3db_changes ( 34 | table='data', 35 | from='%s', 36 | to='%s')`, v1, v2)) 37 | require.NoError(t, err) 38 | 39 | require.Equal(t, `[[2,2]]`, mustQueryToJSON(db, `select * from changes`)) 40 | } 41 | 42 | func mustQueryVersion(db *sql.DB, table string) string { 43 | v, err := queryVersion(db, table) 44 | if err != nil { 45 | panic(err) 46 | } 47 | return v 48 | } 49 | 50 | func queryVersion(db *sql.DB, table string) (string, error) { 51 | fmt.Println(`select s3db_version($1)`, table) 52 | r, err := db.Query(`select s3db_version($1)`, table) 53 | if err != nil { 54 | return "", fmt.Errorf("select: %w", err) 55 | } 56 | if !r.Next() { 57 | return "", nil 58 | } 59 | var v string 60 | err = r.Scan(&v) 61 | if err != nil { 62 | return "", fmt.Errorf("scan: %w", err) 63 | } 64 | r.Close() 65 | return v, nil 66 | } 67 | -------------------------------------------------------------------------------- /sqlite/s3db_conn.go: -------------------------------------------------------------------------------- 1 | package mod 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "time" 8 | 9 | "go.riyazali.net/sqlite" 10 | 11 | "github.com/jrhy/s3db" 12 | ) 13 | 14 | type ConnModule struct { 15 | sc *S3DBConn 16 | } 17 | 18 | var _ sqlite.WriteableVirtualTable = (*ConnModule)(nil) 19 | 20 | func 
(c *ConnModule) Connect(conn *sqlite.Conn, args []string, 21 | declare func(string) error) (sqlite.VirtualTable, error) { 22 | 23 | err := declare(`create table s3db_conn( 24 | deadline, 25 | write_time 26 | )`) 27 | if err != nil { 28 | return nil, fmt.Errorf("declare: %w", err) 29 | } 30 | 31 | return c, nil 32 | } 33 | 34 | func (vm *ConnModule) BestIndex(input *sqlite.IndexInfoInput) (*sqlite.IndexInfoOutput, error) { 35 | return &sqlite.IndexInfoOutput{ 36 | ConstraintUsage: make([]*sqlite.ConstraintUsage, len(input.Constraints)), 37 | }, nil 38 | } 39 | 40 | func (vm *ConnModule) Open() (sqlite.VirtualCursor, error) { 41 | return &ConnCursor{vm, false}, nil 42 | } 43 | 44 | func (vm *ConnModule) Disconnect() error { return nil } 45 | func (vm *ConnModule) Destroy() error { return nil } 46 | 47 | type ConnCursor struct { 48 | vm *ConnModule 49 | eof bool 50 | } 51 | 52 | func (vc *ConnCursor) Next() error { 53 | vc.eof = true 54 | return nil 55 | } 56 | 57 | func (vc *ConnCursor) Rowid() (int64, error) { 58 | return 0, nil 59 | } 60 | 61 | func (vc *ConnCursor) Column(context *sqlite.VirtualTableContext, i int) error { 62 | switch i { 63 | case 0: 64 | if vc.vm.sc.deadline.IsZero() { 65 | context.ResultNull() 66 | } else { 67 | context.ResultText(vc.vm.sc.deadline.Format(s3db.SQLiteTimeFormat)) 68 | } 69 | case 1: 70 | if vc.vm.sc.writeTime.IsZero() { 71 | context.ResultNull() 72 | } else { 73 | context.ResultText(vc.vm.sc.writeTime.Format(s3db.SQLiteTimeFormat)) 74 | } 75 | default: 76 | context.ResultError(fmt.Errorf("unhandled column %d", i)) 77 | } 78 | return nil 79 | } 80 | 81 | func (vc *ConnCursor) Eof() bool { return vc.eof } 82 | func (vc *ConnCursor) Close() error { return nil } 83 | 84 | func (vc *ConnCursor) Filter(_ int, idxStr string, values ...sqlite.Value) error { 85 | return nil 86 | } 87 | 88 | func (c *ConnModule) Update(value sqlite.Value, values ...sqlite.Value) error { 89 | var err error 90 | if len(values) != 2 { 91 | return 
errors.New("wrong number of column values") 92 | } 93 | 94 | deadline, writeTime := values[0], values[1] 95 | 96 | if !deadline.NoChange() { 97 | if deadline.IsNil() || deadline.Text() == "" { 98 | c.sc.deadline = time.Time{} 99 | c.sc.ctx = context.Background() 100 | } else { 101 | c.sc.deadline, err = time.Parse(s3db.SQLiteTimeFormat, deadline.Text()) 102 | if err != nil { 103 | // TODO: fix other time parsing error messages 104 | return fmt.Errorf("deadline: must be like %s", s3db.SQLiteTimeFormat) 105 | } 106 | } 107 | } 108 | 109 | if !writeTime.NoChange() { 110 | if writeTime.IsNil() || writeTime.Text() == "" { 111 | c.sc.writeTime = time.Time{} 112 | } else { 113 | c.sc.writeTime, err = time.Parse(s3db.SQLiteTimeFormat, writeTime.Text()) 114 | if err != nil { 115 | return fmt.Errorf("write_time: must be like %s", s3db.SQLiteTimeFormat) 116 | } 117 | } 118 | } 119 | 120 | c.sc.txFixedWriteTime = false 121 | c.sc.ResetContext() 122 | 123 | return nil 124 | } 125 | 126 | func (c *ConnModule) Insert(_ ...sqlite.Value) (int64, error) { 127 | return 0, sqlite.SQLITE_CONSTRAINT_VTAB 128 | } 129 | 130 | func (c *ConnModule) Replace(old, new sqlite.Value, _ ...sqlite.Value) error { 131 | return sqlite.SQLITE_CONSTRAINT_VTAB 132 | } 133 | 134 | func (c *ConnModule) Delete(sqlite.Value) error { 135 | return sqlite.SQLITE_CONSTRAINT_VTAB 136 | } 137 | -------------------------------------------------------------------------------- /sqlite/s3db_refresh.go: -------------------------------------------------------------------------------- 1 | package mod 2 | 3 | import ( 4 | "fmt" 5 | 6 | "go.riyazali.net/sqlite" 7 | 8 | "github.com/jrhy/s3db" 9 | ) 10 | 11 | type RefreshFunc struct { 12 | sc *S3DBConn 13 | } 14 | 15 | func (h *RefreshFunc) Args() int { return 1 } 16 | func (h *RefreshFunc) Deterministic() bool { return false } 17 | func (h *RefreshFunc) Step(ctx *sqlite.AggregateContext, values ...sqlite.Value) { 18 | if ctx.Data() == nil { 19 | 
ctx.SetData(&RefreshFuncContext{}) 20 | } 21 | 22 | var val = values[0] 23 | var fCtx = ctx.Data().(*RefreshFuncContext) 24 | 25 | if !val.IsNil() { 26 | fCtx.tableName = val.Text() 27 | } 28 | } 29 | func (h *RefreshFunc) Final(ctx *sqlite.AggregateContext) { 30 | if ctx.Data() == nil { 31 | return 32 | } 33 | var fCtx = ctx.Data().(*RefreshFuncContext) 34 | if fCtx.tableName == "" { 35 | ctx.ResultError(fmt.Errorf("missing table name")) 36 | return 37 | } 38 | vt := s3db.GetTable(fCtx.tableName) 39 | if vt == nil { 40 | ctx.ResultError(fmt.Errorf("table not found: %s", fCtx.tableName)) 41 | return 42 | } 43 | nt, err := s3db.OpenKV(h.sc.ctx, vt.S3Options, "s3db-rows") 44 | if err != nil { 45 | ctx.ResultError(fmt.Errorf("open: %w", err)) 46 | return 47 | } 48 | vt.Tree = nt 49 | } 50 | 51 | type RefreshFuncContext struct { 52 | tableName string 53 | } 54 | -------------------------------------------------------------------------------- /sqlite/s3db_version.go: -------------------------------------------------------------------------------- 1 | package mod 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | 7 | "go.riyazali.net/sqlite" 8 | 9 | "github.com/jrhy/s3db" 10 | ) 11 | 12 | type VersionFunc struct { 13 | sc *S3DBConn 14 | } 15 | 16 | func (h *VersionFunc) Args() int { return 1 } 17 | func (h *VersionFunc) Deterministic() bool { return false } 18 | func (h *VersionFunc) Step(ctx *sqlite.AggregateContext, values ...sqlite.Value) { 19 | if ctx.Data() == nil { 20 | ctx.SetData(&VersionFuncContext{}) 21 | } 22 | 23 | var val = values[0] 24 | var fCtx = ctx.Data().(*VersionFuncContext) 25 | 26 | if !val.IsNil() { 27 | fCtx.tableName = val.Text() 28 | } 29 | } 30 | func (h *VersionFunc) Final(ctx *sqlite.AggregateContext) { 31 | if ctx.Data() == nil { 32 | return 33 | } 34 | var fCtx = ctx.Data().(*VersionFuncContext) 35 | if fCtx.tableName == "" { 36 | ctx.ResultError(fmt.Errorf("missing table name")) 37 | return 38 | } 39 | vt := s3db.GetTable(fCtx.tableName) 
40 | if vt == nil { 41 | ctx.ResultError(fmt.Errorf("table not found: %s", fCtx.tableName)) 42 | return 43 | } 44 | roots, err := vt.Tree.Root.Roots() 45 | if err != nil { 46 | ctx.ResultError(err) 47 | return 48 | } 49 | if len(roots) == 0 { 50 | ctx.ResultText("[]") 51 | } else { 52 | var je []byte 53 | je, err = json.Marshal(roots) 54 | if err != nil { 55 | ctx.ResultError(err) 56 | return 57 | } 58 | ctx.ResultText(string(je)) 59 | } 60 | } 61 | 62 | type VersionFuncContext struct { 63 | tableName string 64 | } 65 | -------------------------------------------------------------------------------- /sqlite/sharedlib/ext.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | _ "github.com/jrhy/s3db/sqlite" 5 | ) 6 | 7 | //go:generate sh -c "go build -buildmode=c-shared -o `if [ \"$GOOS\" = \"darwin\" ] ; then echo s3db.dylib ; else echo s3db.so ; fi`" 8 | 9 | // placeholder for c-shared 10 | func main() {} 11 | -------------------------------------------------------------------------------- /sqlite/sqlite-autoload-extension/auto.go: -------------------------------------------------------------------------------- 1 | package auto 2 | 3 | // #cgo CFLAGS: -DSQLITE_CORE 4 | // 5 | // #include "sqlite3.h" 6 | // extern int sqlite3_extension_init(sqlite3*, char**, const sqlite3_api_routines*); 7 | import "C" 8 | 9 | // invokes sqlite3_extension_init() in go.riyazali.net/sqlite that 10 | // makes available to all future database connections, all the 11 | // Go sqlite extensions that have been registered. 
12 | func init() { C.sqlite3_auto_extension((*[0]byte)(C.sqlite3_extension_init)) } 13 | -------------------------------------------------------------------------------- /sqlite/staticlib/ext.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | _ "github.com/jrhy/s3db/sqlite" 5 | ) 6 | 7 | //go:generate sh -c "go build -buildmode=c-archive -o s3db.a" 8 | 9 | // placeholder for c-archive 10 | func main() {} 11 | -------------------------------------------------------------------------------- /sqlite/vacuum.go: -------------------------------------------------------------------------------- 1 | package mod 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/jrhy/s3db" 9 | "go.riyazali.net/sqlite" 10 | ) 11 | 12 | type VacuumModule struct { 13 | sc *S3DBConn 14 | } 15 | 16 | func (c *VacuumModule) Connect(conn *sqlite.Conn, args []string, 17 | declare func(string) error) (sqlite.VirtualTable, error) { 18 | 19 | err := declare(`create table s3db_vacuum( 20 | vacuum_error, 21 | table_name HIDDEN, 22 | before_time HIDDEN 23 | )`) 24 | if err != nil { 25 | return nil, fmt.Errorf("declare: %w", err) 26 | } 27 | 28 | return &VacuumTable{module: c}, nil 29 | } 30 | 31 | type VacuumTable struct { 32 | module *VacuumModule 33 | } 34 | 35 | func (vt *VacuumTable) BestIndex(input *sqlite.IndexInfoInput) (*sqlite.IndexInfoOutput, error) { 36 | used := make([]*sqlite.ConstraintUsage, len(input.Constraints)) 37 | colUsed := make(map[int]bool) 38 | for i, c := range input.Constraints { 39 | if c.ColumnIndex >= 1 && c.ColumnIndex <= 2 && c.Op == sqlite.INDEX_CONSTRAINT_EQ { 40 | used[i] = &sqlite.ConstraintUsage{ 41 | ArgvIndex: c.ColumnIndex, 42 | Omit: true, 43 | } 44 | colUsed[c.ColumnIndex] = true 45 | } 46 | } 47 | 48 | res := &sqlite.IndexInfoOutput{ 49 | ConstraintUsage: used, 50 | } 51 | if !colUsed[1] { 52 | return nil, errors.New("table_name and before_time constraints must be 
supplied") 53 | } 54 | return res, nil 55 | } 56 | 57 | func (vt *VacuumTable) Open() (sqlite.VirtualCursor, error) { 58 | return &VacuumCursor{module: vt.module}, nil 59 | } 60 | 61 | func (vt *VacuumTable) Disconnect() error { return nil } 62 | func (vt *VacuumTable) Destroy() error { return nil } 63 | 64 | type VacuumCursor struct { 65 | tableName string 66 | beforeTime time.Time 67 | vacuumErr error 68 | eof bool 69 | module *VacuumModule 70 | } 71 | 72 | func (vc *VacuumCursor) Next() error { 73 | vc.eof = true 74 | return nil 75 | } 76 | 77 | func (vc *VacuumCursor) Rowid() (int64, error) { 78 | return 0, nil 79 | } 80 | 81 | func (vc *VacuumCursor) Column(context *sqlite.VirtualTableContext, i int) error { 82 | switch i { 83 | case 0: 84 | if vc.vacuumErr != nil { 85 | context.ResultText(fmt.Sprintf("%v", vc.vacuumErr)) 86 | } else { 87 | context.ResultNull() 88 | } 89 | case 1: 90 | context.ResultText(vc.tableName) 91 | case 2: 92 | context.ResultText(vc.beforeTime.Format(s3db.SQLiteTimeFormat)) 93 | } 94 | return nil 95 | } 96 | 97 | func (vc *VacuumCursor) Eof() bool { 98 | return vc.eof 99 | } 100 | 101 | func (vc *VacuumCursor) Close() error { return nil } 102 | 103 | func (vc *VacuumCursor) Filter(_ int, idxStr string, values ...sqlite.Value) error { 104 | if len(values) != 2 || values[0].IsNil() || values[1].IsNil() || 105 | values[0].Text() == "" || values[1].Text() == "" { 106 | return errors.New("table_name and before_time constraints must be supplied") 107 | } 108 | 109 | vc.tableName = values[0].Text() 110 | t, err := time.Parse(s3db.SQLiteTimeFormat, values[1].Text()) 111 | if err != nil { 112 | return fmt.Errorf("before_time: %w (must be like %s)", err, s3db.SQLiteTimeFormat) 113 | } 114 | vc.beforeTime = t 115 | 116 | vc.vacuumErr = s3db.Vacuum(vc.module.sc.ctx, vc.tableName, t) 117 | return nil 118 | } 119 | -------------------------------------------------------------------------------- /sqlite/vtable.go: 
-------------------------------------------------------------------------------- 1 | package mod 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "time" 8 | 9 | "go.riyazali.net/sqlite" 10 | 11 | "github.com/jrhy/s3db" 12 | "github.com/jrhy/s3db/kv" 13 | "github.com/jrhy/s3db/writetime" 14 | ) 15 | 16 | type S3DBConn struct { 17 | ctx context.Context 18 | ctxCancel func() 19 | deadline time.Time 20 | writeTime time.Time 21 | txFixedWriteTime bool 22 | } 23 | 24 | func (sc *S3DBConn) ResetContext() { 25 | if sc.ctxCancel != nil { 26 | sc.ctxCancel() 27 | sc.ctxCancel = nil 28 | } 29 | sc.ctx = context.Background() 30 | if !sc.deadline.IsZero() { 31 | sc.ctx, sc.ctxCancel = context.WithDeadline(sc.ctx, sc.deadline) 32 | } 33 | if !sc.writeTime.IsZero() { 34 | sc.ctx = writetime.NewContext(sc.ctx, sc.writeTime) 35 | } 36 | } 37 | 38 | type Module struct { 39 | sc *S3DBConn 40 | } 41 | 42 | func (c *Module) Connect(conn *sqlite.Conn, args []string, 43 | declare func(string) error) (sqlite.VirtualTable, error) { 44 | 45 | args = args[2:] 46 | table, err := s3db.New(c.sc.ctx, args) 47 | if err != nil { 48 | return nil, err 49 | } 50 | 51 | err = declare(table.SchemaString) 52 | if err != nil { 53 | return nil, fmt.Errorf("declare: %w", err) 54 | } 55 | 56 | vt := &VirtualTable{ 57 | module: c, 58 | common: table, 59 | } 60 | 61 | return vt, nil 62 | } 63 | 64 | func (c *Module) Create(conn *sqlite.Conn, args []string, declare func(string) error) (sqlite.VirtualTable, error) { 65 | // fmt.Printf("CREATE\n") 66 | return c.Connect(conn, args, declare) 67 | } 68 | 69 | type VirtualTable struct { 70 | common *s3db.VirtualTable 71 | module *Module 72 | } 73 | 74 | var _ sqlite.TwoPhaseCommitter = (*VirtualTable)(nil) 75 | 76 | func mapOp(in sqlite.ConstraintOp, usable bool) s3db.Op { 77 | if !usable { 78 | return s3db.OpIgnore 79 | } 80 | switch in { 81 | case sqlite.INDEX_CONSTRAINT_EQ: 82 | return s3db.OpEQ 83 | case sqlite.INDEX_CONSTRAINT_GT: 84 | return s3db.OpGT 
85 | case sqlite.INDEX_CONSTRAINT_GE: 86 | return s3db.OpGE 87 | case sqlite.INDEX_CONSTRAINT_LT: 88 | return s3db.OpLT 89 | case sqlite.INDEX_CONSTRAINT_LE: 90 | return s3db.OpLE 91 | } 92 | return s3db.OpIgnore 93 | } 94 | 95 | func (c *VirtualTable) BestIndex(input *sqlite.IndexInfoInput) (*sqlite.IndexInfoOutput, error) { 96 | indexIn := make([]s3db.IndexInput, len(input.Constraints)) 97 | for i, c := range input.Constraints { 98 | op := mapOp(c.Op, c.Usable) 99 | indexIn[i] = s3db.IndexInput{ 100 | ColumnIndex: c.ColumnIndex, 101 | Op: op, 102 | } 103 | } 104 | orderIn := make([]s3db.OrderInput, len(input.OrderBy)) 105 | for i, o := range input.OrderBy { 106 | orderIn[i] = s3db.OrderInput{ 107 | Column: o.ColumnIndex, 108 | Desc: o.Desc, 109 | } 110 | } 111 | indexOut, err := c.common.BestIndex(indexIn, orderIn) 112 | if err != nil { 113 | return nil, toSqlite(err) 114 | } 115 | used := make([]*sqlite.ConstraintUsage, len(indexIn)) 116 | for i := range indexOut.Used { 117 | if indexOut.Used[i] { 118 | used[i] = &sqlite.ConstraintUsage{ 119 | ArgvIndex: i + 1, 120 | //Omit: true, // no known cases where this doesn't work, but... 
121 | } 122 | } 123 | } 124 | return &sqlite.IndexInfoOutput{ 125 | EstimatedCost: indexOut.EstimatedCost, 126 | ConstraintUsage: used, 127 | OrderByConsumed: indexOut.AlreadyOrdered, 128 | IndexNumber: indexOut.IdxNum, 129 | IndexString: indexOut.IdxStr, 130 | }, nil 131 | } 132 | 133 | func (c *VirtualTable) Open() (sqlite.VirtualCursor, error) { 134 | common, err := c.common.Open() 135 | if err != nil { 136 | return nil, toSqlite(err) 137 | } 138 | return &Cursor{ 139 | common: common, 140 | ctx: c.module.sc.ctx, 141 | }, nil 142 | } 143 | 144 | func (c *VirtualTable) Disconnect() error { 145 | if err := toSqlite(c.common.Disconnect()); err != nil { 146 | return err 147 | } 148 | if c.module.sc.ctxCancel != nil { 149 | c.module.sc.ctxCancel() 150 | c.module.sc.ctxCancel = nil 151 | } 152 | 153 | return nil 154 | } 155 | 156 | func (c *VirtualTable) Destroy() error { 157 | return c.Disconnect() 158 | } 159 | 160 | type Cursor struct { 161 | common *s3db.Cursor 162 | ctx context.Context 163 | } 164 | 165 | func (c *Cursor) Next() error { 166 | return toSqlite(c.common.Next(c.ctx)) 167 | } 168 | 169 | func (c *Cursor) Column(ctx *sqlite.VirtualTableContext, i int) error { 170 | v, err := c.common.Column(i) 171 | if err != nil { 172 | return toSqlite(err) 173 | } 174 | setContextResult(ctx, v, i) 175 | return nil 176 | } 177 | 178 | func setContextResult(ctx *sqlite.VirtualTableContext, v interface{}, colIndex int) { 179 | switch x := v.(type) { 180 | case nil: 181 | ctx.ResultNull() 182 | case []byte: 183 | ctx.ResultBlob(x) 184 | case float64: 185 | ctx.ResultFloat(x) 186 | case int: 187 | ctx.ResultInt(x) 188 | case int64: 189 | ctx.ResultInt64(x) 190 | case string: 191 | ctx.ResultText(x) 192 | default: 193 | ctx.ResultError(fmt.Errorf("column %d: cannot convert %T", colIndex, x)) 194 | } 195 | } 196 | 197 | func (c *Cursor) Filter(_ int, idxStr string, values ...sqlite.Value) error { 198 | es := make([]interface{}, len(values)) 199 | for i := range values { 200 
| es[i] = valueToGo(values[i]) 201 | } 202 | return toSqlite(c.common.Filter(c.ctx, idxStr, es)) 203 | } 204 | func (c *Cursor) Rowid() (int64, error) { 205 | i, err := c.common.Rowid() 206 | return i, toSqlite(err) 207 | } 208 | func (c *Cursor) Eof() bool { return c.common.Eof() } 209 | func (c *Cursor) Close() error { return toSqlite(c.common.Close()) } 210 | 211 | func init() { 212 | sqlite.Register(func(api *sqlite.ExtensionApi) (sqlite.ErrorCode, error) { 213 | sc := &S3DBConn{ 214 | ctx: context.Background(), 215 | } 216 | err := api.CreateModule("s3db", &Module{sc}, 217 | sqlite.ReadOnly(false), sqlite.Transaction(true), 218 | sqlite.TwoPhaseCommit(true)) 219 | if err != nil { 220 | return sqlite.SQLITE_ERROR, err 221 | } 222 | err = api.CreateModule("s3db_changes", &ChangesModule{sc}) 223 | if err != nil { 224 | return sqlite.SQLITE_ERROR, err 225 | } 226 | err = api.CreateModule("s3db_conn", &ConnModule{sc}, 227 | sqlite.ReadOnly(false), 228 | sqlite.EponymousOnly(true)) 229 | if err != nil { 230 | return sqlite.SQLITE_ERROR, err 231 | } 232 | if err := api.CreateFunction("s3db_refresh", &RefreshFunc{sc}); err != nil { 233 | return sqlite.SQLITE_ERROR, fmt.Errorf("s3db_refresh: %w", err) 234 | } 235 | err = api.CreateModule("s3db_vacuum", &VacuumModule{sc}, sqlite.EponymousOnly(true)) 236 | if err != nil { 237 | return sqlite.SQLITE_ERROR, fmt.Errorf("s3db_vacuum: %w", err) 238 | } 239 | if err := api.CreateFunction("s3db_version", &VersionFunc{sc}); err != nil { 240 | return sqlite.SQLITE_ERROR, fmt.Errorf("s3db_version: %w", err) 241 | } 242 | return sqlite.SQLITE_OK, nil 243 | }) 244 | } 245 | 246 | func valuesToGo(values []sqlite.Value) map[int]interface{} { 247 | res := make(map[int]interface{}, len(values)) 248 | for i := range values { 249 | if values[i].NoChange() { 250 | continue 251 | } 252 | res[i] = valueToGo(values[i]) 253 | } 254 | return res 255 | } 256 | func valueToGo(value sqlite.Value) interface{} { 257 | switch value.Type() { 258 | 
case sqlite.SQLITE_BLOB: 259 | return value.Blob() 260 | case sqlite.SQLITE_FLOAT: 261 | return value.Float() 262 | case sqlite.SQLITE_INTEGER: 263 | return value.Int64() 264 | case sqlite.SQLITE_NULL: 265 | return nil 266 | case sqlite.SQLITE_TEXT: 267 | return value.Text() 268 | default: 269 | panic(fmt.Sprintf("cannot convert type %d", value.Type())) 270 | } 271 | } 272 | 273 | func (c *VirtualTable) Insert(values ...sqlite.Value) (int64, error) { 274 | i, err := c.common.Insert(c.module.sc.ctx, valuesToGo(values)) 275 | return i, toSqlite(err) 276 | } 277 | 278 | func toSqlite(err error) error { 279 | switch err { 280 | case s3db.ErrS3DBConstraintNotNull: 281 | return sqlite.SQLITE_CONSTRAINT_NOTNULL 282 | case s3db.ErrS3DBConstraintPrimaryKey: 283 | return sqlite.SQLITE_CONSTRAINT_PRIMARYKEY 284 | case s3db.ErrS3DBConstraintUnique: 285 | return sqlite.SQLITE_CONSTRAINT_UNIQUE 286 | default: 287 | return err 288 | } 289 | } 290 | 291 | func (c *VirtualTable) Update(value sqlite.Value, values ...sqlite.Value) error { 292 | return toSqlite(c.common.Update(c.module.sc.ctx, valueToGo(value), valuesToGo(values))) 293 | } 294 | 295 | func (c *VirtualTable) Replace(oldValue, newValue sqlite.Value, values ...sqlite.Value) error { 296 | if true { 297 | return errors.New("unimplemented") 298 | } 299 | fmt.Printf("REPLACE ") 300 | fmt.Printf("oldValue nochange=%v %s %+v ", oldValue.NoChange(), oldValue.Type(), oldValue.Text()) 301 | fmt.Printf("newValue nochange=%v %s %+v\n", newValue.NoChange(), newValue.Type(), newValue.Text()) 302 | var backup *kv.DB 303 | var err error 304 | if newValue.NoChange() { 305 | newValue = oldValue 306 | } else { 307 | backup, err = c.common.Tree.Root.Clone(c.module.sc.ctx) 308 | if err != nil { 309 | return fmt.Errorf("clone: %w", err) 310 | } 311 | if err := c.common.Delete(c.module.sc.ctx, valueToGo(oldValue)); err != nil { 312 | return fmt.Errorf("delete: %w", err) 313 | } 314 | } 315 | if _, err := c.common.Insert(c.module.sc.ctx, 
valuesToGo(values)); err != nil { 316 | c.common.Tree.Root.Cancel() 317 | c.common.Tree.Root = backup 318 | return fmt.Errorf("update: %w", err) 319 | } 320 | backup.Cancel() 321 | return nil 322 | } 323 | 324 | func (c *VirtualTable) Delete(value sqlite.Value) error { 325 | return toSqlite(c.common.Delete(c.module.sc.ctx, valueToGo(value))) 326 | } 327 | 328 | func (c *VirtualTable) Begin() error { 329 | if c.module.sc.writeTime.IsZero() { 330 | c.module.sc.writeTime = time.Now() 331 | c.module.sc.txFixedWriteTime = true 332 | c.module.sc.ResetContext() 333 | } 334 | return toSqlite(c.common.Begin(c.module.sc.ctx)) 335 | } 336 | 337 | func (c *VirtualTable) Commit() error { 338 | if c.module.sc.txFixedWriteTime { 339 | c.module.sc.writeTime = time.Time{} 340 | c.module.sc.txFixedWriteTime = false 341 | c.module.sc.ResetContext() 342 | } 343 | return nil 344 | } 345 | 346 | func (c *VirtualTable) Sync() error { 347 | if c.common.S3Options.ReadOnly { 348 | return nil 349 | } 350 | 351 | return toSqlite(c.common.Commit(c.module.sc.ctx)) 352 | } 353 | 354 | func (c *VirtualTable) Rollback() error { 355 | res := toSqlite(c.common.Rollback()) 356 | if c.module.sc.txFixedWriteTime { 357 | c.module.sc.writeTime = time.Time{} 358 | c.module.sc.txFixedWriteTime = false 359 | c.module.sc.ResetContext() 360 | } 361 | return res 362 | } 363 | -------------------------------------------------------------------------------- /sqlite/vtable_test.go: -------------------------------------------------------------------------------- 1 | package mod_test 2 | 3 | import ( 4 | "database/sql" 5 | "encoding/json" 6 | "fmt" 7 | "os" 8 | "testing" 9 | "time" 10 | 11 | "github.com/jrhy/mast/persist/s3test" 12 | "github.com/jrhy/s3db" 13 | v1proto "github.com/jrhy/s3db/proto/v1" 14 | "github.com/stretchr/testify/require" 15 | 16 | // register s3db extension with riyazali.net/sqlite 17 | _ "github.com/jrhy/s3db/sqlite" 18 | // autoload riyazali.net/sqlite-registered extensions in sqlite 19 | _ 
"github.com/jrhy/s3db/sqlite/sqlite-autoload-extension" 20 | // mattn's awesome Go sql driver for sqlite 21 | _ "github.com/mattn/go-sqlite3" 22 | ) 23 | 24 | func init() { 25 | os.Setenv("AWS_REGION", "dummy") 26 | os.Setenv("AWS_ACCESS_KEY_ID", "dummy") 27 | os.Setenv("AWS_SECRET_ACCESS_KEY", "dummy") 28 | } 29 | 30 | func openDB() (*sql.DB, string, string) { 31 | db, err := sql.Open("sqlite3", ":memory:") 32 | if err != nil { 33 | panic(err) 34 | } 35 | 36 | c, s3Bucket, _ := s3test.Client() 37 | 38 | return db, s3Bucket, c.Endpoint 39 | } 40 | 41 | func mustJSON(i interface{}) string { 42 | var b []byte 43 | var err error 44 | b, err = json.Marshal(i) 45 | if err != nil { 46 | panic(err) 47 | } 48 | if len(b) > 60 { 49 | b, err = json.MarshalIndent(i, " ", " ") 50 | if err != nil { 51 | panic(err) 52 | } 53 | } 54 | return string(b) 55 | } 56 | 57 | func mustGetRows(r *sql.Rows) [][]interface{} { 58 | cols, err := r.Columns() 59 | if err != nil { 60 | panic(err) 61 | } 62 | var rows [][]interface{} 63 | for r.Next() { 64 | row := make([]interface{}, len(cols)) 65 | for i := range row { 66 | var q interface{} 67 | row[i] = &q 68 | } 69 | err = r.Scan(row...) 
70 | if err != nil { 71 | panic(fmt.Errorf("scan: %w", err)) 72 | } 73 | rows = append(rows, row) 74 | } 75 | return rows 76 | } 77 | 78 | func expand(row []interface{}) []interface{} { 79 | res := []interface{}{} 80 | for i := range row { 81 | for _, v := range row[i].([]interface{}) { 82 | res = append(res, v) 83 | } 84 | } 85 | return res 86 | } 87 | 88 | func populateTwoTables(db *sql.DB, s3Bucket, s3Endpoint, tablePrefix, schema string, row ...interface{}) (string, string, error) { 89 | regTableName := tablePrefix + "_reg" 90 | virtualTableName := tablePrefix + "_virtual" 91 | _, err := db.Exec(fmt.Sprintf(`create table %s(%s)`, 92 | regTableName, schema)) 93 | if err != nil { 94 | return "", "", fmt.Errorf("create %s: %w", regTableName, err) 95 | } 96 | 97 | _, err = db.Exec(fmt.Sprintf(`create virtual table %s using s3db ( 98 | s3_bucket='%s', 99 | s3_endpoint='%s', 100 | s3_prefix='%s', 101 | columns='%s')`, virtualTableName, 102 | s3Bucket, s3Endpoint, virtualTableName, schema)) 103 | if err != nil { 104 | return "", "", fmt.Errorf("create %s: %w", virtualTableName, err) 105 | } 106 | 107 | valuesStr := "values" 108 | for i := range row { 109 | if i > 0 { 110 | valuesStr += "," 111 | } 112 | valuesStr += "(" 113 | for j := range row[i].([]interface{}) { 114 | if j > 0 { 115 | valuesStr += "," 116 | } 117 | valuesStr += "?" 118 | } 119 | valuesStr += ")" 120 | } 121 | _, err = db.Exec(fmt.Sprintf("insert into %s %s", regTableName, valuesStr), expand(row)...) 122 | if err != nil { 123 | return "", "", fmt.Errorf("insert %s: %w", regTableName, err) 124 | } 125 | _, err = db.Exec(fmt.Sprintf("insert into %s %s", virtualTableName, valuesStr), expand(row)...) 
126 | if err != nil { 127 | return "", "", fmt.Errorf("insert %s: %w", virtualTableName, err) 128 | } 129 | 130 | return regTableName, virtualTableName, nil 131 | } 132 | 133 | func row(cols ...interface{}) interface{} { 134 | return interface{}(cols) 135 | } 136 | 137 | func requireSelectEquiv(t *testing.T, db *sql.DB, regTable, virtualTable, where, expectedJSON string) { 138 | require.Equal(t, 139 | expectedJSON, 140 | mustQueryToJSON(db, fmt.Sprintf("select * from %s %s", regTable, where))) 141 | require.Equal(t, 142 | expectedJSON, 143 | mustQueryToJSON(db, fmt.Sprintf("select * from %s %s", virtualTable, where))) 144 | } 145 | 146 | func dump(rows *sql.Rows) error { 147 | colNames, err := rows.Columns() 148 | if err != nil { 149 | return fmt.Errorf("columns: %w", err) 150 | } 151 | cols := make([]interface{}, len(colNames)) 152 | for i := range colNames { 153 | var ef interface{} 154 | cols[i] = &ef 155 | } 156 | for rows.Next() { 157 | err = rows.Scan(cols...) 158 | if err != nil { 159 | return err 160 | } 161 | s := "ROW: " 162 | for i := range cols { 163 | s += fmt.Sprintf("%+v ", *cols[i].(*interface{})) 164 | } 165 | fmt.Println(s) 166 | } 167 | return nil 168 | } 169 | 170 | func mustQueryToJSON(db *sql.DB, query string, args ...any) string { 171 | rows, err := db.Query(query, args...) 
172 | if err != nil { 173 | panic(fmt.Errorf("%s: %w", query, err)) 174 | } 175 | defer rows.Close() 176 | return mustJSON(mustGetRows(rows)) 177 | } 178 | 179 | /* 180 | // possible discrepancy to sqlite / perhaps bug in go-sqlite3 181 | func TODOTestEmptyText(t *testing.T) { 182 | db, s3Bucket, s3Endpoint := getDBBucket() 183 | _, err := db.Exec(fmt.Sprintf(`create virtual table emptytextval using s3db ( 184 | s3_bucket='%s', 185 | s3_endpoint='%s', 186 | s3_prefix='emptytextval', 187 | columns='a')`, 188 | s3Bucket, s3Endpoint)) 189 | require.NoError(t, err) 190 | _, err = db.Exec(`insert into emptytextval values ('');`) 191 | require.NoError(t, err) 192 | require.Equal(t, 193 | `[["text",""]]`, 194 | mustQueryToJSON(db, `select typeof(a), a from emptytextval`)) 195 | } 196 | 197 | // possible discrepancy to sqlite / perhaps bug in go-sqlite3 198 | func TODOTestEmptyBlob(t *testing.T) { 199 | db, s3Bucket, s3Endpoint := getDBBucket() 200 | _, err := db.Exec(fmt.Sprintf(`create virtual table emptyblobval using s3db ( 201 | s3_bucket='%s', 202 | s3_endpoint='%s', 203 | s3_prefix='emptyblobval', 204 | columns='a')`, 205 | s3Bucket, s3Endpoint)) 206 | require.NoError(t, err) 207 | _, err = db.Exec(`insert into emptyblobval values (x'');`) 208 | require.NoError(t, err) 209 | require.Equal(t, 210 | `[["blob",""]]`, 211 | mustQueryToJSON(db, `select typeof(a), a from emptyblobval`)) 212 | } 213 | */ 214 | 215 | func mustParseTime(f, s string) time.Time { 216 | t, err := time.Parse(f, s) 217 | if err != nil { 218 | panic(err) 219 | } 220 | return t 221 | } 222 | 223 | func Test(t *testing.T) { 224 | openTable := func(t *testing.T, name string, db *sql.DB, s3Bucket, s3Endpoint string) { 225 | stmt := fmt.Sprintf(`create virtual table "%s" using s3db ( 226 | s3_bucket='%s', 227 | s3_endpoint='%s', 228 | s3_prefix='%s', 229 | columns='a primary key, b')`, 230 | name, s3Bucket, s3Endpoint, t.Name()) 231 | _, err := db.Exec(stmt) 232 | require.NoError(t, err) 233 | } 234 | 
235 | t.Run("1", func(t *testing.T) { 236 | db, s3Bucket, s3Endpoint := openDB() 237 | defer db.Close() 238 | _, err := db.Exec(fmt.Sprintf(`create virtual table t1 using s3db ( 239 | s3_bucket='%s', 240 | s3_endpoint='%s', 241 | s3_prefix='t1', 242 | columns='a primary key, b')`, 243 | s3Bucket, s3Endpoint)) 244 | require.NoError(t, err) 245 | _, err = db.Exec("delete from t1;") 246 | require.NoError(t, err) 247 | _, err = db.Exec("insert into t1 values ('v1','v1b'),('v2','v2b'),('v3','v3b');") 248 | require.NoError(t, err) 249 | //_, err = db.Exec("update t1 set a='v1c' where b='v1b';") 250 | //require.NoError(t, err) 251 | _, err = db.Exec("delete from t1 where b='v2b';") 252 | require.NoError(t, err) 253 | require.Equal(t, 254 | `[["v1","v1b"],`+ 255 | `["v3","v3b"]]`, 256 | mustQueryToJSON(db, "select * from t1;")) 257 | }) 258 | 259 | t.Run("BestIndex", 260 | func(t *testing.T) { 261 | db, s3Bucket, s3Endpoint := openDB() 262 | defer db.Close() 263 | regTable, sasqTable, err := populateTwoTables(db, s3Bucket, s3Endpoint, 264 | "index", "a primary key", 265 | row(1), 266 | row(3), 267 | row(2), 268 | ) 269 | require.NoError(t, err) 270 | t.Run("s3dbKeySorted", func(t *testing.T) { 271 | require.Equal(t, 272 | "[[1],[2],[3]]", 273 | mustQueryToJSON(db, fmt.Sprintf(`select a from %s`, sasqTable))) 274 | }) 275 | sqliteEquiv := func(where, expectedJSON string) func(*testing.T) { 276 | return func(t *testing.T) { 277 | requireSelectEquiv(t, db, regTable, sasqTable, 278 | where, expectedJSON) 279 | } 280 | } 281 | t.Run("gt_asc", sqliteEquiv("where a>2", "[[3]]")) 282 | t.Run("ge_asc", sqliteEquiv("where a>=2", "[[2],[3]]")) 283 | t.Run("eq_asc", sqliteEquiv("where a=2", "[[2]]")) 284 | t.Run("le_asc", sqliteEquiv("where a<=2", "[[1],[2]]")) 285 | t.Run("lt_asc", sqliteEquiv("where a<2", "[[1]]")) 286 | 287 | t.Run("gt_desc", sqliteEquiv("where a>2 order by 1 desc", "[[3]]")) 288 | t.Run("ge_desc", sqliteEquiv("where a>=2 order by 1 desc", "[[3],[2]]")) 289 | 
t.Run("eq_desc", sqliteEquiv("where a=2 order by 1 desc", "[[2]]")) 290 | t.Run("le_desc", sqliteEquiv("where a<=2 order by 1 desc", "[[2],[1]]")) 291 | t.Run("lt_desc", sqliteEquiv("where a<2 order by 1 desc", "[[1]]")) 292 | 293 | t.Run("and_asc", sqliteEquiv("where a>1 and a<3", "[[2]]")) 294 | 295 | t.Run("in_asc", sqliteEquiv("where a in (1,3)", "[[1],[3]]")) 296 | t.Run("in_desc", sqliteEquiv("where a in (1,3) order by 1 desc", "[[3],[1]]")) 297 | }) 298 | 299 | t.Run("SortOrder", 300 | 301 | func(t *testing.T) { 302 | db, s3Bucket, s3Endpoint := openDB() 303 | defer db.Close() 304 | _, err := db.Exec(fmt.Sprintf(`create virtual table sortfun using s3db ( 305 | s3_bucket='%s', 306 | s3_endpoint='%s', 307 | s3_prefix='sortfun', 308 | columns='a primary key')`, 309 | s3Bucket, s3Endpoint)) 310 | require.NoError(t, err) 311 | _, err = db.Exec(`insert into sortfun values (?), (?), (?), (?)`, 312 | []byte("blob"), 313 | "text", 314 | 3.14, 315 | 3, 316 | ) 317 | require.NoError(t, err) 318 | require.Equal(t, 319 | `[["integer"],["real"],["text"],["blob"]]`, 320 | mustQueryToJSON(db, `select typeof(a) from sortfun`)) 321 | 322 | _, err = db.Exec(`create table sortfun_native(a primary key) without rowid`) 323 | require.NoError(t, err) 324 | _, err = db.Exec(`insert into sortfun_native values (?), (?), (?), (?)`, 325 | []byte("blob"), 326 | "text", 327 | 3.14, 328 | 3, 329 | ) 330 | require.NoError(t, err) 331 | require.Equal(t, 332 | `[["integer"],["real"],["text"],["blob"]]`, 333 | mustQueryToJSON(db, `select typeof(a) from sortfun_native`)) 334 | }) 335 | 336 | t.Run("NullPrimaryKey", 337 | func(t *testing.T) { 338 | db, s3Bucket, s3Endpoint := openDB() 339 | defer db.Close() 340 | _, err := db.Exec(fmt.Sprintf(`create virtual table nullkey using s3db ( 341 | s3_bucket='%s', 342 | s3_endpoint='%s', 343 | s3_prefix='nullkey', 344 | columns='a primary key')`, 345 | s3Bucket, s3Endpoint)) 346 | require.NoError(t, err) 347 | _, err = db.Exec(`insert into nullkey 
values (null);`)
			require.Error(t, err, "constraint failed")
		})

	// A null non-key value is stored and round-trips as null.
	t.Run("NullValue",
		func(t *testing.T) {
			db, s3Bucket, s3Endpoint := openDB()
			_, err := db.Exec(fmt.Sprintf(`create virtual table nullval using s3db (
    s3_bucket='%s',
    s3_endpoint='%s',
    s3_prefix='nullval',
    columns='a')`,
				s3Bucket, s3Endpoint))
			require.NoError(t, err)
			_, err = db.Exec(`insert into nullval values (null);`)
			require.NoError(t, err)
			require.Equal(t,
				`[["null",null]]`,
				mustQueryToJSON(db, `select typeof(a), a from nullval`))
		})

	// A zero integer must keep integer affinity.
	t.Run("ZeroInt", func(t *testing.T) {
		db, s3Bucket, s3Endpoint := openDB()
		_, err := db.Exec(fmt.Sprintf(`create virtual table zerointval using s3db (
    s3_bucket='%s',
    s3_endpoint='%s',
    s3_prefix='zerointval',
    columns='a')`,
			s3Bucket, s3Endpoint))
		require.NoError(t, err)
		_, err = db.Exec(`insert into zerointval values (0);`)
		require.NoError(t, err)
		require.Equal(t,
			`[["integer",0]]`,
			mustQueryToJSON(db, `select typeof(a), a from zerointval`))
	})

	// A zero real must keep real affinity.
	t.Run("ZeroReal", func(t *testing.T) {
		db, s3Bucket, s3Endpoint := openDB()
		_, err := db.Exec(fmt.Sprintf(`create virtual table zerorealval using s3db (
    s3_bucket='%s',
    s3_endpoint='%s',
    s3_prefix='zerorealval',
    columns='a')`,
			s3Bucket, s3Endpoint))
		require.NoError(t, err)
		_, err = db.Exec(`insert into zerorealval values (0.0);`)
		require.NoError(t, err)
		require.Equal(t,
			`[["real",0]]`,
			mustQueryToJSON(db, `select typeof(a), a from zerorealval`))
	})

	// Conflict resolution is driven by the pinned write_time, not by the
	// order writes land in the bucket: the later timestamp wins.
	t.Run("InsertConflict", func(t *testing.T) {
		db, s3Bucket, s3Endpoint := openDB()
		defer db.Close()

		type testCase struct {
			firstWriteTime  string
			secondWriteTime string
			expectedWinner  string
		}
		cases := []testCase{
			{
				firstWriteTime:  "2006-01-01 00:00:00",
				secondWriteTime: "2007-01-01 00:00:00",
				expectedWinner:  "second",
			},
			{
				firstWriteTime:  "2007-01-01 00:00:00",
				secondWriteTime: "2006-01-01 00:00:00",
				expectedWinner:  "first",
			},
		}
		for _, c := range cases {
			t.Run(c.expectedWinner, func(t *testing.T) {
				openTable(t, t.Name()+"1", db, s3Bucket, s3Endpoint)
				openTable(t, t.Name()+"2", db, s3Bucket, s3Endpoint)

				_, err := db.Exec(`update s3db_conn set write_time=?`, c.firstWriteTime)
				require.NoError(t, err)
				_, err = db.Exec(fmt.Sprintf(`insert into "%s" values('row','first')`, t.Name()+"1"))
				require.NoError(t, err)
				_, err = db.Exec(`update s3db_conn set write_time=?`, c.secondWriteTime)
				require.NoError(t, err)
				_, err = db.Exec(fmt.Sprintf(`insert into "%s" values('row','second')`, t.Name()+"2"))
				require.NoError(t, err)

				_, err = db.Exec(`update s3db_conn set write_time=?`, "2006-01-01 00:00:00")
				require.NoError(t, err)
				openTable(t, t.Name()+"read", db, s3Bucket, s3Endpoint)
				query := fmt.Sprintf(`select * from "%s"`, t.Name()+"read")
				require.Equal(t,
					fmt.Sprintf(`[["row","%s"]]`, c.expectedWinner),
					mustQueryToJSON(db, query))
			})
		}
	})

	// MergeRows offsets reflect which side's timestamp won the merge.
	t.Run("MergeValues",
		func(t *testing.T) {
			t1 := mustParseTime(s3db.SQLiteTimeFormat, "2006-01-01 00:00:00")
			t2 := mustParseTime(s3db.SQLiteTimeFormat, "2007-01-01 00:00:00")
			v1 := &v1proto.Row{
				ColumnValues: map[string]*v1proto.ColumnValue{"b": s3db.ToColumnValue("one")},
			}
			v2 := &v1proto.Row{
				ColumnValues: map[string]*v1proto.ColumnValue{"b": s3db.ToColumnValue("two")},
			}
			res := s3db.MergeRows(nil, t1, v1, t2, v2, t2)
			require.Equal(t, time.Duration(0), res.DeleteUpdateOffset.AsDuration())
			require.Equal(t, time.Duration(0), res.ColumnValues["b"].UpdateOffset.AsDuration())
			res = s3db.MergeRows(nil, t2, v2, t1, v1, t1)
			require.Equal(t, t2.Sub(t1), res.DeleteUpdateOffset.AsDuration())
			require.Equal(t, t2.Sub(t1), res.ColumnValues["b"].UpdateOffset.AsDuration())
			res = s3db.MergeRows(nil, t2, v2, t1, v1, t2)
			require.Equal(t, time.Duration(0), res.DeleteUpdateOffset.AsDuration())
			require.Equal(t, time.Duration(0), res.ColumnValues["b"].UpdateOffset.AsDuration())
			res = s3db.MergeRows(nil, t1, v1, t2, v2, t1)
			require.Equal(t, t2.Sub(t1), res.DeleteUpdateOffset.AsDuration())
			require.Equal(t, t2.Sub(t1), res.ColumnValues["b"].UpdateOffset.AsDuration())
		})

	// Vacuuming removes history up to a point without losing later writes.
	t.Run("Vacuum",
		func(t *testing.T) {
			db, s3Bucket, s3Endpoint := openDB()
			defer db.Close()

			openTable(t, "v1", db, s3Bucket, s3Endpoint)
			_, err := db.Exec(`update s3db_conn set write_time=?`, "2006-01-01 00:00:00")
			require.NoError(t, err)
			_, err = db.Exec(`insert into v1 values(2006,0)`)
			require.NoError(t, err)
			_, err = db.Exec(`delete from v1`)
			require.NoError(t, err)
			require.Equal(t, `null`, mustQueryToJSON(db, `select * from v1`))

			openTable(t, "v2", db, s3Bucket, s3Endpoint)
			_, err = db.Exec(`update s3db_conn set write_time=?`, "2007-01-01 00:00:00")
			require.NoError(t, err)
			_, err = db.Exec(`insert into v2 values(2007,0)`)
			require.NoError(t, err)
			require.Equal(t, `[[2007,0]]`, mustQueryToJSON(db, `select * from v2`))

			_, err = db.Exec(`update s3db_conn set write_time=?`, "2008-01-01 00:00:00")
			require.NoError(t, err)
			openTable(t, "vacuumv1", db, s3Bucket, s3Endpoint)
			require.Equal(t,
				`[[null]]`,
				mustQueryToJSON(db, "select * from s3db_vacuum('vacuumv1','2007-01-01 00:00:00');"))
			_, err = db.Exec(`update s3db_conn set write_time=?`, "2009-01-01 00:00:00")
			require.NoError(t, err)
			openTable(t, "readv2", db, s3Bucket, s3Endpoint)
			require.Equal(t,
`[[2007,0]]`, mustQueryToJSON(db, `select * from readv2`)) 501 | }) 502 | 503 | t.Run("NoPrefix", 504 | func(t *testing.T) { 505 | db, s3Bucket, s3Endpoint := openDB() 506 | defer db.Close() 507 | 508 | _, err := db.Exec(fmt.Sprintf(`create virtual table v1noprefix using s3db ( 509 | s3_bucket='%s', 510 | s3_endpoint='%s', 511 | columns='a primary key, b')`, 512 | s3Bucket, s3Endpoint)) 513 | require.NoError(t, err) 514 | _, err = db.Exec(fmt.Sprintf(`create virtual table v2noprefix using s3db ( 515 | s3_bucket='%s', 516 | s3_endpoint='%s', 517 | columns='a primary key, b')`, 518 | s3Bucket, s3Endpoint)) 519 | require.NoError(t, err) 520 | 521 | _, err = db.Exec(`insert into v1noprefix values($1,$2)`, 1, 1) 522 | require.NoError(t, err) 523 | _, err = db.Exec(`insert into v2noprefix values($1,$2)`, 2, 2) 524 | require.NoError(t, err) 525 | 526 | _, err = db.Exec(fmt.Sprintf(`create virtual table noprefix using s3db ( 527 | readonly, 528 | s3_bucket='%s', 529 | s3_endpoint='%s', 530 | columns='a primary key, b')`, 531 | s3Bucket, s3Endpoint)) 532 | require.NoError(t, err) 533 | 534 | require.Equal(t, 535 | `[[1,1],[2,2]]`, 536 | mustQueryToJSON(db, "select * from noprefix;")) 537 | }) 538 | 539 | } 540 | 541 | func TestCurrentVersion_Empty(t *testing.T) { 542 | db, s3Bucket, s3Endpoint := openDB() 543 | defer db.Close() 544 | 545 | _, err := db.Exec(fmt.Sprintf(`create virtual table `+t.Name()+` using s3db ( 546 | s3_bucket='%s', 547 | s3_endpoint='%s', 548 | s3_prefix='version_empty', 549 | columns='a primary key, b')`, 550 | s3Bucket, s3Endpoint)) 551 | require.NoError(t, err) 552 | 553 | r, err := db.Query(`select s3db_version($1)`, t.Name()) 554 | require.NoError(t, err) 555 | defer r.Close() 556 | require.True(t, r.Next()) 557 | var current_version_json string 558 | require.NoError(t, r.Scan(¤t_version_json)) 559 | require.Equal(t, "[]", current_version_json) 560 | } 561 | 562 | func TestCurrentVersion_Happy(t *testing.T) { 563 | db, s3Bucket, s3Endpoint := 
openDB() 564 | defer db.Close() 565 | 566 | _, err := db.Exec(fmt.Sprintf(`create virtual table `+t.Name()+` using s3db ( 567 | s3_bucket='%s', 568 | s3_endpoint='%s', 569 | s3_prefix='version_happy', 570 | columns='a primary key, b')`, 571 | s3Bucket, s3Endpoint)) 572 | require.NoError(t, err) 573 | _, err = db.Exec(`insert into `+t.Name()+` values($1, $2)`, 1, 1) 574 | require.NoError(t, err) 575 | 576 | r, err := db.Query(`select s3db_version($1)`, t.Name()) 577 | require.NoError(t, err) 578 | defer r.Close() 579 | require.True(t, r.Next()) 580 | var current_version_json []byte 581 | require.NoError(t, r.Scan(¤t_version_json)) 582 | var roots []string 583 | require.NoError(t, json.Unmarshal(current_version_json, &roots)) 584 | require.Equal(t, 1, len(roots)) 585 | } 586 | 587 | func TestCurrentVersion_Multiple(t *testing.T) { 588 | db, s3Bucket, s3Endpoint := openDB() 589 | defer db.Close() 590 | 591 | _, err := db.Exec(fmt.Sprintf(`create virtual table v1 using s3db ( 592 | s3_bucket='%s', 593 | s3_endpoint='%s', 594 | s3_prefix='multiple', 595 | columns='a primary key, b')`, 596 | s3Bucket, s3Endpoint)) 597 | require.NoError(t, err) 598 | _, err = db.Exec(fmt.Sprintf(`create virtual table v2 using s3db ( 599 | s3_bucket='%s', 600 | s3_endpoint='%s', 601 | s3_prefix='multiple', 602 | columns='a primary key, b')`, 603 | s3Bucket, s3Endpoint)) 604 | require.NoError(t, err) 605 | 606 | _, err = db.Exec(`insert into v1 values($1,$2)`, 1, 1) 607 | require.NoError(t, err) 608 | _, err = db.Exec(`insert into v2 values($1,$2)`, 1, 1) 609 | require.NoError(t, err) 610 | 611 | _, err = db.Exec(fmt.Sprintf(`create virtual table merged_in_memory using s3db ( 612 | readonly, 613 | s3_bucket='%s', 614 | s3_endpoint='%s', 615 | s3_prefix='multiple', 616 | columns='a primary key, b')`, 617 | s3Bucket, s3Endpoint)) 618 | require.NoError(t, err) 619 | 620 | r, err := db.Query(`select s3db_version("merged_in_memory")`) 621 | require.NoError(t, err) 622 | defer r.Close() 623 | 
require.True(t, r.Next()) 624 | var current_version_json []byte 625 | require.NoError(t, r.Scan(¤t_version_json)) 626 | var roots []string 627 | require.NoError(t, json.Unmarshal(current_version_json, &roots)) 628 | require.Equal(t, 2, len(roots)) 629 | } 630 | 631 | func TestDeadline(t *testing.T) { 632 | db, s3Bucket, s3Endpoint := openDB() 633 | defer db.Close() 634 | 635 | _, err := db.Exec(fmt.Sprintf(`create virtual table deadline using s3db ( 636 | s3_bucket='%s', 637 | s3_endpoint='%s', 638 | s3_prefix='deadline', 639 | columns='a')`, 640 | s3Bucket, s3Endpoint)) 641 | require.NoError(t, err) 642 | 643 | _, err = db.Exec(`UPDATE s3db_conn SET deadline=CURRENT_TIMESTAMP`) 644 | require.NoError(t, err) 645 | 646 | _, err = db.Exec(`INSERT INTO deadline VALUES($1)`, 1) 647 | require.ErrorContains(t, err, "context deadline exceeded") 648 | } 649 | -------------------------------------------------------------------------------- /test/vtable.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "database/sql" 5 | "encoding/json" 6 | "fmt" 7 | "testing" 8 | "time" 9 | 10 | "github.com/stretchr/testify/require" 11 | 12 | "github.com/jrhy/s3db" 13 | v1proto "github.com/jrhy/s3db/proto/v1" 14 | ) 15 | 16 | type DBOpener interface { 17 | OpenDB() (*sql.DB, string, string) 18 | } 19 | 20 | func mustJSON(i interface{}) string { 21 | var b []byte 22 | var err error 23 | b, err = json.Marshal(i) 24 | if err != nil { 25 | panic(err) 26 | } 27 | if len(b) > 60 { 28 | b, err = json.MarshalIndent(i, " ", " ") 29 | if err != nil { 30 | panic(err) 31 | } 32 | } 33 | return string(b) 34 | } 35 | 36 | func mustGetRows(r *sql.Rows) [][]interface{} { 37 | cols, err := r.Columns() 38 | if err != nil { 39 | panic(err) 40 | } 41 | var rows [][]interface{} 42 | for r.Next() { 43 | row := make([]interface{}, len(cols)) 44 | for i := range row { 45 | var q interface{} 46 | row[i] = &q 47 | } 48 | err = r.Scan(row...) 
49 | if err != nil { 50 | panic(fmt.Errorf("scan: %w", err)) 51 | } 52 | rows = append(rows, row) 53 | } 54 | return rows 55 | } 56 | 57 | func expand(row []interface{}) []interface{} { 58 | res := []interface{}{} 59 | for i := range row { 60 | for _, v := range row[i].([]interface{}) { 61 | res = append(res, v) 62 | } 63 | } 64 | return res 65 | } 66 | func populateTwoTables(db *sql.DB, s3Bucket, s3Endpoint, tablePrefix, schema string, row ...interface{}) (string, string, error) { 67 | 68 | regTableName := tablePrefix + "_reg" 69 | virtualTableName := tablePrefix + "_virtual" 70 | _, err := db.Exec(fmt.Sprintf(`create table %s(%s)`, 71 | regTableName, schema)) 72 | if err != nil { 73 | return "", "", fmt.Errorf("create %s: %w", regTableName, err) 74 | } 75 | 76 | _, err = db.Exec(fmt.Sprintf(`create virtual table %s using s3db ( 77 | s3_bucket='%s', 78 | s3_endpoint='%s', 79 | s3_prefix='%s', 80 | columns='%s')`, virtualTableName, 81 | s3Bucket, s3Endpoint, virtualTableName, schema)) 82 | if err != nil { 83 | return "", "", fmt.Errorf("create %s: %w", virtualTableName, err) 84 | } 85 | 86 | valuesStr := "values" 87 | for i := range row { 88 | if i > 0 { 89 | valuesStr += "," 90 | } 91 | valuesStr += "(" 92 | for j := range row[i].([]interface{}) { 93 | if j > 0 { 94 | valuesStr += "," 95 | } 96 | valuesStr += "?" 97 | } 98 | valuesStr += ")" 99 | } 100 | _, err = db.Exec(fmt.Sprintf("insert into %s %s", regTableName, valuesStr), expand(row)...) 101 | if err != nil { 102 | return "", "", fmt.Errorf("insert %s: %w", regTableName, err) 103 | } 104 | _, err = db.Exec(fmt.Sprintf("insert into %s %s", virtualTableName, valuesStr), expand(row)...) 
105 | if err != nil { 106 | return "", "", fmt.Errorf("insert %s: %w", virtualTableName, err) 107 | } 108 | 109 | return regTableName, virtualTableName, nil 110 | } 111 | 112 | func row(cols ...interface{}) interface{} { 113 | return interface{}(cols) 114 | } 115 | 116 | func requireSelectEquiv(t *testing.T, db *sql.DB, regTable, virtualTable, where, expectedJSON string) { 117 | require.Equal(t, 118 | expectedJSON, 119 | mustQueryToJSON(db, fmt.Sprintf("select * from %s %s", regTable, where))) 120 | require.Equal(t, 121 | expectedJSON, 122 | mustQueryToJSON(db, fmt.Sprintf("select * from %s %s", virtualTable, where))) 123 | } 124 | 125 | func dump(rows *sql.Rows) error { 126 | colNames, err := rows.Columns() 127 | if err != nil { 128 | return fmt.Errorf("columns: %w", err) 129 | } 130 | cols := make([]interface{}, len(colNames)) 131 | for i := range colNames { 132 | var ef interface{} 133 | cols[i] = &ef 134 | } 135 | for rows.Next() { 136 | err = rows.Scan(cols...) 137 | if err != nil { 138 | return err 139 | } 140 | s := "ROW: " 141 | for i := range cols { 142 | s += fmt.Sprintf("%+v ", *cols[i].(*interface{})) 143 | } 144 | fmt.Println(s) 145 | } 146 | return nil 147 | } 148 | 149 | func mustQueryToJSON(db *sql.DB, query string) string { 150 | rows, err := db.Query(query) 151 | if err != nil { 152 | panic(fmt.Errorf("%s: %w", query, err)) 153 | } 154 | defer rows.Close() 155 | return mustJSON(mustGetRows(rows)) 156 | } 157 | 158 | /* 159 | // possible discrepancy to sqlite / perhaps bug in go-sqlite3 160 | func TODOTestEmptyText(t *testing.T) { 161 | db, s3Bucket, s3Endpoint := getDBBucket() 162 | _, err := db.Exec(fmt.Sprintf(`create virtual table emptytextval using s3db ( 163 | s3_bucket='%s', 164 | s3_endpoint='%s', 165 | s3_prefix='emptytextval', 166 | columns='a')`, 167 | s3Bucket, s3Endpoint)) 168 | require.NoError(t, err) 169 | _, err = db.Exec(`insert into emptytextval values ('');`) 170 | require.NoError(t, err) 171 | require.Equal(t, 172 | 
`[["text",""]]`, 173 | mustQueryToJSON(db, `select typeof(a), a from emptytextval`)) 174 | } 175 | 176 | // possible discrepancy to sqlite / perhaps bug in go-sqlite3 177 | func TODOTestEmptyBlob(t *testing.T) { 178 | db, s3Bucket, s3Endpoint := getDBBucket() 179 | _, err := db.Exec(fmt.Sprintf(`create virtual table emptyblobval using s3db ( 180 | s3_bucket='%s', 181 | s3_endpoint='%s', 182 | s3_prefix='emptyblobval', 183 | columns='a')`, 184 | s3Bucket, s3Endpoint)) 185 | require.NoError(t, err) 186 | _, err = db.Exec(`insert into emptyblobval values (x'');`) 187 | require.NoError(t, err) 188 | require.Equal(t, 189 | `[["blob",""]]`, 190 | mustQueryToJSON(db, `select typeof(a), a from emptyblobval`)) 191 | } 192 | */ 193 | 194 | func mustParseTime(f, s string) time.Time { 195 | t, err := time.Parse(f, s) 196 | if err != nil { 197 | panic(err) 198 | } 199 | return t 200 | } 201 | 202 | func Test(t *testing.T, opener DBOpener) { 203 | 204 | t.Run("1", func(t *testing.T) { 205 | fmt.Printf("opening...\n") 206 | db, s3Bucket, s3Endpoint := opener.OpenDB() 207 | fmt.Printf("deferring...\n") 208 | defer db.Close() 209 | fmt.Printf("exec'ing...\n") 210 | _, err := db.Exec(fmt.Sprintf(`create virtual table t1 using s3db ( 211 | s3_bucket='%s', 212 | s3_endpoint='%s', 213 | s3_prefix='t1', 214 | columns='a primary key, b')`, 215 | s3Bucket, s3Endpoint)) 216 | require.NoError(t, err) 217 | _, err = db.Exec("delete from t1;") 218 | require.NoError(t, err) 219 | _, err = db.Exec("insert into t1 values ('v1','v1b'),('v2','v2b'),('v3','v3b');") 220 | require.NoError(t, err) 221 | //_, err = db.Exec("update t1 set a='v1c' where b='v1b';") 222 | //require.NoError(t, err) 223 | _, err = db.Exec("delete from t1 where b='v2b';") 224 | require.NoError(t, err) 225 | require.Equal(t, 226 | `[["v1","v1b"],`+ 227 | `["v3","v3b"]]`, 228 | mustQueryToJSON(db, "select * from t1;")) 229 | }) 230 | 231 | t.Run("BestIndex", 232 | func(t *testing.T) { 233 | db, s3Bucket, s3Endpoint := 
opener.OpenDB()
			defer db.Close()
			regTable, sasqTable, err := populateTwoTables(db, s3Bucket, s3Endpoint,
				"index", "a primary key",
				row(1),
				row(3),
				row(2),
			)
			require.NoError(t, err)
			// Keys were inserted out of order; the virtual table
			// should still return them sorted.
			t.Run("s3dbKeySorted", func(t *testing.T) {
				require.Equal(t,
					"[[1],[2],[3]]",
					mustQueryToJSON(db, fmt.Sprintf(`select a from %s`, sasqTable)))
			})
			// sqliteEquiv builds a subtest asserting that the regular
			// table and the virtual table agree for a WHERE clause.
			sqliteEquiv := func(where, expectedJSON string) func(*testing.T) {
				return func(t *testing.T) {
					requireSelectEquiv(t, db, regTable, sasqTable,
						where, expectedJSON)
				}
			}
			t.Run("gt_asc", sqliteEquiv("where a>2", "[[3]]"))
			t.Run("ge_asc", sqliteEquiv("where a>=2", "[[2],[3]]"))
			t.Run("eq_asc", sqliteEquiv("where a=2", "[[2]]"))
			t.Run("le_asc", sqliteEquiv("where a<=2", "[[1],[2]]"))
			t.Run("lt_asc", sqliteEquiv("where a<2", "[[1]]"))

			t.Run("gt_desc", sqliteEquiv("where a>2 order by 1 desc", "[[3]]"))
			t.Run("ge_desc", sqliteEquiv("where a>=2 order by 1 desc", "[[3],[2]]"))
			t.Run("eq_desc", sqliteEquiv("where a=2 order by 1 desc", "[[2]]"))
			t.Run("le_desc", sqliteEquiv("where a<=2 order by 1 desc", "[[2],[1]]"))
			t.Run("lt_desc", sqliteEquiv("where a<2 order by 1 desc", "[[1]]"))

			t.Run("and_asc", sqliteEquiv("where a>1 and a<3", "[[2]]"))

			t.Run("in_asc", sqliteEquiv("where a in (1,3)", "[[1],[3]]"))
			t.Run("in_desc", sqliteEquiv("where a in (1,3) order by 1 desc", "[[3],[1]]"))
		})

	t.Run("SortOrder",

		func(t *testing.T) {
			db, s3Bucket, s3Endpoint := opener.OpenDB()
			defer db.Close()
			_, err := db.Exec(fmt.Sprintf(`create virtual table sortfun using s3db (
				s3_bucket='%s',
				s3_endpoint='%s',
				s3_prefix='sortfun',
				columns='a primary key')`,
				s3Bucket, s3Endpoint))
			require.NoError(t, err)
			_, err = db.Exec(`insert into sortfun values (?), (?), (?), (?)`,
				[]byte("blob"),
				"text",
				3.14,
				3,
			)
			require.NoError(t, err)
			// Expect numeric values first (3 before 3.14), then text,
			// then blob, per the assertion below.
			require.Equal(t,
				`[["integer"],["real"],["text"],["blob"]]`,
				mustQueryToJSON(db, `select typeof(a) from sortfun`))

			// A native (non-virtual) table must sort the same values
			// in the same order.
			_, err = db.Exec(`create table sortfun_native(a primary key) without rowid`)
			require.NoError(t, err)
			_, err = db.Exec(`insert into sortfun_native values (?), (?), (?), (?)`,
				[]byte("blob"),
				"text",
				3.14,
				3,
			)
			require.NoError(t, err)
			require.Equal(t,
				`[["integer"],["real"],["text"],["blob"]]`,
				mustQueryToJSON(db, `select typeof(a) from sortfun_native`))
		})

	t.Run("NullPrimaryKey",
		func(t *testing.T) {
			db, s3Bucket, s3Endpoint := opener.OpenDB()
			defer db.Close()
			_, err := db.Exec(fmt.Sprintf(`create virtual table nullkey using s3db (
				s3_bucket='%s',
				s3_endpoint='%s',
				s3_prefix='nullkey',
				columns='a primary key')`,
				s3Bucket, s3Endpoint))
			require.NoError(t, err)
			// Inserting NULL into the primary key must be rejected.
			_, err = db.Exec(`insert into nullkey values (null);`)
			require.Error(t, err, "constraint failed")
		})

	t.Run("NullValue",
		func(t *testing.T) {
			db, s3Bucket, s3Endpoint := opener.OpenDB()
			_, err := db.Exec(fmt.Sprintf(`create virtual table nullval using s3db (
				s3_bucket='%s',
				s3_endpoint='%s',
				s3_prefix='nullval',
				columns='a')`,
				s3Bucket, s3Endpoint))
			require.NoError(t, err)
			// NULL in a non-key column round-trips as NULL.
			_, err = db.Exec(`insert into nullval values (null);`)
			require.NoError(t, err)
			require.Equal(t,
				`[["null",null]]`,
				mustQueryToJSON(db, `select typeof(a), a from nullval`))
		})

	t.Run("ZeroInt", func(t *testing.T) {
		db, s3Bucket, s3Endpoint := opener.OpenDB()
		_, err := db.Exec(fmt.Sprintf(`create virtual table zerointval using s3db (
			s3_bucket='%s',
			s3_endpoint='%s',
			s3_prefix='zerointval',
			columns='a')`,
			s3Bucket, s3Endpoint))
		require.NoError(t, err)
		// Integer zero must keep integer affinity, not become NULL.
		_, err = db.Exec(`insert into zerointval values (0);`)
		require.NoError(t, err)
		require.Equal(t,
			`[["integer",0]]`,
			mustQueryToJSON(db, `select typeof(a), a from zerointval`))
	})

	t.Run("ZeroReal", func(t *testing.T) {
		db, s3Bucket, s3Endpoint := opener.OpenDB()
		_, err := db.Exec(fmt.Sprintf(`create virtual table zerorealval using s3db (
			s3_bucket='%s',
			s3_endpoint='%s',
			s3_prefix='zerorealval',
			columns='a')`,
			s3Bucket, s3Endpoint))
		require.NoError(t, err)
		// Real zero must keep real affinity.
		_, err = db.Exec(`insert into zerorealval values (0.0);`)
		require.NoError(t, err)
		require.Equal(t,
			`[["real",0]]`,
			mustQueryToJSON(db, `select typeof(a), a from zerorealval`))
	})

	t.Run("InsertConflict", func(t *testing.T) {
		db, s3Bucket, s3Endpoint := opener.OpenDB()
		defer db.Close()

		// openTableWithWriteTime creates a virtual table pinned to a
		// fixed write_time so merge resolution is deterministic.
		openTableWithWriteTime := func(t *testing.T, name, tm string) {
			mustParseTime(s3db.SQLiteTimeFormat, tm)
			stmt := fmt.Sprintf(`create virtual table "%s" using s3db (
				s3_bucket='%s',
				s3_endpoint='%s',
				s3_prefix='%s',
				columns='a primary key, b',
				write_time='%s')`,
				name, s3Bucket, s3Endpoint, t.Name(), tm)
			_, err := db.Exec(stmt)
			require.NoError(t, err)
		}

		// Two cases: table "1" vs table "2" opened with swapped
		// write times; the writer with the later write_time must win.
		for i, openerWithLatestWriteTime := range []func(*testing.T) string{
			func(t *testing.T) (expectedWinner string) {
				openTableWithWriteTime(t, t.Name()+"1", "2006-01-01 00:00:00")
				openTableWithWriteTime(t, t.Name()+"2", "2007-01-01 00:00:00")
				return "two"
			},
			func(t *testing.T) (expectedWinner string) {
				openTableWithWriteTime(t, t.Name()+"2", "2006-01-01 00:00:00")
				openTableWithWriteTime(t, t.Name()+"1", "2007-01-01 00:00:00")
				return "one"
			},
		} {

			t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
				expectedWinner :=
					openerWithLatestWriteTime(t)

				_, err := db.Exec(fmt.Sprintf(`insert into "%s" values('row','one')`, t.Name()+"1"))
				require.NoError(t, err)
				_, err = db.Exec(fmt.Sprintf(`insert into "%s" values('row','two')`, t.Name()+"2"))
				require.NoError(t, err)

				// Read through a third table opened later than both
				// writers and check which value survived the merge.
				openTableWithWriteTime(t, t.Name()+"read", "2008-01-01 00:00:00")
				query := fmt.Sprintf(`select * from "%s"`, t.Name()+"read")
				require.Equal(t,
					fmt.Sprintf(`[["row","%s"]]`, expectedWinner),
					mustQueryToJSON(db, query))
			})
		}
	})

	t.Run("MergeValues",
		func(t *testing.T) {
			t1 := mustParseTime(s3db.SQLiteTimeFormat, "2006-01-01 00:00:00")
			t2 := mustParseTime(s3db.SQLiteTimeFormat, "2007-01-01 00:00:00")
			v1 := &v1proto.Row{
				ColumnValues: map[string]*v1proto.ColumnValue{"b": s3db.ToColumnValue("one")},
			}
			v2 := &v1proto.Row{
				ColumnValues: map[string]*v1proto.ColumnValue{"b": s3db.ToColumnValue("two")},
			}
			// MergeRows in all four orderings of (time, value); the
			// offsets are zero when the merge output time equals the
			// winning value's time, and t2-t1 otherwise.
			res := s3db.MergeRows(nil, t1, v1, t2, v2, t2)
			require.Equal(t, time.Duration(0), res.DeleteUpdateOffset.AsDuration())
			require.Equal(t, time.Duration(0), res.ColumnValues["b"].UpdateOffset.AsDuration())
			res = s3db.MergeRows(nil, t2, v2, t1, v1, t1)
			require.Equal(t, t2.Sub(t1), res.DeleteUpdateOffset.AsDuration())
			require.Equal(t, t2.Sub(t1), res.ColumnValues["b"].UpdateOffset.AsDuration())
			res = s3db.MergeRows(nil, t2, v2, t1, v1, t2)
			require.Equal(t, time.Duration(0), res.DeleteUpdateOffset.AsDuration())
			require.Equal(t, time.Duration(0), res.ColumnValues["b"].UpdateOffset.AsDuration())
			res = s3db.MergeRows(nil, t1, v1, t2, v2, t1)
			require.Equal(t, t2.Sub(t1), res.DeleteUpdateOffset.AsDuration())
			require.Equal(t, t2.Sub(t1), res.ColumnValues["b"].UpdateOffset.AsDuration())
		})

	t.Run("Vacuum",
		func(t *testing.T) {
			db, s3Bucket, s3Endpoint := opener.OpenDB()
			defer db.Close()

			// openTableWithWriteTime creates a virtual table pinned
			// to a fixed write_time (same helper as InsertConflict).
			openTableWithWriteTime := func(t *testing.T, name, tm string) {
				mustParseTime(s3db.SQLiteTimeFormat, tm)
				stmt := fmt.Sprintf(`create virtual table "%s" using s3db (
					s3_bucket='%s',
					s3_endpoint='%s',
					s3_prefix='%s',
					columns='a primary key, b',
					write_time='%s')`,
					name, s3Bucket, s3Endpoint, t.Name(), tm)
				_, err := db.Exec(stmt)
				require.NoError(t, err)
			}

			// Insert then delete a row through the 2006 table.
			openTableWithWriteTime(t, "v1", "2006-01-01 00:00:00")
			_, err := db.Exec(`insert into v1 values(2006,0)`)
			require.NoError(t, err)
			_, err = db.Exec(`delete from v1`)
			require.NoError(t, err)
			require.Equal(t, `null`, mustQueryToJSON(db, `select * from v1`))

			openTableWithWriteTime(t, "v2", "2007-01-01 00:00:00")
			_, err = db.Exec(`insert into v2 values(2007,0)`)
			require.NoError(t, err)
			require.Equal(t, `[[2007,0]]`, mustQueryToJSON(db, `select * from v2`))

			// Vacuum history up to 2007, then confirm the 2007 row
			// is still readable afterwards.
			openTableWithWriteTime(t, "vacuumv1", "2008-01-01 00:00:00")
			require.Equal(t,
				`[[null]]`,
				mustQueryToJSON(db, "select * from s3db_vacuum('vacuumv1','2007-01-01 00:00:00');"))
			openTableWithWriteTime(t, "readv2", "2009-01-01 00:00:00")
			require.Equal(t, `[[2007,0]]`, mustQueryToJSON(db, `select * from readv2`))
		})

	t.Run("NoPrefix",
		func(t *testing.T) {
			db, s3Bucket, s3Endpoint := opener.OpenDB()
			defer db.Close()

			// Two writer tables share the bucket with no s3_prefix.
			_, err := db.Exec(fmt.Sprintf(`create virtual table v1noprefix using s3db (
				s3_bucket='%s',
				s3_endpoint='%s',
				columns='a primary key, b')`,
				s3Bucket, s3Endpoint))
			require.NoError(t, err)
			_, err = db.Exec(fmt.Sprintf(`create virtual table v2noprefix using s3db (
				s3_bucket='%s',
				s3_endpoint='%s',
				columns='a primary key, b')`,
				s3Bucket, s3Endpoint))
			require.NoError(t, err)

			_, err = db.Exec(`insert into v1noprefix values($1,$2)`, 1, 1)
			require.NoError(t,
err)
			_, err = db.Exec(`insert into v2noprefix values($1,$2)`, 2, 2)
			require.NoError(t, err)

			// A readonly, prefixless table should see the union of
			// the rows written through both writer tables.
			_, err = db.Exec(fmt.Sprintf(`create virtual table noprefix using s3db (
				readonly,
				s3_bucket='%s',
				s3_endpoint='%s',
				columns='a primary key, b')`,
				s3Bucket, s3Endpoint))
			require.NoError(t, err)

			require.Equal(t,
				`[[1,1],[2,2]]`,
				mustQueryToJSON(db, "select * from noprefix;"))
		})

}
--------------------------------------------------------------------------------
/vtable_common_test.go:
--------------------------------------------------------------------------------
package s3db

import (
	"testing"
	"time"

	v1proto "github.com/jrhy/s3db/proto/v1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"google.golang.org/protobuf/types/known/durationpb"
)

// TestMergeRows_LastDeleteWins merges two deleted versions and
// checks that the later effective delete time is kept.
func TestMergeRows_LastDeleteWins(t *testing.T) {
	tm := time.Now()
	res := MergeRows(nil,
		tm.Add(time.Duration(-1000*time.Second)),
		&v1proto.Row{
			Deleted:            true,
			DeleteUpdateOffset: durationpb.New(time.Duration(1 * time.Hour)),
		},
		tm.Add(time.Duration(-2000*time.Second)),
		&v1proto.Row{
			Deleted:            true,
			DeleteUpdateOffset: durationpb.New(time.Duration(2 * time.Hour)),
		},
		tm)
	assert.True(t, res.Deleted)
	// Effective delete time = version time + offset; the later one
	// (tm-2000s+2h) must win.
	assert.Equal(t,
		tm.Add(time.Duration(2*time.Hour)).Add(time.Duration(-2000*time.Second)),
		tm.Add(res.DeleteUpdateOffset.AsDuration()))
}

// TestMergeRows_LastWriteWins checks per-column last-writer-wins
// resolution when both versions set the same column.
func TestMergeRows_LastWriteWins(t *testing.T) {
	tm := time.Now()
	res := MergeRows(nil,
		tm.Add(time.Duration(-1000*time.Second)),
		&v1proto.Row{
			ColumnValues: map[string]*v1proto.ColumnValue{
				"col": {
					Value:        toSQLiteValue("hi"),
					UpdateOffset: durationpb.New(time.Duration(time.Hour)),
				},
			},
		},
		tm.Add(time.Duration(-2000*time.Second)),
		&v1proto.Row{
			ColumnValues: map[string]*v1proto.ColumnValue{
				"col": {
					Value:        toSQLiteValue("there"),
					UpdateOffset: durationpb.New(time.Duration(2 * time.Hour)),
				},
			},
		},
		tm)
	require.Equal(t, 1, len(res.ColumnValues))
	// The value with the later effective update time survives.
	require.Equal(t,
		toSQLiteValue("there"),
		res.ColumnValues["col"].Value)
	require.Equal(t,
		tm.Add(time.Duration(2*time.Hour)).Add(time.Duration(-2000*time.Second)),
		tm.Add(res.ColumnValues["col"].UpdateOffset.AsDuration()))
}

// TestToSQLiteValue covers text and blob conversion, including the
// empty-text and empty-blob cases (which must stay distinct types).
func TestToSQLiteValue(t *testing.T) {
	require.Equal(t,
		&v1proto.SQLiteValue{Type: v1proto.Type_TEXT, Text: "hi"},
		toSQLiteValue("hi"))
	require.Equal(t,
		&v1proto.SQLiteValue{Type: v1proto.Type_TEXT},
		toSQLiteValue(""))
	require.Equal(t,
		&v1proto.SQLiteValue{Type: v1proto.Type_BLOB, Blob: []byte{}},
		toSQLiteValue([]byte{}))
}

// TestMergeRows_UnifyColumns merges rows with disjoint column sets
// and checks both columns survive, each with its own update time.
func TestMergeRows_UnifyColumns(t *testing.T) {
	tm := time.Now()
	res := MergeRows(nil,
		tm.Add(time.Duration(-1000*time.Second)),
		&v1proto.Row{
			ColumnValues: map[string]*v1proto.ColumnValue{
				"col0": {
					Value:        toSQLiteValue("hi"),
					UpdateOffset: durationpb.New(time.Duration(time.Hour)),
				},
			},
		},
		tm.Add(time.Duration(-2000*time.Second)),
		&v1proto.Row{
			ColumnValues: map[string]*v1proto.ColumnValue{
				"col1": {
					Value:        toSQLiteValue("there"),
					UpdateOffset: durationpb.New(time.Duration(2 * time.Hour)),
				},
			},
		},
		tm)
	require.Equal(t, 2, len(res.ColumnValues))
	require.Equal(t, toSQLiteValue("hi"),
		res.ColumnValues["col0"].Value)
	require.Equal(t, toSQLiteValue("there"),
		res.ColumnValues["col1"].Value)
	require.Equal(t,
		tm.Add(time.Duration(-1000*time.Second)).Add(time.Duration(time.Hour)),
		UpdateTime(tm, res.ColumnValues["col0"]),
	)
	require.Equal(t,
		tm.Add(time.Duration(-2000*time.Second)).Add(time.Duration(2*time.Hour)),
		UpdateTime(tm, res.ColumnValues["col1"]),
	)
}

// TestMergeRows_InsertAfterDelete checks that a re-insert whose
// effective time is later than a delete drops the deleted version's
// columns and keeps only the re-inserted ones.
func TestMergeRows_InsertAfterDelete(t *testing.T) {
	tm := time.Now()
	res := MergeRows(nil,
		tm.Add(time.Duration(-1000*time.Second)),
		&v1proto.Row{
			Deleted:            true,
			DeleteUpdateOffset: durationpb.New(time.Duration(time.Hour)),
			ColumnValues: map[string]*v1proto.ColumnValue{
				"getnulledonnextinsert": {
					Value:        toSQLiteValue("hi"),
					UpdateOffset: durationpb.New(time.Duration(time.Hour)),
				},
			},
		},
		tm.Add(time.Duration(-2000*time.Second)),
		&v1proto.Row{
			DeleteUpdateOffset: durationpb.New(time.Duration(2 * time.Hour)),
			ColumnValues: map[string]*v1proto.ColumnValue{
				"version2": {
					Value:        toSQLiteValue("there"),
					UpdateOffset: durationpb.New(time.Duration(2 * time.Hour)),
				},
			},
		},
		tm)
	require.Equal(t, 1, len(res.ColumnValues))
	require.Nil(t, res.ColumnValues["getnulledonnextinsert"])
	require.Equal(t, toSQLiteValue("there"),
		res.ColumnValues["version2"].Value)
	require.Equal(t,
		tm.Add(time.Duration(-2000*time.Second)).Add(time.Duration(2*time.Hour)),
		UpdateTime(tm, res.ColumnValues["version2"]),
	)
}
--------------------------------------------------------------------------------
/writetime/context.go:
--------------------------------------------------------------------------------
// Package writetime carries an explicit write timestamp through a
// context.Context.
package writetime

import (
	"context"
	"time"
)

type keyType interface{}

var (
	i int
	// key is a package-private pointer used as the context key, so
	// it cannot collide with keys from other packages.
	key keyType = &i
)

// FromContext returns the write time stored in ctx, and whether one
// was present.
func FromContext(ctx context.Context) (time.Time, bool) {
	t, ok := ctx.Value(key).(time.Time)
	return t, ok
}

// NewContext returns a copy of ctx that carries t as the write time.
func NewContext(ctx context.Context, t time.Time) context.Context {
	return context.WithValue(ctx, key, t)
}
--------------------------------------------------------------------------------