├── .env ├── .github └── workflows │ ├── build-image.yml │ ├── build1.20.yml │ └── build1.21.yml ├── .gitignore ├── Dockerfile ├── README.md ├── arch.png ├── cmd ├── cli │ └── cli.go ├── http │ ├── client │ │ ├── client.go │ │ └── db.go │ ├── handler │ │ ├── create.go │ │ ├── execute.go │ │ ├── tables.go │ │ ├── tigger.go │ │ └── utils.go │ └── main.go └── server │ ├── db.go │ ├── db_provider.go │ ├── main.go │ └── readme.md ├── datasets ├── ai.md └── github.md ├── go.mod ├── go.sum ├── internal ├── prom │ └── prometheus.go ├── s3 │ ├── default.go │ ├── s3.go │ └── s3_test.go ├── sqlite3vfs │ ├── defaultvfsv1.go │ ├── error.go │ ├── file.go │ ├── options.go │ ├── sqlite3-binding.h │ ├── sqlite3ext.h │ ├── sqlite3vfs.c │ ├── sqlite3vfs.go │ ├── sqlite3vfs.h │ ├── sqlite3vfs_loadable_ext.go │ ├── sqlite3vfs_normal.go │ └── sqlite3vfscgo.go ├── sqlparser │ └── sqlparser.go ├── utils │ ├── const.go │ ├── environ.go │ ├── roundtrip.go │ ├── secretkey.go │ ├── sum.go │ └── sum_test.go └── vfsextend │ ├── diskcache.go │ ├── diskcache_test.go │ ├── httprange.go │ ├── httprange_test.go │ ├── nopcache.go │ ├── readme.md │ ├── sqlite3vfs_http.go │ ├── sqlite3vfs_http_test.go │ ├── sqlite3vfs_tmpvfs.go │ └── sqlite3vfs_tmpvfs_test.go ├── tests ├── createdb_test.go ├── http_create.http ├── http_exec.http ├── http_query.http └── http_tigger.http ├── thirdparty └── README.md └── ui ├── README.md ├── package-lock.json ├── package.json ├── src ├── app.html └── routes │ ├── +layout.js │ ├── +layout.svelte │ ├── +page.svelte │ └── styles.css ├── static └── favicon.png ├── svelte.config.js ├── vite.config.js └── yarn.lock /.env: -------------------------------------------------------------------------------- 1 | S3Endpoint= 2 | S3Region= 3 | S3AccessKey= 4 | S3SecretKey= 5 | S3Bucket= 6 | LESSDB_SLATKEY= 7 | LESSDB_LISTEN= 8 | LESSDB_TIGGERURL=http://localhost:9000/api/v1/tigger/s3events -------------------------------------------------------------------------------- /.github/workflows/build-image.yml: -------------------------------------------------------------------------------- 1 | name: BuildImage 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | 8 | jobs: 9 | 10 | build: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v3 14 | 15 | - name: Set up Go 16 | uses: actions/setup-go@v3 17 | with: 18 | go-version: '1.20' 19 | 20 | - name: Declare some variables 21 | id: vars 22 | shell: bash 23 | run: | 24 | echo "::set-output name=sha_short::$(git rev-parse --short HEAD)" 25 | echo "::set-output name=time::$(date +'%Y%m%d%H')" 26 | echo "::set-output name=tag::${GITHUB_REF#refs/*/}" 27 | 28 | - name: Build 29 | run: go mod tidy && cd cmd/http && go build . 30 | 31 | - name: Test 32 | run: go test ./... 33 | 34 | - name: Set up Docker Buildx 35 | uses: docker/setup-buildx-action@v1 36 | 37 | - name: Login to Registry 38 | uses: docker/login-action@v1 39 | with: 40 | registry: ${{ secrets.REGISTRY_URL }} 41 | username: ${{ secrets.REGISTRY_USERNAME }} 42 | password: ${{ secrets.REGISTRY_TOKEN }} 43 | 44 | - name: Build and push 45 | uses: docker/build-push-action@v2 46 | with: 47 | context: . 
48 | file: ./Dockerfile 49 | push: true 50 | platforms: linux/amd64 51 | tags: ccr.ccs.tencentyun.com/serverlessv1/lessdb:lessdb-${{ steps.vars.outputs.tag }}-${{ steps.vars.outputs.time }} -------------------------------------------------------------------------------- /.github/workflows/build1.20.yml: -------------------------------------------------------------------------------- 1 | name: Go1.20 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | jobs: 10 | 11 | build: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v3 15 | 16 | - name: Install sqlite3 17 | run: sudo apt-get install sqlite3 libsqlite3-dev 18 | 19 | - name: Set up Go 20 | uses: actions/setup-go@v3 21 | with: 22 | go-version: '1.20' 23 | 24 | - name: Build 25 | run: go mod tidy && cd cmd/http && go build . 26 | 27 | - name: Test 28 | run: go test ./... -------------------------------------------------------------------------------- /.github/workflows/build1.21.yml: -------------------------------------------------------------------------------- 1 | name: Go1.21 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | jobs: 10 | 11 | build: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v3 15 | 16 | - name: Install sqlite3 17 | run: sudo apt-get install sqlite3 libsqlite3-dev 18 | 19 | - name: Set up Go 20 | uses: actions/setup-go@v3 21 | with: 22 | go-version: '1.21' 23 | 24 | - name: Build 25 | run: go mod tidy && cd cmd/http && go build . 26 | 27 | - name: Test 28 | run: go test ./... -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .env.* 2 | .DS_Store 3 | 4 | **/node_modules 5 | **/.svelte-kit 6 | **/sqlite3vfs_httpdir_* 7 | **/sqlite3vfs_tmpdir_* 8 | **/httprange-test* 9 | **/diskcache-test* 10 | cmd/http/http 11 | cmd/cli/cli 12 | cmd/server/server 13 | **/*.db 14 | **/*.lessdb 15 | **/vfscache_* 16 | **/build -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.20 2 | COPY ./ /app 3 | RUN chmod +x -R * 4 | WORKDIR /app 5 | ENTRYPOINT ["./cmd/http/http"] -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LessDB 2 | 3 | [![build](https://github.com/linkxzhou/LessDB/actions/workflows/build1.20.yml/badge.svg)](https://github.com/linkxzhou/LessDB/actions/workflows/build1.20.yml) 4 | [![build](https://github.com/linkxzhou/LessDB/actions/workflows/build1.21.yml/badge.svg)](https://github.com/linkxzhou/LessDB/actions/workflows/build1.21.yml) 5 | 6 | LessDB is a serverless SQLite service designed to simplify the use of cloud-based MySQL, PostgreSQL, and other databases. The project is still in the planning stage, and the planned features include:
7 | 8 | - [x] HTTP JSON API 9 | - [x] Optimizing Security 10 | - [ ] Sqlite Cli 11 | - [x] S3 Object Storage 12 | - [ ] Asynchronously Execute SQL for Eventual Consistency 13 | - [x] Local Cache File 14 | - [ ] Compatible with MySQL Protocol 15 | - [ ] Support Uploading Various Data Formats and Converting them into SQLite Files 16 | - [ ] Prometheus Metrics Data 17 | - [ ] Optimizing WebUI 18 | 19 | ## Installation 20 | 21 | ``` 22 | go get github.com/linkxzhou/LessDB 23 | ``` 24 | OR 25 | ``` 26 | git clone git@github.com:linkxzhou/LessDB.git 27 | cd LessDB/cmd/http 28 | go build . 29 | ``` 30 | 31 | **Startup:** 32 | ``` 33 | cd LessDB/cmd/http 34 | ./http 35 | ``` 36 | 37 | 38 | ## Architecture 39 | ![avatar](./arch.png) 40 | 41 | ## HTTP JSON API 42 | 43 | **(1)create databases** 44 | ``` 45 | POST http://localhost:18090/api/v1/createdb 46 | Content-Type: application/json 47 | 48 | { 49 | "list": [ 50 | { 51 | "cmd": "CREATE TABLE IF NOT EXISTS foo1 (id text NOT NULL PRIMARY KEY,title text)", 52 | "args": [] 53 | }, 54 | { 55 | "cmd": "CREATE TABLE IF NOT EXISTS foo2 (id text NOT NULL PRIMARY KEY,title text)", 56 | "args": [] 57 | }, 58 | { 59 | "cmd": "INSERT INTO foo1 (id, title) values (?, ?)", 60 | "args": [415, "romantic-swell"] 61 | }, 62 | { 63 | "cmd": "INSERT INTO foo2 (id, title) values (?, ?)", 64 | "args": [415, "romantic-swell"] 65 | } 66 | ] 67 | } 68 | ``` 69 | 70 | **(2)query tables** 71 | ``` 72 | GET http://localhost:18090/api/v1/{readkey}/tables?limit=10&offset=0 73 | Content-Type: application/json 74 | ``` 75 | 76 | **(3)query rows** 77 | ``` 78 | GET http://localhost:18090/api/v1/{readkey}/tables/foo1/rows?limit=100&offset=0 79 | Content-Type: application/json 80 | ``` 81 | 82 | **(4)execute sql: asynchronously execute SQL** 83 | ``` 84 | POST http://localhost:18090/api/v1/{readkey}/execute 85 | Content-Type: application/json 86 | 87 | { 88 | "list": [ 89 | { 90 | "cmd": "CREATE TABLE IF NOT EXISTS foo1 (id text NOT NULL PRIMARY KEY,title text)", 91 | "args": [] 92 | }, 93 | { 94 | "cmd": "CREATE TABLE IF NOT EXISTS foo2 (id text NOT NULL PRIMARY KEY,title text)", 95 | "args": [] 96 | }, 97 | { 98 | "cmd": "INSERT INTO foo1 (id, title) values (?, ?)", 99 | "args": [1, "unique-title-1"] 100 | }, 101 | { 102 | "cmd": "INSERT INTO foo1 (id, title) values (?, ?)", 103 | "args": [2, "unique-title-2"] 104 | } 105 | ], 106 | "writekey": "{writekey}" 107 | } 108 | ``` 109 | 110 | **(5)execute sql for query data** 111 | ``` 112 | POST http://localhost:18090/api/v1/{readkey}/query 113 | Content-Type: application/json 114 | 115 | { 116 | "list": [ 117 | { 118 | "cmd": "select * from foo1" 119 | }, 120 | { 121 | "cmd": "select * from foo2" 122 | } 123 | ] 124 | } 125 | ``` 126 | 127 | **(6)query asynchronously log** 128 | ``` 129 | POST http://localhost:18090/api/v1/{readkey}/executelog 130 | Content-Type: application/json 131 | 132 | { 133 | "seqid": "{seqid}" 134 | } 135 | ``` 136 | 137 | ## Goal 138 | 139 | Serve small and medium-sized developers, reducing the cost of cloud services. 
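## Response Envelope

All of the HTTP JSON API endpoints above wrap their results in a common envelope (`code`, `message`, `data`), defined by `DataResp` in `cmd/http/handler`. Query-style endpoints (`query`, `tables`, `rows`) fill `data` with the column names, row values, column types, row count, and query cost carried by `DBValuesResp`. The sketch below shows a typical `query` response; the row values and cost are illustrative only:

```
{
  "code": 0,
  "message": "OK",
  "data": [
    {
      "columns": ["id", "title"],
      "values": [["415", "romantic-swell"]],
      "types": ["text", "text"],
      "count": 1,
      "cost": 12.3
    }
  ]
}
```

`createdb` and `uploaddb` return `data` as `{"readkey": "...", "writekey": "..."}`, while `execute` returns the redo-log sequence ID, a message, and a numeric status (`seqid`, `message`, `status`).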
140 | 141 | ## Quotation 142 | [1] https://github.com/rqlite/rqlite 143 | [2] https://github.com/psanford/sqlite3vfs 144 | [3] https://github.com/mattn/go-sqlite3 145 | [4] https://github.com/kahing/goofys 146 | [5] https://github.com/turbobytes/infreqdb 147 | [6] https://github.com/rclone/rclone 148 | [7] https://ieeexplore.ieee.org/abstract/document/9101371 149 | [8] https://dl.acm.org/doi/abs/10.1145/1376616.1376645 150 | [9] https://github.com/nalgeon/redka -------------------------------------------------------------------------------- /arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linkxzhou/LessDB/eab3e2fd53e8d4d5758cbf46a43084bb98958a8e/arch.png -------------------------------------------------------------------------------- /cmd/cli/cli.go: -------------------------------------------------------------------------------- 1 | package main 2 | -------------------------------------------------------------------------------- /cmd/http/client/client.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "github.com/linkxzhou/LessDB/internal/s3" 5 | "github.com/linkxzhou/LessDB/internal/sqlite3vfs" 6 | "github.com/linkxzhou/LessDB/internal/vfsextend" 7 | 8 | "fmt" 9 | "os" 10 | ) 11 | 12 | var s3client *s3.S3Client 13 | 14 | func init() { 15 | s3client = s3.DefaultS3Client() 16 | if s3client == nil { 17 | panic("NewS3Client failed!") 18 | } 19 | 20 | var vfs = &vfsextend.HttpVFS{ 21 | CacheHandler: vfsextend.NewDiskCache( 22 | func(fileName string) (*os.File, error) { 23 | return os.OpenFile(fmt.Sprintf("vfscache_%v", fileName), 24 | os.O_RDWR|os.O_CREATE, 0644) 25 | }, 26 | vfsextend.DefaultNoCacheSize), 27 | URIHandler: s3.S3URIHandler{Client: s3client}, 28 | } 29 | if err := sqlite3vfs.RegisterVFS("httpvfs", vfs); err != nil { 30 | panic("HttpVFS err: " + err.Error()) 31 | } 32 | } 33 | 34 | func S3() *s3.S3Client { 35 | return s3client 36 | } 37 | -------------------------------------------------------------------------------- /cmd/http/client/db.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | 7 | "github.com/labstack/echo/v4" 8 | "github.com/linkxzhou/LessDB/internal/utils" 9 | 10 | "database/sql" 11 | "strings" 12 | "sync" 13 | 14 | _ "github.com/mattn/go-sqlite3" 15 | ) 16 | 17 | var vfsCache sync.Map 18 | 19 | // GetVFSDB get httpvfs sqlite3 file 20 | func GetVFSDB(dbName string) (*sql.DB, string, error) { 21 | uri, err := s3client.GetFileLink(dbName) 22 | if err != nil { 23 | return nil, uri, err 24 | } 25 | db, err := sql.Open("sqlite3", fmt.Sprintf("%v?vfs=httpvfs&mode=rw", dbName)) 26 | return db, uri, err 27 | } 28 | 29 | // QuerySQLWithHTTPVFS query sql on httpvfs 30 | func QuerySQLWithHTTPVFS(c echo.Context, db *sql.DB, cmd SQLExecuteCommandArgs) (interface{}, 31 | interface{}, interface{}, int, error) { 32 | rows, err := db.Query(cmd.CMD, cmd.Args...) 
33 | if err != nil { 34 | c.Logger().Error("Query err: ", err) 35 | return nil, nil, nil, 0, err 36 | } 37 | 38 | var columns []string 39 | var values []interface{} 40 | 41 | cols, err := rows.Columns() 42 | if err != nil { 43 | c.Logger().Error("Columns err: ", err) 44 | return nil, nil, nil, 0, err 45 | } 46 | 47 | for _, column := range cols { 48 | columns = append(columns, column) 49 | } 50 | 51 | types, err := rows.ColumnTypes() 52 | if err != nil { 53 | c.Logger().Error("ColumnTypes err: ", err) 54 | return nil, nil, nil, 0, err 55 | } 56 | xTypes := make([]string, len(types)) 57 | for i := range types { 58 | xTypes[i] = strings.ToLower(types[i].DatabaseTypeName()) 59 | } 60 | 61 | for rows.Next() { 62 | rows.Columns() 63 | columns := make([]*string, len(cols)) 64 | columnPointers := make([]interface{}, len(cols)) 65 | for i := range columns { 66 | columnPointers[i] = &columns[i] 67 | } 68 | 69 | err = rows.Scan(columnPointers...) 70 | if err != nil { 71 | c.Logger().Error("Scan err: ", err) 72 | return columns, nil, xTypes, 0, err 73 | } 74 | 75 | names := make([]string, 0, len(columns)) 76 | for _, col := range columns { 77 | if col == nil { 78 | names = append(names, "NULL") 79 | } else { 80 | names = append(names, *col) 81 | } 82 | } 83 | values = append(values, names) 84 | } 85 | 86 | if err = rows.Close(); err != nil { 87 | c.Logger().Error("Close err: ", err) 88 | return nil, nil, nil, 0, err 89 | } 90 | 91 | return columns, values, xTypes, len(values), nil 92 | } 93 | 94 | // ExecuteSQLWithHTTPVFS execute sql on httpvfs 95 | func ExecuteSQLWithHTTPVFS(c echo.Context, db *sql.DB, cmd SQLExecuteCommandArgs) error { 96 | if cmd.CMD == "" { 97 | return errors.New("invalid sql") 98 | } 99 | 100 | result, err := db.Exec(cmd.CMD, cmd.Args...) 101 | if err != nil { 102 | c.Logger().Error("Execute err: ", err) 103 | return err 104 | } 105 | 106 | lastInsertId, _ := result.LastInsertId() 107 | rowsAffected, _ := result.RowsAffected() 108 | c.Logger().Info("lastInsertId: ", lastInsertId, ", rowsAffected: ", rowsAffected) 109 | return nil 110 | } 111 | 112 | // GetFileDB get local sqlite3 file 113 | func GetFileDB(dbName string) (*sql.DB, error) { 114 | db, err := sql.Open("sqlite3", dbName) 115 | return db, err 116 | } 117 | 118 | type SQLExecuteCommandArgs struct { 119 | CMD string `json:"cmd"` 120 | Args []interface{} `json:"args"` 121 | } 122 | 123 | // ExecuteSQLWithFile execute sql on local sqlite3 124 | func ExecuteSQLWithFile(c echo.Context, db *sql.DB, sqlList []SQLExecuteCommandArgs) error { 125 | for _, sqlv := range sqlList { 126 | c.Logger().Info("CMD: ", sqlv.CMD, ", sqlv.Args: ", sqlv.Args) 127 | _, err := db.Exec(sqlv.CMD, sqlv.Args...) 
128 | if err != nil { 129 | c.Logger().Error("Execute err: ", err, ", sqlv: ", sqlv) 130 | return err 131 | } 132 | } 133 | return nil 134 | } 135 | 136 | // Create system table sql template 137 | const systemDBName = "__LESSDBSYSTEM__" 138 | 139 | var systemInitSQLTemplate = fmt.Sprintf(`CREATE TABLE %v ( 140 | name TEXT PRIMARY KEY UNIQUE NOT NULL, 141 | value TEXT NOT NULL, 142 | value_int INTEGER DEFAULT 0, 143 | create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP, 144 | update_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP 145 | ); 146 | INSERT INTO %v (name, value) 147 | VALUES ('__version', '1'), 148 | ('__lessdbversion', ?), 149 | ('__readkey', ?), 150 | ('__writekey', ?); 151 | `, systemDBName, systemDBName) 152 | 153 | // SysTableInit system table init 154 | func SysTableInit(c echo.Context, db *sql.DB, readKey, writeKey string) error { 155 | _, err := db.Exec(systemInitSQLTemplate, utils.VERSION, readKey, writeKey) 156 | return err 157 | } 158 | 159 | // SysTableQuerySQL get system table sql 160 | func SysTableQuerySQL() string { 161 | return fmt.Sprintf(`SELECT name, value, value_int FROM %v where name = ?`, systemDBName) 162 | } 163 | 164 | func SysTableInsertStatus(c echo.Context, db *sql.DB, status int, name, value string) error { 165 | return ExecuteSQLWithFile(c, db, []SQLExecuteCommandArgs{ 166 | SQLExecuteCommandArgs{ 167 | CMD: fmt.Sprintf(`INSERT INTO %v(value_int, value, name) values(?, ?, ?)`, systemDBName), 168 | Args: []interface{}{status, value, name}, 169 | }, 170 | }) 171 | } 172 | 173 | func SysTableUpdateStatus(c echo.Context, db *sql.DB, status int, name, value string) error { 174 | return ExecuteSQLWithFile(c, db, []SQLExecuteCommandArgs{ 175 | SQLExecuteCommandArgs{ 176 | CMD: fmt.Sprintf(`UPDATE %v SET value_int = ?, value = ? 
where name = ?`, systemDBName), 177 | Args: []interface{}{status, value, name}, 178 | }, 179 | }) 180 | } 181 | -------------------------------------------------------------------------------- /cmd/http/handler/create.go: -------------------------------------------------------------------------------- 1 | package handler 2 | 3 | import ( 4 | "github.com/labstack/echo/v4" 5 | "github.com/linkxzhou/LessDB/cmd/http/client" 6 | "github.com/linkxzhou/LessDB/internal/utils" 7 | 8 | "context" 9 | "io" 10 | "net/http" 11 | "os" 12 | ) 13 | 14 | type AuthResp struct { 15 | ReadKey string `json:"readkey"` 16 | WriteKey string `json:"writekey"` 17 | } 18 | 19 | // UploadDB for upload sqlite3 file and to S3 20 | func UploadDB(c echo.Context) error { 21 | // Source 22 | file, err := c.FormFile("file") 23 | if err != nil { 24 | c.Logger().Error("FormFile err: ", err) 25 | return newBadRequestResp(c, err) 26 | } 27 | srcFile, err := file.Open() 28 | if err != nil { 29 | c.Logger().Error("OpenFile err: ", err) 30 | return newBadRequestResp(c, err) 31 | } 32 | defer srcFile.Close() 33 | 34 | // Destination 35 | readKey, dbName, err := utils.NewRandomName() 36 | if err != nil { 37 | c.Logger().Error("NewRandomName err: ", err) 38 | return newBadRequestResp(c, err) 39 | } 40 | 41 | dstFile, err := os.Create(dbName) 42 | if err != nil { 43 | c.Logger().Error("Failed to create local file err: ", err) 44 | return newBadRequestResp(c, err) 45 | } 46 | defer func() { 47 | dstFile.Close() 48 | // Remove tempfile 49 | os.Remove(dbName) 50 | }() 51 | 52 | if _, err = io.Copy(dstFile, srcFile); err != nil { 53 | c.Logger().Error("Failed to save file locally err: ", err) 54 | return newBadRequestResp(c, err) 55 | } 56 | 57 | db, err := client.GetFileDB(dbName) 58 | if err != nil { 59 | c.Logger().Error("sql.Open err: ", err) 60 | return newBadRequestResp(c, err) 61 | } 62 | defer db.Close() 63 | 64 | writeKey, _, err := utils.NewRandomKey() 65 | if err != nil { 66 | c.Logger().Error("NewRandomKey err: ", err) 67 | return newBadRequestResp(c, err) 68 | } 69 | 70 | err = client.SysTableInit(c, db, readKey, writeKey) 71 | if err != nil { 72 | c.Logger().Error("InitSQL err: ", err) 73 | return newBadRequestResp(c, err) 74 | } 75 | 76 | // Seek to start and upload S3 77 | dstFile.Seek(0, io.SeekStart) 78 | c.Logger().Info("S3Client: ", client.S3().String(dbName)) 79 | err = client.S3().Upload(context.TODO(), dbName, dstFile) 80 | if err != nil { 81 | c.Logger().Error("S3 UploadFile err: ", err) 82 | return newBadRequestResp(c, err) 83 | } 84 | 85 | return c.JSON(http.StatusOK, newOKResp(AuthResp{ 86 | ReadKey: readKey, 87 | WriteKey: writeKey, 88 | })) 89 | } 90 | 91 | type CMDListParams struct { 92 | List []client.SQLExecuteCommandArgs `json:"list"` 93 | } 94 | 95 | // CreateDB for use sql create sqlite3 file 96 | func CreateDB(c echo.Context) error { 97 | ep := new(CMDListParams) 98 | if err := c.Bind(ep); err != nil || ep.List == nil || len(ep.List) < 0 { 99 | return c.String(http.StatusBadRequest, "bad request") 100 | } 101 | 102 | readKey, dbName, err := utils.NewRandomName() 103 | if err != nil { 104 | c.Logger().Error("NewRandomName err: ", err) 105 | return newBadRequestResp(c, err) 106 | } 107 | 108 | db, err := client.GetFileDB(dbName) 109 | if err != nil { 110 | c.Logger().Error("sql.Open err: ", err) 111 | return newBadRequestResp(c, err) 112 | } 113 | defer db.Close() 114 | 115 | writeKey, _, err := utils.NewRandomKey() 116 | if err != nil { 117 | c.Logger().Error("NewRandomKey err: ", err) 118 | return 
newBadRequestResp(c, err) 119 | } 120 | 121 | err = client.SysTableInit(c, db, readKey, writeKey) 122 | if err != nil { 123 | c.Logger().Error("InitSQL err: ", err) 124 | return newBadRequestResp(c, err) 125 | } 126 | 127 | err = client.ExecuteSQLWithFile(c, db, ep.List) 128 | if err != nil { 129 | c.Logger().Error("ExecuteSQL err: ", err) 130 | return newBadRequestResp(c, err) 131 | } 132 | 133 | dbFile, err := os.Open(dbName) 134 | if err != nil { 135 | c.Logger().Error("OpenFile err: ", err) 136 | return newBadRequestResp(c, err) 137 | } 138 | defer func() { 139 | dbFile.Close() 140 | // Remove tempfile 141 | os.Remove(dbName) 142 | }() 143 | 144 | c.Logger().Info("S3Client: ", client.S3().String(dbName)) 145 | err = client.S3().Upload(context.TODO(), dbName, dbFile) 146 | if err != nil { 147 | c.Logger().Error("S3 UploadFile err: ", err) 148 | return newBadRequestResp(c, err) 149 | } 150 | 151 | return c.JSON(http.StatusOK, newOKResp(AuthResp{ 152 | ReadKey: readKey, 153 | WriteKey: writeKey, 154 | })) 155 | } 156 | -------------------------------------------------------------------------------- /cmd/http/handler/execute.go: -------------------------------------------------------------------------------- 1 | package handler 2 | 3 | import ( 4 | "github.com/linkxzhou/LessDB/cmd/http/client" 5 | "github.com/linkxzhou/LessDB/internal/utils" 6 | 7 | "github.com/labstack/echo/v4" 8 | 9 | "context" 10 | "encoding/json" 11 | "errors" 12 | "fmt" 13 | "net/http" 14 | "strings" 15 | "time" 16 | 17 | _ "github.com/mattn/go-sqlite3" 18 | ) 19 | 20 | type ( 21 | ExecuteDBParams struct { 22 | QueryParams 23 | CMDListParams 24 | WriteKey string `json:"writekey" form:"writekey" query:"writekey" param:"writekey"` 25 | } 26 | 27 | ExecuteLogParams struct { 28 | QueryParams 29 | SeqID string `json:"seqid" form:"seqid" query:"seqid" param:"seqid"` 30 | } 31 | 32 | UploadS3Redolog struct { 33 | DBName string `json:"dbname"` 34 | ReadKey string `json:"readkey"` 35 | WriteKey string `json:"writekey"` 36 | NanoTimestamp int64 `json:"nanotimestamp"` 37 | List []client.SQLExecuteCommandArgs `json:"list"` 38 | } 39 | 40 | ResultRedolog struct { 41 | SeqID string `json:"seqid"` 42 | Message string `json:"message"` 43 | Status int `json:"status"` 44 | } 45 | ) 46 | 47 | const ( 48 | ErrNoAuthWrite = "attempt to write a readonly database" 49 | // Execute sql has status on sqlite3 50 | ExecStatusPending int = iota 51 | ExecStatusOK 52 | ExecStatusFailed 53 | ExecStatusPartialFailed 54 | ) 55 | 56 | var tiggerURL = utils.GetEnviron("LESSDB_TIGGERURL") 57 | 58 | // ExecuteDB for exec sql on sqlite3 file 59 | func ExecuteDB(c echo.Context) error { 60 | edbp := new(ExecuteDBParams) 61 | if err := c.Bind(edbp); err != nil || edbp.List == nil || len(edbp.List) < 0 { 62 | return c.String(http.StatusBadRequest, "bad request") 63 | } 64 | 65 | c.Logger().Info("ExecuteDB edbp: ", edbp) 66 | 67 | // Check writeKey 68 | if _, writeKeyOK := utils.VerifyKey(edbp.WriteKey); !writeKeyOK { 69 | return c.JSON(http.StatusOK, newNoWriteAuthResp()) 70 | } 71 | 72 | readKey := edbp.ReadKey 73 | dbName, authOK := utils.VerifyKey(readKey) 74 | if !authOK || dbName == "" { 75 | return c.JSON(http.StatusOK, newNoAuthResp()) 76 | } 77 | 78 | db, uri, err := client.GetVFSDB(dbName) 79 | if err != nil { 80 | c.Logger().Error("getVFSDB err: ", err) 81 | return newBadRequestResp(c, err) 82 | } 83 | defer db.Close() 84 | 85 | c.Logger().Info("S3 GetFileLink: ", uri, ", dbName: ", dbName) 86 | 87 | var s3key string 88 | var execMessage string 89 | 
var execStatus int = ExecStatusPending 90 | 91 | // Upload redolog to S3 92 | if tiggerURL == utils.EmptyNil { 93 | var redologs []client.SQLExecuteCommandArgs 94 | for _, cmd := range edbp.List { 95 | if err := client.ExecuteSQLWithHTTPVFS(c, db, cmd); err != nil { 96 | if !strings.Contains(err.Error(), ErrNoAuthWrite) { 97 | c.Logger().Error("ExecuteSQL err: ", err) 98 | return newBadRequestResp(c, err) 99 | } 100 | redologs = append(redologs, cmd) 101 | } 102 | } 103 | 104 | nanoTimestamp := time.Now().UnixNano() 105 | uploadS3Redolog := UploadS3Redolog{ 106 | DBName: dbName, 107 | NanoTimestamp: nanoTimestamp, 108 | ReadKey: readKey, 109 | WriteKey: edbp.WriteKey, 110 | List: redologs, 111 | } 112 | jsons, err := json.Marshal(uploadS3Redolog) 113 | if err != nil { 114 | c.Logger().Error("Marshal err: ", err) 115 | return newBadRequestResp(c, err) 116 | } 117 | 118 | s3key = fmt.Sprintf("%v-%v.redolog", readKey, nanoTimestamp) 119 | err = client.S3().UploadString(context.TODO(), s3key, jsons) 120 | if err != nil { 121 | c.Logger().Error("S3 UploadFile err: ", err) 122 | return newBadRequestResp(c, err) 123 | } 124 | 125 | execMessage = "Execute pending" 126 | execStatus = ExecStatusPending 127 | } else { 128 | s3key = fmt.Sprintf("%v-%v.sync", readKey, time.Now().UnixNano()) 129 | if err := requestTigger(dbName, TiggerExecuteCommandArgs{ 130 | DBName: dbName, 131 | List: edbp.List, 132 | S3Key: s3key, 133 | }); err != nil { 134 | c.Logger().Error("Sync requestTigger err: ", err) 135 | return newBadRequestResp(c, err) 136 | } 137 | 138 | execMessage = "Execute finished" 139 | execStatus = ExecStatusOK 140 | } 141 | 142 | return c.JSON(http.StatusOK, newOKResp(ResultRedolog{ 143 | SeqID: s3key, 144 | Message: execMessage, 145 | Status: execStatus, 146 | })) 147 | } 148 | 149 | func requestTigger(dbName string, args TiggerExecuteCommandArgs) error { 150 | resp := DataResp{Code: -1} 151 | err := httpRequest(tiggerURL, TiggerReq{ 152 | TiggerExecuteCommandArgs: args, 153 | Sync: true, 154 | }, &resp) 155 | if err != nil { 156 | return errors.New("Request failed, err: " + err.Error()) 157 | } 158 | 159 | if resp.Code != 0 { 160 | return errors.New("Request failed, err: " + resp.Message) 161 | } 162 | 163 | return nil 164 | } 165 | 166 | // ExecuteLog for query redolog on sqlite3 file 167 | func ExecuteLog(c echo.Context) error { 168 | elp := new(ExecuteLogParams) 169 | if err := c.Bind(elp); err != nil { 170 | return c.String(http.StatusBadRequest, "bad request") 171 | } 172 | 173 | c.Logger().Info("ExecuteLogParams elp: ", elp) 174 | 175 | readKey := elp.ReadKey 176 | dbName, authOK := utils.VerifyKey(readKey) 177 | if !authOK || dbName == "" { 178 | return c.JSON(http.StatusOK, newNoAuthResp()) 179 | } 180 | 181 | db, uri, err := client.GetVFSDB(dbName) 182 | if err != nil { 183 | c.Logger().Error("getVFSDB err: ", err) 184 | return newBadRequestResp(c, err) 185 | } 186 | defer db.Close() 187 | 188 | c.Logger().Info("S3 GetFileLink: ", uri, ", dbName: ", dbName) 189 | _, values, _, _, err := client.QuerySQLWithHTTPVFS(c, db, 190 | client.SQLExecuteCommandArgs{ 191 | CMD: client.SysTableQuerySQL(), 192 | Args: []interface{}{elp.SeqID}, 193 | }) 194 | if err != nil { 195 | c.Logger().Error("ExecuteSQL err: ", err) 196 | return newBadRequestResp(c, err) 197 | } 198 | 199 | // TODO: fix values to redolog 200 | return c.JSON(http.StatusOK, newOKResp(values)) 201 | } 202 | 203 | // QueryDB for query data on sqlite3 file 204 | func QueryDB(c echo.Context) error { 205 | edbp := new(ExecuteDBParams) 
206 | if err := c.Bind(edbp); err != nil || edbp.List == nil || len(edbp.List) < 0 { 207 | return c.String(http.StatusBadRequest, "bad request") 208 | } 209 | 210 | c.Logger().Info("QueryDB edbp: ", edbp) 211 | 212 | var result []DBValuesResp 213 | readKey := edbp.ReadKey 214 | dbName, authOK := utils.VerifyKey(readKey) 215 | if !authOK || dbName == "" { 216 | return c.JSON(http.StatusOK, newNoAuthResp()) 217 | } 218 | 219 | db, uri, err := client.GetVFSDB(dbName) 220 | if err != nil { 221 | c.Logger().Error("getVFSDB err: ", err) 222 | return newBadRequestResp(c, err) 223 | } 224 | defer db.Close() 225 | 226 | c.Logger().Info("S3 GetFileLink: ", uri, ", dbName: ", dbName) 227 | for _, cmd := range edbp.List { 228 | startTime := time.Now() 229 | columns, values, types, count, err := 230 | client.QuerySQLWithHTTPVFS(c, db, cmd) 231 | if err != nil { 232 | c.Logger().Error("ExecuteSQL err: ", err) 233 | return newBadRequestResp(c, err) 234 | } 235 | result = append(result, DBValuesResp{ 236 | Columns: columns, 237 | Values: values, 238 | Types: types, 239 | Count: count, 240 | TimeCost: float64(time.Since(startTime).Microseconds()) / 1e3, 241 | }) 242 | } 243 | 244 | return c.JSON(http.StatusOK, newOKResp(result)) 245 | } 246 | -------------------------------------------------------------------------------- /cmd/http/handler/tables.go: -------------------------------------------------------------------------------- 1 | package handler 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "time" 7 | 8 | "github.com/labstack/echo/v4" 9 | "github.com/linkxzhou/LessDB/cmd/http/client" 10 | "github.com/linkxzhou/LessDB/internal/utils" 11 | ) 12 | 13 | const defaultLimitSize int64 = 100 14 | 15 | type ( 16 | QueryParams struct { 17 | ReadKey string `json:"ReadKey" form:"ReadKey" query:"ReadKey" param:"ReadKey"` 18 | TableName string `json:"tableName" form:"tableName" query:"tableName" param:"tableName"` 19 | Limit int64 `json:"limit" form:"limit" query:"limit" param:"limit"` 20 | Offset int64 `json:"offset" form:"offset" query:"offset" param:"offset"` 21 | } 22 | 23 | DBValuesResp struct { 24 | Columns interface{} `json:"columns"` 25 | Values interface{} `json:"values"` 26 | Types interface{} `json:"types"` 27 | Count int `json:"count"` 28 | TimeCost float64 `json:"cost"` 29 | } 30 | 31 | DataResp struct { 32 | Code int `json:"code"` 33 | Message string `json:"message"` 34 | Data interface{} `json:"data"` 35 | } 36 | ) 37 | 38 | func GetTables(c echo.Context) (err error) { 39 | q := new(QueryParams) 40 | if c.Bind(q); err != nil { 41 | return c.String(http.StatusBadRequest, "bad request") 42 | } 43 | 44 | readKey := q.ReadKey 45 | dbName, authOK := utils.VerifyKey(readKey) 46 | if !authOK || dbName == "" { 47 | return c.JSON(http.StatusOK, newNoAuthResp()) 48 | } 49 | 50 | limit := defaultLimitSize 51 | if q.Limit > 0 { 52 | limit = q.Limit 53 | } 54 | 55 | c.Logger().Info("GetTables q: ", q) 56 | 57 | startTime := time.Now() 58 | db, uri, err := client.GetVFSDB(dbName) 59 | if err != nil { 60 | c.Logger().Error("getVFSDB err: ", err) 61 | return newBadRequestResp(c, err) 62 | } 63 | defer db.Close() 64 | 65 | c.Logger().Info("S3 GetFileLink: ", uri, ", dbName: ", dbName) 66 | columns, values, types, count, err := client.QuerySQLWithHTTPVFS(c, db, 67 | client.SQLExecuteCommandArgs{ 68 | CMD: `SELECT name FROM sqlite_master 69 | WHERE type='table' limit ? 
offset ?`, 70 | Args: []interface{}{limit, q.Offset * limit}, 71 | }) 72 | 73 | if err != nil { 74 | return c.String(http.StatusBadRequest, "query err: "+err.Error()) 75 | } 76 | 77 | return c.JSON(http.StatusOK, newOKResp(DBValuesResp{ 78 | Columns: columns, 79 | Values: values, 80 | Types: types, 81 | Count: count, 82 | TimeCost: float64(time.Since(startTime).Microseconds()) / 1e3, 83 | })) 84 | } 85 | 86 | func GetRows(c echo.Context) (err error) { 87 | q := new(QueryParams) 88 | if c.Bind(q); err != nil { 89 | return c.String(http.StatusBadRequest, "bad request") 90 | } 91 | 92 | readKey := q.ReadKey 93 | dbName, authOK := utils.VerifyKey(readKey) 94 | if !authOK || dbName == "" { 95 | return c.JSON(http.StatusOK, newNoAuthResp()) 96 | } 97 | 98 | limit := defaultLimitSize 99 | if q.Limit > 0 { 100 | limit = q.Limit 101 | } 102 | 103 | c.Logger().Info("GetRows q: ", q) 104 | 105 | startTime := time.Now() 106 | db, uri, err := client.GetVFSDB(dbName) 107 | if err != nil { 108 | c.Logger().Error("getVFSDB err: ", err) 109 | return newBadRequestResp(c, err) 110 | } 111 | defer db.Close() 112 | 113 | c.Logger().Info("S3 GetFileLink: ", uri, ", dbName: ", dbName) 114 | columns, values, types, count, err := client.QuerySQLWithHTTPVFS(c, db, 115 | client.SQLExecuteCommandArgs{ 116 | CMD: fmt.Sprintf(`SELECT * FROM %v limit ? offset ?`, q.TableName), 117 | Args: []interface{}{limit, q.Offset * limit}, 118 | }) 119 | if err != nil { 120 | c.Logger().Info("ExecuteSQL err: ", err) 121 | return newBadRequestResp(c, err) 122 | } 123 | 124 | return c.JSON(http.StatusOK, newOKResp(DBValuesResp{ 125 | Columns: columns, 126 | Values: values, 127 | Types: types, 128 | Count: count, 129 | TimeCost: float64(time.Since(startTime).Microseconds()) / 1e3, 130 | })) 131 | } 132 | -------------------------------------------------------------------------------- /cmd/http/handler/tigger.go: -------------------------------------------------------------------------------- 1 | package handler 2 | 3 | import ( 4 | "github.com/labstack/echo/v4" 5 | "github.com/linkxzhou/LessDB/cmd/http/client" 6 | 7 | "context" 8 | "encoding/json" 9 | "errors" 10 | "fmt" 11 | "net/http" 12 | "os" 13 | "time" 14 | ) 15 | 16 | type ( 17 | TiggerReq struct { 18 | Events []TiggerEvents `json:"events"` 19 | Sync bool `json:"sync"` 20 | TiggerExecuteCommandArgs 21 | } 22 | 23 | TiggerEvents struct { 24 | S3Key string `json:"s3key"` 25 | } 26 | 27 | TiggerExecuteCommandArgs struct { 28 | DBName string `json:"dbname"` 29 | List []client.SQLExecuteCommandArgs `json:"list"` 30 | S3Key string `json:"s3key"` 31 | } 32 | ) 33 | 34 | func TiggerS3Events(c echo.Context) error { 35 | req := new(TiggerReq) 36 | if err := c.Bind(req); err != nil { 37 | return newBadRequestResp(c, "bad request") 38 | } 39 | 40 | var commandList []TiggerExecuteCommandArgs 41 | // Download file from S3 42 | for _, event := range req.Events { 43 | redologStr, err := client.S3().DownloadString(context.TODO(), event.S3Key) 44 | if err != nil { 45 | c.Logger().Error("Download err: ", err) 46 | return newBadRequestResp(c, err) 47 | } 48 | 49 | var redolog UploadS3Redolog 50 | err = json.Unmarshal(redologStr, &redolog) 51 | if err != nil { 52 | c.Logger().Error("Unmarshal err: ", err) 53 | return newBadRequestResp(c, err) 54 | } 55 | 56 | commandList = append(commandList, TiggerExecuteCommandArgs{ 57 | DBName: redolog.DBName, 58 | List: redolog.List, 59 | S3Key: event.S3Key, 60 | }) 61 | } 62 | 63 | if req.List != nil { 64 | commandList = append(commandList, 
TiggerExecuteCommandArgs{ 65 | DBName: req.DBName, 66 | List: req.List, 67 | S3Key: fmt.Sprintf("custom-%v", time.Now().UnixNano()), 68 | }) 69 | } 70 | 71 | // Execute SQL on sqlite3 file 72 | for _, command := range commandList { 73 | lessdbName := fmt.Sprintf("%v.lessdb", command.DBName) 74 | // exist db file 75 | if _, err := os.Stat(lessdbName); os.IsNotExist(err) { 76 | dbFile, err := os.Create(lessdbName) 77 | if err != nil { 78 | c.Logger().Error("OpenFile err: ", err) 79 | return newBadRequestResp(c, err) 80 | } 81 | defer dbFile.Close() 82 | 83 | err = client.S3().Download(context.TODO(), command.DBName, dbFile) 84 | if err != nil { 85 | c.Logger().Error("Download err: ", err) 86 | return newBadRequestResp(c, err) 87 | } 88 | } 89 | 90 | db, err := client.GetFileDB(lessdbName) 91 | if err != nil { 92 | c.Logger().Error("sql.Open err: ", err) 93 | return newBadRequestResp(c, err) 94 | } 95 | defer db.Close() 96 | 97 | execStatus := ExecStatusPending 98 | execMessage := "Pending" 99 | // Insert into redolog result to system table 100 | err = client.SysTableInsertStatus(c, db, execStatus, command.S3Key, execMessage) 101 | if err != nil { 102 | c.Logger().Error("SysTableInsertStatus err: ", err) 103 | return newBadRequestResp(c, err) 104 | } 105 | 106 | // Execute redolog list on sqlite3 file 107 | execStatus = ExecStatusOK 108 | execMessage = "OK" 109 | execErr := client.ExecuteSQLWithFile(c, db, command.List) 110 | if execErr != nil { 111 | c.Logger().Error("ExecuteSQL err: ", execErr) 112 | execStatus = ExecStatusFailed 113 | execMessage = execErr.Error() 114 | } 115 | 116 | // Update redolog error to system table 117 | err = client.SysTableUpdateStatus(c, db, execStatus, command.S3Key, execMessage) 118 | if err != nil { 119 | c.Logger().Error("SysTableUpdateStatus err: ", err) 120 | return newBadRequestResp(c, err) 121 | } 122 | 123 | if execErr != nil && req.Sync { 124 | return c.JSON(http.StatusOK, newFailResp(-101, execErr.Error())) 125 | } 126 | } 127 | 128 | // Upload result to S3 129 | for _, command := range commandList { 130 | lessdbName := fmt.Sprintf("%v.lessdb", command.DBName) 131 | if _, err := os.Stat(lessdbName); os.IsNotExist(err) { 132 | return errors.New("DB file not found") 133 | } 134 | 135 | dbFile, err := os.Open(lessdbName) 136 | if err != nil { 137 | c.Logger().Error("OpenFile err: ", err) 138 | return newBadRequestResp(c, err) 139 | } 140 | defer dbFile.Close() 141 | 142 | c.Logger().Info("S3Client Upload: ", client.S3().String(command.DBName)) 143 | err = client.S3().Upload(context.TODO(), command.DBName, dbFile) 144 | if err != nil { 145 | c.Logger().Error("S3 UploadFile err: ", err) 146 | return newBadRequestResp(c, err) 147 | } 148 | } 149 | 150 | return c.JSON(http.StatusOK, newOKResp(nil)) 151 | } 152 | -------------------------------------------------------------------------------- /cmd/http/handler/utils.go: -------------------------------------------------------------------------------- 1 | package handler 2 | 3 | import ( 4 | "github.com/labstack/echo/v4" 5 | 6 | "bytes" 7 | "encoding/json" 8 | "io/ioutil" 9 | "net/http" 10 | "errors" 11 | ) 12 | 13 | func newNoAuthResp() DataResp { 14 | return DataResp{ 15 | Code: -999, 16 | Message: "No Auth", 17 | } 18 | } 19 | 20 | func newNoWriteAuthResp() DataResp { 21 | return DataResp{ 22 | Code: -998, 23 | Message: "No Write Auth", 24 | } 25 | } 26 | 27 | func newOKResp(data interface{}) DataResp { 28 | return DataResp{ 29 | Code: 0, 30 | Message: "OK", 31 | Data: data, 32 | } 33 | } 34 | 35 | func 
newFailResp(code int, message string) DataResp { 36 | return DataResp{ 37 | Code: code, 38 | Message: message, 39 | } 40 | } 41 | 42 | func newBadRequestResp(c echo.Context, err interface{}) error { 43 | switch err.(type) { 44 | case string: 45 | return c.String(http.StatusBadRequest, err.(string)) 46 | case error: 47 | return c.String(http.StatusBadRequest, err.(error).Error()) 48 | } 49 | 50 | return c.String(http.StatusBadRequest, "bad request") 51 | } 52 | 53 | func httpRequest(url string, req interface{}, resp interface{}) (err error) { 54 | var data *bytes.Buffer 55 | var method = "GET" 56 | if req != nil { 57 | jsonData, err := json.Marshal(req) 58 | if err != nil { 59 | return err 60 | } 61 | data = bytes.NewBuffer(jsonData) 62 | method = "POST" 63 | } 64 | 65 | httpReq, err := http.NewRequest(method, url, data) 66 | if err != nil { 67 | return err 68 | } 69 | httpReq.Header.Set("Content-Type", "application/json") 70 | 71 | httpResp, err := http.DefaultClient.Do(httpReq) 72 | if err != nil { 73 | return err 74 | } 75 | defer httpResp.Body.Close() 76 | 77 | body, err := ioutil.ReadAll(httpResp.Body) 78 | if err != nil { 79 | return err 80 | } 81 | 82 | if httpResp.StatusCode != http.StatusOK { 83 | return errors.New(string(body)) 84 | } 85 | 86 | if err = json.Unmarshal(body, resp); err != nil { 87 | return err 88 | } 89 | 90 | return nil 91 | } -------------------------------------------------------------------------------- /cmd/http/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/labstack/echo/v4" 5 | "github.com/labstack/echo/v4/middleware" 6 | "github.com/labstack/gommon/log" 7 | "github.com/linkxzhou/LessDB/cmd/http/handler" 8 | "github.com/linkxzhou/LessDB/internal/utils" 9 | "github.com/prometheus/client_golang/prometheus/promhttp" 10 | 11 | "fmt" 12 | ) 13 | 14 | const ( 15 | prefixVersion = "/api/v1" 16 | defaultListen = ":9000" 17 | ) 18 | 19 | func withVersion(uri string) string { 20 | return fmt.Sprintf("%s%s", prefixVersion, uri) 21 | } 22 | 23 | // http://localhost:9000/api/v1/uploaddb 24 | // http://localhost:9000/api/v1/createdb 25 | // http://localhost:9000/api/v1/tigger/s3events 26 | // http://localhost:9000/api/v1/:ReadKey/tables 27 | // http://localhost:9000/api/v1/:ReadKey/tables/:tableName/rows 28 | // http://localhost:9000/api/v1/:ReadKey/execute 29 | // http://localhost:9000/api/v1/:ReadKey/query 30 | // http://localhost:9000/api/v1/:ReadKey/executelog 31 | func main() { 32 | e := echo.New() 33 | e.Use(middleware.CORSWithConfig(middleware.CORSConfig{ 34 | AllowOrigins: []string{"*"}, 35 | AllowHeaders: []string{echo.HeaderOrigin, echo.HeaderContentType, echo.HeaderAccept}, 36 | })) 37 | e.Logger.SetLevel(log.DEBUG) 38 | e.POST(withVersion("/uploaddb"), handler.UploadDB) 39 | e.POST(withVersion("/createdb"), handler.CreateDB) 40 | e.POST(withVersion("/tigger/s3events"), handler.TiggerS3Events) 41 | e.GET(withVersion("/:ReadKey/tables"), handler.GetTables) 42 | e.GET(withVersion("/:ReadKey/tables/:tableName/rows"), handler.GetRows) 43 | e.POST(withVersion("/:ReadKey/execute"), handler.ExecuteDB) 44 | e.POST(withVersion("/:ReadKey/executelog"), handler.ExecuteLog) 45 | e.POST(withVersion("/:ReadKey/query"), handler.QueryDB) 46 | e.GET("/metrics", echo.WrapHandler(promhttp.Handler())) 47 | 48 | listenSvr := utils.GetEnviron("LESSDB_LISTEN") 49 | if listenSvr == utils.EmptyNil { 50 | listenSvr = defaultListen 51 | } 52 | e.Logger.Fatal(e.Start(listenSvr)) 53 | } 54 | 
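The `cmd/http` service above is configured entirely through environment variables (see the `.env` template at the repository root). A minimal startup sketch follows; the S3 endpoint, bucket, and keys are placeholders, `LESSDB_LISTEN` falls back to `:9000` when unset, and `LESSDB_TIGGERURL` may be left empty, in which case write commands sent to `execute` are uploaded to S3 as redo logs instead of being forwarded to a trigger endpoint:

```
cd cmd/http && go build .
export S3Endpoint=https://s3.example.com    # placeholder S3-compatible endpoint
export S3Region=us-east-1
export S3AccessKey=YOUR_ACCESS_KEY
export S3SecretKey=YOUR_SECRET_KEY
export S3Bucket=lessdb-demo
export LESSDB_LISTEN=:9000
./http
curl http://localhost:9000/metrics          # Prometheus metrics route registered in main.go
```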
-------------------------------------------------------------------------------- /cmd/server/db.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/dolthub/go-mysql-server/sql" 7 | ) 8 | 9 | var ( 10 | ErrNoSQLiteConn = errors.New("could not retrieve SQLite connection") 11 | ErrNoInsertsAllowed = errors.New("table does not permit INSERTs") 12 | ErrNoCreateTableAllowed = errors.New("database does not permit creating tables") 13 | ErrCouldNotFindDatabase = errors.New("could not find database") 14 | ) 15 | 16 | func (db *Database) Name() string { 17 | return db.name 18 | } 19 | 20 | func (db *Database) GetTableInsensitive(ctx *sql.Context, tblName string) (sql.Table, bool, error) { 21 | return nil, false, errors.New("Not support") 22 | } 23 | 24 | func (db *Database) GetTableNames(ctx *sql.Context) ([]string, error) { 25 | return nil, ErrNoSQLiteConn 26 | } 27 | 28 | func (db *Database) CreateTable(ctx *sql.Context, name string, schema sql.Schema) error { 29 | return errors.New("Not support") 30 | } 31 | -------------------------------------------------------------------------------- /cmd/server/db_provider.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "sort" 6 | "strings" 7 | "sync" 8 | 9 | "github.com/dolthub/go-mysql-server/sql" 10 | ) 11 | 12 | type ( 13 | // Database is an implementation of a go-mysql-server database 14 | // backed by a SQLite database. 15 | Database struct { 16 | name string 17 | options DatabaseOptions 18 | } 19 | 20 | // DatabaseOptions are options for managing the SQLite backend 21 | DatabaseOptions struct { 22 | // PreventInserts will block table insertions 23 | PreventInserts bool 24 | // PreventCreateTable will block table creation 25 | PreventCreateTable bool 26 | } 27 | 28 | provider struct { 29 | mut sync.RWMutex 30 | databases map[string]*Database 31 | } 32 | ) 33 | 34 | var _ sql.DatabaseProvider = &provider{} 35 | var _ sql.MutableDatabaseProvider = &provider{} 36 | 37 | func NewProvider(dbs ...sql.Database) *provider { 38 | databases := make(map[string]*Database, len(dbs)) 39 | for _, db := range dbs { 40 | if v, ok := db.(*Database); !ok { 41 | continue 42 | } else { 43 | databases[strings.ToLower(db.Name())] = v 44 | } 45 | } 46 | 47 | return &provider{ 48 | databases: databases, 49 | } 50 | } 51 | 52 | func (p *provider) Database(ctx *sql.Context, name string) (sql.Database, error) { 53 | p.mut.RLock() 54 | defer p.mut.RUnlock() 55 | name = strings.ToLower(name) 56 | 57 | if db, ok := p.databases[name]; !ok { 58 | return nil, sql.ErrDatabaseNotFound.New() 59 | } else { 60 | return db, nil 61 | } 62 | } 63 | 64 | func (p *provider) HasDatabase(ctx *sql.Context, name string) bool { 65 | p.mut.RLock() 66 | defer p.mut.RUnlock() 67 | name = strings.ToLower(name) 68 | 69 | _, ok := p.databases[name] 70 | return ok 71 | } 72 | 73 | func (p *provider) AllDatabases(ctx *sql.Context) []sql.Database { 74 | p.mut.RLock() 75 | defer p.mut.RUnlock() 76 | 77 | all := make([]sql.Database, len(p.databases)) 78 | var i int 79 | for _, db := range p.databases { 80 | all[i] = db 81 | i++ 82 | } 83 | 84 | sort.Slice(all, func(i, j int) bool { 85 | return all[i].Name() < all[j].Name() 86 | }) 87 | 88 | return all 89 | } 90 | 91 | func (p *provider) CreateDatabase(ctx *sql.Context, name string) error { 92 | return errors.New("Not support") // TODO: fix 93 | } 94 | 95 | func (p *provider) 
DropDatabase(ctx *sql.Context, name string) error { 96 | return errors.New("Not support") // TODO: fix 97 | } 98 | -------------------------------------------------------------------------------- /cmd/server/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | sqle "github.com/dolthub/go-mysql-server" 5 | "github.com/dolthub/go-mysql-server/server" 6 | "github.com/dolthub/go-mysql-server/sql" 7 | "github.com/dolthub/go-mysql-server/sql/information_schema" 8 | "github.com/linkxzhou/LessDB/internal/utils" 9 | ) 10 | 11 | func main() { 12 | engine := sqle.NewDefault( 13 | sql.NewDatabaseProvider( 14 | information_schema.NewInformationSchemaDatabase(), 15 | )) 16 | 17 | listenSvr := utils.GetEnviron("LESSDB_LISTEN") 18 | if listenSvr == utils.EmptyNil { 19 | listenSvr = "localhost:3306" 20 | } 21 | 22 | config := server.Config{ 23 | Protocol: "tcp", 24 | Address: listenSvr, 25 | } 26 | 27 | svr, err := server.NewServer(config, engine, nil, nil) 28 | if err != nil { 29 | panic(err) 30 | } 31 | 32 | svr.Start() 33 | } 34 | -------------------------------------------------------------------------------- /cmd/server/readme.md: -------------------------------------------------------------------------------- 1 | # project 2 | https://github.com/mergestat/go-mysql-sqlite-server -------------------------------------------------------------------------------- /datasets/ai.md: -------------------------------------------------------------------------------- 1 | # 数据来源 2 | 3 | https://zhuanlan.zhihu.com/p/377746284 4 | https://cloud.tencent.com/developer/article/2018449 5 | 6 | # 数据展示 7 | 8 | https://github.com/coleifer/sqlite-web/tree/master 9 | https://datatables.net/ -------------------------------------------------------------------------------- /datasets/github.md: -------------------------------------------------------------------------------- 1 | # 数据来源 2 | 3 | https://www.tableau.com/zh-cn/blog/public-data-sets-102221 4 | 5 | # 测试数据 6 | 7 | https://www.sanford.io/demo.db -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/linkxzhou/LessDB 2 | 3 | go 1.22 4 | 5 | toolchain go1.22.1 6 | 7 | require ( 8 | github.com/aws/aws-sdk-go v1.49.9 9 | github.com/dolthub/go-mysql-server v0.18.1 10 | github.com/google/go-cmp v0.6.0 11 | github.com/google/uuid v1.6.0 12 | github.com/labstack/echo/v4 v4.11.4 13 | github.com/labstack/gommon v0.4.2 14 | github.com/mattn/go-sqlite3 v1.14.16 15 | github.com/prometheus/client_golang v1.19.0 16 | ) 17 | 18 | require ( 19 | github.com/beorn7/perks v1.0.1 // indirect 20 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 21 | github.com/dolthub/flatbuffers/v23 v23.3.3-dh.2 // indirect 22 | github.com/dolthub/go-icu-regex v0.0.0-20230524105445-af7e7991c97e // indirect 23 | github.com/dolthub/jsonpath v0.0.2-0.20240227200619-19675ab05c71 // indirect 24 | github.com/dolthub/vitess v0.0.0-20240404214255-c5a87fc7b325 // indirect 25 | github.com/go-kit/kit v0.10.0 // indirect 26 | github.com/golang-jwt/jwt v3.2.2+incompatible // indirect 27 | github.com/golang/protobuf v1.5.3 // indirect 28 | github.com/hashicorp/golang-lru v0.5.4 // indirect 29 | github.com/jmespath/go-jmespath v0.4.0 // indirect 30 | github.com/lestrrat-go/strftime v1.0.4 // indirect 31 | github.com/mattn/go-colorable v0.1.13 // indirect 32 | github.com/mattn/go-isatty v0.0.20 // indirect 33 | 
github.com/pkg/errors v0.9.1 // indirect 34 | github.com/pmezard/go-difflib v1.0.0 // indirect 35 | github.com/prometheus/client_model v0.5.0 // indirect 36 | github.com/prometheus/common v0.48.0 // indirect 37 | github.com/prometheus/procfs v0.12.0 // indirect 38 | github.com/shopspring/decimal v1.3.1 // indirect 39 | github.com/sirupsen/logrus v1.8.1 // indirect 40 | github.com/tetratelabs/wazero v1.1.0 // indirect 41 | github.com/valyala/bytebufferpool v1.0.0 // indirect 42 | github.com/valyala/fasttemplate v1.2.2 // indirect 43 | go.opentelemetry.io/otel v1.7.0 // indirect 44 | go.opentelemetry.io/otel/trace v1.7.0 // indirect 45 | golang.org/x/crypto v0.18.0 // indirect 46 | golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect 47 | golang.org/x/mod v0.12.0 // indirect 48 | golang.org/x/net v0.20.0 // indirect 49 | golang.org/x/sync v0.3.0 // indirect 50 | golang.org/x/sys v0.16.0 // indirect 51 | golang.org/x/text v0.14.0 // indirect 52 | golang.org/x/time v0.5.0 // indirect 53 | golang.org/x/tools v0.13.0 // indirect 54 | google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect 55 | google.golang.org/grpc v1.53.0 // indirect 56 | google.golang.org/protobuf v1.32.0 // indirect 57 | gopkg.in/src-d/go-errors.v1 v1.0.0 // indirect 58 | ) 59 | -------------------------------------------------------------------------------- /internal/prom/prometheus.go: -------------------------------------------------------------------------------- 1 | package prom 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | 6 | _ "github.com/prometheus/client_golang/prometheus/promhttp" 7 | 8 | "time" 9 | ) 10 | 11 | const ( 12 | RNameVFS = "vfs" 13 | 14 | TNameCacheGet = "cache_get" 15 | TNameCacheSize = "cache_size" 16 | TNameHTTPGet = "http_get" 17 | TNameHTTPSize = "http_size" 18 | 19 | CodeCacheMiss = "0" 20 | CodeCacheHit = "1" 21 | ) 22 | 23 | var ( 24 | rtcodeList = []string{"r", "t", "code"} 25 | rtcodeDurationList = []float64{30.0, 100.0, 200.0, 500.0, 1000.0, 3000.0, 5000.0, 10000.0} 26 | rtcodeBytesList = []float64{32, 128, 512, 1024, 4196, 10240, 102400, 1024000, 2048000} 27 | 28 | rtcodeSysCounts = prometheus.NewCounterVec( 29 | prometheus.CounterOpts{ 30 | Name: "sys_total", 31 | Help: "Number of request.", 32 | }, 33 | rtcodeList, 34 | ) 35 | 36 | rtcodeSysDurations = prometheus.NewHistogramVec( 37 | prometheus.HistogramOpts{ 38 | Name: "sys_durations_seconds", 39 | Help: "latency distributions.", 40 | Buckets: rtcodeDurationList, 41 | }, 42 | rtcodeList, 43 | ) 44 | 45 | rtcodeReqCounts = prometheus.NewCounterVec( 46 | prometheus.CounterOpts{ 47 | Name: "req_total", 48 | Help: "Number of request.", 49 | }, 50 | rtcodeList, 51 | ) 52 | 53 | rtcodeReqDurations = prometheus.NewHistogramVec( 54 | prometheus.HistogramOpts{ 55 | Name: "req_durations_seconds", 56 | Help: "latency distributions.", 57 | Buckets: rtcodeDurationList, 58 | }, 59 | rtcodeList, 60 | ) 61 | 62 | rtcodeRPCReqCounts = prometheus.NewCounterVec( 63 | prometheus.CounterOpts{ 64 | Name: "rpc_req_total", 65 | Help: "Number of rpc request.", 66 | }, 67 | rtcodeList, 68 | ) 69 | 70 | rtcodeRPCDurations = prometheus.NewHistogramVec( 71 | prometheus.HistogramOpts{ 72 | Name: "rpc_durations_seconds", 73 | Help: "rpc latency distributions.", 74 | Buckets: rtcodeDurationList, 75 | }, 76 | rtcodeList, 77 | ) 78 | 79 | rtcodeRPCBytes = prometheus.NewHistogramVec( 80 | prometheus.HistogramOpts{ 81 | Name: "rpc_bytes", 82 | Help: "rpc bytes distributions.", 83 | Buckets: rtcodeBytesList, 84 | }, 
85 | rtcodeList, 86 | ) 87 | 88 | promInit bool = false 89 | refreshMetricsInit bool = false 90 | ) 91 | 92 | func init() { 93 | prometheus.MustRegister(rtcodeSysCounts) 94 | prometheus.MustRegister(rtcodeSysDurations) 95 | prometheus.MustRegister(rtcodeReqCounts) 96 | prometheus.MustRegister(rtcodeReqDurations) 97 | prometheus.MustRegister(rtcodeRPCReqCounts) 98 | prometheus.MustRegister(rtcodeRPCDurations) 99 | prometheus.MustRegister(rtcodeRPCBytes) 100 | promInit = true 101 | } 102 | 103 | type PromTrace struct { 104 | R string 105 | T string 106 | Code string 107 | startTime time.Time 108 | } 109 | 110 | func NewPromTrace(r, t string) *PromTrace { 111 | return &PromTrace{ 112 | R: r, 113 | T: t, 114 | startTime: time.Now(), 115 | } 116 | } 117 | 118 | func (p *PromTrace) Cost() float64 { 119 | return float64(time.Since(p.startTime) / time.Millisecond) 120 | } 121 | 122 | func getPrometheusLabels(r, t, code string) prometheus.Labels { 123 | return prometheus.Labels{"r": r, "t": t, "code": code} 124 | } 125 | 126 | // SysCounts sys counts 127 | func (p *PromTrace) SysCounts() { 128 | if promInit { 129 | rtcodeSysCounts.With( 130 | getPrometheusLabels(p.R, p.T, p.Code)).Inc() 131 | } 132 | } 133 | 134 | func (p *PromTrace) SysDurations() { 135 | if promInit { 136 | rtcodeSysDurations.With( 137 | getPrometheusLabels(p.R, p.T, p.Code)).Observe(p.Cost()) 138 | } 139 | } 140 | 141 | // ReqCounts req_total 142 | func (p *PromTrace) ReqCounts() { 143 | if promInit { 144 | rtcodeReqCounts.With( 145 | getPrometheusLabels(p.R, p.T, p.Code)).Inc() 146 | } 147 | } 148 | 149 | // ReqDurations req_durations_seconds 150 | func (p *PromTrace) ReqDurations() { 151 | if promInit { 152 | rtcodeReqDurations.With( 153 | getPrometheusLabels(p.R, p.T, p.Code)).Observe(p.Cost()) 154 | } 155 | } 156 | 157 | // RPCReqCounts rpc_req_total 158 | func (p *PromTrace) RPCReqCounts() { 159 | if promInit { 160 | rtcodeRPCReqCounts.With(getPrometheusLabels(p.R, p.T, p.Code)).Inc() 161 | } 162 | } 163 | 164 | // RPCDurations rpc_durations_seconds 165 | func (p *PromTrace) RPCDurations(r, t, code string, cost int64) { 166 | if promInit { 167 | rtcodeRPCDurations.With( 168 | getPrometheusLabels(p.R, p.T, p.Code)).Observe(p.Cost()) 169 | } 170 | } 171 | 172 | // RPCBytes rpc_bytes 173 | func (p *PromTrace) RPCBytes(bytes int64) { 174 | if promInit { 175 | rtcodeRPCBytes.With( 176 | getPrometheusLabels(p.R, p.T, p.Code)).Observe(float64(bytes)) 177 | } 178 | } -------------------------------------------------------------------------------- /internal/s3/default.go: -------------------------------------------------------------------------------- 1 | package s3 2 | 3 | import "github.com/linkxzhou/LessDB/internal/utils" 4 | 5 | var ( 6 | s3Endpoint = utils.GetEnviron("S3Endpoint") 7 | s3Region = utils.GetEnviron("S3Region") 8 | s3AccessKey = utils.GetEnviron("S3AccessKey") 9 | s3SecretKey = utils.GetEnviron("S3SecretKey") 10 | s3Bucket = utils.GetEnviron("S3Bucket") 11 | 12 | defaultClient *S3Client 13 | ) 14 | 15 | func DefaultS3Client() *S3Client { 16 | if defaultClient == nil { 17 | defaultClient = NewS3Client( 18 | s3Endpoint, 19 | s3Region, 20 | s3AccessKey, 21 | s3SecretKey, 22 | s3Bucket) 23 | } 24 | 25 | return defaultClient 26 | } 27 | 28 | type URIHandler interface { 29 | URI(key string) (string, error) 30 | } 31 | 32 | type S3URIHandler struct { 33 | Client *S3Client 34 | } 35 | 36 | func (s S3URIHandler) URI(key string) (string, error) { 37 | return s.Client.GetFileLink(key) 38 | } 39 | 40 | type HttpURIHandler 
struct { 41 | PrefixURI string 42 | } 43 | 44 | func (h HttpURIHandler) URI(key string) (string, error) { 45 | return h.PrefixURI + "/" + key, nil 46 | } 47 | -------------------------------------------------------------------------------- /internal/s3/s3.go: -------------------------------------------------------------------------------- 1 | package s3 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "io" 8 | "time" 9 | 10 | "github.com/aws/aws-sdk-go/aws" 11 | "github.com/aws/aws-sdk-go/aws/credentials" 12 | "github.com/aws/aws-sdk-go/aws/session" 13 | "github.com/aws/aws-sdk-go/service/s3" 14 | "github.com/aws/aws-sdk-go/service/s3/s3manager" 15 | ) 16 | 17 | type ( 18 | // S3Config is the subconfig for the S3 storage type 19 | S3Config struct { 20 | Endpoint string `json:"endpoint,omitempty"` 21 | Region string `json:"region"` 22 | AccessKeyID string `json:"access_key_id"` 23 | SecretAccessKey string `json:"secret_access_key"` 24 | Bucket string `json:"bucket"` 25 | } 26 | 27 | // S3Client is a client for uploading data to S3. 28 | S3Client struct { 29 | endpoint string 30 | region string 31 | accessKey string 32 | secretKey string 33 | bucket string 34 | 35 | // These fields are used for testing via dependency injection. 36 | uploader uploader 37 | downloader downloader 38 | } 39 | ) 40 | 41 | // NewS3Client returns an instance of an S3Client. 42 | func NewS3Client(endpoint, region, accessKey, secretKey, bucket string) *S3Client { 43 | return &S3Client{ 44 | endpoint: endpoint, 45 | region: region, 46 | accessKey: accessKey, 47 | secretKey: secretKey, 48 | bucket: bucket, 49 | } 50 | } 51 | 52 | // String returns a string representation of the S3Client. 53 | func (s *S3Client) String(key string) string { 54 | return fmt.Sprintf("s3://%s/%s", s.bucket, key) 55 | } 56 | 57 | // Bucket returns the bucket name. 58 | func (s *S3Client) Bucket() string { 59 | return s.bucket 60 | } 61 | 62 | // GetFileLink returns a presigned URL for a file in AWS S3. 63 | func (s *S3Client) GetFileLink(key string) (string, error) { 64 | // Create a new S3 client from the configuration. 65 | sess, err := s.createSession() 66 | if err != nil { 67 | return "", err 68 | } 69 | 70 | presignClient := s3.New(sess) 71 | req, _ := presignClient.GetObjectRequest(&s3.GetObjectInput{ 72 | Bucket: aws.String(s.bucket), 73 | Key: aws.String(key), 74 | }) 75 | // Set 7 day expire, TODO: configs 76 | httpURL, err := req.Presign(time.Duration(7*24) * time.Hour) 77 | if err != nil { 78 | return "", err 79 | } 80 | return httpURL, nil 81 | } 82 | 83 | // Upload uploads data to S3. 84 | func (s *S3Client) Upload(ctx context.Context, key string, reader io.Reader) error { 85 | sess, err := s.createSession() 86 | if err != nil { 87 | return err 88 | } 89 | 90 | // If an uploader was not provided, use a real S3 uploader. 91 | var uploader uploader 92 | if s.uploader == nil { 93 | uploader = s3manager.NewUploader(sess) 94 | } else { 95 | uploader = s.uploader 96 | } 97 | 98 | _, err = uploader.UploadWithContext(ctx, &s3manager.UploadInput{ 99 | Bucket: aws.String(s.bucket), 100 | Key: aws.String(key), 101 | Body: reader, 102 | }) 103 | if err != nil { 104 | return fmt.Errorf("failed to upload to %v: %w", key, err) 105 | } 106 | return nil 107 | } 108 | 109 | // UploadString uploads string to S3. 110 | func (s *S3Client) UploadString(ctx context.Context, key string, buffer []byte) error { 111 | return s.Upload(ctx, key, bytes.NewReader(buffer)) 112 | } 113 | 114 | // Download downloads data from S3. 
115 | func (s *S3Client) Download(ctx context.Context, key string, writer io.WriterAt) error { 116 | sess, err := s.createSession() 117 | if err != nil { 118 | return err 119 | } 120 | 121 | // If a downloader was not provided, use a real S3 downloader. 122 | var downloader downloader 123 | if s.downloader == nil { 124 | downloader = s3manager.NewDownloader(sess) 125 | } else { 126 | downloader = s.downloader 127 | } 128 | 129 | _, err = downloader.DownloadWithContext(ctx, writer, &s3.GetObjectInput{ 130 | Bucket: aws.String(s.bucket), 131 | Key: aws.String(key), 132 | }) 133 | if err != nil { 134 | return fmt.Errorf("failed to download %v: %w", key, err) 135 | } 136 | return nil 137 | } 138 | 139 | // DownloadString downloads string to S3. 140 | func (s *S3Client) DownloadString(ctx context.Context, key string) ([]byte, error) { 141 | sess, err := s.createSession() 142 | if err != nil { 143 | return nil, err 144 | } 145 | 146 | s3Client := s3.New(sess) 147 | req := &s3.GetObjectInput{ 148 | Bucket: aws.String(s.bucket), 149 | Key: aws.String(key), 150 | } 151 | 152 | resp, err := s3Client.GetObject(req) 153 | if err != nil { 154 | return nil, err 155 | } 156 | 157 | return io.ReadAll(resp.Body) 158 | } 159 | 160 | func (s *S3Client) createSession() (*session.Session, error) { 161 | disableSSL := false 162 | sess, err := session.NewSession(&aws.Config{ 163 | Endpoint: aws.String(s.endpoint), 164 | Region: aws.String(s.region), 165 | Credentials: credentials.NewStaticCredentials(s.accessKey, s.secretKey, ""), 166 | DisableSSL: &disableSSL, 167 | }) 168 | if err != nil { 169 | return nil, fmt.Errorf("failed to create S3 session: %w", err) 170 | } 171 | return sess, nil 172 | } 173 | 174 | type uploader interface { 175 | UploadWithContext(ctx aws.Context, input *s3manager.UploadInput, opts ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) 176 | } 177 | 178 | type downloader interface { 179 | DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, opts ...func(*s3manager.Downloader)) (n int64, err error) 180 | } 181 | -------------------------------------------------------------------------------- /internal/s3/s3_test.go: -------------------------------------------------------------------------------- 1 | package s3 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "io" 8 | "strings" 9 | "testing" 10 | 11 | "github.com/aws/aws-sdk-go/aws" 12 | "github.com/aws/aws-sdk-go/service/s3" 13 | "github.com/aws/aws-sdk-go/service/s3/s3manager" 14 | ) 15 | 16 | func TestNewS3Client(t *testing.T) { 17 | c := NewS3Client("endpoint1", "region1", "access", "secret", "bucket2") 18 | if c.region != "region1" { 19 | t.Fatalf("expected region to be %q, got %q", "region1", c.region) 20 | } 21 | if c.accessKey != "access" { 22 | t.Fatalf("expected accessKey to be %q, got %q", "access", c.accessKey) 23 | } 24 | if c.secretKey != "secret" { 25 | t.Fatalf("expected secretKey to be %q, got %q", "secret", c.secretKey) 26 | } 27 | if c.bucket != "bucket2" { 28 | t.Fatalf("expected bucket to be %q, got %q", "bucket2", c.bucket) 29 | } 30 | } 31 | 32 | func TestS3ClientString(t *testing.T) { 33 | key := "key3" 34 | c := NewS3Client("endpoint1", "region1", "access", "secret", "bucket2") 35 | if c.String(key) != "s3://bucket2/key3" { 36 | t.Fatalf("expected String() to be %q, got %q", "s3://bucket2/key3", c.String(key)) 37 | } 38 | } 39 | 40 | func TestS3ClientUploadOK(t *testing.T) { 41 | endpoint := "https://my-custom-s3-endpoint.com" 42 | region := "us-west-2" 43 | accessKey := 
"your-access-key" 44 | secretKey := "your-secret-key" 45 | bucket := "your-bucket" 46 | key := "your/key/path" 47 | expectedData := "test data" 48 | uploadedData := new(bytes.Buffer) 49 | 50 | mockUploader := &mockUploader{ 51 | uploadFn: func(ctx aws.Context, input *s3manager.UploadInput, opts ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) { 52 | if *input.Bucket != bucket { 53 | t.Errorf("expected bucket to be %q, got %q", bucket, *input.Bucket) 54 | } 55 | if *input.Key != key { 56 | t.Errorf("expected key to be %q, got %q", key, *input.Key) 57 | } 58 | if input.Body == nil { 59 | t.Errorf("expected body to be non-nil") 60 | } 61 | _, err := uploadedData.ReadFrom(input.Body) 62 | if err != nil { 63 | t.Errorf("error reading from input body: %v", err) 64 | } 65 | return &s3manager.UploadOutput{}, nil 66 | }, 67 | } 68 | 69 | client := &S3Client{ 70 | endpoint: endpoint, 71 | region: region, 72 | accessKey: accessKey, 73 | secretKey: secretKey, 74 | bucket: bucket, 75 | uploader: mockUploader, 76 | } 77 | 78 | reader := strings.NewReader("test data") 79 | err := client.Upload(context.Background(), key, reader) 80 | if err != nil { 81 | t.Fatalf("Unexpected error: %v", err) 82 | } 83 | if uploadedData.String() != expectedData { 84 | t.Errorf("expected uploaded data to be %q, got %q", expectedData, uploadedData.String()) 85 | } 86 | } 87 | 88 | func TestS3ClientUploadFail(t *testing.T) { 89 | region := "us-west-2" 90 | accessKey := "your-access-key" 91 | secretKey := "your-secret-key" 92 | bucket := "your-bucket" 93 | key := "your/key/path" 94 | 95 | mockUploader := &mockUploader{ 96 | uploadFn: func(ctx aws.Context, input *s3manager.UploadInput, opts ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) { 97 | return &s3manager.UploadOutput{}, fmt.Errorf("some error related to S3") 98 | }, 99 | } 100 | 101 | client := &S3Client{ 102 | region: region, 103 | accessKey: accessKey, 104 | secretKey: secretKey, 105 | bucket: bucket, 106 | uploader: mockUploader, 107 | } 108 | 109 | reader := strings.NewReader("test data") 110 | err := client.Upload(context.Background(), key, reader) 111 | if err == nil { 112 | t.Fatal("Expected error, got nil") 113 | } 114 | if !strings.Contains(err.Error(), "some error related to S3") { 115 | t.Fatalf("Expected error to contain %q, got %q", "some error related to S3", err.Error()) 116 | } 117 | } 118 | 119 | func TestS3ClientDownloadOK(t *testing.T) { 120 | region := "us-west-2" 121 | accessKey := "your-access-key" 122 | secretKey := "your-secret-key" 123 | bucket := "your-bucket" 124 | key := "your/key/path" 125 | expectedData := "test data" 126 | 127 | mockDownloader := &mockDownloader{ 128 | downloadFn: func(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, opts ...func(*s3manager.Downloader)) (int64, error) { 129 | if *input.Bucket != bucket { 130 | t.Errorf("expected bucket to be %q, got %q", bucket, *input.Bucket) 131 | } 132 | if *input.Key != key { 133 | t.Errorf("expected key to be %q, got %q", key, *input.Key) 134 | } 135 | n, err := w.WriteAt([]byte(expectedData), 0) 136 | if err != nil { 137 | t.Errorf("error writing to writer: %v", err) 138 | } 139 | return int64(n), nil 140 | }, 141 | } 142 | 143 | client := &S3Client{ 144 | region: region, 145 | accessKey: accessKey, 146 | secretKey: secretKey, 147 | bucket: bucket, 148 | downloader: mockDownloader, 149 | } 150 | 151 | writer := aws.NewWriteAtBuffer(make([]byte, len(expectedData))) 152 | err := client.Download(context.Background(), key, writer) 153 | if err != nil 
{ 154 | t.Fatalf("Unexpected error: %v", err) 155 | } 156 | if string(writer.Bytes()) != expectedData { 157 | t.Errorf("expected downloaded data to be %q, got %q", expectedData, writer.Bytes()) 158 | } 159 | } 160 | 161 | func TestS3ClientDownloadFail(t *testing.T) { 162 | endpoint := "https://my-custom-s3-endpoint.com" 163 | region := "us-west-2" 164 | accessKey := "your-access-key" 165 | secretKey := "your-secret-key" 166 | bucket := "your-bucket" 167 | key := "your/key/path" 168 | 169 | mockDownloader := &mockDownloader{ 170 | downloadFn: func(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, opts ...func(*s3manager.Downloader)) (n int64, err error) { 171 | return 0, fmt.Errorf("some error related to S3") 172 | }, 173 | } 174 | 175 | client := &S3Client{ 176 | endpoint: endpoint, 177 | region: region, 178 | accessKey: accessKey, 179 | secretKey: secretKey, 180 | bucket: bucket, 181 | downloader: mockDownloader, 182 | } 183 | 184 | writer := aws.NewWriteAtBuffer(nil) 185 | err := client.Download(context.Background(), key, writer) 186 | if err == nil { 187 | t.Fatal("Expected error, got nil") 188 | } 189 | if !strings.Contains(err.Error(), "some error related to S3") { 190 | t.Fatalf("Expected error to contain %q, got %q", "some error related to S3", err.Error()) 191 | } 192 | } 193 | 194 | type mockDownloader struct { 195 | downloadFn func(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, opts ...func(*s3manager.Downloader)) (n int64, err error) 196 | } 197 | 198 | func (m *mockDownloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s3.GetObjectInput, opts ...func(*s3manager.Downloader)) (n int64, err error) { 199 | if m.downloadFn != nil { 200 | return m.downloadFn(ctx, w, input, opts...) 201 | } 202 | return 0, nil 203 | } 204 | 205 | type mockUploader struct { 206 | uploadFn func(ctx aws.Context, input *s3manager.UploadInput, opts ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) 207 | } 208 | 209 | func (m *mockUploader) UploadWithContext(ctx aws.Context, input *s3manager.UploadInput, opts ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) { 210 | if m.uploadFn != nil { 211 | return m.uploadFn(ctx, input, opts...) 
212 | } 213 | return &s3manager.UploadOutput{}, nil 214 | } 215 | -------------------------------------------------------------------------------- /internal/sqlite3vfs/defaultvfsv1.go: -------------------------------------------------------------------------------- 1 | package sqlite3vfs 2 | 3 | import ( 4 | "crypto/rand" 5 | "time" 6 | ) 7 | 8 | type defaultVFSv1 struct { 9 | VFS 10 | } 11 | 12 | func (vfs *defaultVFSv1) Randomness(n []byte) int { 13 | i, err := rand.Read(n) 14 | if err != nil { 15 | panic(err) 16 | } 17 | return i 18 | } 19 | 20 | func (vfs *defaultVFSv1) Sleep(d time.Duration) { 21 | time.Sleep(d) 22 | } 23 | 24 | func (vfs *defaultVFSv1) CurrentTime() time.Time { 25 | return time.Now() 26 | } 27 | -------------------------------------------------------------------------------- /internal/sqlite3vfs/error.go: -------------------------------------------------------------------------------- 1 | package sqlite3vfs 2 | 3 | import "fmt" 4 | 5 | type sqliteError struct { 6 | code int 7 | text string 8 | } 9 | 10 | func (e sqliteError) Error() string { 11 | return fmt.Sprintf("sqlite (%d) %s", e.code, e.text) 12 | } 13 | 14 | // https://www.sqlite.org/rescode.html 15 | 16 | const ( 17 | sqliteOK = 0 18 | ) 19 | 20 | var ( 21 | GenericError = sqliteError{1, "Generic Error"} 22 | InternalError = sqliteError{2, "Internal Error"} 23 | PermError = sqliteError{3, "Perm Error"} 24 | AbortError = sqliteError{4, "Abort Error"} 25 | BusyError = sqliteError{5, "Busy Error"} 26 | LockedError = sqliteError{6, "Locked Error"} 27 | NoMemError = sqliteError{7, "No Mem Error"} 28 | ReadOnlyError = sqliteError{8, "Read Only Error"} 29 | InterruptError = sqliteError{9, "Interrupt Error"} 30 | IOError = sqliteError{10, "IO Error"} 31 | CorruptError = sqliteError{11, "Corrupt Error"} 32 | NotFoundError = sqliteError{12, "Not Found Error"} 33 | FullError = sqliteError{13, "Full Error"} 34 | CantOpenError = sqliteError{14, "CantOpen Error"} 35 | ProtocolError = sqliteError{15, "Protocol Error"} 36 | EmptyError = sqliteError{16, "Empty Error"} 37 | SchemaError = sqliteError{17, "Schema Error"} 38 | TooBigError = sqliteError{18, "TooBig Error"} 39 | ConstraintError = sqliteError{19, "Constraint Error"} 40 | MismatchError = sqliteError{20, "Mismatch Error"} 41 | MisuseError = sqliteError{21, "Misuse Error"} 42 | NoLFSError = sqliteError{22, "No Large File Support Error"} 43 | AuthError = sqliteError{23, "Auth Error"} 44 | FormatError = sqliteError{24, "Format Error"} 45 | RangeError = sqliteError{25, "Range Error"} 46 | NotaDBError = sqliteError{26, "Not a DB Error"} 47 | NoticeError = sqliteError{27, "Notice Error"} 48 | WarningError = sqliteError{28, "Warning Error"} 49 | 50 | IOErrorRead = sqliteError{266, "IO Error Read"} 51 | IOErrorShortRead = sqliteError{522, "IO Error Short Read"} 52 | IOErrorWrite = sqliteError{778, "IO Error Write"} 53 | 54 | PermissionsErrorDelete = sqliteError{1000, "Permissions Error Delete"} 55 | PermissionsErrorWrite = sqliteError{1001, "Permissions Error Write"} 56 | PermissionsErrorTruncate = sqliteError{1001, "Permissions Error Truncate"} 57 | ) 58 | 59 | var errMap = map[int]sqliteError{ 60 | 1: GenericError, 61 | 2: InternalError, 62 | 3: PermError, 63 | 4: AbortError, 64 | 5: BusyError, 65 | 6: LockedError, 66 | 7: NoMemError, 67 | 8: ReadOnlyError, 68 | 9: InterruptError, 69 | 10: IOError, 70 | 11: CorruptError, 71 | 12: NotFoundError, 72 | 13: FullError, 73 | 14: CantOpenError, 74 | 15: ProtocolError, 75 | 16: EmptyError, 76 | 17: SchemaError, 77 | 18: 
TooBigError, 78 | 19: ConstraintError, 79 | 20: MismatchError, 80 | 21: MisuseError, 81 | 22: NoLFSError, 82 | 23: AuthError, 83 | 24: FormatError, 84 | 25: RangeError, 85 | 26: NotaDBError, 86 | 27: NoticeError, 87 | 28: WarningError, 88 | 89 | 266: IOErrorRead, 90 | 522: IOErrorShortRead, 91 | 778: IOErrorWrite, 92 | } 93 | 94 | func errFromCode(code int) error { 95 | if code == 0 { 96 | return nil 97 | } 98 | err, ok := errMap[code] 99 | if ok { 100 | return err 101 | } 102 | 103 | return sqliteError{ 104 | code: code, 105 | text: "unknown err code", 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /internal/sqlite3vfs/file.go: -------------------------------------------------------------------------------- 1 | package sqlite3vfs 2 | 3 | import "fmt" 4 | 5 | type File interface { 6 | Close() error 7 | 8 | // ReadAt reads len(p) bytes into p starting at offset off in the underlying input source. 9 | // It returns the number of bytes read (0 <= n <= len(p)) and any error encountered. 10 | // If n < len(p), SQLITE_IOERR_SHORT_READ will be returned to sqlite. 11 | ReadAt(p []byte, off int64) (n int, err error) 12 | 13 | // WriteAt writes len(p) bytes from p to the underlying data stream at offset off. 14 | // It returns the number of bytes written from p (0 <= n <= len(p)) and any error encountered that caused the write to stop early. 15 | // WriteAt must return a non-nil error if it returns n < len(p). 16 | WriteAt(p []byte, off int64) (n int, err error) 17 | 18 | Truncate(size int64) error 19 | 20 | Sync(flag SyncType) error 21 | 22 | FileSize() (int64, error) 23 | 24 | // Acquire or upgrade a lock. 25 | // elock can be one of the following: 26 | // LockShared, LockReserved, LockPending, LockExclusive. 27 | // 28 | // Additional states can be inserted between the current lock level 29 | // and the requested lock level. The locking might fail on one of the later 30 | // transitions leaving the lock state different from what it started but 31 | // still short of its goal. The following chart shows the allowed 32 | // transitions and the inserted intermediate states: 33 | // 34 | // UNLOCKED -> SHARED 35 | // SHARED -> RESERVED 36 | // SHARED -> (PENDING) -> EXCLUSIVE 37 | // RESERVED -> (PENDING) -> EXCLUSIVE 38 | // PENDING -> EXCLUSIVE 39 | // 40 | // This function should only increase a lock level. 41 | // See the sqlite source documentation for unixLock for more details. 42 | Lock(elock LockType) error 43 | 44 | // Lower the locking level on file to eFileLock. eFileLock must be 45 | // either NO_LOCK or SHARED_LOCK. If the locking level of the file 46 | // descriptor is already at or below the requested locking level, 47 | // this routine is a no-op. 48 | Unlock(elock LockType) error 49 | 50 | // Check whether any database connection, either in this process or 51 | // in some other process, is holding a RESERVED, PENDING, or 52 | // EXCLUSIVE lock on the file. It returns true if such a lock exists 53 | // and false otherwise. 54 | CheckReservedLock() (bool, error) 55 | 56 | // SectorSize returns the sector size of the device that underlies 57 | // the file. The sector size is the minimum write that can be 58 | // performed without disturbing other bytes in the file. 59 | SectorSize() int64 60 | 61 | // DeviceCharacteristics returns a bit vector describing behaviors 62 | // of the underlying device. 
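// The returned value is a bitwise OR of the Iocap* constants defined below; for
// example, a backend whose content never changes after creation could report
// IocapImmutable.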
63 | DeviceCharacteristics() DeviceCharacteristic 64 | } 65 | 66 | type SyncType int 67 | 68 | const ( 69 | SyncNormal SyncType = 0x00002 70 | SyncFull SyncType = 0x00003 71 | SyncDataOnly SyncType = 0x00010 72 | ) 73 | 74 | // https://www.sqlite.org/c3ref/c_lock_exclusive.html 75 | type LockType int 76 | 77 | const ( 78 | LockNone LockType = 0 79 | LockShared LockType = 1 80 | LockReserved LockType = 2 81 | LockPending LockType = 3 82 | LockExclusive LockType = 4 83 | ) 84 | 85 | func (lt LockType) String() string { 86 | switch lt { 87 | case LockNone: 88 | return "LockNone" 89 | case LockShared: 90 | return "LockShared" 91 | case LockReserved: 92 | return "LockReserved" 93 | case LockPending: 94 | return "LockPending" 95 | case LockExclusive: 96 | return "LockExclusive" 97 | default: 98 | return fmt.Sprintf("LockTypeUnknown<%d>", lt) 99 | } 100 | } 101 | 102 | // https://www.sqlite.org/c3ref/c_iocap_atomic.html 103 | type DeviceCharacteristic int 104 | 105 | const ( 106 | IocapAtomic DeviceCharacteristic = 0x00000001 107 | IocapAtomic512 DeviceCharacteristic = 0x00000002 108 | IocapAtomic1K DeviceCharacteristic = 0x00000004 109 | IocapAtomic2K DeviceCharacteristic = 0x00000008 110 | IocapAtomic4K DeviceCharacteristic = 0x00000010 111 | IocapAtomic8K DeviceCharacteristic = 0x00000020 112 | IocapAtomic16K DeviceCharacteristic = 0x00000040 113 | IocapAtomic32K DeviceCharacteristic = 0x00000080 114 | IocapAtomic64K DeviceCharacteristic = 0x00000100 115 | IocapSafeAppend DeviceCharacteristic = 0x00000200 116 | IocapSequential DeviceCharacteristic = 0x00000400 117 | IocapUndeletableWhenOpen DeviceCharacteristic = 0x00000800 118 | IocapPowersafeOverwrite DeviceCharacteristic = 0x00001000 119 | IocapImmutable DeviceCharacteristic = 0x00002000 120 | IocapBatchAtomic DeviceCharacteristic = 0x00004000 121 | ) 122 | -------------------------------------------------------------------------------- /internal/sqlite3vfs/options.go: -------------------------------------------------------------------------------- 1 | package sqlite3vfs 2 | 3 | type options struct { 4 | maxPathName int 5 | } 6 | 7 | type Option interface { 8 | setOption(*options) error 9 | } 10 | 11 | type maxPathOption struct { 12 | maxPath int 13 | } 14 | 15 | func (o maxPathOption) setOption(opts *options) error { 16 | opts.maxPathName = o.maxPath 17 | return nil 18 | } 19 | 20 | func WithMaxPathName(n int) Option { 21 | return maxPathOption{maxPath: n} 22 | } 23 | -------------------------------------------------------------------------------- /internal/sqlite3vfs/sqlite3vfs.c: -------------------------------------------------------------------------------- 1 | #include "sqlite3vfs.h" 2 | #include 3 | #include 4 | 5 | #ifdef SQLITE3VFS_LOADABLE_EXT 6 | SQLITE_EXTENSION_INIT1 7 | #endif 8 | 9 | extern int goVFSOpen(sqlite3_vfs *vfs, const char *name, sqlite3_file *file, int flags, int *outFlags); 10 | extern int goVFSDelete(sqlite3_vfs *, const char *zName, int syncDir); 11 | extern int goVFSAccess(sqlite3_vfs *, const char *zName, int flags, int *pResOut); 12 | extern int goVFSFullPathname(sqlite3_vfs *, const char *zName, int nOut, char *zOut); 13 | extern int goVFSRandomness(sqlite3_vfs *, int nByte, char *zOut); 14 | extern int goVFSSleep(sqlite3_vfs *, int microseconds); 15 | extern int goVFSCurrentTimeInt64(sqlite3_vfs *, sqlite3_int64 *piNow); 16 | 17 | extern int goVFSClose(sqlite3_file *file); 18 | extern int goVFSRead(sqlite3_file *file, void *buf, int iAmt, sqlite3_int64 iOfst); 19 | extern int goVFSWrite(sqlite3_file *file, 
const void *buf, int iAmt, sqlite3_int64 iOfst); 20 | extern int goVFSTruncate(sqlite3_file *file, sqlite3_int64 size); 21 | extern int goVFSSync(sqlite3_file *file, int flags); 22 | extern int goVFSFileSize(sqlite3_file *file, sqlite3_int64 *pSize); 23 | extern int goVFSLock(sqlite3_file *file, int eLock); 24 | extern int goVFSUnlock(sqlite3_file *, int eLock); 25 | extern int goVFSCheckReservedLock(sqlite3_file *file, int *pResOut); 26 | extern int goVFSSectorSize(sqlite3_file *file); 27 | extern int goVFSDeviceCharacteristics(sqlite3_file *file); 28 | 29 | int s3vfsNew(char *name, int maxPathName) 30 | { 31 | sqlite3_vfs *vfs; 32 | sqlite3_vfs *delegate; 33 | vfs = calloc(1, sizeof(sqlite3_vfs)); 34 | if (vfs == NULL) 35 | { 36 | return SQLITE_ERROR; 37 | } 38 | 39 | delegate = sqlite3_vfs_find(0); 40 | 41 | vfs->iVersion = 2; 42 | vfs->szOsFile = sizeof(s3vfsFile); 43 | vfs->mxPathname = maxPathName; 44 | vfs->zName = name; 45 | vfs->xOpen = s3vfsOpen; 46 | vfs->xDelete = s3vfsDelete; 47 | vfs->xAccess = s3vfsAccess; 48 | vfs->xFullPathname = s3vfsFullPathname; 49 | vfs->xDlOpen = delegate->xDlOpen; 50 | vfs->xDlError = delegate->xDlError; 51 | vfs->xDlSym = delegate->xDlSym; 52 | vfs->xDlClose = delegate->xDlClose; 53 | vfs->xRandomness = s3vfsRandomness; 54 | vfs->xSleep = s3vfsSleep; 55 | vfs->xCurrentTime = s3vfsCurrentTime; 56 | vfs->xGetLastError = delegate->xGetLastError; 57 | vfs->xCurrentTimeInt64 = s3vfsCurrentTimeInt64; 58 | 59 | return sqlite3_vfs_register(vfs, 0); 60 | } 61 | 62 | int s3vfsOpen(sqlite3_vfs *vfs, const char *name, sqlite3_file *file, int flags, int *outFlags) 63 | { 64 | int ret = goVFSOpen(vfs, name, file, flags, outFlags); 65 | file->pMethods = &s3vfs_io_methods; 66 | return ret; 67 | } 68 | 69 | int s3vfsDelete(sqlite3_vfs *vfs, const char *zName, int syncDir) 70 | { 71 | int ret = goVFSDelete(vfs, zName, syncDir); 72 | return ret; 73 | } 74 | 75 | int s3vfsAccess(sqlite3_vfs *vfs, const char *zName, int flags, int *pResOut) 76 | { 77 | return goVFSAccess(vfs, zName, flags, pResOut); 78 | } 79 | 80 | int s3vfsFullPathname(sqlite3_vfs *vfs, const char *zName, int nOut, char *zOut) 81 | { 82 | return goVFSFullPathname(vfs, zName, nOut, zOut); 83 | } 84 | 85 | int s3vfsRandomness(sqlite3_vfs *vfs, int nByte, char *zOut) 86 | { 87 | return goVFSRandomness(vfs, nByte, zOut); 88 | } 89 | 90 | int s3vfsSleep(sqlite3_vfs *vfs, int microseconds) 91 | { 92 | return goVFSSleep(vfs, microseconds); 93 | } 94 | 95 | int s3vfsCurrentTime(sqlite3_vfs *vfs, double *prNow) 96 | { 97 | sqlite3_int64 i = 0; 98 | int rc; 99 | rc = s3vfsCurrentTimeInt64(0, &i); 100 | *prNow = i / 86400000.0; 101 | return rc; 102 | } 103 | 104 | int s3vfsCurrentTimeInt64(sqlite3_vfs *vfs, sqlite3_int64 *piNow) 105 | { 106 | return goVFSCurrentTimeInt64(vfs, piNow); 107 | } 108 | 109 | int s3vfsClose(sqlite3_file *file) 110 | { 111 | return goVFSClose(file); 112 | } 113 | 114 | int s3vfsRead(sqlite3_file *file, void *zBuf, int iAmt, sqlite3_int64 iOfst) 115 | { 116 | return goVFSRead(file, zBuf, iAmt, iOfst); 117 | } 118 | 119 | int s3vfsWrite(sqlite3_file *file, const void *zBuf, int iAmt, sqlite3_int64 iOfst) 120 | { 121 | return goVFSWrite(file, zBuf, iAmt, iOfst); 122 | } 123 | 124 | int s3vfsTruncate(sqlite3_file *file, sqlite3_int64 size) 125 | { 126 | return goVFSTruncate(file, size); 127 | } 128 | 129 | int s3vfsSync(sqlite3_file *file, int flags) 130 | { 131 | return goVFSSync(file, flags); 132 | } 133 | 134 | int s3vfsFileSize(sqlite3_file *file, sqlite3_int64 *pSize) 135 | { 136 | 
return goVFSFileSize(file, pSize); 137 | } 138 | 139 | int s3vfsLock(sqlite3_file *file, int eLock) 140 | { 141 | return goVFSLock(file, eLock); 142 | } 143 | 144 | int s3vfsUnlock(sqlite3_file *file, int eLock) 145 | { 146 | return goVFSUnlock(file, eLock); 147 | } 148 | 149 | int s3vfsCheckReservedLock(sqlite3_file *file, int *pResOut) 150 | { 151 | return goVFSCheckReservedLock(file, pResOut); 152 | } 153 | 154 | int s3vfsSectorSize(sqlite3_file *file) 155 | { 156 | return goVFSSectorSize(file); 157 | } 158 | 159 | int s3vfsDeviceCharacteristics(sqlite3_file *file) 160 | { 161 | return goVFSDeviceCharacteristics(file); 162 | } 163 | 164 | int s3vfsFileControl(sqlite3_file *pFile, int op, void *pArg) 165 | { 166 | return SQLITE_NOTFOUND; 167 | } 168 | 169 | const sqlite3_io_methods s3vfs_io_methods = { 170 | 1, /* iVersion */ 171 | s3vfsClose, /* xClose */ 172 | s3vfsRead, /* xRead */ 173 | s3vfsWrite, /* xWrite */ 174 | s3vfsTruncate, /* xTruncate */ 175 | s3vfsSync, /* xSync */ 176 | s3vfsFileSize, /* xFileSize */ 177 | s3vfsLock, /* xLock */ 178 | s3vfsUnlock, /* xUnlock */ 179 | s3vfsCheckReservedLock, /* xCheckReservedLock */ 180 | s3vfsFileControl, /* xFileControl */ 181 | s3vfsSectorSize, /* xSectorSize */ 182 | s3vfsDeviceCharacteristics, /* xDeviceCharacteristics */ 183 | }; 184 | -------------------------------------------------------------------------------- /internal/sqlite3vfs/sqlite3vfs.go: -------------------------------------------------------------------------------- 1 | package sqlite3vfs 2 | 3 | import "time" 4 | 5 | // Register a VFS with sqlite. The name specified must be unique and 6 | // should match the name given when opening the database: 7 | // `?vfs={{name}}`. 8 | func RegisterVFS(name string, vfs VFS, opts ...Option) error { 9 | var vfsOpts options 10 | 11 | for _, opt := range opts { 12 | err := opt.setOption(&vfsOpts) 13 | if err != nil { 14 | return err 15 | } 16 | } 17 | 18 | extVFS, ok := vfs.(ExtendedVFSv1) 19 | if !ok { 20 | extVFS = &defaultVFSv1{vfs} 21 | } 22 | 23 | maxPathName := vfsOpts.maxPathName 24 | if maxPathName == 0 { 25 | maxPathName = 1024 26 | } 27 | 28 | return newVFS(name, extVFS, maxPathName) 29 | } 30 | 31 | type VFS interface { 32 | // Open a file. 33 | // Name will either be the name of the file to open or "" for a temp file. 34 | Open(name string, flags OpenFlag) (File, OpenFlag, error) 35 | 36 | // Delete the named file. If dirSync is true them ensure the file-system 37 | // modification has been synced to disk before returning. 38 | Delete(name string, dirSync bool) error 39 | 40 | // Test for access permission. Returns true if the requested permission is available. 41 | Access(name string, flags AccessFlag) (bool, error) 42 | 43 | // FullPathname returns the canonicalized version of name. 44 | FullPathname(name string) string 45 | } 46 | 47 | type ExtendedVFSv1 interface { 48 | VFS 49 | 50 | // Randomness populates n with pseudo-random data. 51 | // Returns the number of bytes of randomness obtained. 
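// The fallback defaultVFSv1 implementation satisfies this with crypto/rand.Read.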
52 | Randomness(n []byte) int 53 | 54 | // Sleep for duration 55 | Sleep(d time.Duration) 56 | 57 | CurrentTime() time.Time 58 | } 59 | 60 | type OpenFlag int 61 | 62 | const ( 63 | OpenReadOnly OpenFlag = 0x00000001 64 | OpenReadWrite OpenFlag = 0x00000002 65 | OpenCreate OpenFlag = 0x00000004 66 | OpenDeleteOnClose OpenFlag = 0x00000008 67 | OpenExclusive OpenFlag = 0x00000010 68 | OpenAutoProxy OpenFlag = 0x00000020 69 | OpenURI OpenFlag = 0x00000040 70 | OpenMemory OpenFlag = 0x00000080 71 | OpenMainDB OpenFlag = 0x00000100 72 | OpenTempDB OpenFlag = 0x00000200 73 | OpenTransientDB OpenFlag = 0x00000400 74 | OpenMainJournal OpenFlag = 0x00000800 75 | OpenTempJournal OpenFlag = 0x00001000 76 | OpenSubJournal OpenFlag = 0x00002000 77 | OpenSuperJournal OpenFlag = 0x00004000 78 | OpenNoMutex OpenFlag = 0x00008000 79 | OpenFullMutex OpenFlag = 0x00010000 80 | OpenSharedCache OpenFlag = 0x00020000 81 | OpenPrivateCache OpenFlag = 0x00040000 82 | OpenWAL OpenFlag = 0x00080000 83 | OpenNoFollow OpenFlag = 0x01000000 84 | ) 85 | 86 | type AccessFlag int 87 | 88 | const ( 89 | AccessExists AccessFlag = 0 // Does the file exist? 90 | AccessReadWrite AccessFlag = 1 // Is the file both readable and writeable? 91 | AccessRead AccessFlag = 2 // Is the file readable? 92 | ) 93 | -------------------------------------------------------------------------------- /internal/sqlite3vfs/sqlite3vfs.h: -------------------------------------------------------------------------------- 1 | #ifndef SQLITE3VFS_H 2 | #define SQLITE3VFS_H 3 | 4 | #ifdef SQLITE3VFS_LOADABLE_EXT 5 | #include "sqlite3ext.h" 6 | #else 7 | #include "sqlite3-binding.h" 8 | #endif 9 | 10 | typedef struct s3vfsFile 11 | { 12 | sqlite3_file base; /* IO methods */ 13 | sqlite3_uint64 id; /* Go object id */ 14 | } s3vfsFile; 15 | 16 | int s3vfsNew(char *name, int maxPathName); 17 | 18 | int s3vfsClose(sqlite3_file *); 19 | int s3vfsRead(sqlite3_file *, void *, int iAmt, sqlite3_int64 iOfst); 20 | int s3vfsWrite(sqlite3_file *, const void *, int iAmt, sqlite3_int64 iOfst); 21 | int s3vfsTruncate(sqlite3_file *, sqlite3_int64 size); 22 | int s3vfsSync(sqlite3_file *, int flags); 23 | int s3vfsFileSize(sqlite3_file *, sqlite3_int64 *pSize); 24 | int s3vfsLock(sqlite3_file *, int); 25 | int s3vfsUnlock(sqlite3_file *, int); 26 | int s3vfsCheckReservedLock(sqlite3_file *, int *pResOut); 27 | int s3vfsFileControl(sqlite3_file *, int op, void *pArg); 28 | int s3vfsSectorSize(sqlite3_file *); 29 | int s3vfsDeviceCharacteristics(sqlite3_file *); 30 | int s3vfsShmMap(sqlite3_file *, int iPg, int pgsz, int, void volatile **); 31 | int s3vfsShmLock(sqlite3_file *, int offset, int n, int flags); 32 | void s3vfsShmBarrier(sqlite3_file *); 33 | int s3vfsShmUnmap(sqlite3_file *, int deleteFlag); 34 | int s3vfsFetch(sqlite3_file *, sqlite3_int64 iOfst, int iAmt, void **pp); 35 | int s3vfsUnfetch(sqlite3_file *, sqlite3_int64 iOfst, void *p); 36 | 37 | int s3vfsOpen(sqlite3_vfs *, const char *, sqlite3_file *, int, int *); 38 | int s3vfsDelete(sqlite3_vfs *, const char *, int); 39 | int s3vfsAccess(sqlite3_vfs *, const char *, int, int *); 40 | int s3vfsFullPathname(sqlite3_vfs *, const char *zName, int, char *zOut); 41 | void *s3vfsDlOpen(sqlite3_vfs *, const char *zFilename); 42 | void s3vfsDlError(sqlite3_vfs *, int nByte, char *zErrMsg); 43 | void (*s3vfsDlSym(sqlite3_vfs *pVfs, void *p, const char *zSym))(void); 44 | void s3vfsDlClose(sqlite3_vfs *, void *); 45 | int s3vfsRandomness(sqlite3_vfs *, int nByte, char *zOut); 46 | int s3vfsSleep(sqlite3_vfs *, 
int microseconds); 47 | int s3vfsCurrentTime(sqlite3_vfs *, double *); 48 | int s3vfsGetLastError(sqlite3_vfs *, int, char *); 49 | int s3vfsCurrentTimeInt64(sqlite3_vfs *, sqlite3_int64 *); 50 | 51 | const extern sqlite3_io_methods s3vfs_io_methods; 52 | 53 | #endif /* SQLITE3_VFS */ 54 | -------------------------------------------------------------------------------- /internal/sqlite3vfs/sqlite3vfs_loadable_ext.go: -------------------------------------------------------------------------------- 1 | //go:build SQLITE3VFS_LOADABLE_EXT 2 | // +build SQLITE3VFS_LOADABLE_EXT 3 | 4 | package sqlite3vfs 5 | 6 | /* 7 | #cgo CFLAGS: -DSQLITE3VFS_LOADABLE_EXT=1 8 | */ 9 | 10 | import "C" 11 | -------------------------------------------------------------------------------- /internal/sqlite3vfs/sqlite3vfs_normal.go: -------------------------------------------------------------------------------- 1 | package sqlite3vfs 2 | 3 | /* 4 | #cgo darwin LDFLAGS: -Wl,-undefined,dynamic_lookup 5 | */ 6 | 7 | import "C" 8 | -------------------------------------------------------------------------------- /internal/sqlite3vfs/sqlite3vfscgo.go: -------------------------------------------------------------------------------- 1 | package sqlite3vfs 2 | 3 | /* 4 | #include "sqlite3vfs.h" 5 | #include 6 | #include 7 | */ 8 | import "C" 9 | 10 | import ( 11 | "io" 12 | "sync" 13 | "time" 14 | "unsafe" 15 | ) 16 | 17 | var ( 18 | vfsMap = make(map[string]ExtendedVFSv1) 19 | 20 | fileMux sync.Mutex 21 | nextFileID uint64 22 | fileMap = make(map[uint64]File) 23 | ) 24 | 25 | func newVFS(name string, goVFS ExtendedVFSv1, maxPathName int) error { 26 | vfsMap[name] = goVFS 27 | 28 | rc := C.s3vfsNew(C.CString(name), C.int(maxPathName)) 29 | if rc == C.SQLITE_OK { 30 | return nil 31 | } 32 | 33 | return errFromCode(int(rc)) 34 | } 35 | 36 | //export goVFSOpen 37 | func goVFSOpen(cvfs *C.sqlite3_vfs, name *C.char, retFile *C.sqlite3_file, flags C.int, outFlags *C.int) C.int { 38 | fileName := C.GoString(name) 39 | 40 | vfs := vfsFromC(cvfs) 41 | 42 | file, retFlags, err := vfs.Open(fileName, OpenFlag(flags)) 43 | if err != nil { 44 | return errToC(err) 45 | } 46 | 47 | if retFlags != 0 && outFlags != nil { 48 | *outFlags = C.int(retFlags) 49 | } 50 | 51 | cfile := (*C.s3vfsFile)(unsafe.Pointer(retFile)) 52 | C.memset(unsafe.Pointer(cfile), 0, C.sizeof_s3vfsFile) 53 | 54 | fileMux.Lock() 55 | fileID := nextFileID 56 | nextFileID++ 57 | cfile.id = C.sqlite3_uint64(fileID) 58 | fileMap[fileID] = file 59 | fileMux.Unlock() 60 | 61 | return sqliteOK 62 | } 63 | 64 | //export goVFSDelete 65 | func goVFSDelete(cvfs *C.sqlite3_vfs, zName *C.char, syncDir C.int) C.int { 66 | vfs := vfsFromC(cvfs) 67 | 68 | fileName := C.GoString(zName) 69 | 70 | err := vfs.Delete(fileName, syncDir > 0) 71 | if err != nil { 72 | return errToC(err) 73 | } 74 | 75 | return sqliteOK 76 | } 77 | 78 | // The flags argument to xAccess() may be SQLITE_ACCESS_EXISTS to test 79 | // for the existence of a file, or SQLITE_ACCESS_READWRITE to test 80 | // whether a file is readable and writable, or SQLITE_ACCESS_READ to 81 | // test whether a file is at least readable. The SQLITE_ACCESS_READ 82 | // flag is never actually used and is not implemented in the built-in 83 | // VFSes of SQLite. The file is named by the second argument and can 84 | // be a directory. The xAccess method returns SQLITE_OK on success or 85 | // some non-zero error code if there is an I/O error or if the name of 86 | // the file given in the second argument is illegal. 
If SQLITE_OK is 87 | // returned, then non-zero or zero is written into *pResOut to 88 | // indicate whether or not the file is accessible. 89 | // 90 | //export goVFSAccess 91 | func goVFSAccess(cvfs *C.sqlite3_vfs, zName *C.char, cflags C.int, pResOut *C.int) C.int { 92 | vfs := vfsFromC(cvfs) 93 | 94 | fileName := C.GoString(zName) 95 | flags := AccessFlag(cflags) 96 | 97 | ok, err := vfs.Access(fileName, flags) 98 | 99 | out := 0 100 | if ok { 101 | out = 1 102 | } 103 | *pResOut = C.int(out) 104 | 105 | if err != nil { 106 | return errToC(err) 107 | } 108 | 109 | return sqliteOK 110 | } 111 | 112 | //export goVFSFullPathname 113 | func goVFSFullPathname(cvfs *C.sqlite3_vfs, zName *C.char, nOut C.int, zOut *C.char) C.int { 114 | vfs := vfsFromC(cvfs) 115 | 116 | fileName := C.GoString(zName) 117 | 118 | s := vfs.FullPathname(fileName) 119 | 120 | path := C.CString(s) 121 | defer C.free(unsafe.Pointer(path)) 122 | 123 | if len(s)+1 >= int(nOut) { 124 | return errToC(TooBigError) 125 | } 126 | 127 | C.memcpy(unsafe.Pointer(zOut), unsafe.Pointer(path), C.size_t(len(s)+1)) 128 | 129 | return sqliteOK 130 | } 131 | 132 | //export goVFSRandomness 133 | func goVFSRandomness(cvfs *C.sqlite3_vfs, nByte C.int, zOut *C.char) C.int { 134 | vfs := vfsFromC(cvfs) 135 | 136 | buf := (*[1 << 28]byte)(unsafe.Pointer(zOut))[:int(nByte):int(nByte)] 137 | 138 | count := vfs.Randomness(buf) 139 | return C.int(count) 140 | } 141 | 142 | //export goVFSSleep 143 | func goVFSSleep(cvfs *C.sqlite3_vfs, microseconds C.int) C.int { 144 | vfs := vfsFromC(cvfs) 145 | 146 | d := time.Duration(microseconds) * time.Microsecond 147 | 148 | vfs.Sleep(d) 149 | 150 | return sqliteOK 151 | } 152 | 153 | // Find the current time (in Universal Coordinated Time). Write into *piNow 154 | // the current time and date as a Julian Day number times 86_400_000. In 155 | // other words, write into *piNow the number of milliseconds since the Julian 156 | // epoch of noon in Greenwich on November 24, 4714 B.C according to the 157 | // proleptic Gregorian calendar. 158 | 159 | // On success, return SQLITE_OK. Return SQLITE_ERROR if the time and date 160 | // cannot be found. 
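// As a worked example, the Unix epoch (1970-01-01T00:00:00Z) is Julian Day
// 2440587.5, so the value written for that instant is 2440587.5 * 86400000 =
// 210866760000000 ms; the implementation below encodes this offset as
// 24405875 * 8640000 and then adds the current Unix time in milliseconds.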
161 | // 162 | //export goVFSCurrentTimeInt64 163 | func goVFSCurrentTimeInt64(cvfs *C.sqlite3_vfs, piNow *C.sqlite3_int64) C.int { 164 | vfs := vfsFromC(cvfs) 165 | 166 | ts := vfs.CurrentTime() 167 | 168 | unixEpoch := int64(24405875) * 8640000 169 | *piNow = C.sqlite3_int64(unixEpoch + ts.UnixNano()/1000000) 170 | 171 | return sqliteOK 172 | } 173 | 174 | //export goVFSClose 175 | func goVFSClose(cfile *C.sqlite3_file) C.int { 176 | s3vfsFile := (*C.s3vfsFile)(unsafe.Pointer(cfile)) 177 | 178 | fileID := uint64(s3vfsFile.id) 179 | 180 | fileMux.Lock() 181 | file := fileMap[fileID] 182 | delete(fileMap, fileID) 183 | fileMux.Unlock() 184 | 185 | if file == nil { 186 | return errToC(GenericError) 187 | } 188 | 189 | err := file.Close() 190 | if err != nil { 191 | return errToC(err) 192 | } 193 | 194 | return sqliteOK 195 | } 196 | 197 | //export goVFSRead 198 | func goVFSRead(cfile *C.sqlite3_file, buf unsafe.Pointer, iAmt C.int, iOfst C.sqlite3_int64) C.int { 199 | s3vfsFile := (*C.s3vfsFile)(unsafe.Pointer(cfile)) 200 | 201 | fileID := uint64(s3vfsFile.id) 202 | 203 | fileMux.Lock() 204 | file := fileMap[fileID] 205 | fileMux.Unlock() 206 | 207 | if file == nil { 208 | return errToC(GenericError) 209 | } 210 | 211 | goBuf := (*[1 << 28]byte)(buf)[:int(iAmt):int(iAmt)] 212 | n, err := file.ReadAt(goBuf, int64(iOfst)) 213 | if n < len(goBuf) { 214 | if err == nil { 215 | // io.ReadAt requires an error if n < len(goBuf) 216 | panic("ReadAt invalid semantics: returned n < len(p) but with a nil error") 217 | } 218 | // If xRead() returns SQLITE_IOERR_SHORT_READ it must also fill in the unread portions of the buffer with zeros. 219 | for i := n; i < len(goBuf); i++ { 220 | goBuf[i] = 0 221 | } 222 | } 223 | 224 | if err == io.EOF { 225 | return errToC(IOErrorShortRead) 226 | } else if err != nil { 227 | return errToC(err) 228 | } 229 | 230 | return sqliteOK 231 | } 232 | 233 | //export goVFSWrite 234 | func goVFSWrite(cfile *C.sqlite3_file, buf unsafe.Pointer, iAmt C.int, iOfst C.sqlite3_int64) C.int { 235 | s3vfsFile := (*C.s3vfsFile)(unsafe.Pointer(cfile)) 236 | 237 | fileID := uint64(s3vfsFile.id) 238 | 239 | fileMux.Lock() 240 | file := fileMap[fileID] 241 | fileMux.Unlock() 242 | 243 | if file == nil { 244 | return errToC(GenericError) 245 | } 246 | 247 | goBuf := (*[1 << 28]byte)(buf)[:int(iAmt):int(iAmt)] 248 | 249 | _, err := file.WriteAt(goBuf, int64(iOfst)) 250 | if err != nil { 251 | return errToC(IOErrorWrite) 252 | } 253 | 254 | return sqliteOK 255 | } 256 | 257 | //export goVFSTruncate 258 | func goVFSTruncate(cfile *C.sqlite3_file, size C.sqlite3_int64) C.int { 259 | s3vfsFile := (*C.s3vfsFile)(unsafe.Pointer(cfile)) 260 | 261 | fileID := uint64(s3vfsFile.id) 262 | 263 | fileMux.Lock() 264 | file := fileMap[fileID] 265 | fileMux.Unlock() 266 | 267 | if file == nil { 268 | return errToC(GenericError) 269 | } 270 | 271 | err := file.Truncate(int64(size)) 272 | if err != nil { 273 | return errToC(err) 274 | } 275 | 276 | return sqliteOK 277 | } 278 | 279 | //export goVFSSync 280 | func goVFSSync(cfile *C.sqlite3_file, flags C.int) C.int { 281 | s3vfsFile := (*C.s3vfsFile)(unsafe.Pointer(cfile)) 282 | 283 | fileID := uint64(s3vfsFile.id) 284 | 285 | fileMux.Lock() 286 | file := fileMap[fileID] 287 | fileMux.Unlock() 288 | 289 | if file == nil { 290 | return errToC(GenericError) 291 | } 292 | 293 | err := file.Sync(SyncType(flags)) 294 | if err != nil { 295 | return errToC(err) 296 | } 297 | 298 | return sqliteOK 299 | } 300 | 301 | //export goVFSFileSize 302 | func 
goVFSFileSize(cfile *C.sqlite3_file, pSize *C.sqlite3_int64) C.int { 303 | s3vfsFile := (*C.s3vfsFile)(unsafe.Pointer(cfile)) 304 | 305 | fileID := uint64(s3vfsFile.id) 306 | 307 | fileMux.Lock() 308 | file := fileMap[fileID] 309 | fileMux.Unlock() 310 | 311 | if file == nil { 312 | return errToC(GenericError) 313 | } 314 | 315 | n, err := file.FileSize() 316 | if err != nil { 317 | return errToC(err) 318 | } 319 | 320 | *pSize = C.sqlite3_int64(n) 321 | 322 | return sqliteOK 323 | } 324 | 325 | //export goVFSLock 326 | func goVFSLock(cfile *C.sqlite3_file, eLock C.int) C.int { 327 | s3vfsFile := (*C.s3vfsFile)(unsafe.Pointer(cfile)) 328 | 329 | fileID := uint64(s3vfsFile.id) 330 | 331 | fileMux.Lock() 332 | file := fileMap[fileID] 333 | fileMux.Unlock() 334 | 335 | if file == nil { 336 | return errToC(GenericError) 337 | } 338 | 339 | err := file.Lock(LockType(eLock)) 340 | if err != nil { 341 | return errToC(err) 342 | } 343 | 344 | return sqliteOK 345 | } 346 | 347 | //export goVFSUnlock 348 | func goVFSUnlock(cfile *C.sqlite3_file, eLock C.int) C.int { 349 | s3vfsFile := (*C.s3vfsFile)(unsafe.Pointer(cfile)) 350 | 351 | fileID := uint64(s3vfsFile.id) 352 | 353 | fileMux.Lock() 354 | file := fileMap[fileID] 355 | fileMux.Unlock() 356 | 357 | if file == nil { 358 | return errToC(GenericError) 359 | } 360 | 361 | err := file.Unlock(LockType(eLock)) 362 | if err != nil { 363 | return errToC(err) 364 | } 365 | 366 | return sqliteOK 367 | } 368 | 369 | //export goVFSCheckReservedLock 370 | func goVFSCheckReservedLock(cfile *C.sqlite3_file, pResOut *C.int) C.int { 371 | s3vfsFile := (*C.s3vfsFile)(unsafe.Pointer(cfile)) 372 | 373 | fileID := uint64(s3vfsFile.id) 374 | 375 | fileMux.Lock() 376 | file := fileMap[fileID] 377 | fileMux.Unlock() 378 | 379 | if file == nil { 380 | return errToC(GenericError) 381 | } 382 | 383 | locked, err := file.CheckReservedLock() 384 | if err != nil { 385 | return errToC(err) 386 | } 387 | 388 | if locked { 389 | *pResOut = C.int(0) 390 | } else { 391 | *pResOut = C.int(1) 392 | } 393 | 394 | return sqliteOK 395 | } 396 | 397 | //export goVFSSectorSize 398 | func goVFSSectorSize(cfile *C.sqlite3_file) C.int { 399 | s3vfsFile := (*C.s3vfsFile)(unsafe.Pointer(cfile)) 400 | 401 | fileID := uint64(s3vfsFile.id) 402 | 403 | fileMux.Lock() 404 | file := fileMap[fileID] 405 | fileMux.Unlock() 406 | 407 | if file == nil { 408 | return 1024 409 | } 410 | 411 | return C.int(file.SectorSize()) 412 | } 413 | 414 | //export goVFSDeviceCharacteristics 415 | func goVFSDeviceCharacteristics(cfile *C.sqlite3_file) C.int { 416 | s3vfsFile := (*C.s3vfsFile)(unsafe.Pointer(cfile)) 417 | 418 | fileID := uint64(s3vfsFile.id) 419 | 420 | fileMux.Lock() 421 | file := fileMap[fileID] 422 | fileMux.Unlock() 423 | 424 | if file == nil { 425 | return 0 426 | } 427 | 428 | return C.int(file.DeviceCharacteristics()) 429 | } 430 | 431 | func vfsFromC(cvfs *C.sqlite3_vfs) ExtendedVFSv1 { 432 | vfsName := C.GoString(cvfs.zName) 433 | return vfsMap[vfsName] 434 | } 435 | 436 | func errToC(err error) C.int { 437 | if e, ok := err.(sqliteError); ok { 438 | return C.int(e.code) 439 | } 440 | return C.int(GenericError.code) 441 | } 442 | -------------------------------------------------------------------------------- /internal/sqlparser/sqlparser.go: -------------------------------------------------------------------------------- 1 | package sqlparser 2 | 3 | // import ( 4 | // "github.com/pingcap/parser" 5 | // "github.com/pingcap/parser/model" 6 | // "github.com/pingcap/parser/mysql" 7 | // ) 8 | 
9 | // func IsValidSQL(sqlStatement string) bool { 10 | // _, err := parser.New().ParseOneStmt(sqlStatement) 11 | // return err == nil 12 | // } 13 | -------------------------------------------------------------------------------- /internal/utils/const.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "crypto/rand" 5 | "math/big" 6 | 7 | "github.com/google/uuid" 8 | ) 9 | 10 | const VERSION = "v1.0" 11 | const EmptyNil = "nil" 12 | 13 | var u = uuid.New() 14 | 15 | const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" 16 | 17 | func stringWithCharset(length int) (string, error) { 18 | b := make([]byte, length) 19 | for i := range b { 20 | num, err := rand.Int(rand.Reader, big.NewInt(int64(len(charset)))) 21 | if err != nil { 22 | return "", err 23 | } 24 | b[i] = charset[num.Int64()] 25 | } 26 | return string(b), nil 27 | } 28 | -------------------------------------------------------------------------------- /internal/utils/environ.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "errors" 5 | "os" 6 | "strings" 7 | ) 8 | 9 | func split(s string) (string, string, error) { 10 | arr := strings.Split(s, "=") 11 | if len(arr) <= 0 { 12 | return "", "", errors.New("environ invalid") 13 | } else if len(arr) == 1 { 14 | return arr[0], "", nil 15 | } else { 16 | return arr[0], arr[1], nil 17 | } 18 | } 19 | 20 | // GetEnvironInfo get all environment map 21 | func GetEnvironInfo() map[string]string { 22 | env := make(map[string]string, 4) 23 | environ := os.Environ() 24 | for k := range environ { 25 | if k1, v1, err := split(environ[k]); err == nil { 26 | env[k1] = v1 27 | } 28 | } 29 | 30 | return env 31 | } 32 | 33 | var environInfo = GetEnvironInfo() 34 | 35 | // GetEnviron only to read environment 36 | func GetEnviron(name string) string { 37 | if v, ok := environInfo[name]; ok { 38 | return v 39 | } 40 | 41 | return EmptyNil 42 | } 43 | -------------------------------------------------------------------------------- /internal/utils/roundtrip.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "net/http" 5 | "path/filepath" 6 | ) 7 | 8 | type RoundTripper struct { 9 | referer string 10 | userAgent string 11 | } 12 | 13 | func (rt *RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { 14 | if rt.referer != "" { 15 | req.Header.Set("Referer", rt.referer) 16 | } 17 | 18 | if rt.userAgent != "" { 19 | req.Header.Set("User-Agent", rt.userAgent) 20 | } 21 | 22 | tr := http.DefaultTransport 23 | 24 | if req.URL.Scheme == "file" { 25 | path := req.URL.Path 26 | root := filepath.Dir(path) 27 | base := filepath.Base(path) 28 | tr = http.NewFileTransport(http.Dir(root)) 29 | req.URL.Path = base 30 | } 31 | 32 | return tr.RoundTrip(req) 33 | } 34 | -------------------------------------------------------------------------------- /internal/utils/secretkey.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "crypto/rc4" 5 | "encoding/hex" 6 | "fmt" 7 | "strings" 8 | ) 9 | 10 | var slatKey = GetEnviron("LESSDB_SLATKEY") 11 | 12 | func rc4Cipher(plaintext, key []byte) ([]byte, error) { 13 | if len(key) < 32 { 14 | zerokey := make([]byte, 32-len(key)) 15 | key = append(key, zerokey...) 
16 | } 17 | 18 | cipher, err := rc4.NewCipher(key) 19 | if err != nil { 20 | return nil, err 21 | } 22 | 23 | encrypted := make([]byte, len(plaintext)) 24 | cipher.XORKeyStream(encrypted, plaintext) 25 | return encrypted, nil 26 | } 27 | 28 | func rc4Open(encrypted, key []byte) ([]byte, error) { 29 | if len(key) < 32 { 30 | zerokey := make([]byte, 32-len(key)) 31 | key = append(key, zerokey...) 32 | } 33 | 34 | cipher, err := rc4.NewCipher(key) 35 | if err != nil { 36 | return nil, err 37 | } 38 | 39 | decrypted := make([]byte, len(encrypted)) 40 | cipher.XORKeyStream(decrypted, encrypted) 41 | 42 | return decrypted, nil 43 | } 44 | 45 | // NewRandomName create random name 46 | func NewRandomName() (string, string, error) { 47 | return randomName(20) 48 | } 49 | 50 | // NewRandomKey create random key 51 | func NewRandomKey() (string, string, error) { 52 | return randomName(64) 53 | } 54 | 55 | func VerifyKey(ciphertexthex string) (string, bool) { 56 | ciphertext, err := hex.DecodeString(ciphertexthex) 57 | if err != nil { 58 | return "", false 59 | } 60 | 61 | plaintext, err := rc4Open(ciphertext, []byte(slatKey)) 62 | if err != nil { 63 | return "", false 64 | } 65 | 66 | nameArr := strings.Split(string(plaintext), "-") 67 | if len(nameArr) == 3 && nameArr[1] == slatKey { 68 | return nameArr[2], true 69 | } 70 | return "", false 71 | } 72 | 73 | func randomName(l int) (string, string, error) { 74 | name, err := stringWithCharset(l) 75 | if err != nil { 76 | return "", "", err 77 | } 78 | 79 | ciphertext, err := rc4Cipher( 80 | []byte(fmt.Sprintf("%v-%v-%v", VERSION, slatKey, name)), 81 | []byte(slatKey)) 82 | return hex.EncodeToString(ciphertext), name, err 83 | } 84 | -------------------------------------------------------------------------------- /internal/utils/sum.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha256" 6 | "encoding/hex" 7 | "io" 8 | "os" 9 | ) 10 | 11 | // SHA256Sum is a SHA256 hash. 12 | type SHA256Sum []byte 13 | 14 | // String returns the hex-encoded string representation of the SHA256Sum. 15 | func (s SHA256Sum) String() string { 16 | return hex.EncodeToString(s) 17 | } 18 | 19 | // Equals returns true if the SHA256Sum is equal to the other. 20 | func (s SHA256Sum) Equals(other SHA256Sum) bool { 21 | return bytes.Equal(s, other) 22 | } 23 | 24 | // FileSHA256 returns the SHA256 hash of the file at the given path. 
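// For example (with a hypothetical path):
//
//	sum, err := FileSHA256("snapshot.lessdb")
//	if err == nil {
//	    fmt.Println(sum.String()) // hex-encoded digest
//	}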
25 | func FileSHA256(filePath string) (SHA256Sum, error) { 26 | file, err := os.Open(filePath) 27 | if err != nil { 28 | return nil, err 29 | } 30 | defer file.Close() 31 | 32 | hasher := sha256.New() 33 | if _, err := io.Copy(hasher, file); err != nil { 34 | return nil, err 35 | } 36 | 37 | hash := hasher.Sum(nil) 38 | return SHA256Sum(hash), nil 39 | } 40 | -------------------------------------------------------------------------------- /internal/utils/sum_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha256" 6 | "io" 7 | "io/ioutil" 8 | "os" 9 | "testing" 10 | ) 11 | 12 | func TestSHA256SumString(t *testing.T) { 13 | tests := []struct { 14 | name string 15 | input SHA256Sum 16 | expected string 17 | }{ 18 | { 19 | name: "empty hash", 20 | input: SHA256Sum([]byte{}), 21 | expected: "", 22 | }, 23 | { 24 | name: "non-empty hash", 25 | input: SHA256Sum([]byte{0x12, 0x34, 0x56}), 26 | expected: "123456", 27 | }, 28 | } 29 | 30 | for _, tt := range tests { 31 | t.Run(tt.name, func(t *testing.T) { 32 | output := tt.input.String() 33 | if output != tt.expected { 34 | t.Errorf("Expected: %s, got: %s", tt.expected, output) 35 | } 36 | }) 37 | } 38 | } 39 | 40 | func TestSHA256SumEquals(t *testing.T) { 41 | tests := []struct { 42 | name string 43 | input1 SHA256Sum 44 | input2 SHA256Sum 45 | expected bool 46 | }{ 47 | { 48 | name: "equal hashes", 49 | input1: SHA256Sum([]byte{0x12, 0x34, 0x56}), 50 | input2: SHA256Sum([]byte{0x12, 0x34, 0x56}), 51 | expected: true, 52 | }, 53 | { 54 | name: "unequal hashes", 55 | input1: SHA256Sum([]byte{0x12, 0x34, 0x56}), 56 | input2: SHA256Sum([]byte{0x12, 0x34, 0x57}), 57 | expected: false, 58 | }, 59 | { 60 | name: "unequal hashes with one being nil", 61 | input1: SHA256Sum([]byte{0x12, 0x34, 0x56}), 62 | input2: nil, 63 | expected: false, 64 | }, 65 | } 66 | 67 | for _, tt := range tests { 68 | t.Run(tt.name, func(t *testing.T) { 69 | output := tt.input1.Equals(tt.input2) 70 | if output != tt.expected { 71 | t.Errorf("Expected: %v, got: %v", tt.expected, output) 72 | } 73 | }) 74 | } 75 | } 76 | 77 | func TestFileSha256(t *testing.T) { 78 | data := []byte("Test file content") 79 | tempFileName := mustWriteDataTempFile(data) 80 | defer os.Remove(tempFileName) 81 | 82 | // Calculate the SHA256 sum of the file contents using crypto/sha256 calls 83 | hasher := sha256.New() 84 | if _, err := io.Copy(hasher, bytes.NewReader(data)); err != nil { 85 | t.Fatalf("Error calculating hash with crypto/sha256: %v", err) 86 | } 87 | expectedHash := hasher.Sum(nil) 88 | 89 | // Call fileSha256 and check that it returns the same hash as the direct call 90 | hash, err := FileSHA256(tempFileName) 91 | if err != nil { 92 | t.Fatalf("Error calling fileSha256: %v", err) 93 | } 94 | 95 | if !hash.Equals(SHA256Sum(expectedHash)) { 96 | t.Errorf("Expected: %x, got: %s", expectedHash, hash) 97 | } 98 | } 99 | 100 | func mustWriteDataTempFile(data []byte) string { 101 | tempFile, err := ioutil.TempFile("", "uploader_test") 102 | if err != nil { 103 | panic("Error creating temp file: " + err.Error()) 104 | } 105 | 106 | if _, err := tempFile.Write(data); err != nil { 107 | panic("Error writing to temp file: " + err.Error()) 108 | } 109 | 110 | if err := tempFile.Close(); err != nil { 111 | panic("Error closing temp file: " + err.Error()) 112 | } 113 | 114 | return tempFile.Name() 115 | } 116 | -------------------------------------------------------------------------------- 
/internal/vfsextend/diskcache.go: -------------------------------------------------------------------------------- 1 | package vfsextend 2 | 3 | import ( 4 | "github.com/linkxzhou/LessDB/internal/prom" 5 | "github.com/linkxzhou/LessDB/internal/utils" 6 | 7 | "fmt" 8 | "os" 9 | "strconv" 10 | "sync" 11 | "time" 12 | ) 13 | 14 | const DefaultPageSize = 1 << 12 15 | const DefaultNoCacheSize = -1 16 | 17 | var envPageSize = utils.GetEnviron("LESSDB_CACHE_PAGESIZE") 18 | var envPeriodStr = utils.GetEnviron("LESSDB_CACHE_PERIOD") 19 | var defaultPeriod = 1000 * time.Millisecond 20 | 21 | type ( 22 | DiskCacheHandler struct { 23 | PageSize int 24 | 25 | fileHandler FileHandler 26 | 27 | fileName string 28 | fileSize int64 29 | fileSizeUpdateTime time.Time 30 | 31 | f *os.File 32 | pages sync.Map 33 | 34 | cacheEtag string 35 | cacheHit int 36 | cacheMiss int 37 | 38 | rwMutex sync.RWMutex 39 | } 40 | 41 | FileHandler func(string) (*os.File, error) 42 | ) 43 | 44 | func NewDiskCache(fileHandler FileHandler, fileSize int64) *DiskCacheHandler { 45 | h := &DiskCacheHandler{ 46 | fileHandler: fileHandler, 47 | fileSize: fileSize, 48 | } 49 | 50 | if h.PageSize < 1 { 51 | // Set page size 52 | h.PageSize = DefaultPageSize 53 | pageSize, err := strconv.ParseInt(envPageSize, 10, 64) 54 | if err == nil { 55 | h.PageSize = int(pageSize) 56 | } 57 | } 58 | 59 | if peroid, err := strconv.ParseInt(envPeriodStr, 10, 64); err == nil { 60 | defaultPeriod = time.Duration(peroid) * time.Millisecond 61 | } 62 | 63 | return h 64 | } 65 | 66 | func (h *DiskCacheHandler) newFileCache(etag string) (err error) { 67 | h.rwMutex.Lock() 68 | defer h.rwMutex.Unlock() 69 | 70 | if h.fileHandler != nil { 71 | oldf := h.f 72 | h.f, err = h.fileHandler(fmt.Sprintf("%v.%v", h.fileName, etag)) 73 | if err == nil { 74 | h.cacheEtag = etag 75 | h.pages = *new(sync.Map) 76 | } 77 | if oldf != nil { 78 | oldf.Close() 79 | } 80 | } 81 | 82 | return err 83 | } 84 | 85 | func (h *DiskCacheHandler) Get(p []byte, off int64, fetcher VFSReadAt) (int, error) { 86 | // Cache filesize or size cache expire time > 1000ms 87 | if h.resize() { 88 | if _, sizeErr := h.Size(fetcher); sizeErr != nil { 89 | return 0, sizeErr 90 | } 91 | } 92 | 93 | t := prom.NewPromTrace(prom.RNameVFS, prom.TNameCacheGet) 94 | defer t.SysDurations() 95 | 96 | // Create new cache 97 | etag := fetcher.Etag() 98 | if h.EtagModify(etag) || h.f == nil { 99 | if err := h.newFileCache(etag); err != nil { 100 | return 0, err 101 | } 102 | } 103 | 104 | startPage, endPage := h.pagesForRange(off, len(p)) 105 | 106 | firstMissingPage := int64(-1) 107 | lastMissingPage := int64(-1) 108 | for i := int64(startPage); i <= endPage; i++ { 109 | if v, ok := h.pages.Load(i); ok && v.(bool) { 110 | continue 111 | } 112 | if firstMissingPage < 0 { 113 | firstMissingPage = i 114 | } 115 | if lastMissingPage < i { 116 | lastMissingPage = i 117 | } 118 | } 119 | 120 | lastPage := h.fileSize / int64(h.PageSize) 121 | if firstMissingPage >= 0 { 122 | t.Code = prom.CodeCacheMiss 123 | h.cacheMiss++ 124 | pageCount := (lastMissingPage + 1) - firstMissingPage 125 | size := pageCount * int64(h.PageSize) 126 | if lastMissingPage == lastPage { 127 | size = size - int64(h.PageSize) + (h.fileSize % int64(h.PageSize)) 128 | } 129 | offset := firstMissingPage * int64(h.PageSize) 130 | if h.fileSize >= 0 && h.fileSize < (size+offset) { 131 | size = h.fileSize - offset 132 | } 133 | buffer := make([]byte, size) 134 | n, readAtErr := fetcher.ReadAt(buffer, offset) 135 | if readAtErr != nil { 136 | return 
0, readAtErr 137 | } 138 | 139 | // ReadAt has modify etag 140 | etag = fetcher.Etag() 141 | if h.EtagModify(etag) { 142 | h.cacheEtag = etag 143 | h.fileSize = DefaultNoCacheSize 144 | return 0, missCacheErr 145 | } 146 | 147 | buffer = buffer[:n] 148 | if wn, writeErr := h.f.WriteAt(buffer, offset); writeErr != nil && wn != n { 149 | return 0, writeErr 150 | } 151 | fullPagesRead := n / h.PageSize 152 | for i := int64(0); i < int64(fullPagesRead); i++ { 153 | h.pages.Store(firstMissingPage+i, true) 154 | } 155 | } else { 156 | t.Code = prom.CodeCacheHit 157 | h.cacheHit++ 158 | } 159 | 160 | return h.f.ReadAt(p, off) 161 | } 162 | 163 | func (h *DiskCacheHandler) Size(fetcher VFSReadAt) (int64, error) { 164 | t := prom.NewPromTrace(prom.RNameVFS, prom.TNameCacheSize) 165 | defer t.SysDurations() 166 | 167 | if h.resize() { 168 | t.Code = prom.CodeCacheMiss 169 | fileSize, sizeErr := fetcher.Size() 170 | if sizeErr != nil { 171 | return 0, sizeErr 172 | } 173 | h.fileSize = fileSize 174 | h.fileSizeUpdateTime = time.Now() 175 | } else { 176 | t.Code = prom.CodeCacheHit 177 | } 178 | 179 | return h.fileSize, nil 180 | } 181 | 182 | func (h *DiskCacheHandler) EtagModify(etag string) bool { 183 | return h.cacheEtag != etag 184 | } 185 | 186 | func (h *DiskCacheHandler) Set(name string) { 187 | h.fileName = name 188 | } 189 | 190 | func (h *DiskCacheHandler) pagesForRange(offset int64, size int) (startPage, endPage int64) { 191 | startPage = offset / int64(h.PageSize) 192 | endPage = (offset+int64(size))/int64(h.PageSize) + 1 193 | return startPage, endPage 194 | } 195 | 196 | func (h *DiskCacheHandler) resize() bool { 197 | return h.fileSize <= DefaultNoCacheSize || 198 | time.Since(h.fileSizeUpdateTime) > defaultPeriod 199 | } 200 | -------------------------------------------------------------------------------- /internal/vfsextend/diskcache_test.go: -------------------------------------------------------------------------------- 1 | package vfsextend 2 | 3 | import ( 4 | "bytes" 5 | "crypto/rand" 6 | "io" 7 | "os" 8 | "testing" 9 | ) 10 | 11 | func TestDiskCache(t *testing.T) { 12 | absTestDir, _ := os.Getwd() 13 | backingFile, err := os.CreateTemp(absTestDir, "diskcache-test") 14 | if err != nil { 15 | t.Fatal(err) 16 | } 17 | defer os.RemoveAll(backingFile.Name()) 18 | 19 | // make sure filesize isn't a pagesize multiple 20 | fileSize := int64(1<<17 + 37) 21 | _, err = io.Copy(backingFile, io.LimitReader(rand.Reader, int64(fileSize))) 22 | if err != nil { 23 | t.Fatal(err) 24 | } 25 | 26 | cache := NewDiskCache(func(string) (*os.File, error) { 27 | return backingFile, nil 28 | }, fileSize) 29 | cache.Set(backingFile.Name()) 30 | // 1<<17 / 1<<13 == 16 pages 31 | cache.PageSize = 1 << 13 32 | 33 | readAndCheck := func(p []byte, off int64) (int, error) { 34 | fromBacking := make([]byte, len(p)) 35 | cn, cacheErr := cache.Get(p, off, readerAt{ 36 | readAt: backingFile.ReadAt, 37 | readSize: func() (int64, error) { 38 | return fileSize, nil 39 | }, 40 | }) 41 | bn, backingErr := backingFile.ReadAt(fromBacking, off) 42 | if cacheErr != backingErr { 43 | t.Fatalf("Fetch error mismatch: cache_err=%s vs backing_err=%s", cacheErr, backingErr) 44 | } 45 | 46 | if cn != bn { 47 | t.Fatalf("fetch count mismatch cache_n=%d backing_n=%d", cn, bn) 48 | } 49 | 50 | if !bytes.Equal(p[:cn], fromBacking[:bn]) { 51 | t.Fatalf("fetch mismatch %+v vs %+v", p[:cn], fromBacking[:bn]) 52 | } 53 | return cn, cacheErr 54 | } 55 | 56 | checks := []struct { 57 | start int64 58 | size int 59 | cacheHit bool 60 | }{ 
61 | {0, 100, false}, 62 | // read within a single page 63 | {1<<13 + 6, 100, false}, 64 | // read again within that page, see that we cache hit 65 | {1<<13 + 6, 100, true}, 66 | // read again within that page in a different part of the page, see that we cache hit 67 | {1<<13 + 106, 100, true}, 68 | // read again exactly on the page boundry, see that we hit 69 | {1 << 13, 1<<13 - 1, true}, 70 | // read across multiple pages 71 | {1<<13 + 5, 2<<13 + 5, false}, 72 | // read again see that we cache hit 73 | {1<<13 + 5, 2<<13 + 5, true}, 74 | // read up to last byte 75 | {fileSize - 7, 7, false}, 76 | } 77 | 78 | for i, check := range checks { 79 | p := make([]byte, check.size) 80 | beforeHit := cache.cacheHit 81 | beforeMiss := cache.cacheMiss 82 | _, err := readAndCheck(p, int64(check.start)) 83 | if err != nil { 84 | t.Fatalf("[%d] read err: %s", i, err) 85 | } 86 | if check.cacheHit { 87 | if cache.cacheHit != beforeHit+1 { 88 | t.Fatalf("[%d] expected cache hit but was not start:%d size:%d", i, check.start, check.size) 89 | } 90 | } else { 91 | if cache.cacheMiss != beforeMiss+1 { 92 | t.Fatalf("[%d] expected cache miss but was not start:%d size:%d", i, check.start, check.size) 93 | } 94 | } 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /internal/vfsextend/httprange.go: -------------------------------------------------------------------------------- 1 | package vfsextend 2 | 3 | import ( 4 | "github.com/linkxzhou/LessDB/internal/s3" 5 | 6 | "errors" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | "strconv" 11 | "strings" 12 | ) 13 | 14 | var invalidContentRangeErr = errors.New("invalid Content-Range response") 15 | var missCacheErr = errors.New("miss Cache") 16 | 17 | type Option interface { 18 | set(*RangeReader) 19 | } 20 | 21 | type roundTripperOption struct { 22 | r http.RoundTripper 23 | } 24 | 25 | func (o *roundTripperOption) set(rr *RangeReader) { 26 | rr.roundTripper = o.r 27 | } 28 | 29 | func WithRoundTripper(r http.RoundTripper) Option { 30 | return &roundTripperOption{ 31 | r: r, 32 | } 33 | } 34 | 35 | type cacheHandlerOption struct { 36 | h CacheHandler 37 | } 38 | 39 | func (o *cacheHandlerOption) set(rr *RangeReader) { 40 | rr.cacheHandler = o.h 41 | } 42 | 43 | func WithCacheHandler(c CacheHandler) Option { 44 | return &cacheHandlerOption{ 45 | h: c, 46 | } 47 | } 48 | 49 | type uriHandlerOption struct { 50 | h s3.URIHandler 51 | } 52 | 53 | func (o *uriHandlerOption) set(rr *RangeReader) { 54 | rr.uriHandler = o.h 55 | } 56 | 57 | func WithURIHandler(u s3.URIHandler) Option { 58 | return &uriHandlerOption{ 59 | h: u, 60 | } 61 | } 62 | 63 | type CacheHandler interface { 64 | Get(p []byte, off int64, fetcher VFSReadAt) (int, error) 65 | Size(fetcher VFSReadAt) (int64, error) 66 | Set(fileName string) 67 | } 68 | 69 | type RangeReader struct { 70 | name string 71 | roundTripper http.RoundTripper 72 | cacheHandler CacheHandler 73 | uriHandler s3.URIHandler 74 | lastEtag string // Etag 75 | 76 | fetcher readerAt 77 | } 78 | 79 | func New(name string, opts ...Option) *RangeReader { 80 | rr := RangeReader{ 81 | name: name, 82 | } 83 | 84 | rr.fetcher = readerAt{ 85 | readAt: rr.rawReadAt, 86 | readSize: rr.rawSize, 87 | readEtag: rr.rawEtag, 88 | } 89 | 90 | for _, opt := range opts { 91 | opt.set(&rr) 92 | } 93 | 94 | return &rr 95 | } 96 | 97 | func (rr *RangeReader) CacheHandler() CacheHandler { 98 | cacheHandler := rr.cacheHandler 99 | if cacheHandler == nil { 100 | cacheHandler = &nopCacheHandler{} 101 | } 102 | 103 | 
cacheHandler.Set(rr.name) 104 | return cacheHandler 105 | } 106 | 107 | func (rr *RangeReader) ReadAt(p []byte, off int64) (n int, err error) { 108 | if n, err = rr.CacheHandler().Get(p, off, rr.fetcher); err == missCacheErr { 109 | return rr.CacheHandler().Get(p, off, rr.fetcher) 110 | } 111 | return 112 | } 113 | 114 | func (rr *RangeReader) Size() (n int64, err error) { 115 | return rr.CacheHandler().Size(rr.fetcher) 116 | } 117 | 118 | func (rr *RangeReader) rawEtag() string { 119 | return rr.lastEtag 120 | } 121 | 122 | func (rr *RangeReader) rawReadAt(p []byte, off int64) (n int, err error) { 123 | url, err := rr.uriHandler.URI(rr.name) 124 | if err != nil { 125 | return 0, err 126 | } 127 | 128 | fetchSize := len(p) 129 | 130 | req, err := http.NewRequest("GET", url, nil) 131 | if err != nil { 132 | return 0, err 133 | } 134 | 135 | req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", off, off+int64(fetchSize-1))) 136 | 137 | resp, err := rr.client().Do(req) 138 | if err != nil { 139 | return 0, err 140 | } 141 | 142 | defer resp.Body.Close() 143 | 144 | n, err = io.ReadFull(resp.Body, p) 145 | if err == io.ErrUnexpectedEOF { 146 | return n, io.EOF 147 | } else if err != nil { 148 | return n, err 149 | } else { 150 | // pass 151 | } 152 | 153 | etag := strings.ReplaceAll(resp.Header.Get("Etag"), `"`, "") 154 | if etag != "" { 155 | rr.lastEtag = etag 156 | } 157 | 158 | return n, nil 159 | } 160 | 161 | func (rr *RangeReader) client() *http.Client { 162 | if rr.roundTripper == nil { 163 | return http.DefaultClient 164 | } 165 | 166 | return &http.Client{Transport: rr.roundTripper} 167 | } 168 | 169 | func (rr *RangeReader) rawSize() (n int64, err error) { 170 | url, err := rr.uriHandler.URI(rr.name) 171 | if err != nil { 172 | return 0, err 173 | } 174 | 175 | req, err := http.NewRequest("GET", url, nil) 176 | if err != nil { 177 | return 0, err 178 | } 179 | 180 | req.Header.Set("Range", "bytes=0-0") 181 | resp, err := rr.client().Do(req) 182 | if err != nil { 183 | return 0, err 184 | } 185 | 186 | io.Copy(io.Discard, resp.Body) 187 | defer resp.Body.Close() 188 | 189 | rangeHeader := resp.Header.Get("Content-Range") 190 | rangeFields := strings.Fields(rangeHeader) 191 | if len(rangeFields) != 2 { 192 | return 0, invalidContentRangeErr 193 | } 194 | 195 | if strings.ToLower(rangeFields[0]) != "bytes" { 196 | return 0, invalidContentRangeErr 197 | } 198 | 199 | amts := strings.Split(rangeFields[1], "/") 200 | 201 | if len(amts) != 2 { 202 | return 0, invalidContentRangeErr 203 | } 204 | 205 | if amts[1] == "*" { 206 | return 0, invalidContentRangeErr 207 | } 208 | 209 | n, err = strconv.ParseInt(amts[1], 10, 64) 210 | if err != nil { 211 | return 0, invalidContentRangeErr 212 | } 213 | 214 | etag := strings.ReplaceAll(resp.Header.Get("Etag"), `"`, "") 215 | if etag != "" { 216 | rr.lastEtag = etag 217 | } 218 | 219 | return n, nil 220 | } 221 | 222 | type VFSReadAt interface { 223 | io.ReaderAt 224 | Size() (int64, error) 225 | Etag() string 226 | } 227 | 228 | type readerAt struct { 229 | readAt func([]byte, int64) (int, error) 230 | readSize func() (int64, error) 231 | readEtag func() string 232 | } 233 | 234 | func (r readerAt) ReadAt(p []byte, off int64) (n int, err error) { 235 | return r.readAt(p, off) 236 | } 237 | 238 | func (r readerAt) Size() (n int64, err error) { 239 | return r.readSize() 240 | } 241 | 242 | func (r readerAt) Etag() string { 243 | if r.readEtag == nil { 244 | return "default" 245 | } 246 | 247 | return r.readEtag() 248 | } 249 | 
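
A minimal usage sketch for the `RangeReader` above (not part of the repository; it assumes it is compiled inside the LessDB module, since `vfsextend` and `s3` are internal packages, and the endpoint URL is a placeholder): construct a reader with `WithURIHandler` and read a byte range, which goes through `rawReadAt` as a single HTTP Range request.

```go
package main

import (
	"fmt"

	"github.com/linkxzhou/LessDB/internal/s3"
	"github.com/linkxzhou/LessDB/internal/vfsextend"
)

func main() {
	// Resolve "test.db" against an assumed HTTP prefix. Without WithCacheHandler
	// the nopCacheHandler is used, so every ReadAt/Size call hits the server directly.
	rr := vfsextend.New("test.db",
		vfsextend.WithURIHandler(s3.HttpURIHandler{PrefixURI: "http://localhost:8080"}),
	)

	size, err := rr.Size() // sends "Range: bytes=0-0" and parses the Content-Range total
	fmt.Println(size, err)

	buf := make([]byte, 100)
	n, err := rr.ReadAt(buf, 0) // sends "Range: bytes=0-99"
	fmt.Println(n, err)
}
```

Adding `WithCacheHandler(NewDiskCache(...))` switches the same calls to the page cache in diskcache.go, which only issues range requests for pages it has not seen yet.
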
-------------------------------------------------------------------------------- /internal/vfsextend/httprange_test.go: -------------------------------------------------------------------------------- 1 | package vfsextend 2 | 3 | import ( 4 | "github.com/linkxzhou/LessDB/internal/s3" 5 | 6 | "bytes" 7 | "crypto/rand" 8 | "io" 9 | "net/http" 10 | "net/http/httptest" 11 | "os" 12 | "path/filepath" 13 | "testing" 14 | ) 15 | 16 | func TestHTTPRangeRead(t *testing.T) { 17 | absTestDir, _ := os.Getwd() 18 | dir, err := os.MkdirTemp(absTestDir, "httprange-test") 19 | if err != nil { 20 | t.Fatal(err) 21 | } 22 | defer os.RemoveAll(dir) 23 | 24 | f, err := os.Create(filepath.Join(dir, "file")) 25 | if err != nil { 26 | t.Fatal(err) 27 | } 28 | 29 | fileSize := 1 << 17 30 | _, err = io.Copy(f, io.LimitReader(rand.Reader, int64(fileSize))) 31 | if err != nil { 32 | t.Fatal(err) 33 | } 34 | 35 | server := httptest.NewServer(http.FileServer(http.Dir(dir))) 36 | defer server.Close() 37 | 38 | rr := New("file", WithURIHandler(s3.HttpURIHandler{PrefixURI: server.URL})) 39 | 40 | checks := []struct { 41 | off int 42 | size int 43 | }{ 44 | {0, 0}, 45 | {0, 1 << 17}, 46 | {0, 1<<17 + 1}, 47 | {389, 687}, 48 | {389, 687}, 49 | {217, 548}, 50 | } 51 | 52 | for i, check := range checks { 53 | rrb := make([]byte, check.size) 54 | fb := make([]byte, check.size) 55 | 56 | rrn, rrErr := rr.ReadAt(rrb, int64(check.off)) 57 | fn, fErr := f.ReadAt(fb, int64(check.off)) 58 | 59 | if rrErr != fErr { 60 | t.Fatalf("[%d] read err mismatch: rr_err=%s f_err=%s", i, rrErr, fErr) 61 | } 62 | 63 | if rrn != fn { 64 | t.Fatalf("[%d] read len mismatch: rrn=%d fn=%d", i, rrn, fn) 65 | } 66 | 67 | if !bytes.Equal(rrb[:rrn], fb[:fn]) { 68 | t.Fatalf("[%d] read mismatch %+v vs %+v", i, rrb[:rrn], fb[:fn]) 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /internal/vfsextend/nopcache.go: -------------------------------------------------------------------------------- 1 | package vfsextend 2 | 3 | type nopCacheHandler struct { 4 | } 5 | 6 | func (h *nopCacheHandler) Get(p []byte, off int64, fetcher VFSReadAt) (int, error) { 7 | return fetcher.ReadAt(p, off) 8 | } 9 | 10 | func (h *nopCacheHandler) Size(fetcher VFSReadAt) (int64, error) { 11 | return fetcher.Size() 12 | } 13 | 14 | func (h *nopCacheHandler) Set(name string) { 15 | } 16 | -------------------------------------------------------------------------------- /internal/vfsextend/readme.md: -------------------------------------------------------------------------------- 1 | # errors 2 | 3 | ## 1、error: pointer is missing a nullability type specifier (_Nonnull, _Nullable, or _Null_unspecified) [-Werror,-Wnullability-completeness] 4 | 5 | answer: https://github.com/golang/go/issues/35247 6 | 7 | try: 8 | ``` 9 | export CGO_CPPFLAGS="-Wno-error -Wno-nullability-completeness -Wno-expansion-to-defined" 10 | ``` 11 | 12 | ## 2、_cgo_export.c:3:10: fatal error: 'stdlib.h' file not found 13 | 14 | upgrade golang version, this is error on <= go1.19 15 | 16 | ## 3、sqlite3vfs_test.go:33: SQL logic error 17 | 18 | please use sqlite absolute path -------------------------------------------------------------------------------- /internal/vfsextend/sqlite3vfs_http.go: -------------------------------------------------------------------------------- 1 | package vfsextend 2 | 3 | import ( 4 | "github.com/linkxzhou/LessDB/internal/s3" 5 | "github.com/linkxzhou/LessDB/internal/sqlite3vfs" 6 | 7 | "net/http" 8 | "strings" 9 | "sync" 10 | ) 
11 | 12 | var vfscache sync.Map 13 | 14 | type HttpVFS struct { 15 | CacheHandler CacheHandler 16 | URIHandler s3.URIHandler 17 | RoundTripper http.RoundTripper 18 | } 19 | 20 | func (vfs *HttpVFS) Open(name string, flags sqlite3vfs.OpenFlag) (sqlite3vfs.File, sqlite3vfs.OpenFlag, error) { 21 | var opts []Option 22 | if vfs.CacheHandler != nil { 23 | opts = append(opts, WithCacheHandler(vfs.CacheHandler)) 24 | } 25 | 26 | if vfs.RoundTripper != nil { 27 | opts = append(opts, WithRoundTripper(vfs.RoundTripper)) 28 | } 29 | 30 | if vfs.URIHandler != nil { 31 | opts = append(opts, WithURIHandler(vfs.URIHandler)) 32 | } 33 | 34 | if v, ok := vfscache.Load(name); ok { 35 | return v.(*httpFile), flags, nil 36 | } 37 | 38 | hf := &httpFile{rr: New(name, opts...)} 39 | vfscache.Store(name, hf) 40 | 41 | return hf, flags, nil 42 | } 43 | 44 | func (vfs *HttpVFS) Delete(name string, dirSync bool) error { 45 | return sqlite3vfs.PermissionsErrorDelete 46 | } 47 | 48 | func (vfs *HttpVFS) Access(name string, flag sqlite3vfs.AccessFlag) (bool, error) { 49 | // TODO: access has error 50 | if strings.HasSuffix(name, "-wal") || strings.HasSuffix(name, "-journal") { 51 | return false, nil 52 | } 53 | 54 | return true, nil 55 | } 56 | 57 | func (vfs *HttpVFS) FullPathname(name string) string { 58 | return name 59 | } 60 | 61 | type httpFile struct { 62 | rr *RangeReader 63 | } 64 | 65 | func (hf *httpFile) Close() error { 66 | return nil // TODO: close file 67 | } 68 | 69 | func (hf *httpFile) ReadAt(p []byte, off int64) (int, error) { 70 | return hf.rr.ReadAt(p, off) 71 | } 72 | 73 | func (hf *httpFile) WriteAt(b []byte, off int64) (n int, err error) { 74 | return 0, sqlite3vfs.PermissionsErrorWrite 75 | } 76 | 77 | func (hf *httpFile) Truncate(size int64) error { 78 | return sqlite3vfs.PermissionsErrorTruncate 79 | } 80 | 81 | func (hf *httpFile) Sync(flag sqlite3vfs.SyncType) error { 82 | return nil // TODO: add sync 83 | } 84 | 85 | func (hf *httpFile) FileSize() (int64, error) { 86 | return hf.rr.Size() 87 | } 88 | 89 | func (hf *httpFile) Lock(elock sqlite3vfs.LockType) error { 90 | return nil 91 | } 92 | 93 | func (hf *httpFile) Unlock(elock sqlite3vfs.LockType) error { 94 | return nil 95 | } 96 | 97 | func (hf *httpFile) CheckReservedLock() (bool, error) { 98 | return false, nil 99 | } 100 | 101 | func (hf *httpFile) SectorSize() int64 { 102 | return 0 103 | } 104 | 105 | func (hf *httpFile) DeviceCharacteristics() sqlite3vfs.DeviceCharacteristic { 106 | return sqlite3vfs.IocapImmutable 107 | } 108 | -------------------------------------------------------------------------------- /internal/vfsextend/sqlite3vfs_http_test.go: -------------------------------------------------------------------------------- 1 | package vfsextend 2 | 3 | import ( 4 | "github.com/google/go-cmp/cmp" 5 | "github.com/linkxzhou/LessDB/internal/s3" 6 | "github.com/linkxzhou/LessDB/internal/sqlite3vfs" 7 | _ "github.com/mattn/go-sqlite3" 8 | 9 | "database/sql" 10 | "net/http" 11 | "net/http/httptest" 12 | "os" 13 | "path/filepath" 14 | "strings" 15 | "testing" 16 | ) 17 | 18 | type FooRow struct { 19 | ID string 20 | Title string 21 | } 22 | 23 | func TestSqlite3vfsHTTP(t *testing.T) { 24 | absTestDir, _ := os.Getwd() 25 | dir, err := os.MkdirTemp(absTestDir, "sqlite3vfs_httpdir_") 26 | if err != nil { 27 | t.Fatal(err) 28 | } 29 | defer os.RemoveAll(dir) 30 | 31 | db, err := sql.Open("sqlite3", filepath.Join(dir, "test.db")) 32 | if err != nil { 33 | t.Fatal(err) 34 | } 35 | 36 | _, err = db.Exec(` 37 | CREATE TABLE IF NOT 
EXISTS foo ( 38 | id text NOT NULL PRIMARY KEY, 39 | title text 40 | )`) 41 | if err != nil { 42 | t.Fatal(err) 43 | } 44 | 45 | rows := []FooRow{ 46 | { 47 | ID: "415", 48 | Title: "romantic-swell", 49 | }, 50 | { 51 | ID: "610", 52 | Title: "ironically-gnarl", 53 | }, 54 | { 55 | ID: "768", 56 | Title: "biophysicist-straddled", 57 | }, 58 | } 59 | 60 | for _, row := range rows { 61 | _, err = db.Exec(`INSERT INTO foo (id, title) values (?, ?)`, row.ID, row.Title) 62 | if err != nil { 63 | t.Fatal(err) 64 | } 65 | } 66 | 67 | err = db.Close() 68 | if err != nil { 69 | t.Fatal(err) 70 | } 71 | 72 | server := httptest.NewServer(http.FileServer(http.Dir(dir))) 73 | 74 | cacheFile, err := os.Create(filepath.Join(dir, "cache")) 75 | if err != nil { 76 | t.Fatal(err) 77 | } 78 | 79 | vfs := HttpVFS{ 80 | CacheHandler: NewDiskCache(func(name string) (*os.File, error) { 81 | return cacheFile, nil 82 | }, -1), 83 | URIHandler: s3.HttpURIHandler{PrefixURI: server.URL}, 84 | } 85 | 86 | err = sqlite3vfs.RegisterVFS("httpvfs", &vfs) 87 | if err != nil { 88 | t.Fatal(err) 89 | } 90 | 91 | db, err = sql.Open("sqlite3", "test.db?vfs=httpvfs") 92 | if err != nil { 93 | t.Fatal(err) 94 | } 95 | 96 | rowIter, err := db.Query(`SELECT id, title from foo order by id`) 97 | if err != nil { 98 | t.Fatal(err) 99 | } 100 | 101 | var gotRows []FooRow 102 | 103 | for rowIter.Next() { 104 | var row FooRow 105 | err = rowIter.Scan(&row.ID, &row.Title) 106 | if err != nil { 107 | t.Fatal(err) 108 | } 109 | gotRows = append(gotRows, row) 110 | } 111 | 112 | err = rowIter.Close() 113 | if err != nil { 114 | t.Fatal(err) 115 | } 116 | 117 | if !cmp.Equal(rows, gotRows) { 118 | t.Fatal(cmp.Diff(rows, gotRows)) 119 | } 120 | 121 | err = db.Close() 122 | if err != nil { 123 | t.Fatal(err) 124 | } 125 | } 126 | 127 | func TestSqlite3vfsDelete(t *testing.T) { 128 | absTestDir, _ := os.Getwd() 129 | dir, err := os.MkdirTemp(absTestDir, "sqlite3vfs_httpdir_") 130 | if err != nil { 131 | t.Fatal(err) 132 | } 133 | defer os.RemoveAll(dir) 134 | 135 | db, err := sql.Open("sqlite3", filepath.Join(dir, "test.db")) 136 | if err != nil { 137 | t.Fatal(err) 138 | } 139 | 140 | _, err = db.Exec(` 141 | CREATE TABLE IF NOT EXISTS foo ( 142 | id text NOT NULL PRIMARY KEY, 143 | title text 144 | )`) 145 | if err != nil { 146 | t.Fatal(err) 147 | } 148 | 149 | rows := []FooRow{ 150 | { 151 | ID: "415", 152 | Title: "romantic-swell", 153 | }, 154 | { 155 | ID: "610", 156 | Title: "ironically-gnarl", 157 | }, 158 | { 159 | ID: "768", 160 | Title: "biophysicist-straddled", 161 | }, 162 | } 163 | 164 | for _, row := range rows { 165 | _, err = db.Exec(`INSERT INTO foo (id, title) values (?, ?)`, row.ID, row.Title) 166 | if err != nil { 167 | t.Fatal(err) 168 | } 169 | } 170 | 171 | err = db.Close() 172 | if err != nil { 173 | t.Fatal(err) 174 | } 175 | 176 | server := httptest.NewServer(http.FileServer(http.Dir(dir))) 177 | vfs := HttpVFS{ 178 | URIHandler: s3.HttpURIHandler{PrefixURI: server.URL}, 179 | } 180 | 181 | err = sqlite3vfs.RegisterVFS("httpvfs", &vfs) 182 | if err != nil { 183 | t.Fatal(err) 184 | } 185 | 186 | db, err = sql.Open("sqlite3", "test.db?vfs=httpvfs") 187 | if err != nil { 188 | t.Fatal(err) 189 | } 190 | 191 | _, err = db.Exec(`DELETE from foo where id = 415`) 192 | if err != nil && !strings.Contains(err.Error(), "attempt to write a readonly database") { 193 | t.Fatal(err) 194 | } 195 | 196 | err = db.Close() 197 | if err != nil { 198 | t.Fatal(err) 199 | } 200 | } 201 | 
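
Condensed from the test above into an application-side sketch (the registered VFS name, endpoint, cache path, and `foo` table are assumptions carried over from the test; like the test, this must live inside the LessDB module because the packages are internal): register `HttpVFS` once with a disk cache, then open the remote database read-only through the `vfs` query parameter.

```go
package main

import (
	"database/sql"
	"log"
	"os"

	"github.com/linkxzhou/LessDB/internal/s3"
	"github.com/linkxzhou/LessDB/internal/sqlite3vfs"
	"github.com/linkxzhou/LessDB/internal/vfsextend"
	_ "github.com/mattn/go-sqlite3"
)

func main() {
	vfs := vfsextend.HttpVFS{
		URIHandler: s3.HttpURIHandler{PrefixURI: "http://localhost:8080"}, // assumed endpoint
		CacheHandler: vfsextend.NewDiskCache(func(name string) (*os.File, error) {
			return os.Create("/tmp/" + name) // assumed cache location; name is "<db>.<etag>"
		}, -1), // -1: remote size unknown until the first range probe
	}

	// Register once per process; later sql.Open calls refer to it by name.
	if err := sqlite3vfs.RegisterVFS("httpvfs", &vfs); err != nil {
		log.Fatal(err)
	}

	// Writes are rejected by httpFile.WriteAt with PermissionsErrorWrite,
	// so the database is effectively read-only.
	db, err := sql.Open("sqlite3", "test.db?vfs=httpvfs")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var count int
	if err := db.QueryRow(`SELECT count(*) FROM foo`).Scan(&count); err != nil {
		log.Fatal(err)
	}
	log.Println("rows:", count)
}
```
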
-------------------------------------------------------------------------------- /internal/vfsextend/sqlite3vfs_tmpvfs.go: -------------------------------------------------------------------------------- 1 | package vfsextend 2 | 3 | import ( 4 | "github.com/linkxzhou/LessDB/internal/sqlite3vfs" 5 | 6 | "errors" 7 | "io" 8 | "os" 9 | "path/filepath" 10 | "strings" 11 | "sync/atomic" 12 | ) 13 | 14 | type TmpVFS struct { 15 | tmpdir string 16 | } 17 | 18 | func newTmpVFS(dir string) *TmpVFS { 19 | dir, err := os.MkdirTemp(dir, "sqlite3vfs_tmpdir_") 20 | if err != nil { 21 | panic(err) 22 | } 23 | 24 | return &TmpVFS{ 25 | tmpdir: dir, 26 | } 27 | } 28 | 29 | func (vfs *TmpVFS) Open(name string, flags sqlite3vfs.OpenFlag) (sqlite3vfs.File, sqlite3vfs.OpenFlag, error) { 30 | var ( 31 | f *os.File 32 | err error 33 | ) 34 | 35 | if name == "" { 36 | f, err = os.CreateTemp(vfs.tmpdir, "") 37 | if err != nil { 38 | return nil, 0, sqlite3vfs.CantOpenError 39 | } 40 | } else { 41 | fname := filepath.Join(vfs.tmpdir, name) 42 | if !strings.HasPrefix(fname, vfs.tmpdir) { 43 | return nil, 0, sqlite3vfs.PermError 44 | } 45 | var fileFlags int 46 | if flags&sqlite3vfs.OpenExclusive != 0 { 47 | fileFlags |= os.O_EXCL 48 | } 49 | if flags&sqlite3vfs.OpenCreate != 0 { 50 | fileFlags |= os.O_CREATE 51 | } 52 | if flags&sqlite3vfs.OpenReadOnly != 0 { 53 | fileFlags |= os.O_RDONLY 54 | } 55 | if flags&sqlite3vfs.OpenReadWrite != 0 { 56 | fileFlags |= os.O_RDWR 57 | } 58 | f, err = os.OpenFile(fname, fileFlags, 0600) 59 | if err != nil { 60 | return nil, 0, sqlite3vfs.CantOpenError 61 | } 62 | } 63 | 64 | tf := &TmpFile{f: f} 65 | return tf, flags, nil 66 | } 67 | 68 | func (vfs *TmpVFS) Delete(name string, dirSync bool) error { 69 | fname := filepath.Join(vfs.tmpdir, name) 70 | if !strings.HasPrefix(fname, vfs.tmpdir) { 71 | return errors.New("illegal path") 72 | } 73 | return os.Remove(fname) 74 | } 75 | 76 | func (vfs *TmpVFS) Access(name string, flag sqlite3vfs.AccessFlag) (bool, error) { 77 | fname := filepath.Join(vfs.tmpdir, name) 78 | if !strings.HasPrefix(fname, vfs.tmpdir) { 79 | return false, errors.New("illegal path") 80 | } 81 | 82 | exists := true 83 | _, err := os.Stat(fname) 84 | if err != nil && os.IsNotExist(err) { 85 | exists = false 86 | } else if err != nil { 87 | return false, err 88 | } 89 | 90 | if flag == sqlite3vfs.AccessExists { 91 | return exists, nil 92 | } 93 | 94 | return true, nil 95 | } 96 | 97 | func (vfs *TmpVFS) FullPathname(name string) string { 98 | fname := filepath.Join(vfs.tmpdir, name) 99 | if !strings.HasPrefix(fname, vfs.tmpdir) { 100 | return "" 101 | } 102 | 103 | return strings.TrimPrefix(fname, vfs.tmpdir) 104 | } 105 | 106 | type TmpFile struct { 107 | lockCount int64 108 | f *os.File 109 | } 110 | 111 | func (tf *TmpFile) Close() error { 112 | return tf.f.Close() 113 | } 114 | 115 | func (tf *TmpFile) ReadAt(p []byte, off int64) (n int, err error) { 116 | return tf.f.ReadAt(p, off) 117 | } 118 | 119 | func (tf *TmpFile) WriteAt(b []byte, off int64) (n int, err error) { 120 | return tf.f.WriteAt(b, off) 121 | } 122 | 123 | func (tf *TmpFile) Truncate(size int64) error { 124 | return tf.f.Truncate(size) 125 | } 126 | 127 | func (tf *TmpFile) Sync(flag sqlite3vfs.SyncType) error { 128 | return tf.f.Sync() 129 | } 130 | 131 | func (tf *TmpFile) FileSize() (int64, error) { 132 | cur, _ := tf.f.Seek(0, io.SeekCurrent) 133 | end, err := tf.f.Seek(0, io.SeekEnd) 134 | if err != nil { 135 | return 0, err 136 | } 137 | 138 | tf.f.Seek(cur, io.SeekStart) 139 | return 
end, nil 140 | } 141 | 142 | func (tf *TmpFile) Lock(elock sqlite3vfs.LockType) error { 143 | if elock == sqlite3vfs.LockNone { 144 | return nil 145 | } 146 | atomic.AddInt64(&tf.lockCount, 1) 147 | return nil 148 | } 149 | 150 | func (tf *TmpFile) Unlock(elock sqlite3vfs.LockType) error { 151 | if elock == sqlite3vfs.LockNone { 152 | return nil 153 | } 154 | atomic.AddInt64(&tf.lockCount, -1) 155 | return nil 156 | } 157 | 158 | func (tf *TmpFile) CheckReservedLock() (bool, error) { 159 | count := atomic.LoadInt64(&tf.lockCount) 160 | return count > 0, nil 161 | } 162 | 163 | func (tf *TmpFile) SectorSize() int64 { 164 | return 0 165 | } 166 | 167 | func (tf *TmpFile) DeviceCharacteristics() sqlite3vfs.DeviceCharacteristic { 168 | return 0 169 | } 170 | -------------------------------------------------------------------------------- /internal/vfsextend/sqlite3vfs_tmpvfs_test.go: -------------------------------------------------------------------------------- 1 | package vfsextend 2 | 3 | import ( 4 | "github.com/google/go-cmp/cmp" 5 | "github.com/linkxzhou/LessDB/internal/sqlite3vfs" 6 | _ "github.com/mattn/go-sqlite3" 7 | 8 | "database/sql" 9 | "fmt" 10 | "os" 11 | "testing" 12 | ) 13 | 14 | func TestSqlite3vfsTmp(t *testing.T) { 15 | absTestDir, _ := os.Getwd() 16 | vfs := newTmpVFS(absTestDir) 17 | 18 | vfsName := "tmpfs" 19 | err := sqlite3vfs.RegisterVFS(vfsName, vfs) 20 | if err != nil { 21 | t.Fatal(err) 22 | } 23 | 24 | db, err := sql.Open("sqlite3", fmt.Sprintf("foo.db?vfs=%s", vfsName)) 25 | if err != nil { 26 | t.Fatal(err) 27 | } 28 | 29 | _, err = db.Exec(`CREATE TABLE IF NOT EXISTS foo ( 30 | id text NOT NULL PRIMARY KEY, 31 | title text 32 | )`) 33 | if err != nil { 34 | t.Fatal(err) 35 | } 36 | 37 | rows := []FooRow{ 38 | { 39 | ID: "415", 40 | Title: "romantic-swell", 41 | }, 42 | { 43 | ID: "610", 44 | Title: "ironically-gnarl", 45 | }, 46 | { 47 | ID: "768", 48 | Title: "biophysicist-straddled", 49 | }, 50 | } 51 | 52 | for _, row := range rows { 53 | _, err = db.Exec(`INSERT INTO foo (id, title) values (?, ?)`, row.ID, row.Title) 54 | if err != nil { 55 | t.Fatal(err) 56 | } 57 | } 58 | 59 | rowIter, err := db.Query(`SELECT id, title from foo order by id`) 60 | if err != nil { 61 | t.Fatal(err) 62 | } 63 | 64 | var gotRows []FooRow 65 | 66 | for rowIter.Next() { 67 | var row FooRow 68 | err = rowIter.Scan(&row.ID, &row.Title) 69 | if err != nil { 70 | t.Fatal(err) 71 | } 72 | gotRows = append(gotRows, row) 73 | } 74 | err = rowIter.Close() 75 | if err != nil { 76 | t.Fatal(err) 77 | } 78 | 79 | if !cmp.Equal(rows, gotRows) { 80 | t.Fatal(cmp.Diff(rows, gotRows)) 81 | } 82 | 83 | err = db.Close() 84 | if err != nil { 85 | t.Fatal(err) 86 | } 87 | 88 | // reopen db 89 | db, err = sql.Open("sqlite3", fmt.Sprintf("foo.db?vfs=%s", vfsName)) 90 | if err != nil { 91 | t.Fatal(err) 92 | } 93 | 94 | rowIter, err = db.Query(`SELECT id, title from foo order by id`) 95 | if err != nil { 96 | t.Fatal(err) 97 | } 98 | 99 | gotRows = gotRows[:0] 100 | 101 | for rowIter.Next() { 102 | var row FooRow 103 | err = rowIter.Scan(&row.ID, &row.Title) 104 | if err != nil { 105 | t.Fatal(err) 106 | } 107 | gotRows = append(gotRows, row) 108 | } 109 | 110 | err = rowIter.Close() 111 | if err != nil { 112 | t.Fatal(err) 113 | } 114 | 115 | if !cmp.Equal(rows, gotRows) { 116 | t.Fatal(cmp.Diff(rows, gotRows)) 117 | } 118 | 119 | err = db.Close() 120 | if err != nil { 121 | t.Fatal(err) 122 | } 123 | } 124 | 125 | func TestSqlite3vfsTmpDelete(t *testing.T) { 126 | absTestDir, _ := os.Getwd() 127 
| vfs := newTmpVFS(absTestDir) 128 | 129 | vfsName := "tmpfs" 130 | err := sqlite3vfs.RegisterVFS(vfsName, vfs) 131 | if err != nil { 132 | t.Fatal(err) 133 | } 134 | 135 | db, err := sql.Open("sqlite3", fmt.Sprintf("foo.db?vfs=%s", vfsName)) 136 | if err != nil { 137 | t.Fatal(err) 138 | } 139 | 140 | _, err = db.Exec(`CREATE TABLE IF NOT EXISTS foo ( 141 | id text NOT NULL PRIMARY KEY, 142 | title text 143 | )`) 144 | if err != nil { 145 | t.Fatal(err) 146 | } 147 | 148 | rows := []FooRow{ 149 | { 150 | ID: "415", 151 | Title: "romantic-swell", 152 | }, 153 | { 154 | ID: "610", 155 | Title: "ironically-gnarl", 156 | }, 157 | { 158 | ID: "768", 159 | Title: "biophysicist-straddled", 160 | }, 161 | } 162 | 163 | for _, row := range rows { 164 | _, err = db.Exec(`INSERT INTO foo (id, title) values (?, ?)`, row.ID, row.Title) 165 | if err != nil { 166 | t.Fatal(err) 167 | } 168 | } 169 | 170 | rowIter, err := db.Query(`select id, title from foo order by id`) 171 | if err != nil { 172 | t.Fatal(err) 173 | } 174 | 175 | var gotRows []FooRow 176 | 177 | for rowIter.Next() { 178 | var row FooRow 179 | err = rowIter.Scan(&row.ID, &row.Title) 180 | if err != nil { 181 | t.Fatal(err) 182 | } 183 | gotRows = append(gotRows, row) 184 | } 185 | err = rowIter.Close() 186 | if err != nil { 187 | t.Fatal(err) 188 | } 189 | 190 | if !cmp.Equal(rows, gotRows) { 191 | t.Fatal(cmp.Diff(rows, gotRows)) 192 | } 193 | 194 | err = db.Close() 195 | if err != nil { 196 | t.Fatal(err) 197 | } 198 | 199 | // reopen db 200 | db, err = sql.Open("sqlite3", fmt.Sprintf("foo.db?vfs=%s", vfsName)) 201 | if err != nil { 202 | t.Fatal(err) 203 | } 204 | 205 | _, err = db.Exec(`DELETE from foo where id = 415`) 206 | if err != nil { 207 | t.Fatal(err) 208 | } 209 | 210 | err = db.Close() 211 | if err != nil { 212 | t.Fatal(err) 213 | } 214 | } 215 | -------------------------------------------------------------------------------- /tests/createdb_test.go: -------------------------------------------------------------------------------- 1 | package vfsextend 2 | 3 | import ( 4 | "fmt" 5 | 6 | _ "github.com/mattn/go-sqlite3" 7 | 8 | "database/sql" 9 | "testing" 10 | ) 11 | 12 | type FooRow struct { 13 | ID string 14 | Title string 15 | } 16 | 17 | func TestSqlite3vfsCreateDB100Tables(t *testing.T) { 18 | db, err := sql.Open("sqlite3", "test.db") 19 | if err != nil { 20 | t.Fatal(err) 21 | } 22 | 23 | for i := 0; i < 100; i++ { 24 | tableName := fmt.Sprintf("foo%v", i) 25 | _, err = db.Exec(` 26 | CREATE TABLE IF NOT EXISTS ` + tableName + ` ( 27 | id text NOT NULL PRIMARY KEY, 28 | title text 29 | )`) 30 | if err != nil { 31 | t.Fatal(err) 32 | } 33 | 34 | rows := []FooRow{ 35 | { 36 | ID: "415", 37 | Title: "romantic-swell", 38 | }, 39 | { 40 | ID: "610", 41 | Title: "ironically-gnarl", 42 | }, 43 | { 44 | ID: "768", 45 | Title: "biophysicist-straddled", 46 | }, 47 | } 48 | 49 | for _, row := range rows { 50 | _, err = db.Exec(`INSERT INTO `+tableName+` (id, title) values (?, ?)`, row.ID, row.Title) 51 | if err != nil { 52 | t.Fatal(err) 53 | } 54 | } 55 | } 56 | 57 | err = db.Close() 58 | if err != nil { 59 | t.Fatal(err) 60 | } 61 | } 62 | 63 | func TestSqlite3vfsCreateDBRow1W(t *testing.T) { 64 | db, err := sql.Open("sqlite3", "test100w.db") 65 | if err != nil { 66 | t.Fatal(err) 67 | } 68 | 69 | tableName := "rows1W_foo" 70 | _, err = db.Exec(` 71 | CREATE TABLE IF NOT EXISTS ` + tableName + ` ( 72 | id text NOT NULL PRIMARY KEY, 73 | title text 74 | )`) 75 | 76 | for i := 0; i < 10000; i++ { 77 | if err != nil { 78 | 
t.Fatal(err) 79 | } 80 | 81 | rows := []FooRow{ 82 | { 83 | ID: fmt.Sprintf("%v", i), 84 | Title: fmt.Sprintf("romantic-swell-%v", i), 85 | }, 86 | } 87 | 88 | for _, row := range rows { 89 | _, err = db.Exec(`INSERT INTO `+tableName+` (id, title) values (?, ?)`, row.ID, row.Title) 90 | if err != nil { 91 | t.Fatal(err) 92 | } 93 | } 94 | } 95 | 96 | err = db.Close() 97 | if err != nil { 98 | t.Fatal(err) 99 | } 100 | } 101 | 102 | -------------------------------------------------------------------------------- /tests/http_create.http: -------------------------------------------------------------------------------- 1 | ### create tables 2 | POST http://localhost:9000/api/v1/createdb 3 | Content-Type: application/json 4 | 5 | { 6 | "list": [ 7 | { 8 | "cmd": "CREATE TABLE IF NOT EXISTS foo1 (id text NOT NULL PRIMARY KEY,title text)", 9 | "args": [] 10 | }, 11 | { 12 | "cmd": "CREATE TABLE IF NOT EXISTS foo2 (id text NOT NULL PRIMARY KEY,title text)", 13 | "args": [] 14 | }, 15 | { 16 | "cmd": "INSERT INTO foo1 (id, title) values (?, ?)", 17 | "args": [415, "romantic-swell"] 18 | }, 19 | { 20 | "cmd": "INSERT INTO foo2 (id, title) values (?, ?)", 21 | "args": [415, "romantic-swell"] 22 | } 23 | ] 24 | } 25 | 26 | ### create tables 27 | POST https://service-69bnjtn2-1251014631.gz.tencentapigw.com.cn/api/v1/createdb 28 | Content-Type: application/json 29 | 30 | { 31 | "list": [ 32 | { 33 | "cmd": "CREATE TABLE IF NOT EXISTS foo1 (id text NOT NULL PRIMARY KEY,title text)", 34 | "args": [] 35 | }, 36 | { 37 | "cmd": "CREATE TABLE IF NOT EXISTS foo2 (id text NOT NULL PRIMARY KEY,title text)", 38 | "args": [] 39 | }, 40 | { 41 | "cmd": "INSERT INTO foo1 (id, title) values (?, ?)", 42 | "args": [415, "romantic-swell"] 43 | }, 44 | { 45 | "cmd": "INSERT INTO foo2 (id, title) values (?, ?)", 46 | "args": [415, "romantic-swell"] 47 | } 48 | ] 49 | } 50 | 51 | ### 52 | 53 | { 54 | "code": 0, 55 | "message": "OK", 56 | "data": { 57 | "readkey": "367fb8f59f92e9b1cb0820f68abbf3d51cd6be535a8d9110cf08a431f5b2f4f0", 58 | "writekey": "367fb8f59f92e9b1cb0820f6eee0d6c519ec9a6b5d8dd71e8b768f2cda8ae085f813f595a2ee01a70d064102ef94b010c6c8de28c76ebb569ee8bfb8b96ce91b0fab4f3e96d03a87961a0410" 59 | } 60 | } 61 | 62 | ### create databases 63 | curl -i -X POST -F "file=@test100w.db" 'https://service-69bnjtn2-1251014631.gz.tencentapigw.com.cn/api/v1/uploaddb' 64 | test100wdb: {"code":0,"message":"OK","data":{"readkey":"367fb8f59f92e9b1cb0820f69f85c4f23cd28f736fbe8729a82d8b66f491d5f0","writekey":"367fb8f59f92e9b1cb0820f6b2b9eed139f0e37047d5a712b22f8337ee89a9decf1dd59289d442c2201e5427f495d57597c9c717fa4d9a75a3ffa695f16ad95b178f163fe3c513aaf027061d"}} 65 | curl -i -X POST -F "file=@demo.db" 'http://localhost:9000/api/v1/uploaddb' 66 | demodb: {"code":0,"message":"OK","data":{"readkey":"367fb8f59f92e9b1cb0820f692e4c0d426a886043dd6ae1eac2fbc6cacf6cada","writekey":"367fb8f59f92e9b1cb0820f6acb8dff322ab8e7e7fd3801cb476fe0caca5c2c3ec3efb9eddef5fa42d35781c88d4b36be2d7a836d8419b77bff5aea0a967ea1d0391294494da28aa81130a22"}} -------------------------------------------------------------------------------- /tests/http_exec.http: -------------------------------------------------------------------------------- 1 | ### execute sql 2 | POST http://localhost:9000/api/v1/367fb8f59f92e9b1cb0820f692e4c0d426a886043dd6ae1eac2fbc6cacf6cada/execute?writekey= 3 | Content-Type: application/json 4 | 5 | { 6 | "list": [ 7 | { 8 | "cmd": "CREATE TABLE IF NOT EXISTS foo1 (id text NOT NULL PRIMARY KEY,title text)", 9 | "args": [] 10 | }, 11 | { 12 | 
"cmd": "CREATE TABLE IF NOT EXISTS foo2 (id text NOT NULL PRIMARY KEY,title text)", 13 | "args": [] 14 | }, 15 | { 16 | "cmd": "INSERT INTO foo1 (id, title) values (?, ?)", 17 | "args": [1, "unique-title-1"] 18 | }, 19 | { 20 | "cmd": "INSERT INTO foo1 (id, title) values (?, ?)", 21 | "args": [2, "unique-title-2"] 22 | }, 23 | { 24 | "cmd": "INSERT INTO foo1 (id, title) values (?, ?)", 25 | "args": [3, "unique-title-3"] 26 | }, 27 | { 28 | "cmd": "INSERT INTO foo1 (id, title) values (?, ?)", 29 | "args": [4, "unique-title-4"] 30 | }, 31 | { 32 | "cmd": "INSERT INTO foo1 (id, title) values (?, ?)", 33 | "args": [5, "unique-title-5"] 34 | }, 35 | { 36 | "cmd": "INSERT INTO foo2 (id, title) values (?, ?)", 37 | "args": [6, "unique-title-6"] 38 | }, 39 | { 40 | "cmd": "INSERT INTO foo2 (id, title) values (?, ?)", 41 | "args": [7, "unique-title-7"] 42 | }, 43 | { 44 | "cmd": "INSERT INTO foo2 (id, title) values (?, ?)", 45 | "args": [8, "unique-title-8"] 46 | }, 47 | { 48 | "cmd": "INSERT INTO foo2 (id, title) values (?, ?)", 49 | "args": [9, "unique-title-9"] 50 | }, 51 | { 52 | "cmd": "INSERT INTO foo2 (id, title) values (?, ?)", 53 | "args": [10, "unique-title-10"] 54 | } 55 | ], 56 | "writekey": "367fb8f59f92e9b1cb0820f6acb8dff322ab8e7e7fd3801cb476fe0caca5c2c3ec3efb9eddef5fa42d35781c88d4b36be2d7a836d8419b77bff5aea0a967ea1d0391294494da28aa81130a22" 57 | } 58 | 59 | ### execute sql 60 | POST https://service-69bnjtn2-1251014631.gz.tencentapigw.com.cn/api/v1/367fb8f59f92e9b1cb0820f69f85c4f23cd28f736fbe8729a82d8b66f491d5f0/execute?writekey= 61 | Content-Type: application/json 62 | 63 | { 64 | "list": [ 65 | { 66 | "cmd": "CREATE TABLE IF NOT EXISTS foo1 (id text NOT NULL PRIMARY KEY,title text)", 67 | "args": [] 68 | }, 69 | { 70 | "cmd": "CREATE TABLE IF NOT EXISTS foo2 (id text NOT NULL PRIMARY KEY,title text)", 71 | "args": [] 72 | }, 73 | { 74 | "cmd": "INSERT INTO foo1 (id, title) values (?, ?)", 75 | "args": [1, "unique-title-1"] 76 | }, 77 | { 78 | "cmd": "INSERT INTO foo1 (id, title) values (?, ?)", 79 | "args": [2, "unique-title-2"] 80 | }, 81 | { 82 | "cmd": "INSERT INTO foo1 (id, title) values (?, ?)", 83 | "args": [3, "unique-title-3"] 84 | }, 85 | { 86 | "cmd": "INSERT INTO foo1 (id, title) values (?, ?)", 87 | "args": [4, "unique-title-4"] 88 | }, 89 | { 90 | "cmd": "INSERT INTO foo1 (id, title) values (?, ?)", 91 | "args": [5, "unique-title-5"] 92 | }, 93 | { 94 | "cmd": "INSERT INTO foo2 (id, title) values (?, ?)", 95 | "args": [6, "unique-title-6"] 96 | }, 97 | { 98 | "cmd": "INSERT INTO foo2 (id, title) values (?, ?)", 99 | "args": [7, "unique-title-7"] 100 | }, 101 | { 102 | "cmd": "INSERT INTO foo2 (id, title) values (?, ?)", 103 | "args": [8, "unique-title-8"] 104 | }, 105 | { 106 | "cmd": "INSERT INTO foo2 (id, title) values (?, ?)", 107 | "args": [9, "unique-title-9"] 108 | }, 109 | { 110 | "cmd": "INSERT INTO foo2 (id, title) values (?, ?)", 111 | "args": [10, "unique-title-10"] 112 | } 113 | ], 114 | "writekey": "367fb8f59f92e9b1cb0820f6b2b9eed139f0e37047d5a712b22f8337ee89a9decf1dd59289d442c2201e5427f495d57597c9c717fa4d9a75a3ffa695f16ad95b178f163fe3c513aaf027061d" 115 | } 116 | 117 | ### execute sql 118 | POST http://localhost:9000/api/v1/367fb8f59f92e9b1cb0820f692e4c0d426a886043dd6ae1eac2fbc6cacf6cada/execute 119 | Content-Type: application/json 120 | 121 | { 122 | "list": [ 123 | { 124 | "cmd": "INSERT INTO csv (series_reference, period) values (?, ?)", 125 | "args": ["unique-title-10", "2024.10"] 126 | }, 127 | { 128 | "cmd": "INSERT INTO csv (series_reference, 
period) values (?, ?)", 129 | "args": ["unique-title-11", "2024.10"] 130 | } 131 | ], 132 | "writekey": "367fb8f59f92e9b1cb0820f6acb8dff322ab8e7e7fd3801cb476fe0caca5c2c3ec3efb9eddef5fa42d35781c88d4b36be2d7a836d8419b77bff5aea0a967ea1d0391294494da28aa81130a22" 133 | } 134 | 135 | ### 136 | { 137 | "code": 0, 138 | "message": "OK", 139 | "data": { 140 | "seqid": "367fb8f59f92e9b1cb0820f68abbf3d51cd6be535a8d9110cf08a431f5b2f4f0-1713092056336277000.redolog", 141 | "message": "Execute Pending", 142 | "status": 1 143 | } 144 | } 145 | 146 | ### query tables 147 | POST http://localhost:9000/api/v1/367fb8f59f92e9b1cb0820f68abbf3d51cd6be535a8d9110cf08a431f5b2f4f0/executelog 148 | Content-Type: application/json 149 | 150 | { 151 | "seqid": "367fb8f59f92e9b1cb0820f68abbf3d51cd6be535a8d9110cf08a431f5b2f4f0-1713092056336277000.redolog" 152 | } 153 | 154 | ### query tables 155 | POST http://localhost:9000/api/v1/367fb8f59f92e9b1cb0820f6efa3e09627ffb40c66d58942b2168b3cfd91dcc7/query 156 | Content-Type: application/json 157 | 158 | { 159 | "list": [ 160 | { 161 | "cmd": "select * from Csv where data_value = \"426\" limit 10" 162 | } 163 | ] 164 | } 165 | 166 | ### query redolog 167 | POST http://localhost:9000/api/v1/367fb8f59f92e9b1cb0820f6bb82f7ed11fbb404699fbb38cc1e9c36f68acfc4/query 168 | Content-Type: application/json 169 | 170 | { 171 | "list": [ 172 | { 173 | "cmd": "select * from foo1" 174 | }, 175 | { 176 | "cmd": "select * from foo2" 177 | } 178 | ] 179 | } -------------------------------------------------------------------------------- /tests/http_query.http: -------------------------------------------------------------------------------- 1 | ### test 2 | GET http://localhost:9000/api/v1/test 3 | Content-Type: application/json 4 | 5 | ### query tables 6 | GET http://localhost:9000/api/v1/367fb8f59f92e9b1cb0820f68abbf3d51cd6be535a8d9110cf08a431f5b2f4f0/tables?limit=10&offset=0 7 | Content-Type: application/json 8 | 9 | ### query tables 10 | GET http://localhost:9000/api/v1/367fb8f59f92e9b1cb0820f68abbf3d51cd6be535a8d9110cf08a431f5b2f4f0/tables/foo1/rows?limit=100&offset=0 11 | Content-Type: application/json 12 | 13 | ### query tables 14 | GET http://localhost:9000/api/v1/367fb8f59f92e9b1cb0820f68abbf3d51cd6be535a8d9110cf08a431f5b2f4f0/tables/foo2/rows?limit=100&offset=0 15 | Content-Type: application/json 16 | 17 | ### query tables 18 | GET http://localhost:9000/api/v1/367fb8f59f92e9b1cb0820f68abbf3d51cd6be535a8d9110cf08a431f5b2f4f0/tables/__LESSDBSYSTEM__/rows?limit=100&offset=0 19 | Content-Type: application/json -------------------------------------------------------------------------------- /tests/http_tigger.http: -------------------------------------------------------------------------------- 1 | ### tigger 2 | POST http://localhost:9000/api/v1/tigger/s3events 3 | Content-Type: application/json 4 | 5 | { 6 | "events":[ 7 | { 8 | "s3key": "367fb8f59f92e9b1cb0820f692e4c0d426a886043dd6ae1eac2fbc6cacf6cada-1713679280378842000.redolog" 9 | } 10 | ] 11 | } -------------------------------------------------------------------------------- /thirdparty/README.md: -------------------------------------------------------------------------------- 1 | # Goal 2 | 3 | (1)Serve small and medium-sized developers, reducing the cost of cloud services. 4 | (2)The goal is to build the most comprehensive public dataset worldwide. 
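
The GET requests in tests/http_query.http above, rewritten as a tiny Go client for reference (host, port, and readkey are copied from those samples and are placeholders for a real deployment):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// readkey taken from the sample responses in tests/http_create.http; replace with your own.
	const readkey = "367fb8f59f92e9b1cb0820f68abbf3d51cd6be535a8d9110cf08a431f5b2f4f0"
	url := fmt.Sprintf("http://localhost:9000/api/v1/%s/tables?limit=10&offset=0", readkey)

	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	// Per the sample responses above, the body is a JSON envelope: {"code":0,"message":"OK","data":...}
	fmt.Println(string(body))
}
```
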
-------------------------------------------------------------------------------- /ui/README.md: -------------------------------------------------------------------------------- 1 | # LessDB UI 2 | 3 | ## Development 4 | 5 | ```bash 6 | cp .env.sample .env # Duplicate sample environment variables 7 | 8 | yarn # Install dependencies 9 | yarn dev # Start the dev server 10 | ``` 11 | -------------------------------------------------------------------------------- /ui/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "lessdb", 3 | "version": "0.0.1", 4 | "description": "Serverless SQLite", 5 | "scripts": { 6 | "dev": "vite dev --port 3000", 7 | "build": "vite build", 8 | "preview": "vite preview" 9 | }, 10 | "devDependencies": { 11 | "@sveltejs/adapter-auto": "2.0.0", 12 | "@sveltejs/adapter-node": "^1.2.0", 13 | "@sveltejs/adapter-static": "^2.0.1", 14 | "@sveltejs/kit": "1.8.5", 15 | "svelte": "^3.55.1", 16 | "vite": "^4.1.4" 17 | }, 18 | "type": "module", 19 | "files": [ 20 | "build/" 21 | ] 22 | } -------------------------------------------------------------------------------- /ui/src/app.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | LessDB 9 | %sveltekit.head% 10 | 11 | 12 | 13 |
%sveltekit.body%
14 | 15 | 16 | 24 | 25 | -------------------------------------------------------------------------------- /ui/src/routes/+layout.js: -------------------------------------------------------------------------------- 1 | export const prerender = true; 2 | export const ssr = false; -------------------------------------------------------------------------------- /ui/src/routes/+layout.svelte: -------------------------------------------------------------------------------- 1 | 36 | 37 |
38 | 39 |
40 |
41 | 42 | 50 | -------------------------------------------------------------------------------- /ui/src/routes/+page.svelte: -------------------------------------------------------------------------------- 1 | 174 | 175 |
176 | {#if tablesLoading} 177 |

Loading...

178 | {:else} 179 |
    180 |
  • 181 | 182 |
  • 183 | {#each tables as table} 184 |
  • 185 | 191 |
  • 192 | {/each} 193 |
194 |
195 |
196 |

DBName :

197 | 202 | 208 | 211 |
212 | 213 | 214 | 215 | 218 | {#each columns as column} 219 | 220 | {/each} 221 | 222 | 223 | 224 | {#each rows as row} 225 | 226 | 229 | {#each Object.keys(row.fields) as key} 230 | 253 | {/each} 254 | 255 | {/each} 256 | 257 |
216 | 217 | {column.name}
227 | 228 | { 233 | row.fields[key].editable = true; 234 | }} 235 | > 236 | {#if !row.fields[key].editable} 237 |
238 | {row.fields[key].value} 239 |
240 | {:else} 241 | { 246 | if (row.new) return; 247 | row.fields[key].editable = false; 248 | }} 249 | placeholder={row.fields[key].placeholder} 250 | /> 251 | {/if} 252 |
258 |
259 | 262 | {/if} 263 |
264 | 265 | 414 | -------------------------------------------------------------------------------- /ui/src/routes/styles.css: -------------------------------------------------------------------------------- 1 | body>div { 2 | display: flex; 3 | flex-direction: column; 4 | flex-wrap: wrap; 5 | } 6 | 7 | * { 8 | font-size: medium; 9 | } 10 | 11 | body::-webkit-scrollbar { 12 | width: 1em; 13 | } 14 | 15 | body::-webkit-scrollbar-track { 16 | box-shadow: inset 0 0 6px rgba(0, 0, 0, 0.3); 17 | } -------------------------------------------------------------------------------- /ui/static/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/linkxzhou/LessDB/eab3e2fd53e8d4d5758cbf46a43084bb98958a8e/ui/static/favicon.png -------------------------------------------------------------------------------- /ui/svelte.config.js: -------------------------------------------------------------------------------- 1 | import adapter from '@sveltejs/adapter-static'; 2 | 3 | /** @type {import('@sveltejs/kit').Config} */ 4 | const config = { 5 | kit: { 6 | adapter: adapter({ 7 | out: 'build', 8 | precompress: false, 9 | fallback: 'index.html', // may differ from host to host 10 | polyfill: true, 11 | strict: false, 12 | }) 13 | }, 14 | }; 15 | 16 | export default config; 17 | -------------------------------------------------------------------------------- /ui/vite.config.js: -------------------------------------------------------------------------------- 1 | import { sveltekit } from '@sveltejs/kit/vite'; 2 | 3 | const config = { 4 | plugins: [sveltekit()] 5 | }; 6 | 7 | export default config; 8 | -------------------------------------------------------------------------------- /ui/yarn.lock: -------------------------------------------------------------------------------- 1 | # THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
2 | # yarn lockfile v1 3 | 4 | 5 | "@esbuild/android-arm64@0.18.20": 6 | version "0.18.20" 7 | resolved "https://registry.npmmirror.com/@esbuild/android-arm64/-/android-arm64-0.18.20.tgz#984b4f9c8d0377443cc2dfcef266d02244593622" 8 | integrity sha512-Nz4rJcchGDtENV0eMKUNa6L12zz2zBDXuhj/Vjh18zGqB44Bi7MBMSXjgunJgjRhCmKOjnPuZp4Mb6OKqtMHLQ== 9 | 10 | "@esbuild/android-arm@0.18.20": 11 | version "0.18.20" 12 | resolved "https://registry.npmmirror.com/@esbuild/android-arm/-/android-arm-0.18.20.tgz#fedb265bc3a589c84cc11f810804f234947c3682" 13 | integrity sha512-fyi7TDI/ijKKNZTUJAQqiG5T7YjJXgnzkURqmGj13C6dCqckZBLdl4h7bkhHt/t0WP+zO9/zwroDvANaOqO5Sw== 14 | 15 | "@esbuild/android-x64@0.18.20": 16 | version "0.18.20" 17 | resolved "https://registry.npmmirror.com/@esbuild/android-x64/-/android-x64-0.18.20.tgz#35cf419c4cfc8babe8893d296cd990e9e9f756f2" 18 | integrity sha512-8GDdlePJA8D6zlZYJV/jnrRAi6rOiNaCC/JclcXpB+KIuvfBN4owLtgzY2bsxnx666XjJx2kDPUmnTtR8qKQUg== 19 | 20 | "@esbuild/darwin-arm64@0.18.20": 21 | version "0.18.20" 22 | resolved "https://registry.npmmirror.com/@esbuild/darwin-arm64/-/darwin-arm64-0.18.20.tgz#08172cbeccf95fbc383399a7f39cfbddaeb0d7c1" 23 | integrity sha512-bxRHW5kHU38zS2lPTPOyuyTm+S+eobPUnTNkdJEfAddYgEcll4xkT8DB9d2008DtTbl7uJag2HuE5NZAZgnNEA== 24 | 25 | "@esbuild/darwin-x64@0.18.20": 26 | version "0.18.20" 27 | resolved "https://registry.npmmirror.com/@esbuild/darwin-x64/-/darwin-x64-0.18.20.tgz#d70d5790d8bf475556b67d0f8b7c5bdff053d85d" 28 | integrity sha512-pc5gxlMDxzm513qPGbCbDukOdsGtKhfxD1zJKXjCCcU7ju50O7MeAZ8c4krSJcOIJGFR+qx21yMMVYwiQvyTyQ== 29 | 30 | "@esbuild/freebsd-arm64@0.18.20": 31 | version "0.18.20" 32 | resolved "https://registry.npmmirror.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.18.20.tgz#98755cd12707f93f210e2494d6a4b51b96977f54" 33 | integrity sha512-yqDQHy4QHevpMAaxhhIwYPMv1NECwOvIpGCZkECn8w2WFHXjEwrBn3CeNIYsibZ/iZEUemj++M26W3cNR5h+Tw== 34 | 35 | "@esbuild/freebsd-x64@0.18.20": 36 | version "0.18.20" 37 | resolved "https://registry.npmmirror.com/@esbuild/freebsd-x64/-/freebsd-x64-0.18.20.tgz#c1eb2bff03915f87c29cece4c1a7fa1f423b066e" 38 | integrity sha512-tgWRPPuQsd3RmBZwarGVHZQvtzfEBOreNuxEMKFcd5DaDn2PbBxfwLcj4+aenoh7ctXcbXmOQIn8HI6mCSw5MQ== 39 | 40 | "@esbuild/linux-arm64@0.18.20": 41 | version "0.18.20" 42 | resolved "https://registry.npmmirror.com/@esbuild/linux-arm64/-/linux-arm64-0.18.20.tgz#bad4238bd8f4fc25b5a021280c770ab5fc3a02a0" 43 | integrity sha512-2YbscF+UL7SQAVIpnWvYwM+3LskyDmPhe31pE7/aoTMFKKzIc9lLbyGUpmmb8a8AixOL61sQ/mFh3jEjHYFvdA== 44 | 45 | "@esbuild/linux-arm@0.18.20": 46 | version "0.18.20" 47 | resolved "https://registry.npmmirror.com/@esbuild/linux-arm/-/linux-arm-0.18.20.tgz#3e617c61f33508a27150ee417543c8ab5acc73b0" 48 | integrity sha512-/5bHkMWnq1EgKr1V+Ybz3s1hWXok7mDFUMQ4cG10AfW3wL02PSZi5kFpYKrptDsgb2WAJIvRcDm+qIvXf/apvg== 49 | 50 | "@esbuild/linux-ia32@0.18.20": 51 | version "0.18.20" 52 | resolved "https://registry.npmmirror.com/@esbuild/linux-ia32/-/linux-ia32-0.18.20.tgz#699391cccba9aee6019b7f9892eb99219f1570a7" 53 | integrity sha512-P4etWwq6IsReT0E1KHU40bOnzMHoH73aXp96Fs8TIT6z9Hu8G6+0SHSw9i2isWrD2nbx2qo5yUqACgdfVGx7TA== 54 | 55 | "@esbuild/linux-loong64@0.18.20": 56 | version "0.18.20" 57 | resolved "https://registry.npmmirror.com/@esbuild/linux-loong64/-/linux-loong64-0.18.20.tgz#e6fccb7aac178dd2ffb9860465ac89d7f23b977d" 58 | integrity sha512-nXW8nqBTrOpDLPgPY9uV+/1DjxoQ7DoB2N8eocyq8I9XuqJ7BiAMDMf9n1xZM9TgW0J8zrquIb/A7s3BJv7rjg== 59 | 60 | "@esbuild/linux-mips64el@0.18.20": 61 | version "0.18.20" 62 | resolved 
"https://registry.npmmirror.com/@esbuild/linux-mips64el/-/linux-mips64el-0.18.20.tgz#eeff3a937de9c2310de30622a957ad1bd9183231" 63 | integrity sha512-d5NeaXZcHp8PzYy5VnXV3VSd2D328Zb+9dEq5HE6bw6+N86JVPExrA6O68OPwobntbNJ0pzCpUFZTo3w0GyetQ== 64 | 65 | "@esbuild/linux-ppc64@0.18.20": 66 | version "0.18.20" 67 | resolved "https://registry.npmmirror.com/@esbuild/linux-ppc64/-/linux-ppc64-0.18.20.tgz#2f7156bde20b01527993e6881435ad79ba9599fb" 68 | integrity sha512-WHPyeScRNcmANnLQkq6AfyXRFr5D6N2sKgkFo2FqguP44Nw2eyDlbTdZwd9GYk98DZG9QItIiTlFLHJHjxP3FA== 69 | 70 | "@esbuild/linux-riscv64@0.18.20": 71 | version "0.18.20" 72 | resolved "https://registry.npmmirror.com/@esbuild/linux-riscv64/-/linux-riscv64-0.18.20.tgz#6628389f210123d8b4743045af8caa7d4ddfc7a6" 73 | integrity sha512-WSxo6h5ecI5XH34KC7w5veNnKkju3zBRLEQNY7mv5mtBmrP/MjNBCAlsM2u5hDBlS3NGcTQpoBvRzqBcRtpq1A== 74 | 75 | "@esbuild/linux-s390x@0.18.20": 76 | version "0.18.20" 77 | resolved "https://registry.npmmirror.com/@esbuild/linux-s390x/-/linux-s390x-0.18.20.tgz#255e81fb289b101026131858ab99fba63dcf0071" 78 | integrity sha512-+8231GMs3mAEth6Ja1iK0a1sQ3ohfcpzpRLH8uuc5/KVDFneH6jtAJLFGafpzpMRO6DzJ6AvXKze9LfFMrIHVQ== 79 | 80 | "@esbuild/linux-x64@0.18.20": 81 | version "0.18.20" 82 | resolved "https://registry.npmmirror.com/@esbuild/linux-x64/-/linux-x64-0.18.20.tgz#c7690b3417af318a9b6f96df3031a8865176d338" 83 | integrity sha512-UYqiqemphJcNsFEskc73jQ7B9jgwjWrSayxawS6UVFZGWrAAtkzjxSqnoclCXxWtfwLdzU+vTpcNYhpn43uP1w== 84 | 85 | "@esbuild/netbsd-x64@0.18.20": 86 | version "0.18.20" 87 | resolved "https://registry.npmmirror.com/@esbuild/netbsd-x64/-/netbsd-x64-0.18.20.tgz#30e8cd8a3dded63975e2df2438ca109601ebe0d1" 88 | integrity sha512-iO1c++VP6xUBUmltHZoMtCUdPlnPGdBom6IrO4gyKPFFVBKioIImVooR5I83nTew5UOYrk3gIJhbZh8X44y06A== 89 | 90 | "@esbuild/openbsd-x64@0.18.20": 91 | version "0.18.20" 92 | resolved "https://registry.npmmirror.com/@esbuild/openbsd-x64/-/openbsd-x64-0.18.20.tgz#7812af31b205055874c8082ea9cf9ab0da6217ae" 93 | integrity sha512-e5e4YSsuQfX4cxcygw/UCPIEP6wbIL+se3sxPdCiMbFLBWu0eiZOJ7WoD+ptCLrmjZBK1Wk7I6D/I3NglUGOxg== 94 | 95 | "@esbuild/sunos-x64@0.18.20": 96 | version "0.18.20" 97 | resolved "https://registry.npmmirror.com/@esbuild/sunos-x64/-/sunos-x64-0.18.20.tgz#d5c275c3b4e73c9b0ecd38d1ca62c020f887ab9d" 98 | integrity sha512-kDbFRFp0YpTQVVrqUd5FTYmWo45zGaXe0X8E1G/LKFC0v8x0vWrhOWSLITcCn63lmZIxfOMXtCfti/RxN/0wnQ== 99 | 100 | "@esbuild/win32-arm64@0.18.20": 101 | version "0.18.20" 102 | resolved "https://registry.npmmirror.com/@esbuild/win32-arm64/-/win32-arm64-0.18.20.tgz#73bc7f5a9f8a77805f357fab97f290d0e4820ac9" 103 | integrity sha512-ddYFR6ItYgoaq4v4JmQQaAI5s7npztfV4Ag6NrhiaW0RrnOXqBkgwZLofVTlq1daVTQNhtI5oieTvkRPfZrePg== 104 | 105 | "@esbuild/win32-ia32@0.18.20": 106 | version "0.18.20" 107 | resolved "https://registry.npmmirror.com/@esbuild/win32-ia32/-/win32-ia32-0.18.20.tgz#ec93cbf0ef1085cc12e71e0d661d20569ff42102" 108 | integrity sha512-Wv7QBi3ID/rROT08SABTS7eV4hX26sVduqDOTe1MvGMjNd3EjOz4b7zeexIR62GTIEKrfJXKL9LFxTYgkyeu7g== 109 | 110 | "@esbuild/win32-x64@0.18.20": 111 | version "0.18.20" 112 | resolved "https://registry.npmmirror.com/@esbuild/win32-x64/-/win32-x64-0.18.20.tgz#786c5f41f043b07afb1af37683d7c33668858f6d" 113 | integrity sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ== 114 | 115 | "@jridgewell/sourcemap-codec@^1.4.15": 116 | version "1.4.15" 117 | resolved 
"https://registry.npmmirror.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz#d7c6e6755c78567a951e04ab52ef0fd26de59f32" 118 | integrity sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg== 119 | 120 | "@polka/url@^1.0.0-next.24": 121 | version "1.0.0-next.25" 122 | resolved "https://registry.npmmirror.com/@polka/url/-/url-1.0.0-next.25.tgz#f077fdc0b5d0078d30893396ff4827a13f99e817" 123 | integrity sha512-j7P6Rgr3mmtdkeDGTe0E/aYyWEWVtc5yFXtHCRHs28/jptDEWfaVOc5T7cblqy1XKPPfCxJc/8DwQ5YgLOZOVQ== 124 | 125 | "@rollup/plugin-commonjs@^25.0.0": 126 | version "25.0.7" 127 | resolved "https://registry.npmmirror.com/@rollup/plugin-commonjs/-/plugin-commonjs-25.0.7.tgz#145cec7589ad952171aeb6a585bbeabd0fd3b4cf" 128 | integrity sha512-nEvcR+LRjEjsaSsc4x3XZfCCvZIaSMenZu/OiwOKGN2UhQpAYI7ru7czFvyWbErlpoGjnSX3D5Ch5FcMA3kRWQ== 129 | dependencies: 130 | "@rollup/pluginutils" "^5.0.1" 131 | commondir "^1.0.1" 132 | estree-walker "^2.0.2" 133 | glob "^8.0.3" 134 | is-reference "1.2.1" 135 | magic-string "^0.30.3" 136 | 137 | "@rollup/plugin-json@^6.0.0": 138 | version "6.1.0" 139 | resolved "https://registry.npmmirror.com/@rollup/plugin-json/-/plugin-json-6.1.0.tgz#fbe784e29682e9bb6dee28ea75a1a83702e7b805" 140 | integrity sha512-EGI2te5ENk1coGeADSIwZ7G2Q8CJS2sF120T7jLw4xFw9n7wIOXHo+kIYRAoVpJAN+kmqZSoO3Fp4JtoNF4ReA== 141 | dependencies: 142 | "@rollup/pluginutils" "^5.1.0" 143 | 144 | "@rollup/plugin-node-resolve@^15.0.1": 145 | version "15.2.3" 146 | resolved "https://registry.npmmirror.com/@rollup/plugin-node-resolve/-/plugin-node-resolve-15.2.3.tgz#e5e0b059bd85ca57489492f295ce88c2d4b0daf9" 147 | integrity sha512-j/lym8nf5E21LwBT4Df1VD6hRO2L2iwUeUmP7litikRsVp1H6NWx20NEp0Y7su+7XGc476GnXXc4kFeZNGmaSQ== 148 | dependencies: 149 | "@rollup/pluginutils" "^5.0.1" 150 | "@types/resolve" "1.20.2" 151 | deepmerge "^4.2.2" 152 | is-builtin-module "^3.2.1" 153 | is-module "^1.0.0" 154 | resolve "^1.22.1" 155 | 156 | "@rollup/pluginutils@^5.0.1", "@rollup/pluginutils@^5.1.0": 157 | version "5.1.0" 158 | resolved "https://registry.npmmirror.com/@rollup/pluginutils/-/pluginutils-5.1.0.tgz#7e53eddc8c7f483a4ad0b94afb1f7f5fd3c771e0" 159 | integrity sha512-XTIWOPPcpvyKI6L1NHo0lFlCyznUEyPmPY1mc3KpPVDYulHSTvyeLNVW00QTLIAFNhR3kYnJTQHeGqU4M3n09g== 160 | dependencies: 161 | "@types/estree" "^1.0.0" 162 | estree-walker "^2.0.2" 163 | picomatch "^2.3.1" 164 | 165 | "@sveltejs/adapter-auto@2.0.0": 166 | version "2.0.0" 167 | resolved "https://registry.npmmirror.com/@sveltejs/adapter-auto/-/adapter-auto-2.0.0.tgz#902dba0c92e8c1d4b3128c53b370d6cc18f8f672" 168 | integrity sha512-b+gkHFZgD771kgV3aO4avHFd7y1zhmMYy9i6xOK7m/rwmwaRO8gnF5zBc0Rgca80B2PMU1bKNxyBTHA14OzUAQ== 169 | dependencies: 170 | import-meta-resolve "^2.2.0" 171 | 172 | "@sveltejs/adapter-node@^1.2.0": 173 | version "1.3.1" 174 | resolved "https://registry.npmmirror.com/@sveltejs/adapter-node/-/adapter-node-1.3.1.tgz#b3d8dee1ae09d008b2add093e84cf085f3333bbc" 175 | integrity sha512-A0VgRQDCDPzdLNoiAbcOxGw4zT1Mc+n1LwT1OmO350R7WxrEqdMUChPPOd1iMfIDWlP4ie6E2d/WQf5es2d4Zw== 176 | dependencies: 177 | "@rollup/plugin-commonjs" "^25.0.0" 178 | "@rollup/plugin-json" "^6.0.0" 179 | "@rollup/plugin-node-resolve" "^15.0.1" 180 | rollup "^3.7.0" 181 | 182 | "@sveltejs/adapter-static@^2.0.1": 183 | version "2.0.3" 184 | resolved "https://registry.npmmirror.com/@sveltejs/adapter-static/-/adapter-static-2.0.3.tgz#616836c30bdce4d673a2e26c0f5ffbd5c1bc7c67" 185 | integrity 
sha512-VUqTfXsxYGugCpMqQv1U0LIdbR3S5nBkMMDmpjGVJyM6Q2jHVMFtdWJCkeHMySc6mZxJ+0eZK3T7IgmUCDrcUQ== 186 | 187 | "@sveltejs/kit@1.8.5": 188 | version "1.8.5" 189 | resolved "https://registry.npmmirror.com/@sveltejs/kit/-/kit-1.8.5.tgz#f61f417cd5fa8ab76d28ec68f72f9a561749dd5b" 190 | integrity sha512-b6kbjVAivoPd3oL9IVBaZBWiuHeI0qBKfszSDXcqsPfiSMyUK7ilHDFVSWNn+2EMPO48+87iuho71yTCOXZE3w== 191 | dependencies: 192 | "@sveltejs/vite-plugin-svelte" "^2.0.0" 193 | "@types/cookie" "^0.5.1" 194 | cookie "^0.5.0" 195 | devalue "^4.3.0" 196 | esm-env "^1.0.0" 197 | kleur "^4.1.5" 198 | magic-string "^0.30.0" 199 | mime "^3.0.0" 200 | sade "^1.8.1" 201 | set-cookie-parser "^2.5.1" 202 | sirv "^2.0.2" 203 | tiny-glob "^0.2.9" 204 | undici "5.20.0" 205 | 206 | "@sveltejs/vite-plugin-svelte-inspector@^1.0.4": 207 | version "1.0.4" 208 | resolved "https://registry.npmmirror.com/@sveltejs/vite-plugin-svelte-inspector/-/vite-plugin-svelte-inspector-1.0.4.tgz#c99fcb73aaa845a3e2c0563409aeb3ee0b863add" 209 | integrity sha512-zjiuZ3yydBtwpF3bj0kQNV0YXe+iKE545QGZVTaylW3eAzFr+pJ/cwK8lZEaRp4JtaJXhD5DyWAV4AxLh6DgaQ== 210 | dependencies: 211 | debug "^4.3.4" 212 | 213 | "@sveltejs/vite-plugin-svelte@^2.0.0": 214 | version "2.5.3" 215 | resolved "https://registry.npmmirror.com/@sveltejs/vite-plugin-svelte/-/vite-plugin-svelte-2.5.3.tgz#23f810d0c38d159491845175c2566a51639be3fc" 216 | integrity sha512-erhNtXxE5/6xGZz/M9eXsmI7Pxa6MS7jyTy06zN3Ck++ldrppOnOlJwHHTsMC7DHDQdgUp4NAc4cDNQ9eGdB/w== 217 | dependencies: 218 | "@sveltejs/vite-plugin-svelte-inspector" "^1.0.4" 219 | debug "^4.3.4" 220 | deepmerge "^4.3.1" 221 | kleur "^4.1.5" 222 | magic-string "^0.30.3" 223 | svelte-hmr "^0.15.3" 224 | vitefu "^0.2.4" 225 | 226 | "@types/cookie@^0.5.1": 227 | version "0.5.4" 228 | resolved "https://registry.npmmirror.com/@types/cookie/-/cookie-0.5.4.tgz#7e70a20cd695bc48d46b08c2505874cd68b760e0" 229 | integrity sha512-7z/eR6O859gyWIAjuvBWFzNURmf2oPBmJlfVWkwehU5nzIyjwBsTh7WMmEEV4JFnHuQ3ex4oyTvfKzcyJVDBNA== 230 | 231 | "@types/estree@*", "@types/estree@^1.0.0": 232 | version "1.0.5" 233 | resolved "https://registry.npmmirror.com/@types/estree/-/estree-1.0.5.tgz#a6ce3e556e00fd9895dd872dd172ad0d4bd687f4" 234 | integrity sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw== 235 | 236 | "@types/resolve@1.20.2": 237 | version "1.20.2" 238 | resolved "https://registry.npmmirror.com/@types/resolve/-/resolve-1.20.2.tgz#97d26e00cd4a0423b4af620abecf3e6f442b7975" 239 | integrity sha512-60BCwRFOZCQhDncwQdxxeOEEkbc5dIMccYLwbxsS4TUNeVECQ/pBJ0j09mrHOl/JJvpRPGwO9SvE4nR2Nb/a4Q== 240 | 241 | balanced-match@^1.0.0: 242 | version "1.0.2" 243 | resolved "https://registry.npmmirror.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" 244 | integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== 245 | 246 | brace-expansion@^2.0.1: 247 | version "2.0.1" 248 | resolved "https://registry.npmmirror.com/brace-expansion/-/brace-expansion-2.0.1.tgz#1edc459e0f0c548486ecf9fc99f2221364b9a0ae" 249 | integrity sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA== 250 | dependencies: 251 | balanced-match "^1.0.0" 252 | 253 | builtin-modules@^3.3.0: 254 | version "3.3.0" 255 | resolved "https://registry.npmmirror.com/builtin-modules/-/builtin-modules-3.3.0.tgz#cae62812b89801e9656336e46223e030386be7b6" 256 | integrity sha512-zhaCDicdLuWN5UbN5IMnFqNMhNfo919sH85y2/ea+5Yg9TsTkeZxpL+JLbp6cgYFS4sRLp3YV4S6yDuqVWHYOw== 
257 | 258 | busboy@^1.6.0: 259 | version "1.6.0" 260 | resolved "https://registry.npmmirror.com/busboy/-/busboy-1.6.0.tgz#966ea36a9502e43cdb9146962523b92f531f6893" 261 | integrity sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA== 262 | dependencies: 263 | streamsearch "^1.1.0" 264 | 265 | commondir@^1.0.1: 266 | version "1.0.1" 267 | resolved "https://registry.npmmirror.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b" 268 | integrity sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg== 269 | 270 | cookie@^0.5.0: 271 | version "0.5.0" 272 | resolved "https://registry.npmmirror.com/cookie/-/cookie-0.5.0.tgz#d1f5d71adec6558c58f389987c366aa47e994f8b" 273 | integrity sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw== 274 | 275 | debug@^4.3.4: 276 | version "4.3.4" 277 | resolved "https://registry.npmmirror.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" 278 | integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== 279 | dependencies: 280 | ms "2.1.2" 281 | 282 | deepmerge@^4.2.2, deepmerge@^4.3.1: 283 | version "4.3.1" 284 | resolved "https://registry.npmmirror.com/deepmerge/-/deepmerge-4.3.1.tgz#44b5f2147cd3b00d4b56137685966f26fd25dd4a" 285 | integrity sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A== 286 | 287 | devalue@^4.3.0: 288 | version "4.3.2" 289 | resolved "https://registry.npmmirror.com/devalue/-/devalue-4.3.2.tgz#cc44e4cf3872ac5a78229fbce3b77e57032727b5" 290 | integrity sha512-KqFl6pOgOW+Y6wJgu80rHpo2/3H07vr8ntR9rkkFIRETewbf5GaYYcakYfiKz89K+sLsuPkQIZaXDMjUObZwWg== 291 | 292 | esbuild@^0.18.10: 293 | version "0.18.20" 294 | resolved "https://registry.npmmirror.com/esbuild/-/esbuild-0.18.20.tgz#4709f5a34801b43b799ab7d6d82f7284a9b7a7a6" 295 | integrity sha512-ceqxoedUrcayh7Y7ZX6NdbbDzGROiyVBgC4PriJThBKSVPWnnFHZAkfI1lJT8QFkOwH4qOS2SJkS4wvpGl8BpA== 296 | optionalDependencies: 297 | "@esbuild/android-arm" "0.18.20" 298 | "@esbuild/android-arm64" "0.18.20" 299 | "@esbuild/android-x64" "0.18.20" 300 | "@esbuild/darwin-arm64" "0.18.20" 301 | "@esbuild/darwin-x64" "0.18.20" 302 | "@esbuild/freebsd-arm64" "0.18.20" 303 | "@esbuild/freebsd-x64" "0.18.20" 304 | "@esbuild/linux-arm" "0.18.20" 305 | "@esbuild/linux-arm64" "0.18.20" 306 | "@esbuild/linux-ia32" "0.18.20" 307 | "@esbuild/linux-loong64" "0.18.20" 308 | "@esbuild/linux-mips64el" "0.18.20" 309 | "@esbuild/linux-ppc64" "0.18.20" 310 | "@esbuild/linux-riscv64" "0.18.20" 311 | "@esbuild/linux-s390x" "0.18.20" 312 | "@esbuild/linux-x64" "0.18.20" 313 | "@esbuild/netbsd-x64" "0.18.20" 314 | "@esbuild/openbsd-x64" "0.18.20" 315 | "@esbuild/sunos-x64" "0.18.20" 316 | "@esbuild/win32-arm64" "0.18.20" 317 | "@esbuild/win32-ia32" "0.18.20" 318 | "@esbuild/win32-x64" "0.18.20" 319 | 320 | esm-env@^1.0.0: 321 | version "1.0.0" 322 | resolved "https://registry.npmmirror.com/esm-env/-/esm-env-1.0.0.tgz#b124b40b180711690a4cb9b00d16573391950413" 323 | integrity sha512-Cf6VksWPsTuW01vU9Mk/3vRue91Zevka5SjyNf3nEpokFRuqt/KjUQoGAwq9qMmhpLTHmXzSIrFRw8zxWzmFBA== 324 | 325 | estree-walker@^2.0.2: 326 | version "2.0.2" 327 | resolved "https://registry.npmmirror.com/estree-walker/-/estree-walker-2.0.2.tgz#52f010178c2a4c117a7757cfe942adb7d2da4cac" 328 | integrity sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w== 329 | 330 | 
fs.realpath@^1.0.0: 331 | version "1.0.0" 332 | resolved "https://registry.npmmirror.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" 333 | integrity sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw== 334 | 335 | fsevents@~2.3.2: 336 | version "2.3.3" 337 | resolved "https://registry.npmmirror.com/fsevents/-/fsevents-2.3.3.tgz#cac6407785d03675a2a5e1a5305c697b347d90d6" 338 | integrity sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw== 339 | 340 | function-bind@^1.1.2: 341 | version "1.1.2" 342 | resolved "https://registry.npmmirror.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c" 343 | integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA== 344 | 345 | glob@^8.0.3: 346 | version "8.1.0" 347 | resolved "https://registry.npmmirror.com/glob/-/glob-8.1.0.tgz#d388f656593ef708ee3e34640fdfb99a9fd1c33e" 348 | integrity sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ== 349 | dependencies: 350 | fs.realpath "^1.0.0" 351 | inflight "^1.0.4" 352 | inherits "2" 353 | minimatch "^5.0.1" 354 | once "^1.3.0" 355 | 356 | globalyzer@0.1.0: 357 | version "0.1.0" 358 | resolved "https://registry.npmmirror.com/globalyzer/-/globalyzer-0.1.0.tgz#cb76da79555669a1519d5a8edf093afaa0bf1465" 359 | integrity sha512-40oNTM9UfG6aBmuKxk/giHn5nQ8RVz/SS4Ir6zgzOv9/qC3kKZ9v4etGTcJbEl/NyVQH7FGU7d+X1egr57Md2Q== 360 | 361 | globrex@^0.1.2: 362 | version "0.1.2" 363 | resolved "https://registry.npmmirror.com/globrex/-/globrex-0.1.2.tgz#dd5d9ec826232730cd6793a5e33a9302985e6098" 364 | integrity sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg== 365 | 366 | hasown@^2.0.0: 367 | version "2.0.2" 368 | resolved "https://registry.npmmirror.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" 369 | integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ== 370 | dependencies: 371 | function-bind "^1.1.2" 372 | 373 | import-meta-resolve@^2.2.0: 374 | version "2.2.2" 375 | resolved "https://registry.npmmirror.com/import-meta-resolve/-/import-meta-resolve-2.2.2.tgz#75237301e72d1f0fbd74dbc6cca9324b164c2cc9" 376 | integrity sha512-f8KcQ1D80V7RnqVm+/lirO9zkOxjGxhaTC1IPrBGd3MEfNgmNG67tSUO9gTi2F3Blr2Az6g1vocaxzkVnWl9MA== 377 | 378 | inflight@^1.0.4: 379 | version "1.0.6" 380 | resolved "https://registry.npmmirror.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" 381 | integrity sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA== 382 | dependencies: 383 | once "^1.3.0" 384 | wrappy "1" 385 | 386 | inherits@2: 387 | version "2.0.4" 388 | resolved "https://registry.npmmirror.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" 389 | integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== 390 | 391 | is-builtin-module@^3.2.1: 392 | version "3.2.1" 393 | resolved "https://registry.npmmirror.com/is-builtin-module/-/is-builtin-module-3.2.1.tgz#f03271717d8654cfcaf07ab0463faa3571581169" 394 | integrity sha512-BSLE3HnV2syZ0FK0iMA/yUGplUeMmNz4AW5fnTunbCIqZi4vG3WjJT9FHMy5D69xmAYBHXQhJdALdpwVxV501A== 395 | dependencies: 396 | builtin-modules "^3.3.0" 397 | 398 | is-core-module@^2.13.0: 399 | version "2.13.1" 400 | resolved 
"https://registry.npmmirror.com/is-core-module/-/is-core-module-2.13.1.tgz#ad0d7532c6fea9da1ebdc82742d74525c6273384" 401 | integrity sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw== 402 | dependencies: 403 | hasown "^2.0.0" 404 | 405 | is-module@^1.0.0: 406 | version "1.0.0" 407 | resolved "https://registry.npmmirror.com/is-module/-/is-module-1.0.0.tgz#3258fb69f78c14d5b815d664336b4cffb6441591" 408 | integrity sha512-51ypPSPCoTEIN9dy5Oy+h4pShgJmPCygKfyRCISBI+JoWT/2oJvK8QPxmwv7b/p239jXrm9M1mlQbyKJ5A152g== 409 | 410 | is-reference@1.2.1: 411 | version "1.2.1" 412 | resolved "https://registry.npmmirror.com/is-reference/-/is-reference-1.2.1.tgz#8b2dac0b371f4bc994fdeaba9eb542d03002d0b7" 413 | integrity sha512-U82MsXXiFIrjCK4otLT+o2NA2Cd2g5MLoOVXUZjIOhLurrRxpEXzI8O0KZHr3IjLvlAH1kTPYSuqer5T9ZVBKQ== 414 | dependencies: 415 | "@types/estree" "*" 416 | 417 | kleur@^4.1.5: 418 | version "4.1.5" 419 | resolved "https://registry.npmmirror.com/kleur/-/kleur-4.1.5.tgz#95106101795f7050c6c650f350c683febddb1780" 420 | integrity sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ== 421 | 422 | magic-string@^0.30.0, magic-string@^0.30.3: 423 | version "0.30.9" 424 | resolved "https://registry.npmmirror.com/magic-string/-/magic-string-0.30.9.tgz#8927ae21bfdd856310e07a1bc8dd5e73cb6c251d" 425 | integrity sha512-S1+hd+dIrC8EZqKyT9DstTH/0Z+f76kmmvZnkfQVmOpDEF9iVgdYif3Q/pIWHmCoo59bQVGW0kVL3e2nl+9+Sw== 426 | dependencies: 427 | "@jridgewell/sourcemap-codec" "^1.4.15" 428 | 429 | mime@^3.0.0: 430 | version "3.0.0" 431 | resolved "https://registry.npmmirror.com/mime/-/mime-3.0.0.tgz#b374550dca3a0c18443b0c950a6a58f1931cf7a7" 432 | integrity sha512-jSCU7/VB1loIWBZe14aEYHU/+1UMEHoaO7qxCOVJOw9GgH72VAWppxNcjU+x9a2k3GSIBXNKxXQFqRvvZ7vr3A== 433 | 434 | minimatch@^5.0.1: 435 | version "5.1.6" 436 | resolved "https://registry.npmmirror.com/minimatch/-/minimatch-5.1.6.tgz#1cfcb8cf5522ea69952cd2af95ae09477f122a96" 437 | integrity sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g== 438 | dependencies: 439 | brace-expansion "^2.0.1" 440 | 441 | mri@^1.1.0: 442 | version "1.2.0" 443 | resolved "https://registry.npmmirror.com/mri/-/mri-1.2.0.tgz#6721480fec2a11a4889861115a48b6cbe7cc8f0b" 444 | integrity sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA== 445 | 446 | mrmime@^2.0.0: 447 | version "2.0.0" 448 | resolved "https://registry.npmmirror.com/mrmime/-/mrmime-2.0.0.tgz#151082a6e06e59a9a39b46b3e14d5cfe92b3abb4" 449 | integrity sha512-eu38+hdgojoyq63s+yTpN4XMBdt5l8HhMhc4VKLO9KM5caLIBvUm4thi7fFaxyTmCKeNnXZ5pAlBwCUnhA09uw== 450 | 451 | ms@2.1.2: 452 | version "2.1.2" 453 | resolved "https://registry.npmmirror.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" 454 | integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== 455 | 456 | nanoid@^3.3.7: 457 | version "3.3.7" 458 | resolved "https://registry.npmmirror.com/nanoid/-/nanoid-3.3.7.tgz#d0c301a691bc8d54efa0a2226ccf3fe2fd656bd8" 459 | integrity sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g== 460 | 461 | once@^1.3.0: 462 | version "1.4.0" 463 | resolved "https://registry.npmmirror.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" 464 | integrity sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w== 465 | dependencies: 466 | wrappy "1" 467 | 
468 | path-parse@^1.0.7: 469 | version "1.0.7" 470 | resolved "https://registry.npmmirror.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" 471 | integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== 472 | 473 | picocolors@^1.0.0: 474 | version "1.0.0" 475 | resolved "https://registry.npmmirror.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" 476 | integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== 477 | 478 | picomatch@^2.3.1: 479 | version "2.3.1" 480 | resolved "https://registry.npmmirror.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" 481 | integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== 482 | 483 | postcss@^8.4.27: 484 | version "8.4.38" 485 | resolved "https://registry.npmmirror.com/postcss/-/postcss-8.4.38.tgz#b387d533baf2054288e337066d81c6bee9db9e0e" 486 | integrity sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A== 487 | dependencies: 488 | nanoid "^3.3.7" 489 | picocolors "^1.0.0" 490 | source-map-js "^1.2.0" 491 | 492 | resolve@^1.22.1: 493 | version "1.22.8" 494 | resolved "https://registry.npmmirror.com/resolve/-/resolve-1.22.8.tgz#b6c87a9f2aa06dfab52e3d70ac8cde321fa5a48d" 495 | integrity sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw== 496 | dependencies: 497 | is-core-module "^2.13.0" 498 | path-parse "^1.0.7" 499 | supports-preserve-symlinks-flag "^1.0.0" 500 | 501 | rollup@^3.27.1, rollup@^3.7.0: 502 | version "3.29.4" 503 | resolved "https://registry.npmmirror.com/rollup/-/rollup-3.29.4.tgz#4d70c0f9834146df8705bfb69a9a19c9e1109981" 504 | integrity sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw== 505 | optionalDependencies: 506 | fsevents "~2.3.2" 507 | 508 | sade@^1.8.1: 509 | version "1.8.1" 510 | resolved "https://registry.npmmirror.com/sade/-/sade-1.8.1.tgz#0a78e81d658d394887be57d2a409bf703a3b2701" 511 | integrity sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A== 512 | dependencies: 513 | mri "^1.1.0" 514 | 515 | set-cookie-parser@^2.5.1: 516 | version "2.6.0" 517 | resolved "https://registry.npmmirror.com/set-cookie-parser/-/set-cookie-parser-2.6.0.tgz#131921e50f62ff1a66a461d7d62d7b21d5d15a51" 518 | integrity sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ== 519 | 520 | sirv@^2.0.2: 521 | version "2.0.4" 522 | resolved "https://registry.npmmirror.com/sirv/-/sirv-2.0.4.tgz#5dd9a725c578e34e449f332703eb2a74e46a29b0" 523 | integrity sha512-94Bdh3cC2PKrbgSOUqTiGPWVZeSiXfKOVZNJniWoqrWrRkB1CJzBU3NEbiTsPcYy1lDsANA/THzS+9WBiy5nfQ== 524 | dependencies: 525 | "@polka/url" "^1.0.0-next.24" 526 | mrmime "^2.0.0" 527 | totalist "^3.0.0" 528 | 529 | source-map-js@^1.2.0: 530 | version "1.2.0" 531 | resolved "https://registry.npmmirror.com/source-map-js/-/source-map-js-1.2.0.tgz#16b809c162517b5b8c3e7dcd315a2a5c2612b2af" 532 | integrity sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg== 533 | 534 | streamsearch@^1.1.0: 535 | version "1.1.0" 536 | resolved "https://registry.npmmirror.com/streamsearch/-/streamsearch-1.1.0.tgz#404dd1e2247ca94af554e841a8ef0eaa238da764" 537 | integrity 
sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg== 538 | 539 | supports-preserve-symlinks-flag@^1.0.0: 540 | version "1.0.0" 541 | resolved "https://registry.npmmirror.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" 542 | integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== 543 | 544 | svelte-hmr@^0.15.3: 545 | version "0.15.3" 546 | resolved "https://registry.npmmirror.com/svelte-hmr/-/svelte-hmr-0.15.3.tgz#df54ccde9be3f091bf5f18fc4ef7b8eb6405fbe6" 547 | integrity sha512-41snaPswvSf8TJUhlkoJBekRrABDXDMdpNpT2tfHIv4JuhgvHqLMhEPGtaQn0BmbNSTkuz2Ed20DF2eHw0SmBQ== 548 | 549 | svelte@^3.55.1: 550 | version "3.59.2" 551 | resolved "https://registry.npmmirror.com/svelte/-/svelte-3.59.2.tgz#a137b28e025a181292b2ae2e3dca90bf8ec73aec" 552 | integrity sha512-vzSyuGr3eEoAtT/A6bmajosJZIUWySzY2CzB3w2pgPvnkUjGqlDnsNnA0PMO+mMAhuyMul6C2uuZzY6ELSkzyA== 553 | 554 | tiny-glob@^0.2.9: 555 | version "0.2.9" 556 | resolved "https://registry.npmmirror.com/tiny-glob/-/tiny-glob-0.2.9.tgz#2212d441ac17928033b110f8b3640683129d31e2" 557 | integrity sha512-g/55ssRPUjShh+xkfx9UPDXqhckHEsHr4Vd9zX55oSdGZc/MD0m3sferOkwWtp98bv+kcVfEHtRJgBVJzelrzg== 558 | dependencies: 559 | globalyzer "0.1.0" 560 | globrex "^0.1.2" 561 | 562 | totalist@^3.0.0: 563 | version "3.0.1" 564 | resolved "https://registry.npmmirror.com/totalist/-/totalist-3.0.1.tgz#ba3a3d600c915b1a97872348f79c127475f6acf8" 565 | integrity sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ== 566 | 567 | undici@5.20.0: 568 | version "5.20.0" 569 | resolved "https://registry.npmmirror.com/undici/-/undici-5.20.0.tgz#6327462f5ce1d3646bcdac99da7317f455bcc263" 570 | integrity sha512-J3j60dYzuo6Eevbawwp1sdg16k5Tf768bxYK4TUJRH7cBM4kFCbf3mOnM/0E3vQYXvpxITbbWmBafaDbxLDz3g== 571 | dependencies: 572 | busboy "^1.6.0" 573 | 574 | vite@^4.1.4: 575 | version "4.5.3" 576 | resolved "https://registry.npmmirror.com/vite/-/vite-4.5.3.tgz#d88a4529ea58bae97294c7e2e6f0eab39a50fb1a" 577 | integrity sha512-kQL23kMeX92v3ph7IauVkXkikdDRsYMGTVl5KY2E9OY4ONLvkHf04MDTbnfo6NKxZiDLWzVpP5oTa8hQD8U3dg== 578 | dependencies: 579 | esbuild "^0.18.10" 580 | postcss "^8.4.27" 581 | rollup "^3.27.1" 582 | optionalDependencies: 583 | fsevents "~2.3.2" 584 | 585 | vitefu@^0.2.4: 586 | version "0.2.5" 587 | resolved "https://registry.npmmirror.com/vitefu/-/vitefu-0.2.5.tgz#c1b93c377fbdd3e5ddd69840ea3aa70b40d90969" 588 | integrity sha512-SgHtMLoqaeeGnd2evZ849ZbACbnwQCIwRH57t18FxcXoZop0uQu0uzlIhJBlF/eWVzuce0sHeqPcDo+evVcg8Q== 589 | 590 | wrappy@1: 591 | version "1.0.2" 592 | resolved "https://registry.npmmirror.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" 593 | integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ== 594 | --------------------------------------------------------------------------------