├── node ├── gen.sh ├── node.schema ├── schema.go ├── node.go ├── schema_test.go └── node.schema.gen.go ├── go.mod ├── utils ├── bytes.go └── bytes_test.go ├── page ├── btree.go ├── page.go ├── cursor.go └── table.go ├── planner ├── plan.go ├── insert.go ├── select_test.go ├── insert_test.go └── select.go ├── db ├── db.go └── db_test.go ├── go.sum ├── README-zh.md ├── README.md ├── main.go ├── parser ├── parser_test.go └── parser.go ├── .gitignore └── LICENSE /node/gen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | gencode go -schema node.schema -package node 4 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/auxten/go-sqldb 2 | 3 | go 1.16 4 | 5 | require github.com/smartystreets/goconvey v1.6.4 6 | -------------------------------------------------------------------------------- /utils/bytes.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | func Length(s []byte) (i int) { 4 | var c byte 5 | for i, c = range s { 6 | if c == 0 { 7 | break 8 | } 9 | } 10 | return i 11 | } 12 | -------------------------------------------------------------------------------- /utils/bytes_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "testing" 5 | 6 | . 
"github.com/smartystreets/goconvey/convey" 7 | ) 8 | 9 | func TestUtils_Length(t *testing.T) { 10 | Convey("Length", t, func() { 11 | a := [16]byte{'a'} 12 | So(Length(a[:]), ShouldEqual, 1) 13 | b := [16]byte{} 14 | So(Length(b[:]), ShouldEqual, 0) 15 | }) 16 | } 17 | -------------------------------------------------------------------------------- /page/btree.go: -------------------------------------------------------------------------------- 1 | package page 2 | 3 | import ( 4 | "github.com/auxten/go-sqldb/node" 5 | ) 6 | 7 | func InitLeafNode(node *node.LeafNode) { 8 | node.CommonHeader.IsInternal = false 9 | node.CommonHeader.IsRoot = false 10 | node.Header.Cells = 0 11 | node.Header.NextLeaf = 0 12 | } 13 | 14 | func InitInternalNode(node *node.InternalNode) { 15 | node.CommonHeader.IsInternal = true 16 | node.CommonHeader.IsRoot = false 17 | node.Header.KeysNum = 0 18 | } 19 | -------------------------------------------------------------------------------- /planner/plan.go: -------------------------------------------------------------------------------- 1 | package planner 2 | 3 | import ( 4 | "github.com/auxten/go-sqldb/node" 5 | "github.com/auxten/go-sqldb/page" 6 | ) 7 | 8 | type Plan struct { 9 | table *page.Table 10 | cursor *page.Cursor 11 | UnFilteredPipe chan *node.Row 12 | FilteredPipe chan *node.Row 13 | LimitedPipe chan *node.Row 14 | ErrorsPipe chan error 15 | Stop chan bool 16 | } 17 | 18 | func NewPlan(t *page.Table) (p *Plan) { 19 | return &Plan{ 20 | table: t, 21 | FilteredPipe: make(chan *node.Row), 22 | UnFilteredPipe: make(chan *node.Row), 23 | LimitedPipe: make(chan *node.Row), 24 | ErrorsPipe: make(chan error, 1), 25 | Stop: make(chan bool), 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /node/node.schema: -------------------------------------------------------------------------------- 1 | struct Header { 2 | IsInternal bool 3 | IsRoot bool 4 | Parent uint32 5 | } 6 | 7 | struct 
InternalNodeHeader { 8 | KeysNum uint32 9 | RightChild uint32 10 | } 11 | 12 | struct LeafNodeHeader { 13 | Cells uint32 14 | NextLeaf uint32 15 | } 16 | 17 | struct ICell { 18 | Key uint32 19 | Child uint32 20 | } 21 | 22 | struct InternalNode { 23 | CommonHeader Header 24 | Header InternalNodeHeader 25 | ICells [510]ICell 26 | } 27 | 28 | struct Cell { 29 | Key uint32 30 | Value [230]byte 31 | } 32 | 33 | struct LeafNode { 34 | CommonHeader Header 35 | Header LeafNodeHeader 36 | Cells [17]Cell 37 | } 38 | 39 | struct Row { 40 | Id uint32 41 | Sex byte 42 | Age uint8 43 | Username [32]byte 44 | Email [128]byte 45 | Phone [64]byte 46 | } 47 | -------------------------------------------------------------------------------- /db/db.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "github.com/auxten/go-sqldb/page" 5 | ) 6 | 7 | func Open(fileName string) (t *page.Table, err error) { 8 | var ( 9 | pager *page.Pager 10 | p *page.Page 11 | ) 12 | if pager, err = page.PagerOpen(fileName); err != nil { 13 | return 14 | } 15 | 16 | t = &page.Table{ 17 | Pager: pager, 18 | RootPageIdx: 0, 19 | } 20 | 21 | if pager.PageNum == 0 { 22 | // New database file, initialize page 0 as leaf node. 
23 | if p, err = pager.GetPage(0); err != nil { 24 | return 25 | } 26 | page.InitLeafNode(p.LeafNode) 27 | p.LeafNode.CommonHeader.IsRoot = true 28 | } 29 | 30 | return 31 | } 32 | 33 | func Close(t *page.Table) (err error) { 34 | pager := t.Pager 35 | for i := uint32(0); i < pager.PageNum; i++ { 36 | if pager.Pages[i] != nil { 37 | if err = pager.Flush(i); err != nil { 38 | return 39 | } 40 | pager.Pages[i] = nil 41 | } 42 | } 43 | 44 | err = pager.File.Close() 45 | 46 | for i := range pager.Pages { 47 | pager.Pages[i] = nil 48 | } 49 | return 50 | } 51 | -------------------------------------------------------------------------------- /db/db_test.go: -------------------------------------------------------------------------------- 1 | package db 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "testing" 7 | 8 | "github.com/auxten/go-sqldb/node" 9 | . "github.com/smartystreets/goconvey/convey" 10 | ) 11 | 12 | func TestDB(t *testing.T) { 13 | Convey("Open and Close", t, func() { 14 | const testFile = "test.db" 15 | defer func() { 16 | _ = os.Remove(testFile) 17 | }() 18 | table, err := Open(testFile) 19 | So(err, ShouldBeNil) 20 | So(table.Pager.Pages[0].LeafNode.CommonHeader.IsInternal, ShouldBeFalse) 21 | So(err, ShouldBeNil) 22 | So(table, ShouldNotBeNil) 23 | 24 | err = table.Insert(&node.Row{ 25 | Id: 1, 26 | Sex: 'F', 27 | Age: 35, 28 | Username: [32]byte{'a', 'u', 'x', 't', 'e', 'n'}, 29 | Email: [128]byte{'a', 'u', 'x', 't', 'e', 'n', '@'}, 30 | Phone: [64]byte{'1', '2', '3', '4', '5', '6', '0'}, 31 | }) 32 | So(err, ShouldBeNil) 33 | 34 | err = table.Insert(&node.Row{ 35 | Id: 1, 36 | }) 37 | So(err.Error(), ShouldContainSubstring, "duplicate key 1") 38 | 39 | for i := uint32(2); i < 35; i++ { 40 | err = table.Insert(&node.Row{ 41 | Id: i, 42 | }) 43 | fmt.Println(i) 44 | So(err, ShouldBeNil) 45 | } 46 | err = Close(table) 47 | So(err, ShouldBeNil) 48 | 49 | table, err = Open(testFile) 50 | So(err, ShouldBeNil) 51 | }) 52 | } 53 | 
-------------------------------------------------------------------------------- /planner/insert.go: -------------------------------------------------------------------------------- 1 | package planner 2 | 3 | import ( 4 | "strconv" 5 | "strings" 6 | 7 | "github.com/auxten/go-sqldb/node" 8 | "github.com/auxten/go-sqldb/parser" 9 | ) 10 | 11 | func (plan *Plan) Insert(ast *parser.InsertTree) (count int, err error) { 12 | for _, r := range ast.Values { 13 | // 这里暂时都假定我们插入的 Schema 是固定的 node.Row 类型 14 | // 根据 InsertTree.Columns 的字段顺序,我们强制类型转换还原出 node.Row 结构 15 | row := &node.Row{} 16 | if len(ast.Columns) == 0 { 17 | ast.Columns = []string{"id", "sex", "age", "username", "email", "phone"} 18 | } 19 | for i, col := range ast.Columns { 20 | switch strings.ToUpper(col) { 21 | case "ID": 22 | var parsed int64 23 | if parsed, err = strconv.ParseInt(r[i], 10, 64); err != nil { 24 | return 25 | } 26 | row.Id = uint32(parsed) 27 | case "SEX": 28 | row.Sex = r[i][0] 29 | case "AGE": 30 | var parsed int64 31 | if parsed, err = strconv.ParseInt(r[i], 10, 64); err != nil { 32 | return 33 | } 34 | row.Age = uint8(parsed) 35 | case "USERNAME": 36 | copy(row.Username[:], r[i]) 37 | case "EMAIL": 38 | copy(row.Email[:], r[i]) 39 | case "PHONE": 40 | copy(row.Phone[:], r[i]) 41 | } 42 | } 43 | 44 | if err = plan.table.Insert(row); err != nil { 45 | return 46 | } else { 47 | count++ 48 | } 49 | } 50 | return 51 | } 52 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= 2 | github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= 3 | github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= 4 | github.com/jtolds/gls v4.20.0+incompatible/go.mod 
h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 5 | github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= 6 | github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= 7 | github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= 8 | github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= 9 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 10 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 11 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 12 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 13 | golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 14 | -------------------------------------------------------------------------------- /node/schema.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | 8 | "github.com/auxten/go-sqldb/utils" 9 | ) 10 | 11 | var ( 12 | RowSize = (&Row{}).Size() 13 | ) 14 | 15 | /* 16 | Id uint32 17 | Sex byte 18 | Age uint8 19 | Username [32]byte 20 | Email [128]byte 21 | Phone [64]byte 22 | */ 23 | func PrintRow(row *Row) { 24 | _, _ = WriteRow(os.Stdout, row) 25 | } 26 | 27 | func WriteRow(w io.Writer, row *Row) (int, error) { 28 | return fmt.Fprintf(w, "%d\t%c\t%d\t%s\t%s\t%s\n", 29 | row.Id, 30 | row.Sex, 31 | row.Age, 32 | string(row.Username[:utils.Length(row.Username[:])]), 33 | string(row.Email[:utils.Length(row.Email[:])]), 34 | string(row.Phone[:utils.Length(row.Phone[:])]), 35 | ) 36 | } 37 | 38 | func dumpConst() { 39 | if LeafNodeSize > PageSize { 40 | 
panic("LeafNode too big") 41 | } 42 | if RowSize > LeafNodeCellSize { 43 | panic("Row too big") 44 | } 45 | 46 | fmt.Printf("Row Size %d\n", RowSize) 47 | fmt.Printf("Common Header Size %d\n", CommonHeaderSize) 48 | fmt.Printf("InternalNode Header Size %d\n", InternalNodeHeaderSize) 49 | fmt.Printf("InternalNode Size %d\n", InternalNodeSize) 50 | fmt.Printf("InternalNode Cell Size %d\n", InternalNodeCellSize) 51 | fmt.Printf("InternalNode Max Cell %d\n", InternalNodeMaxCells) 52 | fmt.Printf("LeafNode Header Size %d\n", LeafNodeHeaderSize) 53 | fmt.Printf("LeafNode Size %d\n", LeafNodeSize) 54 | fmt.Printf("LeafNode Cell Size %d\n", LeafNodeCellSize) 55 | fmt.Printf("LeafNode Max Cell %d\n", LeafNodeMaxCells) 56 | fmt.Printf("LeftSplitCount %d\n", LeftSplitCount) 57 | fmt.Printf("RightSplitCount %d\n", RightSplitCount) 58 | } 59 | -------------------------------------------------------------------------------- /node/node.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | const ( 8 | PageSize = 4096 9 | MaxPages = 1024 10 | ) 11 | 12 | var ( 13 | leaf LeafNode 14 | internalNode InternalNode 15 | 16 | CommonHeaderSize = leaf.CommonHeader.Size() 17 | InternalNodeHeaderSize = CommonHeaderSize + internalNode.Header.Size() 18 | InternalNodeSize = internalNode.Size() 19 | InternalNodeCellSize = internalNode.ICells[0].Size() 20 | InternalNodeMaxCells = uint32(len(internalNode.ICells)) 21 | 22 | LeafNodeHeaderSize = CommonHeaderSize + leaf.Header.Size() 23 | LeafNodeSize = leaf.Size() 24 | LeafNodeCellSize = leaf.Cells[0].Size() 25 | LeafNodeMaxCells = uint32(len(leaf.Cells)) 26 | 27 | RightSplitCount = (LeafNodeMaxCells + 1) / 2 28 | LeftSplitCount = LeafNodeMaxCells + 1 - RightSplitCount 29 | ) 30 | 31 | // FindChildByKey returns the index of the child which should contain 32 | // the given key. 
33 | func (d *InternalNode) FindChildByKey(key uint32) uint32 { 34 | var ( 35 | minIdx = uint32(0) 36 | maxIdx = d.Header.KeysNum 37 | ) 38 | for minIdx != maxIdx { 39 | idx := (minIdx + maxIdx) / 2 40 | rightKey := d.ICells[idx].Key 41 | if rightKey >= key { 42 | maxIdx = idx 43 | } else { 44 | minIdx = idx + 1 45 | } 46 | } 47 | 48 | return minIdx 49 | } 50 | 51 | func (d *InternalNode) Child(childIdx uint32) (ptr *uint32) { 52 | keysNum := d.Header.KeysNum 53 | if childIdx > keysNum { 54 | panic(fmt.Sprintf("childIdx %d out of keysNum %d", childIdx, keysNum)) 55 | } else if childIdx == keysNum { 56 | return &d.Header.RightChild 57 | } else { 58 | return &d.ICells[childIdx].Child 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /node/schema_test.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/smartystreets/goconvey/convey" 7 | ) 8 | 9 | func TestSchema(t *testing.T) { 10 | Convey("", t, func() { 11 | dumpConst() 12 | }) 13 | } 14 | 15 | type node struct { 16 | name string 17 | idx uint32 18 | cells [2]cell 19 | } 20 | 21 | type cell struct { 22 | Key uint32 23 | Value [230]byte 24 | } 25 | 26 | func TestStructAssignment(t *testing.T) { 27 | Convey("cell assign", t, func() { 28 | c1 := Cell{ 29 | Key: 1, 30 | Value: [230]byte{'1', '1'}, 31 | } 32 | c2 := Cell{ 33 | Key: 2, 34 | Value: [230]byte{'2', '2'}, 35 | } 36 | So(c2.Key, ShouldEqual, 2) 37 | So(c2.Value[0], ShouldEqual, '2') 38 | So(c2.Value[1], ShouldEqual, '2') 39 | 40 | c2 = c1 41 | So(c2.Key, ShouldEqual, 1) 42 | So(c2.Value[0], ShouldEqual, '1') 43 | So(c2.Value[1], ShouldEqual, '1') 44 | }) 45 | 46 | Convey("node assign", t, func() { 47 | n1 := node{ 48 | name: "n1", 49 | idx: 1, 50 | cells: [2]cell{ 51 | { 52 | Key: 1, 53 | Value: [230]byte{'1', '1'}, 54 | }, 55 | { 56 | Key: 2, 57 | Value: [230]byte{'2', '2'}, 58 | }, 59 | }, 60 | } 61 | n2 := 
node{ 62 | name: "n2", 63 | idx: 2, 64 | cells: [2]cell{ 65 | { 66 | Key: 3, 67 | Value: [230]byte{'3', '3'}, 68 | }, 69 | { 70 | Key: 4, 71 | Value: [230]byte{'4', '4'}, 72 | }, 73 | }, 74 | } 75 | 76 | n2 = n1 77 | So(n2.name, ShouldEqual, "n1") 78 | So(n2.idx, ShouldEqual, 1) 79 | So(n2.cells[0].Key, ShouldEqual, 1) 80 | So(n2.cells[0].Value[0], ShouldEqual, '1') 81 | So(n2.cells[0].Value[1], ShouldEqual, '1') 82 | So(n2.cells[1].Key, ShouldEqual, 2) 83 | So(n2.cells[1].Value[0], ShouldEqual, '2') 84 | So(n2.cells[1].Value[1], ShouldEqual, '2') 85 | }) 86 | } 87 | -------------------------------------------------------------------------------- /README-zh.md: -------------------------------------------------------------------------------- 1 | # Go SQL DB 2 | 3 | [English](README.md) 4 | 5 | "Go SQL DB" 是一个研究目的的支持 SQL 查询的关系型数据库。主要的目标是为了向数据库爱好者展示一个关系型数据库的基本原理和关键设计。因此,为了便于理解,采取了很多取巧但不是很严谨的设计,代码量控制在了 2000 行左右(包含约 400 行单元测试代码)。 6 | 7 | ## 特性列表 8 | 9 | 1. 纯 Golang 实现,不依赖任何第三方包。仅在单元测试中引入了 goconvey 10 | 1. 单元测试覆盖率≈ 73.5% 11 | 12 | #### 存储引擎 13 | 1. 基于 B+Tree 的数据检索结构 14 | 1. 基于 4KB 分页的磁盘持久化引擎 15 | 1. 接近 POD(Plain Old Data)的序列化 & 反序列化 16 | 17 | #### SQL Parser 18 | 1. Tokenizer 基于 text/scanner 实现 19 | 1. 支持简单的 SELECT、INSERT 语法 20 | 1. SELECT 支持数值类型的 WHERE 21 | 1. 支持 LIMIT,但暂不支持 ORDER BY 22 | 1. 如果你想要了解可以生产可用的 SQL Parser 是如何实现的,请参考我从 CockroachDB 剥离的 SQL-2011 标准支持的 [SQL Parser](https://github.com/auxten/postgresql-parser) 23 | 24 | #### 执行计划 Planner 25 | 1. 基于火山模型(Volcano Model)的 [Select 实现](planner/select.go) 26 | 1. 基于 HTTP 的查询和插入接口 27 | 28 | ## 实现的局限 29 | 30 | 1. 暂时没有实现 DDL,仅有固定的 Schema 31 | ```go 32 | struct Row { 33 | Id uint32 34 | Sex byte 35 | Age uint8 36 | Username [32]byte 37 | Email [128]byte 38 | Phone [64]byte 39 | } 40 | ``` 41 | 2. SQL 语法的有限支持,参见[单测用例](parser/parser_test.go) 42 | 3. Tokenizer 由于是基于 Golang 语言本身的一个取巧实现,对于一些字符串里的特殊字符支持会出现问题,可以通过加 `"` 解决 43 | 44 | ## 运行方式 45 | 1. 运行 46 | ```bash 47 | go run . test.db 48 | ``` 49 | 2. 
插入数据 50 | 51 | 执行 `INSERT INTO table (id, username, email) VALUES (10, auxten, "auxtenwpc_gmailcom")` 52 | 可以访问:http://localhost:8080/exec?q=INSERT%20INTO%20table%20(id,%20username,%20email)%20VALUES%20(10,%20auxten,%20%22auxtenwpc_gmailcom%22) 53 | 54 | 3. 查询数据 55 | 56 | 执行 `SELECT * FROM table WHERE id > 3 LIMIT 10` 57 | 可以访问:http://localhost:8080/query?q=SELECT%20*%20FROM%20table%20WHERE%20id%20%3E%203%20LIMIT%2010 58 | 59 | ## 特别感谢 60 | 61 | 1. [SQL-2011 SQL Parser](https://github.com/auxten/postgresql-parser) 62 | 1. [Marshal/Unmarshal Code generation](https://github.com/andyleap/gencode/) 63 | 1. [Document-oriented, embedded SQL database: genji](https://github.com/genjidb/genji) 64 | 1. [CockroachDB](https://github.com/cockroachdb/cockroach) 65 | 1. [Let's Build a Simple Database](https://cstack.github.io/db_tutorial/) 66 | -------------------------------------------------------------------------------- /planner/select_test.go: -------------------------------------------------------------------------------- 1 | package planner 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/auxten/go-sqldb/db" 8 | "github.com/auxten/go-sqldb/node" 9 | "github.com/auxten/go-sqldb/parser" 10 | . 
"github.com/smartystreets/goconvey/convey" 11 | ) 12 | 13 | func TestPlannerSelect(t *testing.T) { 14 | Convey("Volcano model select implementation", t, func() { 15 | const testFile = "test_plan_insert.db" 16 | const InsertCnt = 512 17 | defer func() { 18 | _ = os.Remove(testFile) 19 | }() 20 | table, err := db.Open(testFile) 21 | So(err, ShouldBeNil) 22 | So(table.Pager.Pages[0].LeafNode.CommonHeader.IsInternal, ShouldBeFalse) 23 | So(err, ShouldBeNil) 24 | So(table, ShouldNotBeNil) 25 | 26 | for i := uint32(0); i < InsertCnt; i++ { 27 | row := &node.Row{ 28 | Id: i, 29 | Sex: func(i int) byte { 30 | if uint8(i%2) == 1 { 31 | return 'm' 32 | } else { 33 | return 'f' 34 | } 35 | }(int(i)), 36 | Age: uint8(i % 120), 37 | Username: [32]byte{'a', 'u', 'x', 't', 'e', 'n', byte('a' + i)}, 38 | Email: [128]byte{'a', 'u', 'x', 't', 'e', 'n', '@', byte('a' + i)}, 39 | Phone: [64]byte{'1', '2', '3', '4', '5', '6', '0' + uint8((i/100)%10), '0' + uint8((i/10)%10), '0' + uint8(i%10)}, 40 | } 41 | err = table.Insert(row) 42 | //fmt.Println(i, string(row.Username[:]), string(row.Email[:])) 43 | So(err, ShouldBeNil) 44 | } 45 | 46 | p := &parser.Parser{} 47 | ast, err := p.ParseSelect("SELECT id, username, email FROM table WHERE id > 5 AND id < 7 LIMIT 3") 48 | So(err, ShouldBeNil) 49 | 50 | plan := NewPlan(table) 51 | resultPipe, err := plan.SelectPrepare(ast) 52 | So(err, ShouldBeNil) 53 | var i int 54 | for row := range resultPipe { 55 | i++ 56 | node.PrintRow(row) 57 | } 58 | So(i, ShouldEqual, 1) 59 | 60 | p = &parser.Parser{} 61 | ast, err = p.ParseSelect("SELECT id, username, email FROM table") 62 | So(err, ShouldBeNil) 63 | 64 | plan = NewPlan(table) 65 | resultPipe, err = plan.SelectPrepare(ast) 66 | So(err, ShouldBeNil) 67 | i = 0 68 | for row := range resultPipe { 69 | i++ 70 | node.PrintRow(row) 71 | } 72 | So(i, ShouldEqual, InsertCnt) 73 | }) 74 | } 75 | -------------------------------------------------------------------------------- /planner/insert_test.go: 
-------------------------------------------------------------------------------- 1 | package planner 2 | 3 | import ( 4 | "os" 5 | "testing" 6 | 7 | "github.com/auxten/go-sqldb/db" 8 | "github.com/auxten/go-sqldb/node" 9 | "github.com/auxten/go-sqldb/parser" 10 | . "github.com/smartystreets/goconvey/convey" 11 | ) 12 | 13 | func TestPlan_Insert(t *testing.T) { 14 | Convey("Plan Insert one value", t, func() { 15 | const testFile = "test_plan_insert.db" 16 | defer func() { 17 | _ = os.Remove(testFile) 18 | }() 19 | table, err := db.Open(testFile) 20 | So(err, ShouldBeNil) 21 | plan := NewPlan(table) 22 | 23 | p := &parser.Parser{} 24 | ast, err := p.ParseInsert("INSERT INTO table VALUES " + 25 | "(10, f, 28, auxten, \"auxtenwpc@gmail.com\", 13812341234)", 26 | ) 27 | So(err, ShouldBeNil) 28 | 29 | cnt, err := plan.Insert(ast) 30 | So(err, ShouldBeNil) 31 | So(cnt, ShouldEqual, 1) 32 | 33 | p2 := &parser.Parser{} 34 | ast2, err := p2.ParseSelect("SELECT id, username, email FROM table") 35 | So(err, ShouldBeNil) 36 | 37 | plan2 := NewPlan(table) 38 | resultPipe, err := plan2.SelectPrepare(ast2) 39 | So(err, ShouldBeNil) 40 | cnt = 0 41 | var row *node.Row 42 | for row = range resultPipe { 43 | cnt++ 44 | //fmt.Println(row.Id, string(row.Username[:]), string(row.Email[:])) 45 | } 46 | So(cnt, ShouldEqual, 1) 47 | So(row, ShouldNotBeNil) 48 | So(row.Id, ShouldEqual, 10) 49 | So(string(row.Username[:]), ShouldStartWith, "auxten") 50 | So(string(row.Email[:]), ShouldStartWith, "\"auxtenwpc@gmail.com\"") 51 | So(string(row.Phone[:]), ShouldStartWith, "13812341234") 52 | }) 53 | } 54 | 55 | func TestPlan_Insert_multiple(t *testing.T) { 56 | Convey("Plan Insert multiple values", t, func() { 57 | const testFile = "test_plan_insert_multiple.db" 58 | defer func() { 59 | _ = os.Remove(testFile) 60 | }() 61 | table, err := db.Open(testFile) 62 | So(err, ShouldBeNil) 63 | plan := NewPlan(table) 64 | 65 | p := &parser.Parser{} 66 | ast, err := p.ParseInsert("INSERT INTO table (id, 
username, email) VALUES " + 67 | "(0, auxten, \"auxtenwpc@gmail.com\")," + 68 | "(1, hahaha, \"hahaha@gmail.com\")," + 69 | "(2, aaaa, \"aaaa@gmail.com\")," + 70 | "(3, jijiji, \"jijiji@gmail.com\")", 71 | ) 72 | So(err, ShouldBeNil) 73 | 74 | cnt, err := plan.Insert(ast) 75 | So(err, ShouldBeNil) 76 | So(cnt, ShouldEqual, 4) 77 | 78 | p2 := &parser.Parser{} 79 | ast2, err := p2.ParseSelect("SELECT id, username, email FROM table LIMIT 10") 80 | So(err, ShouldBeNil) 81 | 82 | plan2 := NewPlan(table) 83 | resultPipe, err := plan2.SelectPrepare(ast2) 84 | So(err, ShouldBeNil) 85 | for row := range resultPipe { 86 | node.PrintRow(row) 87 | } 88 | }) 89 | } 90 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Go SQL DB 2 | 3 | [中文](README-zh.md) 4 | 5 | "Go SQL DB" is a relational database that supports SQL queries for research purposes. The main goal is to show the basic principles and key design of a relational database to database enthusiasts. Therefore, to easily understand, a lot of tricks but not very rigorous design was adopted, and the amount of code was controlled at about 2000 lines (including 400 lines of unit tests). 6 | 7 | ## Features 8 | 9 | 1. Pure Golang implementation, does not rely on any third-party packages. Goconvey was only introduced in unit tests 10 | 1. Unit test coverage ≈ 73.5% 11 | 12 | #### Storage Engine 13 | 1. Special Thanks to [Let's Build a Simple Database](https://cstack.github.io/db_tutorial/) 14 | 1. Data retrieval structure based on B+Tree 15 | 1. Disk persistence engine based on 4KB paging 16 | 1. Close to POD (Plain Old Data) speed serialization & deserialization 17 | 18 | #### SQL Parser 19 | 1. Tokenizer is implemented based on text/scanner 20 | 1. Support simple SELECT, INSERT syntax 21 | 1. SELECT supports WHERE of numeric type 22 | 1. Support LIMIT, but not support ORDER BY temporarily 23 | 1. 
If you want to know how the SQL Parser that can be used in the production environment is implemented, please refer to the [SQL Parser](https://github.com/auxten/postgresql-parser) that I stripped from CockroachDB and supports the SQL-2011 standard 24 | 25 | #### Execution Planner 26 | 1. [Select Implementation](planner/select.go) based on Volcano Model 27 | 1. HTTP-based query and insert interface 28 | 29 | ## Known Issues 30 | 31 | 1. No DDL is implemented for the time being, only a fixed Schema 32 | ```go 33 | struct Row { 34 | Id uint32 35 | Sex byte 36 | Age uint8 37 | Username [32]byte 38 | Email [128]byte 39 | Phone [64]byte 40 | } 41 | ``` 42 | 2. For limited support for SQL syntax, see [Test Cases](parser/parser_test.go) 43 | 3. Tokenizer is based on a clever implementation of the Golang language itself, there will be problems with the support of special characters in some strings, which can be solved by quoting strings with `"` 44 | 45 | ## How to run 46 | 1. Run 47 | ```bash 48 | go run . test.db 49 | ``` 50 | 2. INSERT 51 | 52 | Execute `INSERT INTO table (id, username, email) VALUES (10, auxten, "auxtenwpc_gmailcom")` 53 | 54 | BY accessing: http://localhost:8080/exec?q=INSERT%20INTO%20table%20(id,%20username,%20email)%20VALUES%20(10,%20auxten,%20%22auxtenwpc_gmailcom%22) 55 | 56 | 3. SELECT 57 | 58 | Query `SELECT * FROM table WHERE id > 3 LIMIT 10` 59 | 60 | BY accessing: http://localhost:8080/query?q=SELECT%20*%20FROM%20table%20WHERE%20id%20%3E%203%20LIMIT%2010 61 | 62 | ## Thanks 63 | 64 | 1. [SQL-2011 SQL Parser](https://github.com/auxten/postgresql-parser) 65 | 1. [Marshal/Unmarshal Code generation](https://github.com/andyleap/gencode/) 66 | 1. [Document-oriented, embedded SQL database: genji](https://github.com/genjidb/genji) 67 | 1. [CockroachDB](https://github.com/cockroachdb/cockroach) 68 | 1. 
[Let's Build a Simple Database](https://cstack.github.io/db_tutorial/) 69 | 70 | -------------------------------------------------------------------------------- /page/page.go: -------------------------------------------------------------------------------- 1 | package page 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | 8 | "github.com/auxten/go-sqldb/node" 9 | ) 10 | 11 | type Page struct { 12 | // Either InternalNode or LeafNode 13 | InternalNode *node.InternalNode 14 | LeafNode *node.LeafNode 15 | } 16 | 17 | func (p *Page) GetMaxKey() uint32 { 18 | if p.InternalNode != nil { 19 | return p.InternalNode.ICells[p.InternalNode.Header.KeysNum-1].Key 20 | } else if p.LeafNode != nil { 21 | return p.LeafNode.Cells[p.LeafNode.Header.Cells-1].Key 22 | } else { 23 | panic("neither Leaf nor Internal node") 24 | } 25 | } 26 | 27 | type Pager struct { 28 | File *os.File 29 | fileLen int64 30 | PageNum uint32 // PageNum is the boundary of db memory page. 31 | Pages []*Page // Page pointer slice, nil member indicates cache missing. 
32 | } 33 | 34 | func PagerOpen(fileName string) (pager *Pager, err error) { 35 | var ( 36 | dbFile *os.File 37 | fileLen int64 38 | ) 39 | if dbFile, err = os.OpenFile(fileName, os.O_RDWR|os.O_CREATE, 0600); err != nil { 40 | return 41 | } 42 | 43 | // get file length 44 | if fileLen, err = dbFile.Seek(0, io.SeekEnd); err != nil { 45 | return 46 | } 47 | 48 | // dbFile length must be n * node.PageSize, node.PageSize is usually 4096 49 | if fileLen%node.PageSize != 0 { 50 | return 51 | } 52 | 53 | pageNum := uint32(fileLen / node.PageSize) 54 | if pageNum >= node.MaxPages { 55 | panic("file length exceeds max pages limit") 56 | } 57 | pager = &Pager{ 58 | File: dbFile, 59 | fileLen: fileLen, 60 | PageNum: pageNum, 61 | Pages: make([]*Page, node.MaxPages), 62 | } 63 | 64 | return 65 | } 66 | 67 | func (p *Pager) GetPage(pageIdx uint32) (page *Page, err error) { 68 | if pageIdx >= node.MaxPages { 69 | return nil, fmt.Errorf("page index %d out of node.MaxPages %d", pageIdx, node.MaxPages) 70 | } 71 | 72 | if p.Pages[pageIdx] == nil { 73 | // Cache miss 74 | // If pageIdx within data file, just read, 75 | // else just return blank page which will be flushed to db file later. 
76 | if pageIdx <= p.PageNum { 77 | // Load page from file 78 | buf := make([]byte, node.PageSize) 79 | if _, err = p.File.ReadAt(buf, int64(pageIdx*node.PageSize)); err != nil { 80 | if err != io.EOF { 81 | return 82 | } 83 | } 84 | // Empty new page will be leaf node 85 | if buf[0] == 0 { 86 | // Leaf node 87 | leaf := &node.LeafNode{} 88 | if _, err = leaf.Unmarshal(buf); err != nil { 89 | return 90 | } 91 | p.Pages[pageIdx] = &Page{LeafNode: leaf} 92 | } else { 93 | // Internal node 94 | internal := &node.InternalNode{} 95 | if _, err = internal.Unmarshal(buf); err != nil { 96 | return 97 | } 98 | p.Pages[pageIdx] = &Page{InternalNode: internal} 99 | } 100 | if pageIdx >= p.PageNum { 101 | p.PageNum = pageIdx + 1 102 | } 103 | } 104 | } 105 | 106 | return p.Pages[pageIdx], nil 107 | } 108 | 109 | func (p *Pager) Flush(pageIdx uint32) (err error) { 110 | page := p.Pages[pageIdx] 111 | if page == nil { 112 | return fmt.Errorf("flushing nil page") 113 | } 114 | 115 | buf := make([]byte, node.PageSize) 116 | if page.LeafNode != nil { 117 | if _, err = page.LeafNode.Marshal(buf); err != nil { 118 | return 119 | } 120 | } else if page.InternalNode != nil { 121 | if _, err = page.InternalNode.Marshal(buf); err != nil { 122 | return 123 | } 124 | } else { 125 | panic("neither leaf nor internal node") 126 | } 127 | _, err = p.File.WriteAt(buf, int64(pageIdx*node.PageSize)) 128 | 129 | return 130 | } 131 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "os" 7 | "os/signal" 8 | "strings" 9 | "syscall" 10 | 11 | "github.com/auxten/go-sqldb/db" 12 | "github.com/auxten/go-sqldb/node" 13 | "github.com/auxten/go-sqldb/page" 14 | "github.com/auxten/go-sqldb/parser" 15 | "github.com/auxten/go-sqldb/planner" 16 | "github.com/auxten/go-sqldb/utils" 17 | ) 18 | 19 | func main() { 20 | if 
len(os.Args) < 2 { 21 | panic("Database file name must provide") 22 | } 23 | 24 | var ( 25 | err error 26 | t *page.Table 27 | resultPipe chan *node.Row 28 | ) 29 | 30 | fileName := os.Args[1] 31 | if t, err = db.Open(fileName); err != nil { 32 | panic(err) 33 | } 34 | defer db.Close(t) 35 | 36 | http.HandleFunc("/query", func(writer http.ResponseWriter, request *http.Request) { 37 | writer.Header().Set("Content-Type", "application/json") 38 | q := request.URL.Query() 39 | query := q.Get("q") 40 | if query != "" { 41 | var ( 42 | ast *parser.SelectTree 43 | ) 44 | 45 | p := &parser.Parser{} 46 | if p.GetSQLType(query) != parser.SELECT { 47 | _, _ = fmt.Fprintf(writer, "not a SELECT statement") 48 | return 49 | } 50 | if ast, err = p.ParseSelect(query); err != nil { 51 | _, _ = fmt.Fprintf(writer, "parse %s, error: %v", query, err) 52 | return 53 | } 54 | 55 | plan := planner.NewPlan(t) 56 | if resultPipe, err = plan.SelectPrepare(ast); err != nil { 57 | _, _ = fmt.Fprintf(writer, "%s", err) 58 | return 59 | } 60 | 61 | for row := range resultPipe { 62 | if len(ast.Projects) == 1 && ast.Projects[0] == parser.ASTERISK { 63 | node.WriteRow(writer, row) 64 | continue 65 | } 66 | var outRow = make([]string, 0, 3) 67 | for _, proj := range ast.Projects { 68 | switch strings.ToUpper(proj) { 69 | case "ID": 70 | outRow = append(outRow, fmt.Sprintf("%d", row.Id)) 71 | case "SEX": 72 | outRow = append(outRow, fmt.Sprintf("%c", row.Sex)) 73 | case "AGE": 74 | outRow = append(outRow, fmt.Sprintf("%d", row.Age)) 75 | case "USERNAME": 76 | outRow = append(outRow, string(row.Username[:utils.Length(row.Username[:])])) 77 | case "EMAIL": 78 | outRow = append(outRow, string(row.Email[:utils.Length(row.Email[:])])) 79 | case "PHONE": 80 | outRow = append(outRow, string(row.Phone[:utils.Length(row.Phone[:])])) 81 | } 82 | } 83 | _, _ = fmt.Fprint(writer, strings.Join(outRow, "\t"), "\n") 84 | } 85 | return 86 | } 87 | _, _ = fmt.Fprintf(writer, "need /query?q=SELECT") 88 | return 89 | 
}) 90 | http.HandleFunc("/exec", func(writer http.ResponseWriter, request *http.Request) { 91 | writer.Header().Set("Content-Type", "application/json") 92 | q := request.URL.Query() 93 | query := q.Get("q") 94 | if query != "" { 95 | var ( 96 | ast *parser.InsertTree 97 | cnt int 98 | ) 99 | 100 | p := &parser.Parser{} 101 | if p.GetSQLType(query) != parser.INSERT { 102 | _, _ = fmt.Fprintf(writer, "not a INSERT statement") 103 | return 104 | } 105 | ast, err = p.ParseInsert(query) 106 | 107 | plan := planner.NewPlan(t) 108 | if cnt, err = plan.Insert(ast); err != nil { 109 | _, _ = fmt.Fprintf(writer, "%s", err) 110 | return 111 | } 112 | _, _ = fmt.Fprintf(writer, "%d", cnt) 113 | return 114 | } 115 | _, _ = fmt.Fprintf(writer, "need /exec?q=INSERT") 116 | return 117 | }) 118 | go http.ListenAndServe(":8080", nil) 119 | 120 | sigs := make(chan os.Signal, 1) 121 | signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) 122 | <-sigs 123 | } 124 | -------------------------------------------------------------------------------- /page/cursor.go: -------------------------------------------------------------------------------- 1 | package page 2 | 3 | import ( 4 | "github.com/auxten/go-sqldb/node" 5 | ) 6 | 7 | type Cursor struct { 8 | Table *Table 9 | PageIdx uint32 10 | CellIdx uint32 11 | EndOfTable bool 12 | } 13 | 14 | func (cursor *Cursor) LeafNodeInsert(key uint32, row *node.Row) (err error) { 15 | var ( 16 | page *Page 17 | ) 18 | 19 | if page, err = cursor.Table.Pager.GetPage(cursor.PageIdx); err != nil { 20 | return 21 | } 22 | cells := page.LeafNode.Header.Cells 23 | if cells >= node.LeafNodeMaxCells { 24 | // Split leaf node 25 | if err = cursor.LeafNodeSplitInsert(key, row); err != nil { 26 | return 27 | } 28 | return 29 | } 30 | 31 | if cursor.CellIdx < cells { 32 | // Need make room for new cell 33 | for i := cells; i > cursor.CellIdx; i-- { 34 | page.LeafNode.Cells[i] = page.LeafNode.Cells[i-1] 35 | } 36 | } 37 | page.LeafNode.Header.Cells += 1 38 | cell := 
&page.LeafNode.Cells[cursor.CellIdx] 39 | err = saveToCell(cell, key, row) 40 | return 41 | } 42 | 43 | func (cursor *Cursor) LeafNodeSplitInsert(key uint32, row *node.Row) (err error) { 44 | /* 45 | Create a new node and move half the cells over. 46 | Insert the new value in one of the two nodes. 47 | Update parent or create a new parent. 48 | */ 49 | var ( 50 | oldMaxKey, newPageNum uint32 51 | oldPage, newPage *Page 52 | parentPage *Page 53 | pager *Pager 54 | ) 55 | pager = cursor.Table.Pager 56 | if oldPage, err = pager.GetPage(cursor.PageIdx); err != nil { 57 | return 58 | } 59 | oldMaxKey = oldPage.GetMaxKey() 60 | newPageNum = pager.PageNum 61 | // put new page in the end 62 | // TODO: Page recycle 63 | if newPage, err = pager.GetPage(newPageNum); err != nil { 64 | return 65 | } 66 | InitLeafNode(newPage.LeafNode) 67 | newPage.LeafNode.CommonHeader.Parent = oldPage.LeafNode.CommonHeader.Parent 68 | newPage.LeafNode.Header.NextLeaf = oldPage.LeafNode.Header.NextLeaf 69 | oldPage.LeafNode.Header.NextLeaf = newPageNum 70 | 71 | /* 72 | All existing keys plus new key should should be divided 73 | evenly between old (left) and new (right) nodes. 74 | Starting from the right, move each key to correct position. 
75 | */ 76 | for i := node.LeafNodeMaxCells; ; i-- { 77 | if i+1 == 0 { 78 | break 79 | } 80 | var destPage *Page 81 | if i > node.LeftSplitCount { 82 | destPage = newPage 83 | } else { 84 | destPage = oldPage 85 | } 86 | cellIdx := i % node.LeftSplitCount 87 | destCell := &destPage.LeafNode.Cells[cellIdx] 88 | 89 | if i == cursor.CellIdx { 90 | if err = saveToCell(destCell, key, row); err != nil { 91 | return 92 | } 93 | } else if i > cursor.CellIdx { 94 | *destCell = oldPage.LeafNode.Cells[i-1] 95 | } else { 96 | *destCell = oldPage.LeafNode.Cells[i] 97 | } 98 | } 99 | 100 | /* Update cell count on both leaf nodes */ 101 | oldPage.LeafNode.Header.Cells = node.LeftSplitCount 102 | newPage.LeafNode.Header.Cells = node.RightSplitCount 103 | 104 | if oldPage.LeafNode.CommonHeader.IsRoot { 105 | return cursor.Table.CreateNewRoot(newPageNum) 106 | } else { 107 | parentPageIdx := oldPage.LeafNode.CommonHeader.Parent 108 | if parentPage, err = pager.GetPage(parentPageIdx); err != nil { 109 | return 110 | } 111 | // parent page is an internal node 112 | oldChildIdx := parentPage.InternalNode.FindChildByKey(oldMaxKey) 113 | if oldChildIdx >= node.InternalNodeMaxCells { 114 | panic("InternalNodeMaxCells exceeds during leaf node splitting") 115 | } 116 | parentPage.InternalNode.ICells[oldChildIdx].Key = oldPage.GetMaxKey() 117 | err = cursor.Table.InternalNodeInsert(parentPageIdx, newPageNum) 118 | } 119 | return 120 | } 121 | 122 | func saveToCell(cell *node.Cell, key uint32, row *node.Row) (err error) { 123 | rowBuf := make([]byte, row.Size()) 124 | if _, err = row.Marshal(rowBuf); err != nil { 125 | return 126 | } 127 | cell.Key = key 128 | copy(cell.Value[:], rowBuf) 129 | return 130 | } 131 | -------------------------------------------------------------------------------- /parser/parser_test.go: -------------------------------------------------------------------------------- 1 | package parser 2 | 3 | import ( 4 | "testing" 5 | 6 | . 
// TestParser verifies statement-type detection: SELECT and INSERT are
// recognized by their first keyword, anything else is UNSUPPORTED.
func TestParser(t *testing.T) {
	var p *Parser
	Convey("SQL type", t, func() {
		p = &Parser{}
		typ := p.GetSQLType("SELECT * FROM foo WHERE id < 3")
		So(typ, ShouldEqual, SELECT)

		p = &Parser{}
		typ = p.GetSQLType("INSERT INTO foo VALUES (1,2,3)")
		So(typ, ShouldEqual, INSERT)

		p = &Parser{}
		// UPSERT is not implemented by this parser.
		typ = p.GetSQLType("UPSERT INTO foo VALUES (1,2,3)")
		So(typ, ShouldEqual, UNSUPPORTED)
	})
}

// TestParserSelect covers SELECT parsing: projection lists (with and
// without spaces after commas), WHERE token lists, LIMIT values, and the
// table-less `SELECT 1` form.
func TestParserSelect(t *testing.T) {
	var p *Parser
	Convey("SELECT SQL", t, func() {
		p = &Parser{}
		ast, err := p.ParseSelect("SELECT ab,b, c FROM foo WHERE id < 3")
		So(err, ShouldBeNil)
		So(ast.Projects, ShouldResemble, []string{"ab", "b", "c"})
		So(ast.Table, ShouldEqual, "foo")

		p = &Parser{}
		ast, err = p.ParseSelect("SELECT ab,b, c FROM foo LIMIT 3")
		So(err, ShouldBeNil)
		So(ast.Projects, ShouldResemble, []string{"ab", "b", "c"})
		So(ast.Table, ShouldEqual, "foo")
		So(ast.Limit, ShouldEqual, 3)

		p = &Parser{}
		// WHERE is kept as a raw token list, including the AND keyword.
		ast, err = p.ParseSelect("SELECT ab,b,c FROM foo WHERE id < 3 AND ab > 10 LIMIT 11")
		So(err, ShouldBeNil)
		So(ast.Projects, ShouldResemble, []string{"ab", "b", "c"})
		So(ast.Where, ShouldResemble, []string{"id", "<", "3", "AND", "ab", ">", "10"})
		So(ast.Table, ShouldEqual, "foo")
		So(ast.Limit, ShouldEqual, 11)

		p = &Parser{}
		// Constant-only projection needs no source table.
		ast, err = p.ParseSelect("SELECT 1")
		So(err, ShouldBeNil)
		So(ast.Projects, ShouldResemble, []string{"1"})
		So(ast.Table, ShouldEqual, "")
	})
}

// TestParserInsert covers INSERT parsing: explicit column lists, the
// column-less form, multi-row VALUES, and column-count validation.
func TestParserInsert(t *testing.T) {
	var p *Parser
	Convey("INSERT SQL with Column names", t, func() {
		p = &Parser{}
		ast, err := p.ParseInsert("INSERT INTO table_name(column1, column2) VALUES (value1, value2)")
		So(err, ShouldBeNil)
		So(ast.Table, ShouldEqual, "table_name")
		So(ast.Columns, ShouldResemble, []string{"column1", "column2"})
		So(ast.Values, ShouldResemble, [][]string{{"value1", "value2"}})
	})

	Convey("column count miss match", t, func() {
		p = &Parser{}
		// 3 declared columns vs 2 values must be rejected.
		_, err := p.ParseInsert("INSERT INTO table_name(column1, column2, column3) VALUES (value1, value2)")
		So(err, ShouldNotBeNil)
		So(err.Error(), ShouldContainSubstring, "expected column count")
	})

	Convey("INSERT SQL", t, func() {
		p = &Parser{}
		// Without a column list, Columns stays nil.
		ast, err := p.ParseInsert("INSERT INTO table_name VALUES (value1, value2)")
		So(err, ShouldBeNil)
		So(ast.Table, ShouldEqual, "table_name")
		So(ast.Columns, ShouldBeNil)
		So(ast.Values, ShouldResemble, [][]string{{"value1", "value2"}})
	})

	Convey("INSERT multiple rows", t, func() {
		p = &Parser{}
		// Quoted values keep their surrounding quotes in the AST.
		ast, err := p.ParseInsert("INSERT INTO table_name VALUES (\"value1\", value2), (\"value3\", value4)")
		So(err, ShouldBeNil)
		So(ast.Table, ShouldEqual, "table_name")
		So(ast.Columns, ShouldBeNil)
		So(ast.Values, ShouldResemble, [][]string{{"\"value1\"", "value2"}, {"\"value3\"", "value4"}})
	})

	Convey("INSERT multiple rows 2", t, func() {
		p = &Parser{}
		ast, err := p.ParseInsert("INSERT INTO table (id, username, email) VALUES " +
			"(0, auxten, \"auxtenwpc@gmail.com\")," +
			"(1, hahaha, \"hahaha@gmail.com\")," +
			"(2, jijiji, \"jijiji@gmail.com\")")
		So(err, ShouldBeNil)
		So(ast.Table, ShouldEqual, "table")
		So(ast.Columns, ShouldResemble, []string{"id", "username", "email"})
		So(ast.Values, ShouldResemble, [][]string{{"0", "auxten", "\"auxtenwpc@gmail.com\""},
			{"1", "hahaha", "\"hahaha@gmail.com\""}, {"2", "jijiji", "\"jijiji@gmail.com\""}})
	})

	Convey("column count miss match 2", t, func() {
		p = &Parser{}
		// Rows of differing width must be rejected even without a column list.
		_, err := p.ParseInsert("INSERT INTO table_name VALUES (value1, value2), (value3, value4, value5)")
		So(err, ShouldNotBeNil)
		So(err.Error(), ShouldContainSubstring, "expected column count")
	})
}
33 | For more please refer to https://doi.org/10.1109/69.273032 34 | */ 35 | 36 | // Fetch rows from storage pages 37 | go func(out chan<- *node.Row) { 38 | var ( 39 | err error 40 | row *node.Row 41 | ) 42 | defer close(out) 43 | for { 44 | if row, err = plan.fetchRow(table); err != nil { 45 | plan.ErrorsPipe <- err 46 | return 47 | } 48 | 49 | select { 50 | case <-plan.Stop: 51 | return 52 | case out <- row: 53 | } 54 | if plan.cursor.EndOfTable { 55 | break 56 | } 57 | } 58 | }(plan.UnFilteredPipe) 59 | 60 | // Filter rows according the ast.Where 61 | go func(in <-chan *node.Row, out chan<- *node.Row, where []string) { 62 | defer close(out) 63 | for row := range in { 64 | if len(where) == 0 { 65 | out <- row 66 | continue 67 | } 68 | filtered, err := isRowFiltered(where, row) 69 | if err != nil { 70 | plan.ErrorsPipe <- err 71 | return 72 | } 73 | if !filtered { 74 | out <- row 75 | } 76 | } 77 | }(plan.UnFilteredPipe, plan.FilteredPipe, ast.Where) 78 | 79 | // Count row count for LIMIT clause. 
80 | go func(in <-chan *node.Row, out chan<- *node.Row, limit int64) { 81 | defer close(out) 82 | defer close(plan.Stop) 83 | i := int64(0) 84 | for row := range in { 85 | i++ 86 | if i > limit && limit > 0 { 87 | return 88 | } 89 | out <- row 90 | } 91 | }(plan.FilteredPipe, plan.LimitedPipe, ast.Limit) 92 | 93 | return plan.LimitedPipe, nil 94 | } 95 | 96 | func (plan *Plan) fetchRow(table *page.Table) (row *node.Row, err error) { 97 | var p *page.Page 98 | if p, err = table.Pager.GetPage(plan.cursor.PageIdx); err != nil { 99 | return 100 | } 101 | row = new(node.Row) 102 | if _, err = row.Unmarshal(p.LeafNode.Cells[plan.cursor.CellIdx].Value[:]); err != nil { 103 | row = nil 104 | return 105 | } 106 | plan.cursor.CellIdx++ 107 | if plan.cursor.CellIdx >= p.LeafNode.Header.Cells { 108 | // Move cursor to next leaf 109 | if p.LeafNode.Header.NextLeaf == 0 { 110 | // 已经移动到了最右的的叶子节点 111 | plan.cursor.EndOfTable = true 112 | } else { 113 | plan.cursor.PageIdx = p.LeafNode.Header.NextLeaf 114 | plan.cursor.CellIdx = 0 115 | } 116 | } 117 | return 118 | } 119 | 120 | func isRowFiltered(where []string, row *node.Row) (filtered bool, err error) { 121 | // This is a very dirty hack to use Eval to evaluate the Where statement. 
122 | var ( 123 | normalized = make([]string, len(where)) 124 | tv types.TypeAndValue 125 | ) 126 | 127 | /* 128 | type Row struct { 129 | Id uint32 130 | Sex byte 131 | Age uint8 132 | Username [32]byte 133 | Email [128]byte 134 | Phone [64]byte 135 | } 136 | */ 137 | for i, w := range where { 138 | upper := strings.ToUpper(w) 139 | switch upper { 140 | case "AND": 141 | normalized[i] = "&&" 142 | case "OR": 143 | normalized[i] = "||" 144 | case "ID": 145 | normalized[i] = fmt.Sprintf("%d", row.Id) 146 | case "SEX": 147 | normalized[i] = fmt.Sprintf("%c", row.Sex) 148 | case "AGE": 149 | normalized[i] = fmt.Sprintf("%d", row.Age) 150 | case "USERNAME": 151 | normalized[i] = fmt.Sprintf("%s", string(row.Username[:utils.Length(row.Username[:])])) 152 | case "EMAIL": 153 | normalized[i] = fmt.Sprintf("%s", string(row.Email[:utils.Length(row.Email[:])])) 154 | case "PHONE": 155 | normalized[i] = fmt.Sprintf("%s", string(row.Phone[:utils.Length(row.Phone[:])])) 156 | default: 157 | normalized[i] = w 158 | } 159 | } 160 | expr := strings.Join(normalized, " ") 161 | fSet := token.NewFileSet() 162 | if tv, err = types.Eval(fSet, nil, token.NoPos, expr); err != nil { 163 | return 164 | } 165 | if tv.Type == nil { 166 | err = fmt.Errorf("eval(%q) got nil type but no error", expr) 167 | return 168 | } 169 | if !strings.Contains(tv.Type.String(), "bool") { 170 | err = fmt.Errorf("eval(%q) got non bool type", expr) 171 | return 172 | } 173 | if tv.Value.ExactString() == "true" { 174 | filtered = false 175 | } else { 176 | filtered = true 177 | } 178 | return 179 | } 180 | -------------------------------------------------------------------------------- /page/table.go: -------------------------------------------------------------------------------- 1 | package page 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/auxten/go-sqldb/node" 7 | ) 8 | 9 | type Table struct { 10 | Pager *Pager 11 | RootPageIdx uint32 12 | } 13 | 14 | // Seek the page of key, if not exist then return the 
// Seek returns a cursor positioned at key. If the key does not exist, the
// cursor points at the slot where key should be placed by a later INSERT.
func (table *Table) Seek(key uint32) (cursor *Cursor, err error) {
	var (
		rootPage *Page
	)

	if rootPage, err = table.Pager.GetPage(table.RootPageIdx); err != nil {
		return
	}
	// Dispatch on the root's node kind; exactly one of the two node
	// pointers is expected to be non-nil.
	if rootPage.LeafNode != nil {
		return table.leafNodeSeek(table.RootPageIdx, key)
	} else if rootPage.InternalNode != nil {
		return table.internalNodeSeek(table.RootPageIdx, key)
	} else {
		panic("root page type")
	}
	return
}

// Insert adds row to the table keyed by row.Id, rejecting duplicate keys.
func (table *Table) Insert(row *node.Row) (err error) {
	var (
		p   *Page
		cur *Cursor
	)

	if cur, err = table.Seek(row.Id); err != nil {
		return
	}
	if p, err = table.Pager.GetPage(cur.PageIdx); err != nil {
		return
	}
	// Must be leaf node: Seek always lands on a leaf page.
	if p.LeafNode == nil {
		panic("should be leaf node")
	}
	// If the cursor points at an existing cell with the same key, the key
	// is already present.
	if cur.CellIdx < p.LeafNode.Header.Cells {
		if p.LeafNode.Cells[cur.CellIdx].Key == row.Id {
			return fmt.Errorf("duplicate key %d", row.Id)
		}
	}

	return cur.LeafNodeInsert(row.Id, row)
}

// leafNodeSeek binary-searches the leaf page at pageIdx for key and
// returns a cursor at the matching cell, or at the insertion point (the
// first cell with a larger key) when key is absent.
func (table *Table) leafNodeSeek(pageIdx uint32, key uint32) (cursor *Cursor, err error) {
	var (
		p                 *Page
		minIdx, maxIdx, i uint32
	)

	if p, err = table.Pager.GetPage(pageIdx); err != nil {
		return
	}
	maxIdx = p.LeafNode.Header.Cells

	cursor = &Cursor{
		Table:      table,
		PageIdx:    pageIdx,
		EndOfTable: false,
	}

	// Walk the btree: binary search over the half-open range [minIdx, i).
	for i = maxIdx; i != minIdx; {
		index := (minIdx + i) / 2
		keyIdx := p.LeafNode.Cells[index].Key
		if key == keyIdx {
			cursor.CellIdx = index
			return
		}
		if key < keyIdx {
			i = index
		} else {
			minIdx = index + 1
		}
	}

	// Key not found: minIdx is where it would be inserted.
	cursor.CellIdx = minIdx
	return
}

// internalNodeSeek descends from the internal node at pageIdx towards the
// leaf that does (or should) contain key, recursing through nested
// internal nodes.
func (table *Table) internalNodeSeek(pageIdx uint32, key uint32) (cursor *Cursor, err error) {
	var (
		p, childPage *Page
	)

	if p, err = table.Pager.GetPage(pageIdx); err != nil {
		return
	}

	nodeIdx := p.InternalNode.FindChildByKey(key)
	childIdx := *p.InternalNode.Child(nodeIdx)

	if childPage, err = table.Pager.GetPage(childIdx); err != nil {
		return
	}
	// Recurse until a leaf is reached.
	if childPage.InternalNode != nil {
		return table.internalNodeSeek(childIdx, key)
	} else if childPage.LeafNode != nil {
		return table.leafNodeSeek(childIdx, key)
	}
	// NOTE(review): a child page with neither node kind falls through and
	// returns a nil cursor with no error — confirm GetPage always
	// initializes one of the two.
	return
}

// CreateNewRoot rebuilds the root after a split of the root page:
// the old root's contents are copied to a brand-new page (left child),
// rightChildPageIdx becomes the right child, and the root page is
// re-initialized as an internal node keyed by the left child's max key.
func (table *Table) CreateNewRoot(rightChildPageIdx uint32) (err error) {
	/*
		Handle splitting the root.
		Old root copied to new page, becomes left child.
		Address of right child passed in.
		Re-initialize root page to contain the new root node.
		New root node points to two children.
	*/
	var (
		rootPage, rightChildPage, leftChildPage *Page
	)
	if rootPage, err = table.Pager.GetPage(table.RootPageIdx); err != nil {
		return
	}
	if rightChildPage, err = table.Pager.GetPage(rightChildPageIdx); err != nil {
		return
	}
	// The left child is allocated at the end of the file.
	leftChildPageIdx := table.Pager.PageNum
	if leftChildPage, err = table.Pager.GetPage(leftChildPageIdx); err != nil {
		return
	}

	// copy whatever kind of node to leftChildPage, and set nonRoot
	if rootPage.LeafNode != nil {
		*leftChildPage.LeafNode = *rootPage.LeafNode
		leftChildPage.LeafNode.CommonHeader.IsRoot = false
	} else if rootPage.InternalNode != nil {
		// NOTE(review): when the old root was internal, its children still
		// record the root page as Parent after the copy — verify callers
		// tolerate (or later repair) the stale Parent links.
		*leftChildPage.InternalNode = *rootPage.InternalNode
		leftChildPage.InternalNode.CommonHeader.IsRoot = false
	}

	// Re-initialize the root page: it becomes an internal node with one
	// key and two children.
	rootPage.LeafNode = nil
	rootPage.InternalNode = new(node.InternalNode)
	rootNode := rootPage.InternalNode
	InitInternalNode(rootNode)
	rootNode.CommonHeader.IsRoot = true
	rootNode.Header.KeysNum = 1
	childPageIdxPtr := rootNode.Child(0)
	*(childPageIdxPtr) = leftChildPageIdx
	leftChildMaxKey := leftChildPage.GetMaxKey()
	rootNode.ICells[0].Key = leftChildMaxKey
	rootNode.Header.RightChild = rightChildPageIdx
	// Re-parent both children to the root page.
	if leftChildPage.LeafNode != nil {
		leftChildPage.LeafNode.CommonHeader.Parent = table.RootPageIdx
	} else if leftChildPage.InternalNode != nil {
		leftChildPage.InternalNode.CommonHeader.Parent = table.RootPageIdx
	}
	if rightChildPage.LeafNode != nil {
		rightChildPage.LeafNode.CommonHeader.Parent = table.RootPageIdx
	} else if rightChildPage.InternalNode != nil {
		rightChildPage.InternalNode.CommonHeader.Parent = table.RootPageIdx
	}

	return
}

// InternalNodeInsert registers the child at childPageIdx (keyed by its max
// key) with the internal node at parentPageIdx.
func (table *Table) InternalNodeInsert(parentPageIdx uint32, childPageIdx uint32) (err error) {
	/*
		Add a new child/key pair to parent that corresponds to child
	*/
	var (
		parentPage, childPage, rightChildPage *Page
	)

	if parentPage, err = table.Pager.GetPage(parentPageIdx); err != nil {
		return
	}
	if childPage, err = table.Pager.GetPage(childPageIdx); err != nil {
		return
	}
	// Children are indexed inside the parent by their max key.
	childMaxKey := childPage.GetMaxKey()
	index := parentPage.InternalNode.FindChildByKey(childMaxKey)
	originalKeyCnt := parentPage.InternalNode.Header.KeysNum
	parentPage.InternalNode.Header.KeysNum += 1

	if parentPage.InternalNode.Header.KeysNum > node.InternalNodeMaxCells {
		// NOTE(review): internal-node splitting is not implemented, so a
		// full parent is fatal here.
		panic("InternalNodeMaxCells exceeds")
	}

	rightChildPageIdx := parentPage.InternalNode.Header.RightChild
	if rightChildPage, err = table.Pager.GetPage(rightChildPageIdx); err != nil {
		return
	}

	if childMaxKey > rightChildPage.GetMaxKey() {
		/* Replace right child: the new child has the largest key, so the
		   previous right child moves into the cell array. */
		*parentPage.InternalNode.Child(originalKeyCnt) = rightChildPageIdx
		parentPage.InternalNode.ICells[originalKeyCnt].Key = rightChildPage.GetMaxKey()
		parentPage.InternalNode.Header.RightChild = childPageIdx
	} else {
		/* Make room for the new cell */
		for i := originalKeyCnt; i > index; i-- {
			parentPage.InternalNode.ICells[i] = parentPage.InternalNode.ICells[i-1]
		}
		*parentPage.InternalNode.Child(index) = childPageIdx
		parentPage.InternalNode.ICells[index].Key = childMaxKey
	}
	return
}

// Select is an empty placeholder; selection is handled by the planner.
func (table *Table) Select() {

}

// Prepare is an empty placeholder.
func (table *Table) Prepare() {

}
.idea/**/dynamic.xml 49 | .idea/**/uiDesigner.xml 50 | .idea/**/dbnavigator.xml 51 | 52 | # Gradle 53 | .idea/**/gradle.xml 54 | .idea/**/libraries 55 | 56 | # Gradle and Maven with auto-import 57 | # When using Gradle or Maven with auto-import, you should exclude module files, 58 | # since they will be recreated, and may cause churn. Uncomment if using 59 | # auto-import. 60 | # .idea/artifacts 61 | # .idea/compiler.xml 62 | # .idea/jarRepositories.xml 63 | # .idea/modules.xml 64 | # .idea/*.iml 65 | # .idea/modules 66 | # *.iml 67 | # *.ipr 68 | 69 | # CMake 70 | cmake-build-*/ 71 | 72 | # Mongo Explorer plugin 73 | .idea/**/mongoSettings.xml 74 | 75 | # File-based project format 76 | *.iws 77 | 78 | # IntelliJ 79 | out/ 80 | 81 | # mpeltonen/sbt-idea plugin 82 | .idea_modules/ 83 | 84 | # JIRA plugin 85 | atlassian-ide-plugin.xml 86 | 87 | # Cursive Clojure plugin 88 | .idea/replstate.xml 89 | 90 | # Crashlytics plugin (for Android Studio and IntelliJ) 91 | com_crashlytics_export_strings.xml 92 | crashlytics.properties 93 | crashlytics-build.properties 94 | fabric.properties 95 | 96 | # Editor-based Rest Client 97 | .idea/httpRequests 98 | 99 | # Android studio 3.1+ serialized cache file 100 | .idea/caches/build_file_checksums.ser 101 | 102 | ### Intellij+all Patch ### 103 | # Ignores the whole .idea folder and all .iml files 104 | # See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360 105 | 106 | .idea/ 107 | 108 | # Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023 109 | 110 | *.iml 111 | modules.xml 112 | .idea/misc.xml 113 | *.ipr 114 | 115 | # Sonarlint plugin 116 | .idea/sonarlint 117 | 118 | ### macOS ### 119 | # General 120 | .DS_Store 121 | .AppleDouble 122 | .LSOverride 123 | 124 | # Icon must end with two \r 125 | Icon 126 | 127 | 128 | # Thumbnails 129 | ._* 130 | 131 | # Files that might appear in the root of a volume 132 | .DocumentRevisions-V100 133 | 
.fseventsd 134 | .Spotlight-V100 135 | .TemporaryItems 136 | .Trashes 137 | .VolumeIcon.icns 138 | .com.apple.timemachine.donotpresent 139 | 140 | # Directories potentially created on remote AFP share 141 | .AppleDB 142 | .AppleDesktop 143 | Network Trash Folder 144 | Temporary Items 145 | .apdisk 146 | 147 | ### Python ### 148 | # Byte-compiled / optimized / DLL files 149 | __pycache__/ 150 | *.py[cod] 151 | *$py.class 152 | 153 | # C extensions 154 | 155 | # Distribution / packaging 156 | .Python 157 | build/ 158 | develop-eggs/ 159 | dist/ 160 | downloads/ 161 | eggs/ 162 | .eggs/ 163 | lib/ 164 | lib64/ 165 | parts/ 166 | sdist/ 167 | var/ 168 | wheels/ 169 | pip-wheel-metadata/ 170 | share/python-wheels/ 171 | *.egg-info/ 172 | .installed.cfg 173 | *.egg 174 | MANIFEST 175 | 176 | # PyInstaller 177 | # Usually these files are written by a python script from a template 178 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 179 | *.manifest 180 | *.spec 181 | 182 | # Installer logs 183 | pip-log.txt 184 | pip-delete-this-directory.txt 185 | 186 | # Unit test / coverage reports 187 | htmlcov/ 188 | .tox/ 189 | .nox/ 190 | .coverage 191 | .coverage.* 192 | .cache 193 | nosetests.xml 194 | coverage.xml 195 | *.cover 196 | *.py,cover 197 | .hypothesis/ 198 | .pytest_cache/ 199 | pytestdebug.log 200 | 201 | # Translations 202 | *.mo 203 | *.pot 204 | 205 | # Django stuff: 206 | *.log 207 | local_settings.py 208 | db.sqlite3 209 | db.sqlite3-journal 210 | 211 | # Flask stuff: 212 | instance/ 213 | .webassets-cache 214 | 215 | # Scrapy stuff: 216 | .scrapy 217 | 218 | # Sphinx documentation 219 | docs/_build/ 220 | doc/_build/ 221 | 222 | # PyBuilder 223 | target/ 224 | 225 | # Jupyter Notebook 226 | .ipynb_checkpoints 227 | 228 | # IPython 229 | profile_default/ 230 | ipython_config.py 231 | 232 | # pyenv 233 | .python-version 234 | 235 | # pipenv 236 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version 
control. 237 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 238 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 239 | # install all needed dependencies. 240 | #Pipfile.lock 241 | 242 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 243 | __pypackages__/ 244 | 245 | # Celery stuff 246 | celerybeat-schedule 247 | celerybeat.pid 248 | 249 | # SageMath parsed files 250 | *.sage.py 251 | 252 | # Environments 253 | .env 254 | .venv 255 | env/ 256 | venv/ 257 | ENV/ 258 | env.bak/ 259 | venv.bak/ 260 | pythonenv* 261 | 262 | # Spyder project settings 263 | .spyderproject 264 | .spyproject 265 | 266 | # Rope project settings 267 | .ropeproject 268 | 269 | # mkdocs documentation 270 | /site 271 | 272 | # mypy 273 | .mypy_cache/ 274 | .dmypy.json 275 | dmypy.json 276 | 277 | # Pyre type checker 278 | .pyre/ 279 | 280 | # pytype static type analyzer 281 | .pytype/ 282 | 283 | # profiling data 284 | .prof 285 | 286 | ### Vue ### 287 | # gitignore template for Vue.js projects 288 | # 289 | # Recommended template: Node.gitignore 290 | 291 | # TODO: where does this rule come from? 292 | docs/_book 293 | 294 | # TODO: where does this rule come from? 295 | test/*/*.txt 296 | 297 | .DS_Store 298 | node_modules 299 | /dist 300 | 301 | # local env files 302 | .env.local 303 | .env.*.local 304 | 305 | # Log files 306 | npm-debug.log* 307 | yarn-debug.log* 308 | yarn-error.log* 309 | pnpm-debug.log* 310 | 311 | # Editor directories and files 312 | .idea 313 | .vscode 314 | *.suo 315 | *.ntvs* 316 | *.njsproj 317 | *.sln 318 | *.sw? 
// StatementType labels the kind of SQL statement detected by the parser.
type StatementType string

// SQL type tokens
const (
	UNSUPPORTED = "N/A"
	SELECT      = "SELECT"
	FROM        = "FROM"
	WHERE       = "WHERE"
	LIMIT       = "LIMIT"
	INSERT      = "INSERT"
	INTO        = "INTO"
	VALUES      = "VALUES"
	ASTERISK    = "*"
)

// Parser is a tiny hand-written SQL parser built on text/scanner.
type Parser struct {
	s scanner.Scanner
}

// SelectTree is the AST of a parsed SELECT statement. Where holds the raw
// token list of the WHERE clause; Limit is 0 when no LIMIT was given.
type SelectTree struct {
	Projects []string
	Table    string
	Where    []string
	Limit    int64
}

// InsertTree is the AST of a parsed INSERT statement. Columns is nil when
// the statement has no explicit column list; Values holds one slice of
// raw value tokens per inserted row.
type InsertTree struct {
	Table   string
	Columns []string
	Values  [][]string
}

// GetSQLType classifies sql as SELECT, INSERT or UNSUPPORTED, judging by
// its first token only (case-insensitive).
func (p *Parser) GetSQLType(sql string) StatementType {
	// p.s is copied, so every call starts from a fresh scanner state.
	s := p.s
	s.Init(strings.NewReader(sql))
	s.Mode = scanner.ScanIdents | scanner.ScanFloats | scanner.ScanChars | scanner.ScanStrings | scanner.ScanRawStrings

	if tok := s.Scan(); tok != scanner.EOF {
		// Consistency fix: compare against the statement-type constants
		// declared above rather than repeating the string literals.
		switch strings.ToUpper(s.TokenText()) {
		case SELECT:
			return SELECT
		case INSERT:
			return INSERT
		default:
			return UNSUPPORTED
		}
	}

	return UNSUPPORTED
}

/*
ParseSelect is a simple select statement parser.
It's just a demo of SELECT statement parser skeleton.
Currently, the most complex SQL supported here is something like:

	SELECT * FROM foo WHERE id < 3 LIMIT 1;

Even SQL-92 standard is far more complex.
For a production ready SQL parser, see: https://github.com/auxten/postgresql-parser
*/
func (p *Parser) ParseSelect(sel string) (ast *SelectTree, err error) {
	ast = &SelectTree{}
	s := p.s
	s.Init(strings.NewReader(sel))
	s.Mode = scanner.ScanIdents | scanner.ScanFloats | scanner.ScanChars | scanner.ScanStrings | scanner.ScanRawStrings

	if tok := s.Scan(); tok == scanner.EOF || strings.ToUpper(s.TokenText()) != SELECT {
		err = fmt.Errorf("%s is not SELECT statement", sel)
		return
	}

	// Projection list: identifiers (or *) separated by commas, up to FROM.
	ast.Projects = make([]string, 0, 4)
	for {
		if tok := s.Scan(); tok == scanner.EOF {
			if len(ast.Projects) == 0 {
				err = fmt.Errorf("%s get select projects failed", sel)
			}
			// EOF right after the projects: a table-less query like
			// `SELECT 1` — that is legal.
			return
		} else {
			txt := s.TokenText()
			if txt == ASTERISK {
				ast.Projects = append(ast.Projects, ASTERISK)
			} else {
				if txt == "," {
					continue
				} else if strings.ToUpper(txt) == FROM {
					break
				} else {
					ast.Projects = append(ast.Projects, txt)
				}
			}
		}
	}

	// token FROM is scanned, try to get the table name here
	// FROM ?
	if tok := s.Scan(); tok == scanner.EOF {
		// if projects are all constant value, source table is not necessary.
		// eg. SELECT 1;
		return
	} else {
		ast.Table = s.TokenText()
	}

	// WHERE
	if tok := s.Scan(); tok == scanner.EOF {
		// WHERE is not necessary
		return
	}

	txt := s.TokenText()
	if strings.ToUpper(txt) == WHERE {
		// token WHERE is scanned, collect raw predicate tokens until LIMIT
		// or EOF.
		ast.Where = make([]string, 0, 4)
		for {
			if tok := s.Scan(); tok == scanner.EOF {
				if len(ast.Where) == 0 {
					err = fmt.Errorf("missing WHERE clause")
				}
				return
			}
			txt := s.TokenText()
			if strings.ToUpper(txt) == LIMIT {
				break
			}
			ast.Where = append(ast.Where, txt)
		}
	} else if strings.ToUpper(txt) != LIMIT {
		err = fmt.Errorf("expect WHERE or LIMIT here")
		return
	}

	// token LIMIT is scanned, try to get the limit
	if tok := s.Scan(); tok == scanner.EOF {
		err = fmt.Errorf("expect LIMIT clause here")
		return
	}
	txt = s.TokenText()
	ast.Limit, err = strconv.ParseInt(txt, 10, 64)
	return
}

/*
ParseInsert can parse a simple INSERT statement, eg.

	INSERT INTO table_name VALUES (value1, value2, …)

or

	INSERT INTO table_name(column1, column2, …) VALUES (value1, value2, …)

Multiple rows of VALUES are supported. An error is returned when any row's
value count differs from the column list (or from the first row).
*/
func (p *Parser) ParseInsert(insert string) (ast *InsertTree, err error) {
	ast = &InsertTree{}
	s := p.s
	s.Init(strings.NewReader(insert))
	s.Mode = scanner.ScanIdents | scanner.ScanFloats | scanner.ScanChars | scanner.ScanStrings | scanner.ScanRawStrings

	if tok := s.Scan(); tok == scanner.EOF || strings.ToUpper(s.TokenText()) != INSERT {
		err = fmt.Errorf("%s is not INSERT statement", insert)
		return
	}

	if tok := s.Scan(); tok == scanner.EOF || strings.ToUpper(s.TokenText()) != INTO {
		err = fmt.Errorf("%s expect INTO after INSERT", insert)
		return
	}

	// Table name
	if tok := s.Scan(); tok == scanner.EOF {
		err = fmt.Errorf("%s expect table after INSERT INTO", insert)
		return
	} else {
		ast.Table = s.TokenText()
	}

	var columnCnt int
	// try get colNames: an optional "(col1, col2, …)" before VALUES.
	if tok := s.Scan(); tok == scanner.EOF {
		err = fmt.Errorf("%s expect VALUES or (colNames)", insert)
		return
	} else {
		txt := strings.ToUpper(s.TokenText())
		if txt == "(" {
			ast.Columns = make([]string, 0, 4)
			for {
				if tok := s.Scan(); tok == scanner.EOF {
					if len(ast.Columns) == 0 {
						err = fmt.Errorf("%s get Columns failed", insert)
					}
					return
				} else {
					txt := s.TokenText()
					if txt == "," {
						continue
					} else if txt == ")" {
						continue
					} else if strings.ToUpper(txt) == VALUES {
						break
					} else {
						ast.Columns = append(ast.Columns, txt)
					}
				}
			}
		} else if txt != VALUES {
			err = fmt.Errorf("%s expect VALUES or '(' here", insert)
			return
		}
	}
	columnCnt = len(ast.Columns)

	// VALUES has been scanned try to get (value1, value2), (value3, value4)
	ast.Values = make([][]string, 0, 4)
rowLoop:
	for {
		if tok := s.Scan(); tok == scanner.EOF {
			break rowLoop
		} else {
			txt := s.TokenText()
			if txt == "," {
				// next row
				continue
			}
			if txt == "(" {
				var row []string
				if columnCnt != 0 {
					row = make([]string, 0, columnCnt)
				} else {
					row = make([]string, 0, 4)
				}
				for {
					if tok := s.Scan(); tok == scanner.EOF {
						break rowLoop
					} else {
						txt := s.TokenText()
						if txt == "," {
							continue
						} else if txt == ")" {
							ast.Values = append(ast.Values, row)
							break
						} else {
							row = append(row, txt)
						}
					}
				}
			}
		}
	}

	// Check if column count identical across the column list and all rows.
	for _, row := range ast.Values {
		if columnCnt == 0 {
			// No explicit column list: the first row sets the width.
			columnCnt = len(row)
		} else {
			if columnCnt != len(row) {
				err = fmt.Errorf(
					"%s expected column count is %d, got %d, %v",
					insert, columnCnt, len(row), row,
				)
				return
			}
		}
	}

	return
}
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /node/node.schema.gen.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "io" 5 | "time" 6 | "unsafe" 7 | ) 8 | 9 | var ( 10 | _ = unsafe.Sizeof(0) 11 | _ = io.ReadFull 12 | _ = time.Now() 13 | ) 14 | 15 | type Header struct { 16 | IsInternal bool 17 | IsRoot bool 18 | Parent uint32 19 | } 20 | 21 | func (d *Header) Size() (s uint64) { 22 | 23 | s += 6 24 | return 25 | } 26 | func (d *Header) Marshal(buf []byte) ([]byte, error) { 27 | size := d.Size() 28 | { 29 | if uint64(cap(buf)) >= size { 30 | buf = buf[:size] 31 | } else { 32 | buf = make([]byte, size) 33 | } 34 | } 35 | i := uint64(0) 36 | 37 | { 38 | if d.IsInternal { 39 | buf[0] = 1 40 | } else { 41 | buf[0] = 0 42 | } 43 | } 44 | { 45 | if d.IsRoot { 46 | buf[1] = 1 47 | } else { 48 | buf[1] = 0 49 | } 50 | } 51 | { 52 | 53 | buf[0+2] = byte(d.Parent >> 0) 54 | 55 | buf[1+2] = byte(d.Parent >> 8) 56 | 57 | buf[2+2] = byte(d.Parent >> 16) 58 | 59 | buf[3+2] = byte(d.Parent >> 24) 60 | 61 | } 62 | return buf[:i+6], 
nil 63 | } 64 | 65 | func (d *Header) Unmarshal(buf []byte) (uint64, error) { 66 | i := uint64(0) 67 | 68 | { 69 | d.IsInternal = buf[0] == 1 70 | } 71 | { 72 | d.IsRoot = buf[1] == 1 73 | } 74 | { 75 | 76 | d.Parent = 0 | (uint32(buf[0+2]) << 0) | (uint32(buf[1+2]) << 8) | (uint32(buf[2+2]) << 16) | (uint32(buf[3+2]) << 24) 77 | 78 | } 79 | return i + 6, nil 80 | } 81 | 82 | type InternalNodeHeader struct { 83 | KeysNum uint32 84 | RightChild uint32 85 | } 86 | 87 | func (d *InternalNodeHeader) Size() (s uint64) { 88 | 89 | s += 8 90 | return 91 | } 92 | func (d *InternalNodeHeader) Marshal(buf []byte) ([]byte, error) { 93 | size := d.Size() 94 | { 95 | if uint64(cap(buf)) >= size { 96 | buf = buf[:size] 97 | } else { 98 | buf = make([]byte, size) 99 | } 100 | } 101 | i := uint64(0) 102 | 103 | { 104 | 105 | buf[0+0] = byte(d.KeysNum >> 0) 106 | 107 | buf[1+0] = byte(d.KeysNum >> 8) 108 | 109 | buf[2+0] = byte(d.KeysNum >> 16) 110 | 111 | buf[3+0] = byte(d.KeysNum >> 24) 112 | 113 | } 114 | { 115 | 116 | buf[0+4] = byte(d.RightChild >> 0) 117 | 118 | buf[1+4] = byte(d.RightChild >> 8) 119 | 120 | buf[2+4] = byte(d.RightChild >> 16) 121 | 122 | buf[3+4] = byte(d.RightChild >> 24) 123 | 124 | } 125 | return buf[:i+8], nil 126 | } 127 | 128 | func (d *InternalNodeHeader) Unmarshal(buf []byte) (uint64, error) { 129 | i := uint64(0) 130 | 131 | { 132 | 133 | d.KeysNum = 0 | (uint32(buf[0+0]) << 0) | (uint32(buf[1+0]) << 8) | (uint32(buf[2+0]) << 16) | (uint32(buf[3+0]) << 24) 134 | 135 | } 136 | { 137 | 138 | d.RightChild = 0 | (uint32(buf[0+4]) << 0) | (uint32(buf[1+4]) << 8) | (uint32(buf[2+4]) << 16) | (uint32(buf[3+4]) << 24) 139 | 140 | } 141 | return i + 8, nil 142 | } 143 | 144 | type LeafNodeHeader struct { 145 | Cells uint32 146 | NextLeaf uint32 147 | } 148 | 149 | func (d *LeafNodeHeader) Size() (s uint64) { 150 | 151 | s += 8 152 | return 153 | } 154 | func (d *LeafNodeHeader) Marshal(buf []byte) ([]byte, error) { 155 | size := d.Size() 156 | { 157 | if 
uint64(cap(buf)) >= size { 158 | buf = buf[:size] 159 | } else { 160 | buf = make([]byte, size) 161 | } 162 | } 163 | i := uint64(0) 164 | 165 | { 166 | 167 | buf[0+0] = byte(d.Cells >> 0) 168 | 169 | buf[1+0] = byte(d.Cells >> 8) 170 | 171 | buf[2+0] = byte(d.Cells >> 16) 172 | 173 | buf[3+0] = byte(d.Cells >> 24) 174 | 175 | } 176 | { 177 | 178 | buf[0+4] = byte(d.NextLeaf >> 0) 179 | 180 | buf[1+4] = byte(d.NextLeaf >> 8) 181 | 182 | buf[2+4] = byte(d.NextLeaf >> 16) 183 | 184 | buf[3+4] = byte(d.NextLeaf >> 24) 185 | 186 | } 187 | return buf[:i+8], nil 188 | } 189 | 190 | func (d *LeafNodeHeader) Unmarshal(buf []byte) (uint64, error) { 191 | i := uint64(0) 192 | 193 | { 194 | 195 | d.Cells = 0 | (uint32(buf[0+0]) << 0) | (uint32(buf[1+0]) << 8) | (uint32(buf[2+0]) << 16) | (uint32(buf[3+0]) << 24) 196 | 197 | } 198 | { 199 | 200 | d.NextLeaf = 0 | (uint32(buf[0+4]) << 0) | (uint32(buf[1+4]) << 8) | (uint32(buf[2+4]) << 16) | (uint32(buf[3+4]) << 24) 201 | 202 | } 203 | return i + 8, nil 204 | } 205 | 206 | type ICell struct { 207 | Key uint32 208 | Child uint32 209 | } 210 | 211 | func (d *ICell) Size() (s uint64) { 212 | 213 | s += 8 214 | return 215 | } 216 | func (d *ICell) Marshal(buf []byte) ([]byte, error) { 217 | size := d.Size() 218 | { 219 | if uint64(cap(buf)) >= size { 220 | buf = buf[:size] 221 | } else { 222 | buf = make([]byte, size) 223 | } 224 | } 225 | i := uint64(0) 226 | 227 | { 228 | 229 | buf[0+0] = byte(d.Key >> 0) 230 | 231 | buf[1+0] = byte(d.Key >> 8) 232 | 233 | buf[2+0] = byte(d.Key >> 16) 234 | 235 | buf[3+0] = byte(d.Key >> 24) 236 | 237 | } 238 | { 239 | 240 | buf[0+4] = byte(d.Child >> 0) 241 | 242 | buf[1+4] = byte(d.Child >> 8) 243 | 244 | buf[2+4] = byte(d.Child >> 16) 245 | 246 | buf[3+4] = byte(d.Child >> 24) 247 | 248 | } 249 | return buf[:i+8], nil 250 | } 251 | 252 | func (d *ICell) Unmarshal(buf []byte) (uint64, error) { 253 | i := uint64(0) 254 | 255 | { 256 | 257 | d.Key = 0 | (uint32(buf[0+0]) << 0) | (uint32(buf[1+0]) 
<< 8) | (uint32(buf[2+0]) << 16) | (uint32(buf[3+0]) << 24) 258 | 259 | } 260 | { 261 | 262 | d.Child = 0 | (uint32(buf[0+4]) << 0) | (uint32(buf[1+4]) << 8) | (uint32(buf[2+4]) << 16) | (uint32(buf[3+4]) << 24) 263 | 264 | } 265 | return i + 8, nil 266 | } 267 | 268 | type InternalNode struct { 269 | CommonHeader Header 270 | Header InternalNodeHeader 271 | ICells [510]ICell 272 | } 273 | 274 | func (d *InternalNode) Size() (s uint64) { 275 | 276 | { 277 | s += d.CommonHeader.Size() 278 | } 279 | { 280 | s += d.Header.Size() 281 | } 282 | { 283 | for k := range d.ICells { 284 | _ = k // make compiler happy in case k is unused 285 | 286 | { 287 | s += d.ICells[k].Size() 288 | } 289 | 290 | } 291 | } 292 | return 293 | } 294 | func (d *InternalNode) Marshal(buf []byte) ([]byte, error) { 295 | size := d.Size() 296 | { 297 | if uint64(cap(buf)) >= size { 298 | buf = buf[:size] 299 | } else { 300 | buf = make([]byte, size) 301 | } 302 | } 303 | i := uint64(0) 304 | 305 | { 306 | nbuf, err := d.CommonHeader.Marshal(buf[0:]) 307 | if err != nil { 308 | return nil, err 309 | } 310 | i += uint64(len(nbuf)) 311 | } 312 | { 313 | nbuf, err := d.Header.Marshal(buf[i+0:]) 314 | if err != nil { 315 | return nil, err 316 | } 317 | i += uint64(len(nbuf)) 318 | } 319 | { 320 | for k := range d.ICells { 321 | 322 | { 323 | nbuf, err := d.ICells[k].Marshal(buf[i+0:]) 324 | if err != nil { 325 | return nil, err 326 | } 327 | i += uint64(len(nbuf)) 328 | } 329 | 330 | } 331 | } 332 | return buf[:i+0], nil 333 | } 334 | 335 | func (d *InternalNode) Unmarshal(buf []byte) (uint64, error) { 336 | i := uint64(0) 337 | 338 | { 339 | ni, err := d.CommonHeader.Unmarshal(buf[i+0:]) 340 | if err != nil { 341 | return 0, err 342 | } 343 | i += ni 344 | } 345 | { 346 | ni, err := d.Header.Unmarshal(buf[i+0:]) 347 | if err != nil { 348 | return 0, err 349 | } 350 | i += ni 351 | } 352 | { 353 | for k := range d.ICells { 354 | 355 | { 356 | ni, err := d.ICells[k].Unmarshal(buf[i+0:]) 357 | if err 
!= nil { 358 | return 0, err 359 | } 360 | i += ni 361 | } 362 | 363 | } 364 | } 365 | return i + 0, nil 366 | } 367 | 368 | type Cell struct { 369 | Key uint32 370 | Value [230]byte 371 | } 372 | 373 | func (d *Cell) Size() (s uint64) { 374 | 375 | { 376 | s += 230 377 | } 378 | s += 4 379 | return 380 | } 381 | func (d *Cell) Marshal(buf []byte) ([]byte, error) { 382 | size := d.Size() 383 | { 384 | if uint64(cap(buf)) >= size { 385 | buf = buf[:size] 386 | } else { 387 | buf = make([]byte, size) 388 | } 389 | } 390 | i := uint64(0) 391 | 392 | { 393 | 394 | buf[0+0] = byte(d.Key >> 0) 395 | 396 | buf[1+0] = byte(d.Key >> 8) 397 | 398 | buf[2+0] = byte(d.Key >> 16) 399 | 400 | buf[3+0] = byte(d.Key >> 24) 401 | 402 | } 403 | { 404 | copy(buf[i+4:], d.Value[:]) 405 | i += 230 406 | } 407 | return buf[:i+4], nil 408 | } 409 | 410 | func (d *Cell) Unmarshal(buf []byte) (uint64, error) { 411 | i := uint64(0) 412 | 413 | { 414 | 415 | d.Key = 0 | (uint32(buf[i+0+0]) << 0) | (uint32(buf[i+1+0]) << 8) | (uint32(buf[i+2+0]) << 16) | (uint32(buf[i+3+0]) << 24) 416 | 417 | } 418 | { 419 | copy(d.Value[:], buf[i+4:]) 420 | i += 230 421 | } 422 | return i + 4, nil 423 | } 424 | 425 | type LeafNode struct { 426 | CommonHeader Header 427 | Header LeafNodeHeader 428 | Cells [17]Cell 429 | } 430 | 431 | func (d *LeafNode) Size() (s uint64) { 432 | 433 | { 434 | s += d.CommonHeader.Size() 435 | } 436 | { 437 | s += d.Header.Size() 438 | } 439 | { 440 | for k := range d.Cells { 441 | _ = k // make compiler happy in case k is unused 442 | 443 | { 444 | s += d.Cells[k].Size() 445 | } 446 | 447 | } 448 | } 449 | return 450 | } 451 | func (d *LeafNode) Marshal(buf []byte) ([]byte, error) { 452 | size := d.Size() 453 | { 454 | if uint64(cap(buf)) >= size { 455 | buf = buf[:size] 456 | } else { 457 | buf = make([]byte, size) 458 | } 459 | } 460 | i := uint64(0) 461 | 462 | { 463 | nbuf, err := d.CommonHeader.Marshal(buf[0:]) 464 | if err != nil { 465 | return nil, err 466 | } 467 | i += 
uint64(len(nbuf)) 468 | } 469 | { 470 | nbuf, err := d.Header.Marshal(buf[i+0:]) 471 | if err != nil { 472 | return nil, err 473 | } 474 | i += uint64(len(nbuf)) 475 | } 476 | { 477 | for k := range d.Cells { 478 | 479 | { 480 | nbuf, err := d.Cells[k].Marshal(buf[i+0:]) 481 | if err != nil { 482 | return nil, err 483 | } 484 | i += uint64(len(nbuf)) 485 | } 486 | 487 | } 488 | } 489 | return buf[:i+0], nil 490 | } 491 | 492 | func (d *LeafNode) Unmarshal(buf []byte) (uint64, error) { 493 | i := uint64(0) 494 | 495 | { 496 | ni, err := d.CommonHeader.Unmarshal(buf[i+0:]) 497 | if err != nil { 498 | return 0, err 499 | } 500 | i += ni 501 | } 502 | { 503 | ni, err := d.Header.Unmarshal(buf[i+0:]) 504 | if err != nil { 505 | return 0, err 506 | } 507 | i += ni 508 | } 509 | { 510 | for k := range d.Cells { 511 | 512 | { 513 | ni, err := d.Cells[k].Unmarshal(buf[i+0:]) 514 | if err != nil { 515 | return 0, err 516 | } 517 | i += ni 518 | } 519 | 520 | } 521 | } 522 | return i + 0, nil 523 | } 524 | 525 | type Row struct { 526 | Id uint32 527 | Sex byte 528 | Age uint8 529 | Username [32]byte 530 | Email [128]byte 531 | Phone [64]byte 532 | } 533 | 534 | func (d *Row) Size() (s uint64) { 535 | 536 | { 537 | s += 32 538 | } 539 | { 540 | s += 128 541 | } 542 | { 543 | s += 64 544 | } 545 | s += 6 546 | return 547 | } 548 | func (d *Row) Marshal(buf []byte) ([]byte, error) { 549 | size := d.Size() 550 | { 551 | if uint64(cap(buf)) >= size { 552 | buf = buf[:size] 553 | } else { 554 | buf = make([]byte, size) 555 | } 556 | } 557 | i := uint64(0) 558 | 559 | { 560 | 561 | buf[0+0] = byte(d.Id >> 0) 562 | 563 | buf[1+0] = byte(d.Id >> 8) 564 | 565 | buf[2+0] = byte(d.Id >> 16) 566 | 567 | buf[3+0] = byte(d.Id >> 24) 568 | 569 | } 570 | { 571 | buf[4] = d.Sex 572 | } 573 | { 574 | 575 | buf[0+5] = byte(d.Age >> 0) 576 | 577 | } 578 | { 579 | copy(buf[i+6:], d.Username[:]) 580 | i += 32 581 | } 582 | { 583 | copy(buf[i+6:], d.Email[:]) 584 | i += 128 585 | } 586 | { 587 | 
copy(buf[i+6:], d.Phone[:]) 588 | i += 64 589 | } 590 | return buf[:i+6], nil 591 | } 592 | 593 | func (d *Row) Unmarshal(buf []byte) (uint64, error) { 594 | i := uint64(0) 595 | 596 | { 597 | 598 | d.Id = 0 | (uint32(buf[i+0+0]) << 0) | (uint32(buf[i+1+0]) << 8) | (uint32(buf[i+2+0]) << 16) | (uint32(buf[i+3+0]) << 24) 599 | 600 | } 601 | { 602 | d.Sex = buf[i+4] 603 | } 604 | { 605 | 606 | d.Age = 0 | (uint8(buf[i+0+5]) << 0) 607 | 608 | } 609 | { 610 | copy(d.Username[:], buf[i+6:]) 611 | i += 32 612 | } 613 | { 614 | copy(d.Email[:], buf[i+6:]) 615 | i += 128 616 | } 617 | { 618 | copy(d.Phone[:], buf[i+6:]) 619 | i += 64 620 | } 621 | return i + 6, nil 622 | } 623 | --------------------------------------------------------------------------------