├── tablestore ├── mysql │ ├── executor │ │ ├── common.go │ │ ├── aggfuncs │ │ │ ├── builder.go │ │ │ ├── func_count.go │ │ │ └── aggfuncs.go │ │ ├── delete.go │ │ ├── ddl.go │ │ └── data_model_tranfs.go │ ├── server │ │ ├── buffered_read_conn.go │ │ ├── tokenlimiter.go │ │ ├── column.go │ │ ├── packetio.go │ │ └── driver_base.go │ ├── bexpression │ │ ├── util.go │ │ ├── aggregation │ │ │ ├── sum.go │ │ │ ├── count.go │ │ │ ├── max_min.go │ │ │ ├── descriptor.go │ │ │ ├── util.go │ │ │ ├── avg.go │ │ │ ├── aggregation.go │ │ │ └── base_func.go │ │ ├── buildin_info.go │ │ ├── constant.go │ │ ├── column.go │ │ └── expression.go │ ├── sqlexec │ │ └── sql_exec.go │ ├── planner │ │ ├── common_plans.go │ │ ├── util.go │ │ ├── schema.go │ │ └── plan.go │ ├── sctx │ │ └── context.go │ └── expression │ │ ├── aggregation │ │ ├── sum.go │ │ ├── explain.go │ │ ├── max_min.go │ │ ├── first_row.go │ │ ├── bit_or.go │ │ ├── bit_xor.go │ │ ├── bit_and.go │ │ ├── count.go │ │ ├── window_func.go │ │ ├── util.go │ │ ├── avg.go │ │ ├── agg_to_pb.go │ │ └── concat.go │ │ ├── rand.go │ │ ├── vectorized.go │ │ └── builtin_like_vec.go ├── util │ ├── testkit │ │ ├── fake.go │ │ └── testkit_test.go │ ├── bound.go │ └── pdhttp │ │ └── rule.go ├── doc.go ├── server │ ├── hthrift │ │ ├── hbase │ │ │ ├── GoUnusedProtection__.go │ │ │ └── HBase-consts.go │ │ ├── scanner.go │ │ └── thrift_server.go │ ├── driver_zetta.go │ └── driver.go ├── session │ ├── manager.go │ └── structs.go ├── rpc │ ├── request.go │ ├── response.go │ └── mutation.go ├── domain │ ├── session_pool.go │ ├── domainctx.go │ ├── domain_test.go │ ├── schema_checker.go │ └── schema_checker_test.go ├── ddl │ ├── rollback.go │ ├── session_pool.go │ ├── column.go │ ├── callback.go │ ├── delete_range_manager.go │ └── schema_test.go ├── table │ └── tables │ │ ├── keyrange.go │ │ └── result_iter.go ├── infoschema │ └── infoschema_test.go └── zstore │ └── scan.go ├── etc └── zetta.toml ├── README.md ├── pkg ├── meta │ └── autoid │ │ └── errors.go ├── metrics │ ├── oracles.go │ ├── domain.go │ ├── session.go │ ├── tables.go │ └── grpc.go ├── codec │ ├── bytes_test.go │ ├── float.go │ ├── decimal.go │ ├── decimal_test.go │ └── bytes.go ├── structure │ ├── structure.go │ ├── string.go │ └── type.go └── model │ └── flags.go ├── Makefile └── go.mod /tablestore/mysql/executor/common.go: -------------------------------------------------------------------------------- 1 | package executor 2 | -------------------------------------------------------------------------------- /tablestore/util/testkit/fake.go: -------------------------------------------------------------------------------- 1 | // +build codes 2 | 3 | package testkit 4 | -------------------------------------------------------------------------------- /tablestore/doc.go: -------------------------------------------------------------------------------- 1 | package tablestore 2 | 3 | var ( 4 | ReleaseVersion = "None" 5 | BuildTS = "None" 6 | GitHash = "None" 7 | GitBranch = "None" 8 | ) 9 | 10 | -------------------------------------------------------------------------------- /tablestore/server/hthrift/hbase/GoUnusedProtection__.go: -------------------------------------------------------------------------------- 1 | // Autogenerated by Thrift Compiler (0.13.0) 2 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 3 | 4 | package hbase 5 | 6 | var GoUnusedProtection__ int 7 | -------------------------------------------------------------------------------- /tablestore/util/bound.go: 
-------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "github.com/pingcap/tidb/types" 5 | ) 6 | 7 | type Bound struct { 8 | Less bool 9 | Desc bool 10 | Included bool 11 | Value *types.Datum 12 | } 13 | -------------------------------------------------------------------------------- /etc/zetta.toml: -------------------------------------------------------------------------------- 1 | [logging] 2 | log-level = "debug" 3 | log-file = "log/zetta.log" 4 | slow-query-file = "log/slowquery.log" 5 | maxsize = 500 #MB 6 | maxroll = 1 7 | 8 | [zetta.server] 9 | port = 4000 10 | 11 | [zetta.store.mocktikv] 12 | name = "mocktikv" 13 | store-path = "/tmp/tidb" 14 | 15 | [zetta.store.tikv] 16 | name = "tikv" 17 | store-path = "/tmp/tikv" 18 | 19 | 20 | -------------------------------------------------------------------------------- /tablestore/server/hthrift/hbase/HBase-consts.go: -------------------------------------------------------------------------------- 1 | // Autogenerated by Thrift Compiler (0.13.0) 2 | // DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING 3 | 4 | package hbase 5 | 6 | import ( 7 | "bytes" 8 | "context" 9 | "fmt" 10 | "github.com/apache/thrift/lib/go/thrift" 11 | "reflect" 12 | ) 13 | 14 | // (needed to ensure safety because of naive import list construction.) 15 | var _ = thrift.ZERO 16 | var _ = fmt.Printf 17 | var _ = context.Background 18 | var _ = reflect.DeepEqual 19 | var _ = bytes.Equal 20 | 21 | func init() { 22 | } 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Zetta - TableStore On TiKV 2 | 3 | ## Introduction 4 | 5 | Zetta is an open-source NoSQL database that supports transactions and a Cloud Spanner-like API. 6 | 7 | 8 | ## Quick start 9 | 10 | ```bash 11 | $> cd zetta 12 | $> make zetta-server 13 | $> ./bin/zetta-server -store tikv -path='{pd.address:port}' 14 | ``` 15 | 16 | ## Documentation 17 | 18 | 19 | > **Note:** 20 | > 21 | > We are working on this, but could use more help since it is a massive project. 22 | 23 | ## License 24 | 25 | Zetta is under the Apache 2.0 license. See the [LICENSE](./LICENSE) file for details. 26 | 27 | -------------------------------------------------------------------------------- /tablestore/mysql/server/buffered_read_conn.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "bufio" 5 | "net" 6 | ) 7 | 8 | const defaultReaderSize = 16 * 1024 9 | 10 | // bufferedReadConn is a net.Conn compatible structure that reads from bufio.Reader.
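// Buffering the reads means the packet decoder can issue many small Read
// calls while only the 16 KiB bufio.Reader is refilled from the socket.
// A rough, hypothetical call site (rawConn and the io import are assumptions):
//
//	brc := newBufferedReadConn(rawConn)
//	header := make([]byte, 4)
//	_, err := io.ReadFull(brc, header) // usually served from the buffer, not the kernel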
11 | type bufferedReadConn struct { 12 | net.Conn 13 | rb *bufio.Reader 14 | } 15 | 16 | func (conn bufferedReadConn) Read(b []byte) (n int, err error) { 17 | return conn.rb.Read(b) 18 | } 19 | 20 | func newBufferedReadConn(conn net.Conn) *bufferedReadConn { 21 | return &bufferedReadConn{ 22 | Conn: conn, 23 | rb: bufio.NewReaderSize(conn, defaultReaderSize), 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /tablestore/mysql/executor/aggfuncs/builder.go: -------------------------------------------------------------------------------- 1 | package aggfuncs 2 | 3 | import ( 4 | "github.com/pingcap/parser/ast" 5 | "github.com/zhihu/zetta/tablestore/mysql/expression/aggregation" 6 | "github.com/zhihu/zetta/tablestore/mysql/sctx" 7 | ) 8 | 9 | func Build(ctx sctx.Context, aggFuncDesc *aggregation.AggFuncDesc, ordinal int) AggFunc { 10 | switch aggFuncDesc.Name { 11 | case ast.AggFuncCount: 12 | return buildCount(aggFuncDesc, ordinal) 13 | } 14 | return nil 15 | } 16 | 17 | func buildCount(aggFuncDesc *aggregation.AggFuncDesc, ordinal int) AggFunc { 18 | base := baseAggFunc{ 19 | args: aggFuncDesc.Args, 20 | ordinal: ordinal, 21 | } 22 | return &partialCount{baseCount{base}} 23 | } 24 | -------------------------------------------------------------------------------- /tablestore/mysql/bexpression/util.go: -------------------------------------------------------------------------------- 1 | package expression 2 | 3 | // ExtractColumns extracts all columns from an expression. 4 | func ExtractColumns(expr Expression) []*Column { 5 | // Pre-allocate a slice to reduce allocation, 8 doesn't have special meaning. 6 | result := make([]*Column, 0, 8) 7 | return extractColumns(result, expr, nil) 8 | } 9 | 10 | func extractColumns(result []*Column, expr Expression, filter func(*Column) bool) []*Column { 11 | switch v := expr.(type) { 12 | case *Column: 13 | if filter == nil || filter(v) { 14 | result = append(result, v) 15 | } 16 | case *ScalarFunction: 17 | for _, arg := range v.GetArgs() { 18 | result = extractColumns(result, arg, filter) 19 | } 20 | } 21 | return result 22 | } 23 | -------------------------------------------------------------------------------- /tablestore/mysql/sqlexec/sql_exec.go: -------------------------------------------------------------------------------- 1 | package sqlexec 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/pingcap/parser/ast" 7 | "github.com/pingcap/tidb/util/chunk" 8 | ) 9 | 10 | type SQLExecutor interface { 11 | Execute(ctx context.Context, sql string) (RecordSet, error) 12 | Close() 13 | } 14 | 15 | // RecordSet is an abstract result set interface to help get data from Plan. 16 | type RecordSet interface { 17 | // Fields gets result fields. 18 | Fields() []*ast.ResultField 19 | 20 | // Next reads records into chunk. 21 | Next(ctx context.Context, req *chunk.Chunk) error 22 | 23 | // NewChunk create a chunk. 24 | NewChunk() *chunk.Chunk 25 | 26 | // Close closes the underlying iterator, call Next after Close will 27 | // restart the iteration. 
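	// A typical consumer, sketched assuming TiDB's chunk API: chk := rs.NewChunk();
	// call rs.Next(ctx, chk) in a loop until chk.NumRows() == 0 or an error is
	// returned, then call Close.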
28 | Close() error 29 | } 30 | -------------------------------------------------------------------------------- /tablestore/mysql/planner/common_plans.go: -------------------------------------------------------------------------------- 1 | package planner 2 | 3 | import ( 4 | "github.com/pingcap/parser/ast" 5 | "github.com/pingcap/tidb/types" 6 | "github.com/zhihu/zetta/pkg/model" 7 | "github.com/zhihu/zetta/tablestore/mysql/expression" 8 | ) 9 | 10 | // Simple represents a simple statement plan which doesn't need any optimization. 11 | type Simple struct { 12 | baseSchemaProducer 13 | 14 | Statement ast.StmtNode 15 | } 16 | 17 | type Set struct { 18 | baseSchemaProducer 19 | 20 | VarAssigns []*expression.VarAssignment 21 | } 22 | 23 | type SplitRegion struct { 24 | baseSchemaProducer 25 | 26 | IndexMeta *model.IndexMeta 27 | TableMeta *model.TableMeta 28 | Lower []types.Datum 29 | Upper []types.Datum 30 | ValueLists [][]types.Datum 31 | Num int 32 | } 33 | -------------------------------------------------------------------------------- /tablestore/server/hthrift/scanner.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package hthrift 15 | 16 | import "github.com/zhihu/zetta/tablestore/session" 17 | 18 | type TScanner struct { 19 | ResultIter session.RecordSet 20 | scannerID int64 21 | } 22 | -------------------------------------------------------------------------------- /tablestore/session/manager.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package session 15 | 16 | type ProcessInfo struct { 17 | } 18 | 19 | type SessionManager interface { 20 | ShowProcessList() map[uint64]*ProcessInfo 21 | GetProcessInfo(id uint64) (*ProcessInfo, bool) 22 | Kill(connectionID uint64, query bool) 23 | } 24 | -------------------------------------------------------------------------------- /tablestore/mysql/bexpression/aggregation/sum.go: -------------------------------------------------------------------------------- 1 | package aggregation 2 | 3 | import ( 4 | "github.com/pingcap/tidb/sessionctx/stmtctx" 5 | "github.com/pingcap/tidb/types" 6 | "github.com/pingcap/tidb/util/chunk" 7 | ) 8 | 9 | type sumFunction struct { 10 | aggFunction 11 | } 12 | 13 | // Update implements Aggregation interface. 
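// updateSum is assumed to be defined on the embedded aggFunction (in the
// sibling aggregation.go/base_func.go): it evaluates the argument for the row
// and folds the value into evalCtx.Value, so Update is only a thin wrapper.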
14 | func (sf *sumFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error { 15 | return sf.updateSum(sc, evalCtx, row) 16 | } 17 | 18 | // GetResult implements Aggregation interface. 19 | func (sf *sumFunction) GetResult(evalCtx *AggEvaluateContext) (d types.Datum) { 20 | return evalCtx.Value 21 | } 22 | 23 | // GetPartialResult implements Aggregation interface. 24 | func (sf *sumFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { 25 | return []types.Datum{sf.GetResult(evalCtx)} 26 | } 27 | -------------------------------------------------------------------------------- /tablestore/util/testkit/testkit_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package testkit 15 | 16 | import ( 17 | "testing" 18 | 19 | "github.com/pingcap/check" 20 | ) 21 | 22 | var _ = check.Suite(&testKitSuite{}) 23 | 24 | func TestT(t *testing.T) { 25 | check.TestingT(t) 26 | } 27 | 28 | type testKitSuite struct { 29 | } 30 | 31 | func (s testKitSuite) TestSort(c *check.C) { 32 | result := &Result{ 33 | rows: [][]string{{"1", "1", "", ""}, {"2", "2", "2", "3"}}, 34 | c: c, 35 | comment: check.Commentf(""), 36 | } 37 | result.Sort().Check(Rows("1 1 ", "2 2 2 3")) 38 | } 39 | -------------------------------------------------------------------------------- /tablestore/rpc/request.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
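// This file appears to gather the request-side wrappers: MutationRequest and
// ReadRequest hide the generated zetta-proto types behind small accessor
// interfaces, while readRequest and SparseReadRequest embed the raw proto messages.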
13 | 14 | package rpc 15 | 16 | import ( 17 | tspb "github.com/zhihu/zetta-proto/pkg/tablestore" 18 | ) 19 | 20 | type MutationRequest interface { 21 | GetSession() string 22 | GetMutations() []*tspb.Mutation 23 | GetTable() string 24 | } 25 | 26 | type ReadRequest interface { 27 | GetSession() string 28 | GetTable() string 29 | } 30 | 31 | func ReadRequestFromProto(r *tspb.ReadRequest) *readRequest { 32 | return &readRequest{r} 33 | } 34 | 35 | type readRequest struct { 36 | *tspb.ReadRequest 37 | } 38 | 39 | type SparseReadRequest struct { 40 | *tspb.SparseReadRequest 41 | } 42 | -------------------------------------------------------------------------------- /tablestore/mysql/bexpression/aggregation/count.go: -------------------------------------------------------------------------------- 1 | package aggregation 2 | 3 | import ( 4 | "github.com/pingcap/tidb/sessionctx/stmtctx" 5 | "github.com/pingcap/tidb/types" 6 | "github.com/pingcap/tidb/util/chunk" 7 | ) 8 | 9 | type countFunction struct { 10 | aggFunction 11 | } 12 | 13 | // Update implements Aggregation interface. 14 | func (cf *countFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error { 15 | for _, a := range cf.Args { 16 | value, err := a.Eval(row) 17 | if err != nil { 18 | return err 19 | } 20 | if value.IsNull() { 21 | return nil 22 | } 23 | evalCtx.Count++ 24 | } 25 | return nil 26 | } 27 | 28 | func (cf *countFunction) ResetContext(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext) { 29 | evalCtx.Count = 0 30 | } 31 | 32 | // GetResult implements Aggregation interface. 33 | func (cf *countFunction) GetResult(evalCtx *AggEvaluateContext) (d types.Datum) { 34 | d.SetInt64(evalCtx.Count) 35 | return d 36 | } 37 | 38 | // GetPartialResult implements Aggregation interface. 39 | func (cf *countFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { 40 | return []types.Datum{cf.GetResult(evalCtx)} 41 | } 42 | -------------------------------------------------------------------------------- /pkg/meta/autoid/errors.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package autoid 15 | 16 | import ( 17 | "github.com/pingcap/parser/mysql" 18 | "github.com/pingcap/parser/terror" 19 | ) 20 | 21 | // Error instances. 22 | var ( 23 | ErrAutoincReadFailed = terror.ClassAutoid.New(mysql.ErrAutoincReadFailed, mysql.MySQLErrName[mysql.ErrAutoincReadFailed]) 24 | ErrWrongAutoKey = terror.ClassAutoid.New(mysql.ErrWrongAutoKey, mysql.MySQLErrName[mysql.ErrWrongAutoKey]) 25 | ) 26 | 27 | func init() { 28 | // Map error codes to mysql error codes. 
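	// Registering the class-to-code map in terror.ErrClassToMySQLCodes below is
	// what lets these errors reach MySQL clients with their proper error codes
	// rather than a generic unknown-error code.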
29 | tableMySQLErrCodes := map[terror.ErrCode]uint16{ 30 | mysql.ErrAutoincReadFailed: mysql.ErrAutoincReadFailed, 31 | mysql.ErrWrongAutoKey: mysql.ErrWrongAutoKey, 32 | } 33 | terror.ErrClassToMySQLCodes[terror.ClassAutoid] = tableMySQLErrCodes 34 | } 35 | -------------------------------------------------------------------------------- /tablestore/mysql/bexpression/aggregation/max_min.go: -------------------------------------------------------------------------------- 1 | package aggregation 2 | 3 | import ( 4 | "github.com/pingcap/tidb/sessionctx/stmtctx" 5 | "github.com/pingcap/tidb/types" 6 | "github.com/pingcap/tidb/util/chunk" 7 | ) 8 | 9 | type maxMinFunction struct { 10 | aggFunction 11 | isMax bool 12 | } 13 | 14 | // GetResult implements Aggregation interface. 15 | func (mmf *maxMinFunction) GetResult(evalCtx *AggEvaluateContext) (d types.Datum) { 16 | return evalCtx.Value 17 | } 18 | 19 | // GetPartialResult implements Aggregation interface. 20 | func (mmf *maxMinFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { 21 | return []types.Datum{mmf.GetResult(evalCtx)} 22 | } 23 | 24 | // Update implements Aggregation interface. 25 | func (mmf *maxMinFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error { 26 | a := mmf.Args[0] 27 | value, err := a.Eval(row) 28 | if err != nil { 29 | return err 30 | } 31 | if evalCtx.Value.IsNull() { 32 | value.Copy(&evalCtx.Value) 33 | } 34 | if value.IsNull() { 35 | return nil 36 | } 37 | var c int 38 | c, err = evalCtx.Value.CompareDatum(sc, &value) 39 | if err != nil { 40 | return err 41 | } 42 | if (mmf.isMax && c == -1) || (!mmf.isMax && c == 1) { 43 | value.Copy(&evalCtx.Value) 44 | } 45 | return nil 46 | } 47 | -------------------------------------------------------------------------------- /tablestore/mysql/server/tokenlimiter.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package server 15 | 16 | // Token is used as a permission to keep on running. 17 | type Token struct { 18 | } 19 | 20 | // TokenLimiter is used to limit the number of concurrent tasks. 21 | type TokenLimiter struct { 22 | count uint 23 | ch chan *Token 24 | } 25 | 26 | // Put releases the token. 27 | func (tl *TokenLimiter) Put(tk *Token) { 28 | tl.ch <- tk 29 | } 30 | 31 | // Get obtains a token. 32 | func (tl *TokenLimiter) Get() *Token { 33 | return <-tl.ch 34 | } 35 | 36 | // NewTokenLimiter creates a TokenLimiter with count tokens. 
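// A hypothetical caller brackets each unit of work with Get/Put; Get blocks
// once all `count` tokens have been handed out:
//
//	tk := limiter.Get()
//	defer limiter.Put(tk)
//	// ... handle one connection or query ...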
37 | func NewTokenLimiter(count uint) *TokenLimiter { 38 | tl := &TokenLimiter{count: count, ch: make(chan *Token, count)} 39 | for i := uint(0); i < count; i++ { 40 | tl.ch <- &Token{} 41 | } 42 | 43 | return tl 44 | } 45 | -------------------------------------------------------------------------------- /tablestore/mysql/executor/aggfuncs/func_count.go: -------------------------------------------------------------------------------- 1 | package aggfuncs 2 | 3 | import ( 4 | "github.com/pingcap/tidb/util/chunk" 5 | "github.com/zhihu/zetta/tablestore/mysql/sctx" 6 | ) 7 | 8 | type baseCount struct { 9 | baseAggFunc 10 | } 11 | 12 | type partialResult4Count = int64 13 | 14 | func (e *baseCount) AllocPartialResult() PartialResult { 15 | return PartialResult(new(partialResult4Count)) 16 | } 17 | 18 | func (e *baseCount) ResetPartialResult(pr PartialResult) { 19 | p := (*partialResult4Count)(pr) 20 | *p = 0 21 | } 22 | 23 | func (e *baseCount) AppendFinalResult2Chunk(sctx sctx.Context, pr PartialResult, chk *chunk.Chunk) error { 24 | p := (*partialResult4Count)(pr) 25 | chk.AppendInt64(e.ordinal, *p) 26 | return nil 27 | } 28 | 29 | type partialCount struct { 30 | baseCount 31 | } 32 | 33 | func (e *partialCount) UpdatePartialResult(sctx sctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error { 34 | p := (*partialResult4Count)(pr) 35 | for _, row := range rowsInGroup { 36 | /* 37 | fmt.Println("get row value:", row.GetInt64(0)) 38 | input, isNull, err := e.args[0].EvalInt(sctx, row) 39 | if err != nil { 40 | return err 41 | } 42 | if isNull { 43 | continue 44 | } 45 | fmt.Println("input count:", input) 46 | */ 47 | *p += row.GetInt64(0) 48 | } 49 | //*p = *p + int64(len(rowsInGroup)) 50 | return nil 51 | } 52 | -------------------------------------------------------------------------------- /tablestore/mysql/sctx/context.go: -------------------------------------------------------------------------------- 1 | package sctx 2 | 3 | import ( 4 | "context" 5 | //"fmt" 6 | 7 | //"github.com/pingcap/tidb/sessionctx/variable" 8 | 9 | "github.com/pingcap/tidb/kv" 10 | "github.com/pingcap/tidb/sessionctx" 11 | ) 12 | 13 | type Context interface { 14 | sessionctx.Context 15 | // NewTxn creates a new transaction for further execution. 16 | // If old transaction is valid, it is committed first. 17 | // It's used in BEGIN statement and DDL statements to commit old transaction. 18 | //NewTxn(context.Context) error 19 | 20 | // Txn returns the current transaction which is created before executing a statement. 21 | // The returned kv.Transaction is not nil, but it maybe pending or invalid. 22 | // If the active parameter is true, call this function will wait for the pending txn 23 | // to become valid. 24 | GetTxn(active bool, raw bool) (kv.Transaction, error) 25 | CommitTxnWrapper(ctx context.Context) error 26 | 27 | // SetValue saves a value associated with this context for key. 28 | //SetValue(key fmt.Stringer, value interface{}) 29 | 30 | // Value returns the value associated with this context for key. 31 | //Value(key fmt.Stringer) interface{} 32 | //GetSessionVars() *variable.SessionVars 33 | 34 | // PrepareTSFuture uses to prepare timestamp by future. 35 | //PrepareTSFuture(ctx context.Context) 36 | //GetStore() kv.Storage 37 | } 38 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/aggregation/sum.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 PingCAP, Inc. 
2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package aggregation 15 | 16 | import ( 17 | "github.com/pingcap/tidb/sessionctx/stmtctx" 18 | "github.com/pingcap/tidb/types" 19 | "github.com/pingcap/tidb/util/chunk" 20 | ) 21 | 22 | type sumFunction struct { 23 | aggFunction 24 | } 25 | 26 | // Update implements Aggregation interface. 27 | func (sf *sumFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error { 28 | return sf.updateSum(sc, evalCtx, row) 29 | } 30 | 31 | // GetResult implements Aggregation interface. 32 | func (sf *sumFunction) GetResult(evalCtx *AggEvaluateContext) (d types.Datum) { 33 | return evalCtx.Value 34 | } 35 | 36 | // GetPartialResult implements Aggregation interface. 37 | func (sf *sumFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { 38 | return []types.Datum{sf.GetResult(evalCtx)} 39 | } 40 | -------------------------------------------------------------------------------- /tablestore/mysql/bexpression/buildin_info.go: -------------------------------------------------------------------------------- 1 | package expression 2 | 3 | import ( 4 | "github.com/pingcap/tidb/types" 5 | "github.com/pingcap/tidb/util/chunk" 6 | "github.com/zhihu/zetta/tablestore/mysql/sctx" 7 | ) 8 | 9 | var ( 10 | _ functionClass = &databaseFunctionClass{} 11 | ) 12 | 13 | var ( 14 | _ builtinFunc = &builtinDatabaseSig{} 15 | ) 16 | 17 | type databaseFunctionClass struct { 18 | baseFunctionClass 19 | } 20 | 21 | func (c *databaseFunctionClass) getFunction(ctx sctx.Context, args []Expression) (builtinFunc, error) { 22 | if err := c.verifyArgs(args); err != nil { 23 | return nil, err 24 | } 25 | bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETString) 26 | if err != nil { 27 | return nil, err 28 | } 29 | bf.tp.Charset, bf.tp.Collate = ctx.GetSessionVars().GetCharsetInfo() 30 | bf.tp.Flen = 64 31 | sig := &builtinDatabaseSig{bf} 32 | return sig, nil 33 | } 34 | 35 | type builtinDatabaseSig struct { 36 | baseBuiltinFunc 37 | } 38 | 39 | func (b *builtinDatabaseSig) Clone() builtinFunc { 40 | newSig := &builtinDatabaseSig{} 41 | newSig.cloneFrom(&b.baseBuiltinFunc) 42 | return newSig 43 | } 44 | 45 | // evalString evals a builtinDatabaseSig. 
46 | // See https://dev.mysql.com/doc/refman/5.7/en/information-functions.html 47 | func (b *builtinDatabaseSig) evalString(row chunk.Row) (string, bool, error) { 48 | currentDB := b.ctx.GetSessionVars().CurrentDB 49 | return currentDB, currentDB == "", nil 50 | } 51 | -------------------------------------------------------------------------------- /tablestore/domain/session_pool.go: -------------------------------------------------------------------------------- 1 | package domain 2 | 3 | import ( 4 | "errors" 5 | "sync" 6 | 7 | "github.com/ngaut/pools" 8 | ) 9 | 10 | type sessionPool struct { 11 | resources chan pools.Resource 12 | factory pools.Factory 13 | mu struct { 14 | sync.RWMutex 15 | closed bool 16 | } 17 | } 18 | 19 | func newSessionPool(cap int, factory pools.Factory) *sessionPool { 20 | return &sessionPool{ 21 | resources: make(chan pools.Resource, cap), 22 | factory: factory, 23 | } 24 | } 25 | 26 | func (p *sessionPool) Get() (resource pools.Resource, err error) { 27 | var ok bool 28 | select { 29 | case resource, ok = <-p.resources: 30 | if !ok { 31 | err = errors.New("session pool closed") 32 | } 33 | default: 34 | resource, err = p.factory() 35 | } 36 | return 37 | } 38 | 39 | func (p *sessionPool) Put(resource pools.Resource) { 40 | p.mu.RLock() 41 | defer p.mu.RUnlock() 42 | if p.mu.closed { 43 | resource.Close() 44 | return 45 | } 46 | 47 | select { 48 | case p.resources <- resource: 49 | default: 50 | resource.Close() 51 | } 52 | } 53 | func (p *sessionPool) Close() { 54 | p.mu.Lock() 55 | if p.mu.closed { 56 | p.mu.Unlock() 57 | return 58 | } 59 | p.mu.closed = true 60 | close(p.resources) 61 | p.mu.Unlock() 62 | 63 | for r := range p.resources { 64 | r.Close() 65 | } 66 | } 67 | 68 | // SysSessionPool returns the system session pool. 
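// Sketch of the intended use by internal workers (do is assumed to be an
// initialized *Domain):
//
//	se, err := do.SysSessionPool().Get()
//	if err == nil {
//		defer do.SysSessionPool().Put(se)
//		// run internal work with the borrowed session resource
//	}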
69 | func (do *Domain) SysSessionPool() *sessionPool { 70 | return do.sysSessionPool 71 | } 72 | -------------------------------------------------------------------------------- /tablestore/util/pdhttp/rule.go: -------------------------------------------------------------------------------- 1 | package pdhttp 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "net/http" 7 | "path" 8 | 9 | "github.com/pingcap/pd/v4/server/schedule/placement" 10 | ) 11 | 12 | var ( 13 | configPrefix = "pd/api/v1/config" 14 | schedulePrefix = "pd/api/v1/config/schedule" 15 | replicatePrefix = "pd/api/v1/config/replicate" 16 | labelPropertyPrefix = "pd/api/v1/config/label-property" 17 | clusterVersionPrefix = "pd/api/v1/config/cluster-version" 18 | rulesPrefix = "pd/api/v1/config/rules" 19 | rulesBatchPrefix = "pd/api/v1/config/rules/batch" 20 | rulePrefix = "pd/api/v1/config/rule" 21 | ruleGroupPrefix = "pd/api/v1/config/rule_group" 22 | ruleGroupsPrefix = "pd/api/v1/config/rule_groups" 23 | replicationModePrefix = "pd/api/v1/config/replication-mode" 24 | ruleBundlePrefix = "pd/api/v1/config/placement-rule" 25 | ) 26 | 27 | func PutPlacementRules(rules []*placement.Rule) error { 28 | var err error 29 | for _, r := range rules { 30 | if r.Count > 0 { 31 | b, _ := json.Marshal(r) 32 | _, err = doRequest(rulePrefix, http.MethodPost, WithBody("application/json", bytes.NewBuffer(b))) 33 | if err != nil { 34 | return err 35 | } 36 | } 37 | } 38 | for _, r := range rules { 39 | if r.Count == 0 { 40 | _, err = doRequest(path.Join(rulePrefix, r.GroupID, r.ID), http.MethodDelete) 41 | if err != nil { 42 | return err 43 | } 44 | } 45 | } 46 | return err 47 | } 48 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PROJECT=zetta 2 | 3 | ZETTA_PKG := github.com/zhihu/zetta 4 | GOPATH ?= $(shell go env GOPATH) 5 | 6 | PACKAGES := go list ./... 7 | GOCHECKER := awk '{ print } END { if (NR > 0) { exit 1 } }' 8 | 9 | GO:= GOPRIVATE="github.com/pingcap" go 10 | 11 | LDFLAGS += -X "$(ZETTA_PKG)/tablestore.ReleaseVersion=$(shell git describe --tags --dirty)" 12 | LDFLAGS += -X "$(ZETTA_PKG)/tablestore.BuildTS=$(shell date -u '+%Y-%m-%d %I:%M:%S')" 13 | LDFLAGS += -X "$(ZETTA_PKG)/tablestore.GitHash=$(shell git rev-parse HEAD)" 14 | LDFLAGS += -X "$(ZETTA_PKG)/tablestore.GitBranch=$(shell git rev-parse --abbrev-ref HEAD)" 15 | 16 | GOVER_MAJOR := $(shell go version | sed -E -e "s/.*go([0-9]+)[.]([0-9]+).*/\1/") 17 | GOVER_MINOR := $(shell go version | sed -E -e "s/.*go([0-9]+)[.]([0-9]+).*/\2/") 18 | GO111 := $(shell [ $(GOVER_MAJOR) -gt 1 ] || [ $(GOVER_MAJOR) -eq 1 ] && [ $(GOVER_MINOR) -ge 11 ]; echo $$?) 
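# GO111 holds the exit status of the version test above: 0 when the local Go is
# 1.11 or newer (module-aware), 1 otherwise, in which case the build aborts below.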
19 | ifeq ($(GO111), 1) 20 | $(error "go below 1.11 does not support modules") 21 | endif 22 | 23 | default: build 24 | 25 | all: dev 26 | 27 | dev: build tools check test 28 | 29 | ci: build check basic-test 30 | 31 | build: zetta-server 32 | 33 | zetta-server: export GO111MODULE=on 34 | zetta-server: 35 | ifeq ("$(WITH_RACE)", "1") 36 | $(GO) build -race -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o bin/zetta-server cmd/zetta-server/main.go 37 | else 38 | $(GO) build -gcflags '$(GCFLAGS)' -ldflags '$(LDFLAGS)' -o bin/zetta-server cmd/zetta-server/main.go 39 | endif 40 | 41 | .PHONY: all ci vendor 42 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/rand.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package expression 15 | 16 | import "time" 17 | 18 | const maxRandValue = 0x3FFFFFFF 19 | 20 | // MysqlRng is random number generator and this implementation is ported from MySQL. 21 | // See https://github.com/tikv/tikv/pull/6117#issuecomment-562489078. 22 | type MysqlRng struct { 23 | seed1 uint32 24 | seed2 uint32 25 | } 26 | 27 | // NewWithSeed create a rng with random seed. 28 | func NewWithSeed(seed int64) *MysqlRng { 29 | seed1 := uint32(seed*0x10001+55555555) % maxRandValue 30 | seed2 := uint32(seed*0x10000001) % maxRandValue 31 | return &MysqlRng{seed1: seed1, seed2: seed2} 32 | } 33 | 34 | // NewWithTime create a rng with time stamp. 35 | func NewWithTime() *MysqlRng { 36 | return NewWithSeed(time.Now().UnixNano()) 37 | } 38 | 39 | // Gen will generate random number. 40 | func (rng *MysqlRng) Gen() float64 { 41 | rng.seed1 = (rng.seed1*3 + rng.seed2) % maxRandValue 42 | rng.seed2 = (rng.seed1 + rng.seed2 + 33) % maxRandValue 43 | return float64(rng.seed1) / float64(maxRandValue) 44 | } 45 | -------------------------------------------------------------------------------- /tablestore/ddl/rollback.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
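// Rollback currently has no per-job-type handling: every job passed to
// convertJob2RollbackJob below falls through to the default branch, is marked
// JobStateCancelled, and has the error recorded on the job.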
13 | 14 | package ddl 15 | 16 | import ( 17 | "github.com/pingcap/tidb/util/logutil" 18 | "github.com/zhihu/zetta/pkg/meta" 19 | "github.com/zhihu/zetta/pkg/model" 20 | "go.uber.org/zap" 21 | ) 22 | 23 | func convertJob2RollbackJob(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, err error) { 24 | switch job.Type { 25 | // When some types of DDL job failed, we need to roll back, or dirty state would be remained. 26 | // Add more later. 27 | default: 28 | job.State = model.JobStateCancelled 29 | err = errCancelledDDLJob 30 | } 31 | 32 | if err != nil { 33 | if job.State != model.JobStateRollingback && job.State != model.JobStateCancelled { 34 | logutil.Logger(w.logCtx).Error("[ddl] run DDL job failed", zap.String("job", job.String()), zap.Error(err)) 35 | } else { 36 | logutil.Logger(w.logCtx).Info("[ddl] the DDL job is cancelled normally", zap.String("job", job.String()), zap.Error(err)) 37 | } 38 | 39 | job.Error = toTError(err) 40 | job.ErrorCount++ 41 | } 42 | return 43 | } 44 | -------------------------------------------------------------------------------- /tablestore/rpc/response.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package rpc 15 | 16 | import ( 17 | "time" 18 | 19 | structpb "github.com/gogo/protobuf/types" 20 | tspb "github.com/zhihu/zetta-proto/pkg/tablestore" 21 | ) 22 | 23 | type Response interface { 24 | CommitTimestamp() time.Time 25 | } 26 | 27 | func TimestampProto(t time.Time) *structpb.Timestamp { 28 | if t.IsZero() { 29 | return nil 30 | } 31 | ts, err := structpb.TimestampProto(t) 32 | if err != nil { 33 | return nil 34 | } 35 | return ts 36 | } 37 | 38 | func BuildMutationResponse(r Response) *tspb.MutationResponse { 39 | resp := &tspb.MutationResponse{ 40 | CommitTimestamp: TimestampProto(r.CommitTimestamp()), 41 | } 42 | return resp 43 | } 44 | 45 | func BuildCommitResponse(r Response) *tspb.CommitResponse { 46 | resp := &tspb.CommitResponse{ 47 | CommitTimestamp: TimestampProto(r.CommitTimestamp()), 48 | } 49 | return resp 50 | } 51 | 52 | type CommitTimeResp struct { 53 | CommitTs time.Time 54 | } 55 | 56 | func (r CommitTimeResp) CommitTimestamp() time.Time { 57 | return r.CommitTs 58 | } 59 | -------------------------------------------------------------------------------- /pkg/metrics/oracles.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | // Copyright 2017 PingCAP, Inc. 15 | // 16 | // Licensed under the Apache License, Version 2.0 (the "License"); 17 | // you may not use this file except in compliance with the License. 18 | // You may obtain a copy of the License at 19 | // 20 | // http://www.apache.org/licenses/LICENSE-2.0 21 | // 22 | // Unless required by applicable law or agreed to in writing, software 23 | // distributed under the License is distributed on an "AS IS" BASIS, 24 | // See the License for the specific language governing permissions and 25 | // limitations under the License. 26 | 27 | package metrics 28 | 29 | import ( 30 | "github.com/prometheus/client_golang/prometheus" 31 | ) 32 | 33 | // Metrics for the timestamp oracle. 34 | var ( 35 | TSFutureWaitDuration = prometheus.NewHistogram( 36 | prometheus.HistogramOpts{ 37 | Namespace: "zetta", 38 | Subsystem: "pdclient", 39 | Name: "ts_future_wait_seconds", 40 | Help: "Bucketed histogram of seconds cost for waiting timestamp future.", 41 | Buckets: prometheus.ExponentialBuckets(0.000005, 2, 20), // 5us ~ 5s 42 | }) 43 | ) 44 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/aggregation/explain.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package aggregation 15 | 16 | import ( 17 | "bytes" 18 | "fmt" 19 | 20 | "github.com/pingcap/parser/ast" 21 | ) 22 | 23 | // ExplainAggFunc generates explain information for a aggregation function. 
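// For example, a COUNT descriptor over one column renders as roughly
// "count(distinct <arg>)" when HasDistinct is set, where <arg> is whatever the
// argument's ExplainInfo returns (typically a qualified column name).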
24 | func ExplainAggFunc(agg *AggFuncDesc) string { 25 | var buffer bytes.Buffer 26 | fmt.Fprintf(&buffer, "%s(", agg.Name) 27 | if agg.HasDistinct { 28 | buffer.WriteString("distinct ") 29 | } 30 | for i, arg := range agg.Args { 31 | if agg.Name == ast.AggFuncGroupConcat && i == len(agg.Args)-1 { 32 | if len(agg.OrderByItems) > 0 { 33 | buffer.WriteString(" order by ") 34 | for i, item := range agg.OrderByItems { 35 | order := "asc" 36 | if item.Desc { 37 | order = "desc" 38 | } 39 | fmt.Fprintf(&buffer, "%s %s", item.Expr.ExplainInfo(), order) 40 | if i+1 < len(agg.OrderByItems) { 41 | buffer.WriteString(", ") 42 | } 43 | } 44 | } 45 | buffer.WriteString(" separator ") 46 | } else if i != 0 { 47 | buffer.WriteString(", ") 48 | } 49 | buffer.WriteString(arg.ExplainInfo()) 50 | } 51 | buffer.WriteString(")") 52 | return buffer.String() 53 | } 54 | -------------------------------------------------------------------------------- /tablestore/mysql/bexpression/aggregation/descriptor.go: -------------------------------------------------------------------------------- 1 | package aggregation 2 | 3 | import ( 4 | "github.com/pingcap/parser/ast" 5 | "github.com/zhihu/zetta/tablestore/mysql/expression" 6 | "github.com/zhihu/zetta/tablestore/mysql/sctx" 7 | ) 8 | 9 | // AggFuncDesc describes an aggregation function signature, only used in planner. 10 | type AggFuncDesc struct { 11 | baseFuncDesc 12 | // Mode represents the execution mode of the aggregation function. 13 | //Mode AggFunctionMode 14 | // HasDistinct represents whether the aggregation function contains distinct attribute. 15 | HasDistinct bool 16 | // OrderByItems represents the order by clause used in GROUP_CONCAT 17 | //OrderByItems []*util.ByItems 18 | } 19 | 20 | // NewAggFuncDesc creates an aggregation function signature descriptor. 21 | func NewAggFuncDesc(ctx sctx.Context, name string, args []expression.Expression, hasDistinct bool) (*AggFuncDesc, error) { 22 | b, err := newBaseFuncDesc(ctx, name, args) 23 | if err != nil { 24 | return nil, err 25 | } 26 | return &AggFuncDesc{baseFuncDesc: b, HasDistinct: hasDistinct}, nil 27 | } 28 | 29 | // GetAggFunc gets an evaluator according to the aggregation function signature. 30 | func (a *AggFuncDesc) GetAggFunc(ctx sctx.Context) Aggregation { 31 | aggFunc := aggFunction{AggFuncDesc: a} 32 | switch a.Name { 33 | case ast.AggFuncSum: 34 | return &sumFunction{aggFunction: aggFunc} 35 | case ast.AggFuncCount: 36 | return &countFunction{aggFunction: aggFunc} 37 | case ast.AggFuncAvg: 38 | return &avgFunction{aggFunction: aggFunc} 39 | case ast.AggFuncMax: 40 | return &maxMinFunction{aggFunction: aggFunc, isMax: true} 41 | case ast.AggFuncMin: 42 | return &maxMinFunction{aggFunction: aggFunc, isMax: false} 43 | default: 44 | panic("unsupported agg function") 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /tablestore/mysql/planner/util.go: -------------------------------------------------------------------------------- 1 | package planner 2 | 3 | import ( 4 | "github.com/pingcap/parser/ast" 5 | "github.com/zhihu/zetta/tablestore/mysql/expression" 6 | "github.com/zhihu/zetta/tablestore/mysql/sctx" 7 | ) 8 | 9 | // AggregateFuncExtractor visits Expr tree. 10 | // It converts ColunmNameExpr to AggregateFuncExpr and collects AggregateFuncExpr. 11 | type AggregateFuncExtractor struct { 12 | inAggregateFuncExpr bool 13 | // AggFuncs is the collected AggregateFuncExprs. 
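	// (Typical use, sketched: x := &AggregateFuncExtractor{}; node.Accept(x);
	// then read x.AggFuncs -- Accept being the parser/ast visitor entry point.)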
14 | AggFuncs []*ast.AggregateFuncExpr 15 | } 16 | 17 | // Enter implements Visitor interface. 18 | func (a *AggregateFuncExtractor) Enter(n ast.Node) (ast.Node, bool) { 19 | switch n.(type) { 20 | case *ast.AggregateFuncExpr: 21 | a.inAggregateFuncExpr = true 22 | case *ast.SelectStmt, *ast.UnionStmt: 23 | return n, true 24 | } 25 | return n, false 26 | } 27 | 28 | // Leave implements Visitor interface. 29 | func (a *AggregateFuncExtractor) Leave(n ast.Node) (ast.Node, bool) { 30 | switch v := n.(type) { 31 | case *ast.AggregateFuncExpr: 32 | a.inAggregateFuncExpr = false 33 | a.AggFuncs = append(a.AggFuncs, v) 34 | } 35 | return n, true 36 | } 37 | 38 | // ByItems wraps a "by" item. 39 | type ByItems struct { 40 | Expr expression.Expression 41 | Desc bool 42 | } 43 | 44 | // String implements fmt.Stringer interface. 45 | //func (by *ByItems) String() string { 46 | // if by.Desc { 47 | //return fmt.Sprintf("%s true", by.Expr) 48 | //} 49 | //return by.Expr.String() 50 | //} 51 | 52 | // Clone makes a copy of ByItems. 53 | func (by *ByItems) Clone() *ByItems { 54 | return &ByItems{Expr: by.Expr.Clone(), Desc: by.Desc} 55 | } 56 | 57 | // Equal checks whether two ByItems are equal. 58 | func (by *ByItems) Equal(ctx sctx.Context, other *ByItems) bool { 59 | return by.Expr.Equal(ctx, other.Expr) && by.Desc == other.Desc 60 | } 61 | -------------------------------------------------------------------------------- /tablestore/ddl/session_pool.go: -------------------------------------------------------------------------------- 1 | package ddl 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/ngaut/pools" 7 | "github.com/pingcap/errors" 8 | "github.com/pingcap/tidb/util/logutil" 9 | "github.com/zhihu/zetta/tablestore/mysql/sqlexec" 10 | ) 11 | 12 | // sessionPool is used to new session. 13 | type sessionPool struct { 14 | mu struct { 15 | sync.Mutex 16 | closed bool 17 | } 18 | resPool *pools.ResourcePool 19 | } 20 | 21 | func newSessionPool(resPool *pools.ResourcePool) *sessionPool { 22 | return &sessionPool{resPool: resPool} 23 | } 24 | 25 | // get gets sessionctx from context resource pool. 26 | // Please remember to call put after you finished using sessionctx. 27 | func (sg *sessionPool) get() (sqlexec.SQLExecutor, error) { 28 | sg.mu.Lock() 29 | if sg.mu.closed { 30 | sg.mu.Unlock() 31 | return nil, errors.Errorf("sessionPool is closed.") 32 | } 33 | sg.mu.Unlock() 34 | 35 | // no need to protect sg.resPool 36 | resource, err := sg.resPool.Get() 37 | if err != nil { 38 | return nil, errors.Trace(err) 39 | } 40 | 41 | se := resource.(sqlexec.SQLExecutor) 42 | return se, nil 43 | } 44 | 45 | // put returns sessionctx to context resource pool. 46 | func (sg *sessionPool) put(resource pools.Resource) { 47 | if sg.resPool == nil { 48 | return 49 | } 50 | 51 | // no need to protect sg.resPool, even the sg.resPool is closed, the ctx still need to 52 | // put into resPool, because when resPool is closing, it will wait all the ctx returns, then resPool finish closing. 53 | sg.resPool.Put(resource) 54 | } 55 | 56 | // close clean up the sessionPool. 57 | func (sg *sessionPool) close() { 58 | sg.mu.Lock() 59 | defer sg.mu.Unlock() 60 | // prevent closing resPool twice. 
61 | if sg.mu.closed || sg.resPool == nil { 62 | return 63 | } 64 | logutil.BgLogger().Info("[ddl] closing sessionPool") 65 | sg.resPool.Close() 66 | sg.mu.closed = true 67 | } 68 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/aggregation/max_min.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package aggregation 15 | 16 | import ( 17 | "github.com/pingcap/tidb/sessionctx/stmtctx" 18 | "github.com/pingcap/tidb/types" 19 | "github.com/pingcap/tidb/util/chunk" 20 | ) 21 | 22 | type maxMinFunction struct { 23 | aggFunction 24 | isMax bool 25 | } 26 | 27 | // GetResult implements Aggregation interface. 28 | func (mmf *maxMinFunction) GetResult(evalCtx *AggEvaluateContext) (d types.Datum) { 29 | return evalCtx.Value 30 | } 31 | 32 | // GetPartialResult implements Aggregation interface. 33 | func (mmf *maxMinFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { 34 | return []types.Datum{mmf.GetResult(evalCtx)} 35 | } 36 | 37 | // Update implements Aggregation interface. 38 | func (mmf *maxMinFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error { 39 | a := mmf.Args[0] 40 | value, err := a.Eval(row) 41 | if err != nil { 42 | return err 43 | } 44 | if evalCtx.Value.IsNull() { 45 | value.Copy(&evalCtx.Value) 46 | } 47 | if value.IsNull() { 48 | return nil 49 | } 50 | var c int 51 | c, err = evalCtx.Value.CompareDatum(sc, &value) 52 | if err != nil { 53 | return err 54 | } 55 | if (mmf.isMax && c == -1) || (!mmf.isMax && c == 1) { 56 | value.Copy(&evalCtx.Value) 57 | } 58 | return nil 59 | } 60 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/aggregation/first_row.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package aggregation 15 | 16 | import ( 17 | "github.com/pingcap/errors" 18 | "github.com/pingcap/tidb/sessionctx/stmtctx" 19 | "github.com/pingcap/tidb/types" 20 | "github.com/pingcap/tidb/util/chunk" 21 | ) 22 | 23 | type firstRowFunction struct { 24 | aggFunction 25 | } 26 | 27 | // Update implements Aggregation interface. 
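// The evaluation context keeps only the first value it sees: once GotFirstRow
// is set, later rows are ignored until ResetContext clears the flag again.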
28 | func (ff *firstRowFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error { 29 | if evalCtx.GotFirstRow { 30 | return nil 31 | } 32 | if len(ff.Args) != 1 { 33 | return errors.New("Wrong number of args for AggFuncFirstRow") 34 | } 35 | value, err := ff.Args[0].Eval(row) 36 | if err != nil { 37 | return err 38 | } 39 | value.Copy(&evalCtx.Value) 40 | evalCtx.GotFirstRow = true 41 | return nil 42 | } 43 | 44 | // GetResult implements Aggregation interface. 45 | func (ff *firstRowFunction) GetResult(evalCtx *AggEvaluateContext) types.Datum { 46 | return evalCtx.Value 47 | } 48 | 49 | func (ff *firstRowFunction) ResetContext(_ *stmtctx.StatementContext, evalCtx *AggEvaluateContext) { 50 | evalCtx.GotFirstRow = false 51 | } 52 | 53 | // GetPartialResult implements Aggregation interface. 54 | func (ff *firstRowFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { 55 | return []types.Datum{ff.GetResult(evalCtx)} 56 | } 57 | -------------------------------------------------------------------------------- /tablestore/domain/domainctx.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | // Copyright 2015 PingCAP, Inc. 15 | // 16 | // Licensed under the Apache License, Version 2.0 (the "License"); 17 | // you may not use this file except in compliance with the License. 18 | // You may obtain a copy of the License at 19 | // 20 | // http://www.apache.org/licenses/LICENSE-2.0 21 | // 22 | // Unless required by applicable law or agreed to in writing, software 23 | // distributed under the License is distributed on an "AS IS" BASIS, 24 | // See the License for the specific language governing permissions and 25 | // limitations under the License. 26 | 27 | package domain 28 | 29 | import "github.com/zhihu/zetta/tablestore/sessionctx" 30 | 31 | // domainKeyType is a dummy type to avoid naming collision in context. 32 | type domainKeyType int 33 | 34 | // String defines a Stringer function for debugging and pretty printing. 35 | func (k domainKeyType) String() string { 36 | return "domain" 37 | } 38 | 39 | const domainKey domainKeyType = 0 40 | 41 | // BindDomain binds domain to context. 42 | func BindDomain(ctx sessionctx.Context, domain *Domain) { 43 | ctx.SetValue(domainKey, domain) 44 | } 45 | 46 | // GetDomain gets domain from context. 47 | func GetDomain(ctx sessionctx.Context) *Domain { 48 | v, ok := ctx.Value(domainKey).(*Domain) 49 | if !ok { 50 | return nil 51 | } 52 | return v 53 | } 54 | -------------------------------------------------------------------------------- /pkg/metrics/domain.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package metrics 15 | 16 | import ( 17 | "github.com/prometheus/client_golang/prometheus" 18 | ) 19 | 20 | // Metrics for the domain package. 21 | var ( 22 | // LoadSchemaCounter records the counter of load schema. 23 | LoadSchemaCounter = prometheus.NewCounterVec( 24 | prometheus.CounterOpts{ 25 | Namespace: "zetta", 26 | Subsystem: "domain", 27 | Name: "load_schema_total", 28 | Help: "Counter of load schema", 29 | }, []string{LblType}) 30 | 31 | // LoadSchemaDuration records the duration of load schema. 32 | LoadSchemaDuration = prometheus.NewHistogram( 33 | prometheus.HistogramOpts{ 34 | Namespace: "zetta", 35 | Subsystem: "domain", 36 | Name: "load_schema_duration_seconds", 37 | Help: "Bucketed histogram of processing time (s) in load schema.", 38 | Buckets: prometheus.ExponentialBuckets(0.001, 2, 20), // 1ms ~ 4000s 39 | }) 40 | 41 | // LoadPrivilegeCounter records the counter of load privilege. 42 | LoadPrivilegeCounter = prometheus.NewCounterVec( 43 | prometheus.CounterOpts{ 44 | Namespace: "zetta", 45 | Subsystem: "domain", 46 | Name: "load_privilege_total", 47 | Help: "Counter of load privilege", 48 | }, []string{LblType}) 49 | 50 | SchemaValidatorStop = "stop" 51 | SchemaValidatorRestart = "restart" 52 | SchemaValidatorReset = "reset" 53 | SchemaValidatorCacheEmpty = "cache_empty" 54 | SchemaValidatorCacheMiss = "cache_miss" 55 | ) 56 | -------------------------------------------------------------------------------- /pkg/codec/bytes_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package codec 15 | 16 | import ( 17 | . 
"github.com/pingcap/check" 18 | "github.com/pingcap/tidb/util/testleak" 19 | ) 20 | 21 | var _ = Suite(&testBytesSuite{}) 22 | 23 | type testBytesSuite struct { 24 | } 25 | 26 | func (s *testBytesSuite) TestBytesCodec(c *C) { 27 | defer testleak.AfterTest(c)() 28 | inputs := []struct { 29 | enc []byte 30 | dec []byte 31 | }{ 32 | {[]byte{}, []byte{0, 0, 0, 0, 0, 0, 0, 0, 0}}, 33 | {[]byte{0}, []byte{0, 0, 0, 0, 0, 0, 0, 0, 1}}, 34 | {[]byte{1, 2, 3}, []byte{1, 2, 3, 0, 0, 0, 0, 0, 3}}, 35 | {[]byte{1, 2, 3, 0}, []byte{1, 2, 3, 0, 0, 0, 0, 0, 4}}, 36 | {[]byte{1, 2, 3, 4, 5, 6, 7}, []byte{1, 2, 3, 4, 5, 6, 7, 0, 7}}, 37 | {[]byte{0, 0, 0, 0, 0, 0, 0, 0}, []byte{0, 0, 0, 0, 0, 0, 0, 0, 8}}, 38 | {[]byte{1, 2, 3, 4, 5, 6, 7, 8}, []byte{1, 2, 3, 4, 5, 6, 7, 8, 8}}, 39 | {[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9}, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 0, 0, 0, 0, 0, 0, 0, 1}}, 40 | } 41 | 42 | for _, input := range inputs { 43 | b := EncodeBytes(nil, input.enc) 44 | c.Assert(b, BytesEquals, input.dec) 45 | _, d, err := DecodeBytes(b, nil) 46 | c.Assert(err, IsNil) 47 | c.Assert(d, BytesEquals, input.enc) 48 | } 49 | 50 | // Test error decode. 51 | errInputs := [][]byte{ 52 | {1, 2, 3, 4}, 53 | {0, 0, 0, 0, 0, 0, 0, 7}, 54 | {0, 0, 0, 0, 0, 0, 0, 0, 10}, 55 | {1, 2, 0, 0, 0, 0, 0, 1, 3}, 56 | } 57 | 58 | for _, input := range errInputs { 59 | _, _, err := DecodeBytes(input, nil) 60 | c.Assert(err, NotNil) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /pkg/structure/structure.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package structure 15 | 16 | import ( 17 | "github.com/pingcap/parser/terror" 18 | "github.com/pingcap/tidb/kv" 19 | ) 20 | 21 | // structure error codes. 22 | const ( 23 | codeInvalidHashKeyFlag terror.ErrCode = 1 24 | codeInvalidHashKeyPrefix = 2 25 | codeInvalidListIndex = 3 26 | codeInvalidListMetaData = 4 27 | codeWriteOnSnapshot = 5 28 | ) 29 | 30 | var ( 31 | errInvalidHashKeyFlag = terror.ClassStructure.New(codeInvalidHashKeyFlag, "invalid encoded hash key flag") 32 | errInvalidHashKeyPrefix = terror.ClassStructure.New(codeInvalidHashKeyPrefix, "invalid encoded hash key prefix") 33 | errInvalidListIndex = terror.ClassStructure.New(codeInvalidListMetaData, "invalid list index") 34 | errInvalidListMetaData = terror.ClassStructure.New(codeInvalidListMetaData, "invalid list meta data") 35 | errWriteOnSnapshot = terror.ClassStructure.New(codeWriteOnSnapshot, "write on snapshot") 36 | ) 37 | 38 | // NewStructure creates a TxStructure with Retriever, RetrieverMutator and key prefix. 39 | func NewStructure(reader kv.Retriever, readWriter kv.RetrieverMutator, prefix []byte) *TxStructure { 40 | return &TxStructure{ 41 | reader: reader, 42 | readWriter: readWriter, 43 | prefix: prefix, 44 | } 45 | } 46 | 47 | // TxStructure supports some simple data structures like string, hash, list, etc... and 48 | // you can use these in a transaction. 
49 | type TxStructure struct { 50 | reader kv.Retriever 51 | readWriter kv.RetrieverMutator 52 | prefix []byte 53 | } 54 | -------------------------------------------------------------------------------- /tablestore/mysql/planner/schema.go: -------------------------------------------------------------------------------- 1 | package planner 2 | 3 | import ( 4 | "github.com/pingcap/tidb/types" 5 | "github.com/zhihu/zetta/tablestore/mysql/expression" 6 | ) 7 | 8 | // baseSchemaProducer stores the schema for the base plans who can produce schema directly. 9 | type baseSchemaProducer struct { 10 | schema *expression.Schema 11 | names types.NameSlice 12 | basePlan 13 | } 14 | 15 | // OutputNames returns the outputting names of each column. 16 | func (s *baseSchemaProducer) OutputNames() types.NameSlice { 17 | return s.names 18 | } 19 | 20 | func (s *baseSchemaProducer) SetOutputNames(names types.NameSlice) { 21 | s.names = names 22 | } 23 | 24 | // Schema implements the Plan.Schema interface. 25 | func (s *baseSchemaProducer) Schema() *expression.Schema { 26 | if s.schema == nil { 27 | s.schema = expression.NewSchema() 28 | } 29 | return s.schema 30 | } 31 | 32 | // SetSchema implements the Plan.SetSchema interface. 33 | func (s *baseSchemaProducer) SetSchema(schema *expression.Schema) { 34 | s.schema = schema 35 | } 36 | 37 | func (s *baseSchemaProducer) setSchemaAndNames(schema *expression.Schema, names types.NameSlice) { 38 | s.schema = schema 39 | s.names = names 40 | } 41 | 42 | // logicalSchemaProducer stores the schema for the logical plans who can produce schema directly. 43 | type logicalSchemaProducer struct { 44 | schema *expression.Schema 45 | names types.NameSlice 46 | baseLogicalPlan 47 | } 48 | 49 | // Schema implements the Plan.Schema interface. 50 | func (s *logicalSchemaProducer) Schema() *expression.Schema { 51 | if s.schema == nil { 52 | s.schema = expression.NewSchema() 53 | } 54 | return s.schema 55 | } 56 | 57 | func (s *logicalSchemaProducer) OutputNames() types.NameSlice { 58 | return s.names 59 | } 60 | 61 | func (s *logicalSchemaProducer) SetOutputNames(names types.NameSlice) { 62 | s.names = names 63 | } 64 | 65 | // SetSchema implements the Plan.SetSchema interface. 66 | func (s *logicalSchemaProducer) SetSchema(schema *expression.Schema) { 67 | s.schema = schema 68 | } 69 | 70 | func (s *logicalSchemaProducer) setSchemaAndNames(schema *expression.Schema, names types.NameSlice) { 71 | s.schema = schema 72 | s.names = names 73 | } 74 | -------------------------------------------------------------------------------- /tablestore/session/structs.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
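// The two interfaces defined below serve different row formats: ResultSet hands
// back MySQL-style ColumnInfo metadata and fills chunk.Chunk batches, while
// RecordSet exposes tspb.ColumnMeta and yields one row per Next call. A minimal
// consumption sketch for a RecordSet, assuming an rs and ctx obtained elsewhere
// and an implementation like resultIter (table/tables) that streams
// *tspb.SliceCell rows and ends with io.EOF:
//
//	for {
//	    row, err := rs.Next(ctx)
//	    if err == io.EOF {
//	        break
//	    }
//	    if err != nil {
//	        return err
//	    }
//	    cell := row.(*tspb.SliceCell)
//	    _ = cell // hand the cell to the response encoder
//	}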
13 | 14 | package session 15 | 16 | import ( 17 | "context" 18 | 19 | "github.com/pingcap/tidb/util/chunk" 20 | 21 | tspb "github.com/zhihu/zetta-proto/pkg/tablestore" 22 | ) 23 | 24 | // ColumnInfo contains information of a column 25 | type ColumnInfo struct { 26 | Schema string 27 | Table string 28 | OrgTable string 29 | Name string 30 | OrgName string 31 | ColumnLength uint32 32 | Decimal uint8 33 | Type uint8 34 | DefaultValueLength uint64 35 | DefaultValue []byte 36 | } 37 | 38 | // ResultSet is the result set of an query. 39 | type ResultSet interface { 40 | Columns() []*ColumnInfo 41 | NewChunk() *chunk.Chunk 42 | Next(context.Context, *chunk.Chunk) error 43 | StoreFetchedRows(rows []chunk.Row) 44 | GetFetchedRows() []chunk.Row 45 | Close() error 46 | } 47 | 48 | type RecordSet interface { 49 | Columns() []*tspb.ColumnMeta 50 | // Next(context.Context ) 51 | Next(context.Context) (interface{}, error) 52 | 53 | LastErr() error 54 | Close() error 55 | } 56 | 57 | type recordSet struct { 58 | } 59 | 60 | func (r *recordSet) Columns() []*ColumnInfo { 61 | return nil 62 | } 63 | 64 | func (r *recordSet) NewChunk() *chunk.Chunk { 65 | return nil 66 | } 67 | 68 | func (r *recordSet) Next(ctx context.Context, req *chunk.Chunk) error { 69 | return nil 70 | } 71 | 72 | func (r *recordSet) StoreFetchedRows(rows []chunk.Row) { 73 | 74 | } 75 | 76 | func (r *recordSet) GetFetchedRows() []chunk.Row { 77 | return nil 78 | } 79 | 80 | func (r *recordSet) Close() error { 81 | return nil 82 | } 83 | -------------------------------------------------------------------------------- /pkg/codec/float.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package codec 15 | 16 | import ( 17 | "math" 18 | 19 | "github.com/pingcap/errors" 20 | ) 21 | 22 | func encodeFloatToCmpUint64(f float64) uint64 { 23 | u := math.Float64bits(f) 24 | if f >= 0 { 25 | u |= signMask 26 | } else { 27 | u = ^u 28 | } 29 | return u 30 | } 31 | 32 | func decodeCmpUintToFloat(u uint64) float64 { 33 | if u&signMask > 0 { 34 | u &= ^signMask 35 | } else { 36 | u = ^u 37 | } 38 | return math.Float64frombits(u) 39 | } 40 | 41 | // EncodeFloat encodes a float v into a byte slice which can be sorted lexicographically later. 42 | // EncodeFloat guarantees that the encoded value is in ascending order for comparison. 43 | func EncodeFloat(b []byte, v float64) []byte { 44 | u := encodeFloatToCmpUint64(v) 45 | return EncodeUint(b, u) 46 | } 47 | 48 | // DecodeFloat decodes a float from a byte slice generated with EncodeFloat before. 49 | func DecodeFloat(b []byte) ([]byte, float64, error) { 50 | b, u, err := DecodeUint(b) 51 | return b, decodeCmpUintToFloat(u), errors.WithStack(err) 52 | } 53 | 54 | // EncodeFloatDesc encodes a float v into a byte slice which can be sorted lexicographically later. 55 | // EncodeFloatDesc guarantees that the encoded value is in descending order for comparison. 
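// Like EncodeFloat, it maps the float through encodeFloatToCmpUint64 (the sign
// bit is set for non-negative values and negative values are bit-inverted, so
// the resulting uint64s order the same way as the original floats) and then
// writes it with EncodeUintDesc; DecodeFloatDesc reverses both steps.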
56 | func EncodeFloatDesc(b []byte, v float64) []byte { 57 | u := encodeFloatToCmpUint64(v) 58 | return EncodeUintDesc(b, u) 59 | } 60 | 61 | // DecodeFloatDesc decodes a float from a byte slice generated with EncodeFloatDesc before. 62 | func DecodeFloatDesc(b []byte) ([]byte, float64, error) { 63 | b, u, err := DecodeUintDesc(b) 64 | return b, decodeCmpUintToFloat(u), errors.Trace(err) 65 | } 66 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/aggregation/bit_or.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package aggregation 15 | 16 | import ( 17 | "github.com/pingcap/tidb/sessionctx/stmtctx" 18 | "github.com/pingcap/tidb/types" 19 | "github.com/pingcap/tidb/util/chunk" 20 | ) 21 | 22 | type bitOrFunction struct { 23 | aggFunction 24 | } 25 | 26 | func (bf *bitOrFunction) CreateContext(sc *stmtctx.StatementContext) *AggEvaluateContext { 27 | evalCtx := bf.aggFunction.CreateContext(sc) 28 | evalCtx.Value.SetUint64(0) 29 | return evalCtx 30 | } 31 | 32 | func (bf *bitOrFunction) ResetContext(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext) { 33 | evalCtx.Value.SetUint64(0) 34 | } 35 | 36 | // Update implements Aggregation interface. 37 | func (bf *bitOrFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error { 38 | a := bf.Args[0] 39 | value, err := a.Eval(row) 40 | if err != nil { 41 | return err 42 | } 43 | if !value.IsNull() { 44 | if value.Kind() == types.KindUint64 { 45 | evalCtx.Value.SetUint64(evalCtx.Value.GetUint64() | value.GetUint64()) 46 | } else { 47 | int64Value, err := value.ToInt64(sc) 48 | if err != nil { 49 | return err 50 | } 51 | evalCtx.Value.SetUint64(evalCtx.Value.GetUint64() | uint64(int64Value)) 52 | } 53 | } 54 | return nil 55 | } 56 | 57 | // GetResult implements Aggregation interface. 58 | func (bf *bitOrFunction) GetResult(evalCtx *AggEvaluateContext) types.Datum { 59 | return evalCtx.Value 60 | } 61 | 62 | // GetPartialResult implements Aggregation interface. 63 | func (bf *bitOrFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { 64 | return []types.Datum{bf.GetResult(evalCtx)} 65 | } 66 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/aggregation/bit_xor.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package aggregation 15 | 16 | import ( 17 | "github.com/pingcap/tidb/sessionctx/stmtctx" 18 | "github.com/pingcap/tidb/types" 19 | "github.com/pingcap/tidb/util/chunk" 20 | ) 21 | 22 | type bitXorFunction struct { 23 | aggFunction 24 | } 25 | 26 | func (bf *bitXorFunction) CreateContext(sc *stmtctx.StatementContext) *AggEvaluateContext { 27 | evalCtx := bf.aggFunction.CreateContext(sc) 28 | evalCtx.Value.SetUint64(0) 29 | return evalCtx 30 | } 31 | 32 | func (bf *bitXorFunction) ResetContext(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext) { 33 | evalCtx.Value.SetUint64(0) 34 | } 35 | 36 | // Update implements Aggregation interface. 37 | func (bf *bitXorFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error { 38 | a := bf.Args[0] 39 | value, err := a.Eval(row) 40 | if err != nil { 41 | return err 42 | } 43 | if !value.IsNull() { 44 | if value.Kind() == types.KindUint64 { 45 | evalCtx.Value.SetUint64(evalCtx.Value.GetUint64() ^ value.GetUint64()) 46 | } else { 47 | int64Value, err := value.ToInt64(sc) 48 | if err != nil { 49 | return err 50 | } 51 | evalCtx.Value.SetUint64(evalCtx.Value.GetUint64() ^ uint64(int64Value)) 52 | } 53 | } 54 | return nil 55 | } 56 | 57 | // GetResult implements Aggregation interface. 58 | func (bf *bitXorFunction) GetResult(evalCtx *AggEvaluateContext) types.Datum { 59 | return evalCtx.Value 60 | } 61 | 62 | // GetPartialResult implements Aggregation interface. 
63 | func (bf *bitXorFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { 64 | return []types.Datum{bf.GetResult(evalCtx)} 65 | } 66 | -------------------------------------------------------------------------------- /tablestore/ddl/column.go: -------------------------------------------------------------------------------- 1 | package ddl 2 | 3 | import ( 4 | "github.com/pingcap/errors" 5 | parser_model "github.com/pingcap/parser/model" 6 | "github.com/zhihu/zetta/pkg/meta" 7 | "github.com/zhihu/zetta/pkg/model" 8 | ) 9 | 10 | func setColumnsState(cols []*model.ColumnMeta, state parser_model.SchemaState) { 11 | for _, c := range cols { 12 | c.State = state 13 | } 14 | } 15 | 16 | func addColumns(tbMeta *model.TableMeta, cols []*model.ColumnMeta) { 17 | for _, c := range cols { 18 | addColumn(tbMeta, c) 19 | } 20 | } 21 | 22 | func onAddColumn(d *ddlCtx, t *meta.Meta, job *model.Job) (int64, error) { 23 | var ( 24 | ver int64 25 | err error 26 | ) 27 | column := &model.ColumnMeta{} 28 | if err = job.DecodeArgs(column); err != nil { 29 | job.State = model.JobStateCancelled 30 | return ver, errors.Trace(err) 31 | } 32 | columns := []*model.ColumnMeta{column} 33 | tbMeta, err := getTableInfoAndCancelFaultJob(t, job, job.SchemaID) 34 | if err != nil { 35 | return ver, errors.Trace(err) 36 | } 37 | //setColumnsState(columns, parser_model.StateNone) 38 | addColumns(tbMeta, columns) 39 | ver, err = updateSchemaVersion(t, job) 40 | if err != nil { 41 | return ver, errors.Trace(err) 42 | } 43 | originalState := columns[0].State 44 | switch columns[0].State { 45 | case parser_model.StateNone: 46 | job.SchemaState = model.StateDeleteOnly 47 | setColumnsState(columns, parser_model.StateDeleteOnly) 48 | ver, err = updateVersionAndTableInfo(t, job, tbMeta, originalState != columns[0].State) 49 | case parser_model.StateDeleteOnly: 50 | job.SchemaState = model.StatePublic 51 | setColumnsState(columns, parser_model.StatePublic) 52 | ver, err = updateVersionAndTableInfo(t, job, tbMeta, originalState != columns[0].State) 53 | if err != nil { 54 | return ver, err 55 | } 56 | job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tbMeta) 57 | case parser_model.StatePublic: 58 | job.FinishTableJob(model.JobStateDone, model.StatePublic, ver, tbMeta) 59 | return ver, nil 60 | default: 61 | err = ErrInvalidTableState.GenWithStack("invalid column state %v", originalState) 62 | } 63 | 64 | return ver, errors.Trace(err) 65 | } 66 | 67 | //func (w *worker) onModifyColumn(t *meta.Meta, job *model.Job) (ver int64, _ error) { 68 | //} 69 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/aggregation/bit_and.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
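// BIT_AND below seeds its accumulator with math.MaxUint64 (all bits set), the
// identity for AND, whereas BIT_OR and BIT_XOR above start from 0. The fold that
// Update performs, written out by hand on two sample values:
//
//	acc := uint64(math.MaxUint64)
//	for _, v := range []uint64{0b1100, 0b1010} {
//	    acc &= v
//	}
//	// acc == 0b1000, i.e. BIT_AND(12, 10) = 8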
13 | 14 | package aggregation 15 | 16 | import ( 17 | "math" 18 | 19 | "github.com/pingcap/tidb/sessionctx/stmtctx" 20 | "github.com/pingcap/tidb/types" 21 | "github.com/pingcap/tidb/util/chunk" 22 | ) 23 | 24 | type bitAndFunction struct { 25 | aggFunction 26 | } 27 | 28 | func (bf *bitAndFunction) CreateContext(sc *stmtctx.StatementContext) *AggEvaluateContext { 29 | evalCtx := bf.aggFunction.CreateContext(sc) 30 | evalCtx.Value.SetUint64(math.MaxUint64) 31 | return evalCtx 32 | } 33 | 34 | func (bf bitAndFunction) ResetContext(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext) { 35 | evalCtx.Value.SetUint64(math.MaxUint64) 36 | } 37 | 38 | // Update implements Aggregation interface. 39 | func (bf *bitAndFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error { 40 | a := bf.Args[0] 41 | value, err := a.Eval(row) 42 | if err != nil { 43 | return err 44 | } 45 | if !value.IsNull() { 46 | if value.Kind() == types.KindUint64 { 47 | evalCtx.Value.SetUint64(evalCtx.Value.GetUint64() & value.GetUint64()) 48 | } else { 49 | int64Value, err := value.ToInt64(sc) 50 | if err != nil { 51 | return err 52 | } 53 | evalCtx.Value.SetUint64(evalCtx.Value.GetUint64() & uint64(int64Value)) 54 | } 55 | } 56 | return nil 57 | } 58 | 59 | // GetResult implements Aggregation interface. 60 | func (bf *bitAndFunction) GetResult(evalCtx *AggEvaluateContext) types.Datum { 61 | return evalCtx.Value 62 | } 63 | 64 | // GetPartialResult implements Aggregation interface. 65 | func (bf *bitAndFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { 66 | return []types.Datum{bf.GetResult(evalCtx)} 67 | } 68 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/zhihu/zetta 2 | 3 | go 1.13 4 | 5 | require ( 6 | cloud.google.com/go v0.50.0 7 | github.com/apache/thrift v0.13.0 8 | github.com/blacktear23/go-proxyprotocol v0.0.0-20180807104634-af7a81e8dd0d 9 | github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 10 | github.com/gogo/protobuf v1.3.1 11 | github.com/google/uuid v1.1.1 12 | github.com/gorilla/mux v1.7.4 13 | github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 14 | github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 15 | github.com/klauspost/cpuid v1.2.1 16 | github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 17 | github.com/opentracing/opentracing-go v1.1.0 18 | github.com/pingcap/check v0.0.0-20200212061837-5e12011dc712 19 | github.com/pingcap/errors v0.11.5-0.20201126102027-b0a155152ca3 20 | github.com/pingcap/failpoint v0.0.0-20200702092429-9f69995143ce 21 | github.com/pingcap/kvproto v0.0.0-20200907074027-32a3a0accf7d 22 | github.com/pingcap/log v0.0.0-20200511115504-543df19646ad 23 | github.com/pingcap/parser v0.0.0-20200803072748-fdf66528323d 24 | github.com/pingcap/tidb v1.1.0-beta.0.20200826081922-9c1c21270001 25 | github.com/pingcap/tidb-tools v4.0.1-0.20200530144555-cdec43635625+incompatible 26 | github.com/pingcap/tipb v0.0.0-20200522051215-f31a15d98fce 27 | github.com/pingcap/pd/v4 v4.0.5-0.20200817114353-e465cafe8a91 28 | github.com/pkg/errors v0.9.1 29 | github.com/prometheus/client_golang v1.5.1 30 | github.com/shirou/gopsutil v2.19.11+incompatible // indirect 31 | github.com/spf13/cast v1.3.0 32 | github.com/spf13/viper v1.7.0 33 | github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 34 | // github.com/tikv/pd 
v1.1.0-beta.0.20210128094944-96efd6f40236 35 | github.com/uber/jaeger-client-go v2.22.1+incompatible 36 | github.com/zhihu/zetta-proto v0.0.0-20210404125403-0511ff71c1a1 37 | go.etcd.io/etcd v0.5.0-alpha.5.0.20191023171146-3cf2f69b5738 38 | go.uber.org/multierr v1.5.0 39 | go.uber.org/zap v1.15.0 40 | golang.org/x/text v0.3.3 41 | golang.org/x/tools v0.0.0-20200820010801-b793a1359eac 42 | google.golang.org/grpc v1.26.0 43 | sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67 44 | 45 | ) 46 | 47 | replace github.com/coreos/go-systemd => github.com/coreos/go-systemd/v22 v22.0.0 48 | -------------------------------------------------------------------------------- /tablestore/mysql/executor/delete.go: -------------------------------------------------------------------------------- 1 | package executor 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/pingcap/tidb/types" 7 | "github.com/pingcap/tidb/util/chunk" 8 | "github.com/zhihu/zetta/tablestore/table" 9 | ) 10 | 11 | type UpdateExec struct { 12 | baseExecutor 13 | 14 | tbl table.Table 15 | values map[int64]types.Datum 16 | } 17 | 18 | func (e *UpdateExec) Open(ctx context.Context) error { 19 | return e.children[0].Open(ctx) 20 | } 21 | 22 | func (e *UpdateExec) Close() error { 23 | return e.children[0].Close() 24 | } 25 | 26 | func (e *UpdateExec) Next(ctx context.Context, req *chunk.Chunk) error { 27 | req.GrowAndReset(e.maxChunkSize) 28 | chk := NewFirstChunk(e.children[0]) 29 | err := Next(ctx, e.children[0], chk) 30 | if err != nil { 31 | return err 32 | } 33 | fts := retTypes(e.children[0]) 34 | chunkIter := chunk.NewIterator4Chunk(chk) 35 | rowIter := chunkIter.Begin() 36 | for ; rowIter != chunkIter.End(); rowIter = chunkIter.Next() { 37 | rowDatums := rowIter.GetDatumRow(fts) 38 | if err = e.tbl.UpdateRecord(e.ctx, rowDatums, e.values); err != nil { 39 | return err 40 | } 41 | e.ctx.GetSessionVars().StmtCtx.AddAffectedRows(1) 42 | } 43 | return nil 44 | } 45 | 46 | type DeleteExec struct { 47 | baseExecutor 48 | 49 | tbl table.Table 50 | } 51 | 52 | func (e *DeleteExec) Open(ctx context.Context) error { 53 | return e.children[0].Open(ctx) 54 | } 55 | 56 | func (e *DeleteExec) Close() error { 57 | return e.children[0].Close() 58 | } 59 | 60 | func (e *DeleteExec) deleteOneRow(ctx context.Context, row []types.Datum) error { 61 | return e.tbl.RemoveRecord(e.ctx, row) 62 | } 63 | 64 | func (e *DeleteExec) Next(ctx context.Context, req *chunk.Chunk) error { 65 | req.GrowAndReset(e.maxChunkSize) 66 | chk := NewFirstChunk(e.children[0]) 67 | err := Next(ctx, e.children[0], chk) 68 | if err != nil { 69 | return err 70 | } 71 | fts := retTypes(e.children[0]) 72 | chunkIter := chunk.NewIterator4Chunk(chk) 73 | rowIter := chunkIter.Begin() 74 | for ; rowIter != chunkIter.End(); rowIter = chunkIter.Next() { 75 | rowDatums := rowIter.GetDatumRow(fts) 76 | if err = e.tbl.RemoveRecord(e.ctx, rowDatums); err != nil { 77 | return err 78 | } 79 | e.ctx.GetSessionVars().StmtCtx.AddAffectedRows(1) 80 | } 81 | return nil 82 | } 83 | -------------------------------------------------------------------------------- /tablestore/table/tables/keyrange.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package tables 15 | 16 | import ( 17 | "bytes" 18 | "fmt" 19 | 20 | "github.com/pingcap/tidb/kv" 21 | tspb "github.com/zhihu/zetta-proto/pkg/tablestore" 22 | ) 23 | 24 | type keyRange struct { 25 | start, end *tspb.ListValue 26 | startClosed, endClosed bool 27 | 28 | // These are populated during an operation 29 | // when we know what table this keyRange applies to. 30 | startKey, endKey []interface{} 31 | } 32 | 33 | func (r *keyRange) String() string { 34 | var sb bytes.Buffer // TODO: Switch to strings.Builder when we drop support for Go 1.9. 35 | if r.startClosed { 36 | sb.WriteString("[") 37 | } else { 38 | sb.WriteString("(") 39 | } 40 | fmt.Fprintf(&sb, "%v,%v", r.start, r.end) 41 | if r.endClosed { 42 | sb.WriteString("]") 43 | } else { 44 | sb.WriteString(")") 45 | } 46 | return sb.String() 47 | } 48 | 49 | type keyRangeList []*keyRange 50 | 51 | func makeKeyRangeList(ranges []*tspb.KeyRange) keyRangeList { 52 | var krl keyRangeList 53 | for _, r := range ranges { 54 | krl = append(krl, makeKeyRange(r)) 55 | } 56 | return krl 57 | } 58 | 59 | func makeKeyRange(r *tspb.KeyRange) *keyRange { 60 | var kr keyRange 61 | switch s := r.StartKeyType.(type) { 62 | case *tspb.KeyRange_StartClosed: 63 | kr.start = s.StartClosed 64 | kr.startClosed = true 65 | case *tspb.KeyRange_StartOpen: 66 | kr.start = s.StartOpen 67 | } 68 | switch e := r.EndKeyType.(type) { 69 | case *tspb.KeyRange_EndClosed: 70 | kr.end = e.EndClosed 71 | kr.endClosed = true 72 | case *tspb.KeyRange_EndOpen: 73 | kr.end = e.EndOpen 74 | } 75 | 76 | return &kr 77 | } 78 | 79 | type KeyValue struct { 80 | Key kv.Key 81 | Val []byte 82 | } 83 | -------------------------------------------------------------------------------- /tablestore/server/driver_zetta.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package server 15 | 16 | import ( 17 | "context" 18 | "fmt" 19 | 20 | "github.com/pingcap/tidb/kv" 21 | "github.com/zhihu/zetta/tablestore/session" 22 | ) 23 | 24 | // ZettaDriver implements IDriver. 25 | type ZettaDriver struct { 26 | Store kv.Storage 27 | } 28 | 29 | // NewZettaDriver creates a new ZettaDriver 30 | func NewZettaDriver(store kv.Storage) *ZettaDriver { 31 | driver := &ZettaDriver{ 32 | Store: store, 33 | } 34 | return driver 35 | } 36 | 37 | // ZettaContext implements QueryCtx 38 | type ZettaContext struct { 39 | session session.Session 40 | currentDB string 41 | // query ? 
42 | } 43 | 44 | func (zc *ZettaContext) CurrentDB() string { 45 | return zc.currentDB 46 | } 47 | 48 | func (zc *ZettaContext) Close() error { 49 | zc.session.Close() 50 | return nil 51 | } 52 | 53 | func (zc *ZettaContext) Execute(ctx context.Context, query interface{}) ([]ResultSet, error) { 54 | return nil, nil 55 | } 56 | 57 | func (zc *ZettaContext) GetSession() session.Session { 58 | return zc.session 59 | } 60 | 61 | func (zc *ZettaContext) SetValue(key fmt.Stringer, value interface{}) { 62 | 63 | } 64 | 65 | // OpenCtx xxx 66 | func (zd *ZettaDriver) OpenCtx(connId uint64, dbname string) (QueryCtx, error) { 67 | se, err := session.CreateSession(zd.Store) 68 | if err != nil { 69 | return nil, err 70 | } 71 | // se.SetTLSState(tlsState) 72 | // err = se.SetCollation(int(collation)) 73 | // if err != nil { 74 | // return nil, err 75 | // } 76 | // se.SetClientCapability(capability) 77 | // se.SetConnectionID(connID) 78 | 79 | se.SetDB(dbname) 80 | zc := &ZettaContext{ 81 | session: se, 82 | currentDB: dbname, 83 | // stmts: make(map[int]*TiDBStatement), 84 | } 85 | return zc, nil 86 | } 87 | -------------------------------------------------------------------------------- /tablestore/table/tables/result_iter.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
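// resultIter below is a channel-backed RecordSet: a scan goroutine pushes
// *tspb.SliceCell rows in with sendData while the caller drains them with Next
// until io.EOF. A minimal sketch of that pairing inside this package
// (scannedCells and ctx are placeholders):
//
//	ri := NewResultIter(0) // 0 disables the row limit; sendData only enforces limits > 0
//	go func() {
//	    defer ri.Close()
//	    for _, cell := range scannedCells {
//	        if err := ri.sendData(cell); err != nil {
//	            return
//	        }
//	    }
//	}()
//	for {
//	    row, err := ri.Next(ctx)
//	    if err == io.EOF {
//	        break
//	    }
//	    _ = row.(*tspb.SliceCell)
//	}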
13 | 14 | package tables 15 | 16 | import ( 17 | "context" 18 | "io" 19 | "sync/atomic" 20 | 21 | tspb "github.com/zhihu/zetta-proto/pkg/tablestore" 22 | "github.com/zhihu/zetta/tablestore/table" 23 | ) 24 | 25 | type resultIter struct { 26 | columns []*tspb.ColumnMeta 27 | rowChan chan *tspb.SliceCell 28 | rowsCount int64 29 | nextToken []byte 30 | limit int64 31 | lastErr error 32 | quitCh chan struct{} 33 | } 34 | 35 | func NewResultIter(limit int64) *resultIter { 36 | return &resultIter{ 37 | rowChan: make(chan *tspb.SliceCell, 32), 38 | limit: limit, 39 | quitCh: make(chan struct{}), 40 | } 41 | } 42 | 43 | func (ri *resultIter) Columns() []*tspb.ColumnMeta { 44 | return ri.columns 45 | } 46 | 47 | func (ri *resultIter) LastErr() error { 48 | return ri.lastErr 49 | } 50 | 51 | func (ri *resultIter) Next(ctx context.Context) (interface{}, error) { 52 | row, ok := <-ri.rowChan 53 | if !ok { 54 | return nil, io.EOF 55 | } 56 | return row, nil 57 | } 58 | 59 | func (ri *resultIter) NextToken() []byte { 60 | return ri.nextToken 61 | } 62 | 63 | func (ri *resultIter) Close() error { 64 | select { 65 | case <-ri.quitCh: 66 | return ri.lastErr 67 | default: 68 | close(ri.quitCh) 69 | close(ri.rowChan) 70 | } 71 | return ri.lastErr 72 | } 73 | 74 | func (ri *resultIter) sendData(row *tspb.SliceCell) error { 75 | defer func() { 76 | if x := recover(); x != nil { 77 | return 78 | } 79 | }() 80 | if ri.limit > 0 && atomic.LoadInt64(&ri.rowsCount) > ri.limit { 81 | return table.ErrResultSetUserLimitReached 82 | } 83 | select { 84 | case <-ri.quitCh: 85 | case ri.rowChan <- row: 86 | atomic.AddInt64(&ri.rowsCount, 1) 87 | } 88 | 89 | return nil 90 | } 91 | 92 | func (ri *resultIter) clearup() { 93 | 94 | } 95 | -------------------------------------------------------------------------------- /tablestore/mysql/bexpression/aggregation/util.go: -------------------------------------------------------------------------------- 1 | package aggregation 2 | 3 | import ( 4 | "github.com/pingcap/errors" 5 | "github.com/pingcap/tidb/sessionctx/stmtctx" 6 | "github.com/pingcap/tidb/types" 7 | "github.com/pingcap/tidb/util/codec" 8 | "github.com/pingcap/tidb/util/mvmap" 9 | ) 10 | 11 | // distinctChecker stores existing keys and checks if given data is distinct. 12 | type distinctChecker struct { 13 | existingKeys *mvmap.MVMap 14 | key []byte 15 | vals [][]byte 16 | sc *stmtctx.StatementContext 17 | } 18 | 19 | // createDistinctChecker creates a new distinct checker. 20 | func createDistinctChecker(sc *stmtctx.StatementContext) *distinctChecker { 21 | return &distinctChecker{ 22 | existingKeys: mvmap.NewMVMap(), 23 | sc: sc, 24 | } 25 | } 26 | 27 | // Check checks if values is distinct. 28 | func (d *distinctChecker) Check(values []types.Datum) (bool, error) { 29 | d.key = d.key[:0] 30 | var err error 31 | d.key, err = codec.EncodeValue(d.sc, d.key, values...) 32 | if err != nil { 33 | return false, err 34 | } 35 | d.vals = d.existingKeys.Get(d.key, d.vals[:0]) 36 | if len(d.vals) > 0 { 37 | return false, nil 38 | } 39 | d.existingKeys.Put(d.key, []byte{}) 40 | return true, nil 41 | } 42 | 43 | // calculateSum adds v to sum. 
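// Integer values are promoted to MyDecimal and all other non-NULL kinds to
// float64, mirroring MySQL's SUM/AVG result types; a NULL v leaves sum
// untouched, and when sum itself is still NULL the converted value becomes the
// new running sum.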
44 | func calculateSum(sc *stmtctx.StatementContext, sum, v types.Datum) (data types.Datum, err error) { 45 | // for avg and sum calculation 46 | // avg and sum use decimal for integer and decimal type, use float for others 47 | // see https://dev.mysql.com/doc/refman/5.7/en/group-by-functions.html 48 | 49 | switch v.Kind() { 50 | case types.KindNull: 51 | case types.KindInt64, types.KindUint64: 52 | var d *types.MyDecimal 53 | d, err = v.ToDecimal(sc) 54 | if err == nil { 55 | data = types.NewDecimalDatum(d) 56 | } 57 | case types.KindMysqlDecimal: 58 | v.Copy(&data) 59 | default: 60 | var f float64 61 | f, err = v.ToFloat64(sc) 62 | if err == nil { 63 | data = types.NewFloat64Datum(f) 64 | } 65 | } 66 | 67 | if err != nil { 68 | return data, err 69 | } 70 | if data.IsNull() { 71 | return sum, nil 72 | } 73 | switch sum.Kind() { 74 | case types.KindNull: 75 | return data, nil 76 | case types.KindFloat64, types.KindMysqlDecimal: 77 | return types.ComputePlus(sum, data) 78 | default: 79 | return data, errors.Errorf("invalid value %v for aggregate", sum.Kind()) 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/aggregation/count.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package aggregation 15 | 16 | import ( 17 | "github.com/pingcap/tidb/sessionctx/stmtctx" 18 | "github.com/pingcap/tidb/types" 19 | "github.com/pingcap/tidb/util/chunk" 20 | ) 21 | 22 | type countFunction struct { 23 | aggFunction 24 | } 25 | 26 | // Update implements Aggregation interface. 27 | func (cf *countFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error { 28 | var datumBuf []types.Datum 29 | if cf.HasDistinct { 30 | datumBuf = make([]types.Datum, 0, len(cf.Args)) 31 | } 32 | for _, a := range cf.Args { 33 | value, err := a.Eval(row) 34 | if err != nil { 35 | return err 36 | } 37 | if value.IsNull() { 38 | return nil 39 | } 40 | if cf.Mode == FinalMode || cf.Mode == Partial2Mode { 41 | evalCtx.Count += value.GetInt64() 42 | } 43 | if cf.HasDistinct { 44 | datumBuf = append(datumBuf, value) 45 | } 46 | } 47 | if cf.HasDistinct { 48 | d, err := evalCtx.DistinctChecker.Check(datumBuf) 49 | if err != nil { 50 | return err 51 | } 52 | if !d { 53 | return nil 54 | } 55 | } 56 | if cf.Mode == CompleteMode || cf.Mode == Partial1Mode { 57 | evalCtx.Count++ 58 | } 59 | return nil 60 | } 61 | 62 | func (cf *countFunction) ResetContext(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext) { 63 | if cf.HasDistinct { 64 | evalCtx.DistinctChecker = createDistinctChecker(sc) 65 | } 66 | evalCtx.Count = 0 67 | } 68 | 69 | // GetResult implements Aggregation interface. 70 | func (cf *countFunction) GetResult(evalCtx *AggEvaluateContext) (d types.Datum) { 71 | d.SetInt64(evalCtx.Count) 72 | return d 73 | } 74 | 75 | // GetPartialResult implements Aggregation interface. 
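// For COUNT the partial result is the single running counter; an upper
// Final/Partial2 stage feeds that datum back through Update, which adds it to
// its own Count rather than incrementing by one.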
76 | func (cf *countFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { 77 | return []types.Datum{cf.GetResult(evalCtx)} 78 | } 79 | -------------------------------------------------------------------------------- /tablestore/mysql/bexpression/aggregation/avg.go: -------------------------------------------------------------------------------- 1 | package aggregation 2 | 3 | import ( 4 | "github.com/cznic/mathutil" 5 | "github.com/pingcap/parser/mysql" 6 | "github.com/pingcap/parser/terror" 7 | "github.com/pingcap/tidb/sessionctx/stmtctx" 8 | "github.com/pingcap/tidb/types" 9 | "github.com/pingcap/tidb/util/chunk" 10 | ) 11 | 12 | type avgFunction struct { 13 | aggFunction 14 | } 15 | 16 | func (af *avgFunction) updateAvg(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext, row chunk.Row) error { 17 | a := af.Args[1] 18 | value, err := a.Eval(row) 19 | if err != nil { 20 | return err 21 | } 22 | if value.IsNull() { 23 | return nil 24 | } 25 | evalCtx.Value, err = calculateSum(sc, evalCtx.Value, value) 26 | if err != nil { 27 | return err 28 | } 29 | count, err := af.Args[0].Eval(row) 30 | if err != nil { 31 | return err 32 | } 33 | evalCtx.Count += count.GetInt64() 34 | return nil 35 | } 36 | 37 | func (af *avgFunction) ResetContext(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext) { 38 | if af.HasDistinct { 39 | evalCtx.DistinctChecker = createDistinctChecker(sc) 40 | } 41 | evalCtx.Value.SetNull() 42 | evalCtx.Count = 0 43 | } 44 | 45 | // Update implements Aggregation interface. 46 | func (af *avgFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) (err error) { 47 | err = af.updateAvg(sc, evalCtx, row) 48 | return err 49 | } 50 | 51 | // GetResult implements Aggregation interface. 52 | func (af *avgFunction) GetResult(evalCtx *AggEvaluateContext) (d types.Datum) { 53 | switch evalCtx.Value.Kind() { 54 | case types.KindFloat64: 55 | sum := evalCtx.Value.GetFloat64() 56 | d.SetFloat64(sum / float64(evalCtx.Count)) 57 | return 58 | case types.KindMysqlDecimal: 59 | x := evalCtx.Value.GetMysqlDecimal() 60 | y := types.NewDecFromInt(evalCtx.Count) 61 | to := new(types.MyDecimal) 62 | err := types.DecimalDiv(x, y, to, types.DivFracIncr) 63 | terror.Log(err) 64 | frac := af.RetTp.Decimal 65 | if frac == -1 { 66 | frac = mysql.MaxDecimalScale 67 | } 68 | err = to.Round(to, mathutil.Min(frac, mysql.MaxDecimalScale), types.ModeHalfEven) 69 | terror.Log(err) 70 | d.SetMysqlDecimal(to) 71 | } 72 | return 73 | } 74 | 75 | // GetPartialResult implements Aggregation interface. 76 | func (af *avgFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { 77 | return []types.Datum{types.NewIntDatum(evalCtx.Count), evalCtx.Value} 78 | } 79 | -------------------------------------------------------------------------------- /pkg/structure/string.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
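// The string helpers below (Set/Get/GetInt64/Inc/Clear) work on keys built by
// encodeStringDataKey under the structure's prefix. A minimal usage sketch,
// assuming a kv.Storage named store and the prefix "m":
//
//	err := kv.RunInNewTxn(store, true, func(txn kv.Transaction) error {
//	    t := NewStructure(txn, txn, []byte("m"))
//	    if err := t.Set([]byte("greeting"), []byte("hello")); err != nil {
//	        return err
//	    }
//	    n, err := t.Inc([]byte("counter"), 1) // n is the value after the increment
//	    _ = n
//	    return err
//	})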
13 | 14 | package structure 15 | 16 | import ( 17 | "context" 18 | "strconv" 19 | 20 | "github.com/pingcap/errors" 21 | "github.com/pingcap/tidb/kv" 22 | ) 23 | 24 | // Set sets the string value of the key. 25 | func (t *TxStructure) Set(key []byte, value []byte) error { 26 | if t.readWriter == nil { 27 | return errWriteOnSnapshot 28 | } 29 | ek := t.encodeStringDataKey(key) 30 | return t.readWriter.Set(ek, value) 31 | } 32 | 33 | // Get gets the string value of a key. 34 | func (t *TxStructure) Get(key []byte) ([]byte, error) { 35 | ek := t.encodeStringDataKey(key) 36 | value, err := t.reader.Get(context.TODO(), ek) 37 | if kv.ErrNotExist.Equal(err) { 38 | err = nil 39 | } 40 | return value, errors.Trace(err) 41 | } 42 | 43 | // GetInt64 gets the int64 value of a key. 44 | func (t *TxStructure) GetInt64(key []byte) (int64, error) { 45 | v, err := t.Get(key) 46 | if err != nil || v == nil { 47 | return 0, errors.Trace(err) 48 | } 49 | 50 | n, err := strconv.ParseInt(string(v), 10, 64) 51 | return n, errors.Trace(err) 52 | } 53 | 54 | // Inc increments the integer value of a key by step, returns 55 | // the value after the increment. 56 | func (t *TxStructure) Inc(key []byte, step int64) (int64, error) { 57 | if t.readWriter == nil { 58 | return 0, errWriteOnSnapshot 59 | } 60 | ek := t.encodeStringDataKey(key) 61 | // txn Inc will lock this key, so we don't lock it here. 62 | n, err := kv.IncInt64(t.readWriter, ek, step) 63 | if kv.ErrNotExist.Equal(err) { 64 | err = nil 65 | } 66 | return n, errors.Trace(err) 67 | } 68 | 69 | // Clear removes the string value of the key. 70 | func (t *TxStructure) Clear(key []byte) error { 71 | if t.readWriter == nil { 72 | return errWriteOnSnapshot 73 | } 74 | ek := t.encodeStringDataKey(key) 75 | err := t.readWriter.Delete(ek) 76 | if kv.ErrNotExist.Equal(err) { 77 | err = nil 78 | } 79 | return errors.Trace(err) 80 | } 81 | -------------------------------------------------------------------------------- /pkg/metrics/session.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
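// The collectors in this file are plain prometheus primitives and only surface
// once they are registered and observations are recorded. A hedged sketch of
// that wiring (the label values "general"/"commit" are examples only, and the
// actual registration point in this codebase may differ):
//
//	prometheus.MustRegister(SessionRetry, TransactionCounter, TransactionDuration)
//	start := time.Now()
//	// ... run the transaction ...
//	TransactionCounter.WithLabelValues("general", "commit").Inc()
//	TransactionDuration.WithLabelValues("general", "commit").Observe(time.Since(start).Seconds())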
13 | 14 | package metrics 15 | 16 | import "github.com/prometheus/client_golang/prometheus" 17 | 18 | var ( 19 | SessionRetry = prometheus.NewHistogram( 20 | prometheus.HistogramOpts{ 21 | Namespace: "zetta", 22 | Subsystem: "session", 23 | Name: "retry_num", 24 | Help: "Bucketed histogram of session retry count.", 25 | Buckets: prometheus.LinearBuckets(0, 1, 20), // 0 ~ 20 26 | }) 27 | SessionRetryErrorCounter = prometheus.NewCounterVec( 28 | prometheus.CounterOpts{ 29 | Namespace: "zetta", 30 | Subsystem: "session", 31 | Name: "retry_error_total", 32 | Help: "Counter of session retry error.", 33 | }, []string{LblSQLType, LblType}) 34 | 35 | SessionCounter = prometheus.NewGaugeVec( 36 | prometheus.GaugeOpts{ 37 | Namespace: "zetta", 38 | Subsystem: "session", 39 | Name: "session_num", 40 | Help: "num of sessions ", 41 | }, []string{LblType}) 42 | 43 | TransactionCounter = prometheus.NewCounterVec( 44 | prometheus.CounterOpts{ 45 | Namespace: "zetta", 46 | Subsystem: "session", 47 | Name: "transaction_total", 48 | Help: "Counter of transactions.", 49 | }, []string{LblSQLType, LblType}) 50 | 51 | TransactionDuration = prometheus.NewHistogramVec( 52 | prometheus.HistogramOpts{ 53 | Namespace: "zetta", 54 | Subsystem: "session", 55 | Name: "transaction_duration_seconds", 56 | Help: "Bucketed histogram of a transaction execution duration, including retry.", 57 | Buckets: prometheus.ExponentialBuckets(0.001, 2, 20), // 1ms ~ 1049s 58 | }, []string{LblSQLType, LblType}) 59 | 60 | SchemaLeaseErrorCounter = prometheus.NewCounterVec( 61 | prometheus.CounterOpts{ 62 | Namespace: "zetta", 63 | Subsystem: "session", 64 | Name: "schema_lease_error_total", 65 | Help: "Counter of schema lease error", 66 | }, []string{LblType}) 67 | ) 68 | -------------------------------------------------------------------------------- /pkg/codec/decimal.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | // Copyright 2015 PingCAP, Inc. 15 | // 16 | // Licensed under the Apache License, Version 2.0 (the "License"); 17 | // you may not use this file except in compliance with the License. 18 | // You may obtain a copy of the License at 19 | // 20 | // http://www.apache.org/licenses/LICENSE-2.0 21 | // 22 | // Unless required by applicable law or agreed to in writing, software 23 | // distributed under the License is distributed on an "AS IS" BASIS, 24 | // See the License for the specific language governing permissions and 25 | // limitations under the License. 26 | 27 | package codec 28 | 29 | import ( 30 | "github.com/pingcap/errors" 31 | "github.com/pingcap/failpoint" 32 | "github.com/pingcap/tidb/types" 33 | ) 34 | 35 | // EncodeDecimal encodes a decimal into a byte slice which can be sorted lexicographically later. 
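// The layout is one byte of precision, one byte of fraction digits, then the
// binary form produced by MyDecimal.ToBin; DecodeDecimal needs those two leading
// bytes to size and interpret the binary part. When precision is passed as 0 it
// is derived from the value via PrecisionAndFrac.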
36 | func EncodeDecimal(b []byte, dec *types.MyDecimal, precision, frac int) ([]byte, error) { 37 | if precision == 0 { 38 | precision, frac = dec.PrecisionAndFrac() 39 | } 40 | b = append(b, byte(precision), byte(frac)) 41 | bin, err := dec.ToBin(precision, frac) 42 | b = append(b, bin...) 43 | return b, errors.Trace(err) 44 | } 45 | 46 | // DecodeDecimal decodes bytes to decimal. 47 | func DecodeDecimal(b []byte) ([]byte, *types.MyDecimal, int, int, error) { 48 | failpoint.Inject("errorInDecodeDecimal", func(val failpoint.Value) { 49 | if val.(bool) { 50 | failpoint.Return(b, nil, 0, 0, errors.New("gofail error")) 51 | } 52 | }) 53 | 54 | if len(b) < 3 { 55 | return b, nil, 0, 0, errors.New("insufficient bytes to decode value") 56 | } 57 | precision := int(b[0]) 58 | frac := int(b[1]) 59 | b = b[2:] 60 | dec := new(types.MyDecimal) 61 | binSize, err := dec.FromBin(b, precision, frac) 62 | b = b[binSize:] 63 | if err != nil { 64 | return b, nil, precision, frac, errors.Trace(err) 65 | } 66 | return b, dec, precision, frac, nil 67 | } 68 | -------------------------------------------------------------------------------- /tablestore/domain/domain_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package domain 15 | 16 | import ( 17 | "testing" 18 | 19 | . 
"github.com/pingcap/check" 20 | "github.com/pingcap/errors" 21 | "github.com/pingcap/tidb/kv" 22 | "github.com/pingcap/tidb/store/mockstore" 23 | "github.com/pingcap/tidb/util/testleak" 24 | tspb "github.com/zhihu/zetta-proto/pkg/tablestore" 25 | "github.com/zhihu/zetta/pkg/meta" 26 | "github.com/zhihu/zetta/pkg/model" 27 | ) 28 | 29 | func TestT(t *testing.T) { 30 | CustomVerboseFlag = true 31 | TestingT(t) 32 | } 33 | 34 | var _ = Suite(&testSuite{}) 35 | 36 | type testSuite struct { 37 | } 38 | 39 | func getMeta(store kv.Storage, c *C) *meta.Meta { 40 | startTs, err := store.CurrentVersion() 41 | c.Assert(err, IsNil) 42 | snapshot, err := store.GetSnapshot(kv.NewVersion(startTs.Ver)) 43 | c.Assert(err, IsNil) 44 | return meta.NewSnapshotMeta(snapshot) 45 | } 46 | 47 | func (*testSuite) TestT(c *C) { 48 | defer testleak.AfterTest(c)() 49 | store, err := mockstore.NewMockTikvStore() 50 | c.Assert(err, IsNil) 51 | defer store.Close() 52 | 53 | dbMeta := &model.DatabaseMeta{ 54 | DatabaseMeta: tspb.DatabaseMeta{ 55 | Id: 1, 56 | Database: "d", 57 | }, 58 | } 59 | err = kv.RunInNewTxn(store, true, func(txn kv.Transaction) error { 60 | err = meta.NewMeta(txn).CreateDatabase(dbMeta) 61 | return errors.Trace(err) 62 | }) 63 | c.Assert(err, IsNil) 64 | 65 | tbMeta := &model.TableMeta{ 66 | TableMeta: tspb.TableMeta{ 67 | Id: 1, 68 | TableName: "t", 69 | }, 70 | } 71 | err = kv.RunInNewTxn(store, true, func(txn kv.Transaction) error { 72 | err = meta.NewMeta(txn).CreateTableOrView(1, tbMeta) 73 | return errors.Trace(err) 74 | }) 75 | 76 | _, err = Bootstrap(store, nil) 77 | c.Assert(err, IsNil) 78 | do := GetDomain4Test() 79 | is := do.InfoSchema() 80 | 81 | dm, _ := is.GetDatabaseMetaByName("d") 82 | c.Assert(dm, DeepEquals, dbMeta) 83 | tm, _ := is.GetTableMetaByName("d", "t") 84 | c.Assert(tm, DeepEquals, tbMeta) 85 | } 86 | -------------------------------------------------------------------------------- /tablestore/infoschema/infoschema_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package infoschema_test 15 | 16 | import ( 17 | "testing" 18 | 19 | . 
"github.com/pingcap/check" 20 | "github.com/pingcap/errors" 21 | "github.com/pingcap/tidb/kv" 22 | "github.com/pingcap/tidb/store/mockstore" 23 | "github.com/pingcap/tidb/util/testleak" 24 | tspb "github.com/zhihu/zetta-proto/pkg/tablestore" 25 | "github.com/zhihu/zetta/pkg/meta" 26 | "github.com/zhihu/zetta/pkg/model" 27 | "github.com/zhihu/zetta/tablestore/infoschema" 28 | ) 29 | 30 | func TestT(t *testing.T) { 31 | CustomVerboseFlag = true 32 | TestingT(t) 33 | } 34 | 35 | var _ = Suite(&testSuite{}) 36 | 37 | type testSuite struct { 38 | } 39 | 40 | func getMeta(store kv.Storage, c *C) *meta.Meta { 41 | startTs, err := store.CurrentVersion() 42 | c.Assert(err, IsNil) 43 | snapshot, err := store.GetSnapshot(kv.NewVersion(startTs.Ver)) 44 | c.Assert(err, IsNil) 45 | return meta.NewSnapshotMeta(snapshot) 46 | } 47 | 48 | func (*testSuite) TestT(c *C) { 49 | defer testleak.AfterTest(c)() 50 | store, err := mockstore.NewMockTikvStore() 51 | c.Assert(err, IsNil) 52 | defer store.Close() 53 | 54 | dbMeta := &model.DatabaseMeta{ 55 | DatabaseMeta: tspb.DatabaseMeta{ 56 | Id: 1, 57 | Database: "d", 58 | }, 59 | } 60 | err = kv.RunInNewTxn(store, true, func(txn kv.Transaction) error { 61 | err = meta.NewMeta(txn).CreateDatabase(dbMeta) 62 | return errors.Trace(err) 63 | }) 64 | c.Assert(err, IsNil) 65 | 66 | tbMeta := &model.TableMeta{ 67 | TableMeta: tspb.TableMeta{ 68 | Id: 1, 69 | TableName: "t", 70 | Database: "d", 71 | }, 72 | } 73 | err = kv.RunInNewTxn(store, true, func(txn kv.Transaction) error { 74 | err = meta.NewMeta(txn).CreateTableOrView(1, tbMeta) 75 | return errors.Trace(err) 76 | }) 77 | 78 | m := getMeta(store, c) 79 | h := infoschema.NewHandler() 80 | err = h.InitCurrentInfoSchema(m) 81 | c.Assert(err, IsNil) 82 | 83 | is := h.Get() 84 | dm, _ := is.GetDatabaseMetaByName("d") 85 | c.Assert(dm, DeepEquals, dbMeta) 86 | tm, _ := is.GetTableMetaByName("d", "t") 87 | c.Assert(tm, DeepEquals, tbMeta) 88 | } 89 | -------------------------------------------------------------------------------- /tablestore/mysql/bexpression/aggregation/aggregation.go: -------------------------------------------------------------------------------- 1 | package aggregation 2 | 3 | import ( 4 | "bytes" 5 | 6 | "github.com/pingcap/tidb/sessionctx/stmtctx" 7 | "github.com/pingcap/tidb/types" 8 | "github.com/pingcap/tidb/util/chunk" 9 | "github.com/zhihu/zetta/tablestore/mysql/expression" 10 | ) 11 | 12 | // Aggregation stands for aggregate functions. 13 | type Aggregation interface { 14 | // Update during executing. 15 | Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error 16 | 17 | // GetPartialResult will called by coprocessor to get partial results. For avg function, partial results will return 18 | // sum and count values at the same time. 19 | GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum 20 | 21 | // GetResult will be called when all data have been processed. 22 | GetResult(evalCtx *AggEvaluateContext) types.Datum 23 | 24 | // CreateContext creates a new AggEvaluateContext for the aggregation function. 25 | CreateContext(sc *stmtctx.StatementContext) *AggEvaluateContext 26 | 27 | // ResetContext resets the content of the evaluate context. 28 | ResetContext(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext) 29 | } 30 | 31 | // AggEvaluateContext is used to store intermediate result when calculating aggregate functions. 
32 | type AggEvaluateContext struct { 33 | DistinctChecker *distinctChecker 34 | Count int64 35 | Value types.Datum 36 | Buffer *bytes.Buffer // Buffer is used for group_concat. 37 | GotFirstRow bool // It will check if the agg has met the first row key. 38 | } 39 | 40 | type aggFunction struct { 41 | *AggFuncDesc 42 | } 43 | 44 | func newAggFunc(funcName string, args []expression.Expression, hasDistinct bool) aggFunction { 45 | agg := &AggFuncDesc{HasDistinct: hasDistinct} 46 | agg.Name = funcName 47 | agg.Args = args 48 | return aggFunction{AggFuncDesc: agg} 49 | } 50 | 51 | // CreateContext implements Aggregation interface. 52 | func (af *aggFunction) CreateContext(sc *stmtctx.StatementContext) *AggEvaluateContext { 53 | evalCtx := &AggEvaluateContext{} 54 | return evalCtx 55 | } 56 | 57 | func (af *aggFunction) ResetContext(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext) { 58 | evalCtx.Value.SetNull() 59 | } 60 | 61 | func (af *aggFunction) updateSum(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext, row chunk.Row) error { 62 | a := af.Args[0] 63 | value, err := a.Eval(row) 64 | if err != nil { 65 | return err 66 | } 67 | if value.IsNull() { 68 | return nil 69 | } 70 | 71 | evalCtx.Value, err = calculateSum(sc, evalCtx.Value, value) 72 | if err != nil { 73 | return err 74 | } 75 | evalCtx.Count++ 76 | return nil 77 | } 78 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/aggregation/window_func.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package aggregation 15 | 16 | import ( 17 | "strings" 18 | 19 | "github.com/pingcap/parser/ast" 20 | //"github.com/pingcap/tidb/expression" 21 | "github.com/pingcap/tidb/sessionctx" 22 | "github.com/zhihu/zetta/tablestore/mysql/expression" 23 | ) 24 | 25 | // WindowFuncDesc describes a window function signature, only used in planner. 26 | type WindowFuncDesc struct { 27 | baseFuncDesc 28 | } 29 | 30 | // NewWindowFuncDesc creates a window function signature descriptor. 31 | func NewWindowFuncDesc(ctx sessionctx.Context, name string, args []expression.Expression) (*WindowFuncDesc, error) { 32 | switch strings.ToLower(name) { 33 | case ast.WindowFuncNthValue: 34 | val, isNull, ok := expression.GetUint64FromConstant(args[1]) 35 | // nth_value does not allow `0`, but allows `null`. 36 | if !ok || (val == 0 && !isNull) { 37 | return nil, nil 38 | } 39 | case ast.WindowFuncNtile: 40 | val, isNull, ok := expression.GetUint64FromConstant(args[0]) 41 | // ntile does not allow `0`, but allows `null`. 
42 | if !ok || (val == 0 && !isNull) { 43 | return nil, nil 44 | } 45 | case ast.WindowFuncLead, ast.WindowFuncLag: 46 | if len(args) < 2 { 47 | break 48 | } 49 | _, isNull, ok := expression.GetUint64FromConstant(args[1]) 50 | if !ok || isNull { 51 | return nil, nil 52 | } 53 | } 54 | base, err := newBaseFuncDesc(ctx, name, args) 55 | if err != nil { 56 | return nil, err 57 | } 58 | return &WindowFuncDesc{base}, nil 59 | } 60 | 61 | // noFrameWindowFuncs is the functions that operate on the entire partition, 62 | // they should not have frame specifications. 63 | var noFrameWindowFuncs = map[string]struct{}{ 64 | ast.WindowFuncCumeDist: {}, 65 | ast.WindowFuncDenseRank: {}, 66 | ast.WindowFuncLag: {}, 67 | ast.WindowFuncLead: {}, 68 | ast.WindowFuncNtile: {}, 69 | ast.WindowFuncPercentRank: {}, 70 | ast.WindowFuncRank: {}, 71 | ast.WindowFuncRowNumber: {}, 72 | } 73 | 74 | // NeedFrame checks if the function need frame specification. 75 | func NeedFrame(name string) bool { 76 | _, ok := noFrameWindowFuncs[strings.ToLower(name)] 77 | return !ok 78 | } 79 | -------------------------------------------------------------------------------- /tablestore/mysql/bexpression/constant.go: -------------------------------------------------------------------------------- 1 | package expression 2 | 3 | import ( 4 | "github.com/pingcap/parser/mysql" 5 | "github.com/pingcap/tidb/types" 6 | "github.com/pingcap/tidb/util/chunk" 7 | "github.com/zhihu/zetta/tablestore/mysql/sctx" 8 | ) 9 | 10 | // Constant stands for a constant value. 11 | type Constant struct { 12 | Value types.Datum 13 | RetType *types.FieldType 14 | // DeferredExpr holds deferred function in PlanCache cached plan. 15 | // it's only used to represent non-deterministic functions(see expression.DeferredFunctions) 16 | // in PlanCache cached plan, so let them can be evaluated until cached item be used. 17 | //DeferredExpr Expression 18 | // ParamMarker holds param index inside sessionVars.PreparedParams. 19 | // It's only used to reference a user variable provided in the `EXECUTE` statement or `COM_EXECUTE` binary protocol. 20 | //ParamMarker *ParamMarker 21 | hashcode []byte 22 | 23 | //collationInfo 24 | } 25 | 26 | // Clone implements Expression interface. 27 | func (c *Constant) Clone() Expression { 28 | con := *c 29 | return &con 30 | } 31 | 32 | // Equal implements Expression interface. 33 | func (c *Constant) Equal(ctx sctx.Context, b Expression) bool { 34 | y, ok := b.(*Constant) 35 | if !ok { 36 | return false 37 | } 38 | _, err1 := y.Eval(chunk.Row{}) 39 | _, err2 := c.Eval(chunk.Row{}) 40 | if err1 != nil || err2 != nil { 41 | return false 42 | } 43 | con, err := c.Value.CompareDatum(ctx.GetSessionVars().StmtCtx, &y.Value) 44 | if err != nil || con != 0 { 45 | return false 46 | } 47 | return true 48 | } 49 | 50 | // GetType implements Expression interface. 51 | func (c *Constant) GetType() *types.FieldType { 52 | return c.RetType 53 | } 54 | 55 | // Eval implements Expression interface. 56 | func (c *Constant) Eval(row chunk.Row) (types.Datum, error) { 57 | return c.Value, nil 58 | } 59 | 60 | // EvalInt returns int representation of Constant. 
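// A hedged illustration (assuming some sctx.Context `ctx` is available; NewIntDatum
// and NewFieldType come from the imported TiDB types package):
//
//	c := &Constant{Value: types.NewIntDatum(42), RetType: types.NewFieldType(mysql.TypeLonglong)}
//	v, isNull, err := c.EvalInt(ctx, chunk.Row{}) // v == 42, isNull == false, err == nil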
61 | func (c *Constant) EvalInt(ctx sctx.Context, row chunk.Row) (int64, bool, error) { 62 | dt := c.Value 63 | if c.GetType().Tp == mysql.TypeNull || dt.IsNull() { 64 | return 0, true, nil 65 | } else if dt.Kind() == types.KindBinaryLiteral { 66 | val, err := dt.GetBinaryLiteral().ToInt(ctx.GetSessionVars().StmtCtx) 67 | return int64(val), err != nil, err 68 | } else if c.GetType().Hybrid() || dt.Kind() == types.KindString { 69 | res, err := dt.ToInt64(ctx.GetSessionVars().StmtCtx) 70 | return res, false, err 71 | } 72 | return dt.GetInt64(), false, nil 73 | } 74 | 75 | // EvalString returns string representation of Constant. 76 | func (c *Constant) EvalString(ctx sctx.Context, row chunk.Row) (string, bool, error) { 77 | dt := c.Value 78 | if c.GetType().Tp == mysql.TypeNull || dt.IsNull() { 79 | return "", true, nil 80 | } 81 | res, err := dt.ToString() 82 | return res, false, err 83 | } 84 | -------------------------------------------------------------------------------- /tablestore/domain/schema_checker.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | // Copyright 2018 PingCAP, Inc. 15 | // 16 | // Licensed under the Apache License, Version 2.0 (the "License"); 17 | // you may not use this file except in compliance with the License. 18 | // You may obtain a copy of the License at 19 | // 20 | // http://www.apache.org/licenses/LICENSE-2.0 21 | // 22 | // Unless required by applicable law or agreed to in writing, software 23 | // distributed under the License is distributed on an "AS IS" BASIS, 24 | // See the License for the specific language governing permissions and 25 | // limitations under the License. 26 | 27 | package domain 28 | 29 | import ( 30 | "sync/atomic" 31 | "time" 32 | 33 | "github.com/zhihu/zetta/pkg/metrics" 34 | ) 35 | 36 | // SchemaChecker is used for checking schema-validity. 37 | type SchemaChecker struct { 38 | SchemaValidator 39 | schemaVer int64 40 | relatedTableIDs []int64 41 | } 42 | 43 | var ( 44 | // SchemaOutOfDateRetryInterval is the backoff time before retrying. 45 | SchemaOutOfDateRetryInterval = int64(500 * time.Millisecond) 46 | // SchemaOutOfDateRetryTimes is the max retry count when the schema is out of date. 47 | SchemaOutOfDateRetryTimes = int32(10) 48 | ) 49 | 50 | // NewSchemaChecker creates a new schema checker. 51 | func NewSchemaChecker(do *Domain, schemaVer int64, relatedTableIDs []int64) *SchemaChecker { 52 | return &SchemaChecker{ 53 | SchemaValidator: do.SchemaValidator, 54 | schemaVer: schemaVer, 55 | relatedTableIDs: relatedTableIDs, 56 | } 57 | } 58 | 59 | // Check checks the validity of the schema version. 
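// Check retries up to SchemaOutOfDateRetryTimes while the validator reports
// ResultUnknown, sleeping SchemaOutOfDateRetryInterval between attempts; ResultSucc
// returns nil, ResultFail returns ErrInfoSchemaChanged immediately, and exhausting
// the retries returns ErrInfoSchemaExpired.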
60 | func (s *SchemaChecker) Check(txnTS uint64) error { 61 | schemaOutOfDateRetryInterval := atomic.LoadInt64(&SchemaOutOfDateRetryInterval) 62 | schemaOutOfDateRetryTimes := int(atomic.LoadInt32(&SchemaOutOfDateRetryTimes)) 63 | for i := 0; i < schemaOutOfDateRetryTimes; i++ { 64 | result := s.SchemaValidator.Check(txnTS, s.schemaVer, s.relatedTableIDs) 65 | switch result { 66 | case ResultSucc: 67 | return nil 68 | case ResultFail: 69 | metrics.SchemaLeaseErrorCounter.WithLabelValues("changed").Inc() 70 | return ErrInfoSchemaChanged 71 | case ResultUnknown: 72 | metrics.SchemaLeaseErrorCounter.WithLabelValues("outdated").Inc() 73 | time.Sleep(time.Duration(schemaOutOfDateRetryInterval)) 74 | } 75 | 76 | } 77 | return ErrInfoSchemaExpired 78 | } 79 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/aggregation/util.go: -------------------------------------------------------------------------------- 1 | // Copyright 2018 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package aggregation 15 | 16 | import ( 17 | "github.com/pingcap/errors" 18 | "github.com/pingcap/tidb/sessionctx/stmtctx" 19 | "github.com/pingcap/tidb/types" 20 | "github.com/pingcap/tidb/util/codec" 21 | "github.com/pingcap/tidb/util/mvmap" 22 | ) 23 | 24 | // distinctChecker stores existing keys and checks if given data is distinct. 25 | type distinctChecker struct { 26 | existingKeys *mvmap.MVMap 27 | key []byte 28 | vals [][]byte 29 | sc *stmtctx.StatementContext 30 | } 31 | 32 | // createDistinctChecker creates a new distinct checker. 33 | func createDistinctChecker(sc *stmtctx.StatementContext) *distinctChecker { 34 | return &distinctChecker{ 35 | existingKeys: mvmap.NewMVMap(), 36 | sc: sc, 37 | } 38 | } 39 | 40 | // Check checks if values is distinct. 41 | func (d *distinctChecker) Check(values []types.Datum) (bool, error) { 42 | d.key = d.key[:0] 43 | var err error 44 | d.key, err = codec.EncodeValue(d.sc, d.key, values...) 45 | if err != nil { 46 | return false, err 47 | } 48 | d.vals = d.existingKeys.Get(d.key, d.vals[:0]) 49 | if len(d.vals) > 0 { 50 | return false, nil 51 | } 52 | d.existingKeys.Put(d.key, []byte{}) 53 | return true, nil 54 | } 55 | 56 | // calculateSum adds v to sum. 
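// Integer and decimal inputs are accumulated as types.MyDecimal, all other kinds as
// float64, and a NULL input leaves the running sum untouched. For example, folding
// the int datums 1 and 2 into an initially-null sum yields a decimal datum equal to
// 3, not an int datum.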
57 | func calculateSum(sc *stmtctx.StatementContext, sum, v types.Datum) (data types.Datum, err error) { 58 | // for avg and sum calculation 59 | // avg and sum use decimal for integer and decimal type, use float for others 60 | // see https://dev.mysql.com/doc/refman/5.7/en/group-by-functions.html 61 | 62 | switch v.Kind() { 63 | case types.KindNull: 64 | case types.KindInt64, types.KindUint64: 65 | var d *types.MyDecimal 66 | d, err = v.ToDecimal(sc) 67 | if err == nil { 68 | data = types.NewDecimalDatum(d) 69 | } 70 | case types.KindMysqlDecimal: 71 | v.Copy(&data) 72 | default: 73 | var f float64 74 | f, err = v.ToFloat64(sc) 75 | if err == nil { 76 | data = types.NewFloat64Datum(f) 77 | } 78 | } 79 | 80 | if err != nil { 81 | return data, err 82 | } 83 | if data.IsNull() { 84 | return sum, nil 85 | } 86 | switch sum.Kind() { 87 | case types.KindNull: 88 | return data, nil 89 | case types.KindFloat64, types.KindMysqlDecimal: 90 | return types.ComputePlus(sum, data) 91 | default: 92 | return data, errors.Errorf("invalid value %v for aggregate", sum.Kind()) 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /tablestore/mysql/server/column.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package server 15 | 16 | import ( 17 | "github.com/pingcap/parser/mysql" 18 | ) 19 | 20 | const maxColumnNameSize = 256 21 | 22 | // ColumnInfo contains information of a column 23 | type ColumnInfo struct { 24 | Schema string 25 | Table string 26 | OrgTable string 27 | Name string 28 | OrgName string 29 | ColumnLength uint32 30 | Charset uint16 31 | Flag uint16 32 | Decimal uint8 33 | Type uint8 34 | DefaultValueLength uint64 35 | DefaultValue []byte 36 | } 37 | 38 | // Dump dumps ColumnInfo to bytes. 
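// The layout follows the MySQL column-definition packet: the catalog is always
// "def", names longer than maxColumnNameSize are truncated, the 0x0c byte introduces
// the fixed-length block (charset, column length, type, flags, decimals, two filler
// bytes), and the default value is appended only when one is present.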
39 | func (column *ColumnInfo) Dump(buffer []byte) []byte { 40 | nameDump, orgnameDump := []byte(column.Name), []byte(column.OrgName) 41 | if len(nameDump) > maxColumnNameSize { 42 | nameDump = nameDump[0:maxColumnNameSize] 43 | } 44 | if len(orgnameDump) > maxColumnNameSize { 45 | orgnameDump = orgnameDump[0:maxColumnNameSize] 46 | } 47 | buffer = dumpLengthEncodedString(buffer, []byte("def")) 48 | buffer = dumpLengthEncodedString(buffer, []byte(column.Schema)) 49 | buffer = dumpLengthEncodedString(buffer, []byte(column.Table)) 50 | buffer = dumpLengthEncodedString(buffer, []byte(column.OrgTable)) 51 | buffer = dumpLengthEncodedString(buffer, nameDump) 52 | buffer = dumpLengthEncodedString(buffer, orgnameDump) 53 | 54 | buffer = append(buffer, 0x0c) 55 | 56 | buffer = dumpUint16(buffer, column.Charset) 57 | buffer = dumpUint32(buffer, column.ColumnLength) 58 | buffer = append(buffer, dumpType(column.Type)) 59 | buffer = dumpUint16(buffer, dumpFlag(column.Type, column.Flag)) 60 | buffer = append(buffer, column.Decimal) 61 | buffer = append(buffer, 0, 0) 62 | 63 | if column.DefaultValue != nil { 64 | buffer = dumpUint64(buffer, uint64(len(column.DefaultValue))) 65 | buffer = append(buffer, column.DefaultValue...) 66 | } 67 | 68 | return buffer 69 | } 70 | 71 | func dumpFlag(tp byte, flag uint16) uint16 { 72 | switch tp { 73 | case mysql.TypeSet: 74 | return flag | uint16(mysql.SetFlag) 75 | case mysql.TypeEnum: 76 | return flag | uint16(mysql.EnumFlag) 77 | default: 78 | if mysql.HasBinaryFlag(uint(flag)) { 79 | return flag | uint16(mysql.NotNullFlag) 80 | } 81 | return flag 82 | } 83 | } 84 | 85 | func dumpType(tp byte) byte { 86 | switch tp { 87 | case mysql.TypeSet, mysql.TypeEnum: 88 | return mysql.TypeString 89 | default: 90 | return tp 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /tablestore/mysql/executor/ddl.go: -------------------------------------------------------------------------------- 1 | package executor 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/pingcap/parser/ast" 7 | "github.com/pingcap/tidb/util/chunk" 8 | "github.com/pingcap/tidb/util/mock" 9 | "github.com/zhihu/zetta/pkg/model" 10 | "github.com/zhihu/zetta/tablestore/domain" 11 | "github.com/zhihu/zetta/tablestore/infoschema" 12 | ) 13 | 14 | // DDLExec represents a DDL executor. 15 | // It grabs a DDL instance from Domain, calling the DDL methods to do the work. 16 | type DDLExec struct { 17 | baseExecutor 18 | 19 | stmt ast.StmtNode 20 | is infoschema.InfoSchema 21 | done bool 22 | } 23 | 24 | func (e *DDLExec) Next(ctx context.Context, req *chunk.Chunk) (err error) { 25 | if e.done { 26 | return nil 27 | } 28 | e.done = true 29 | // TODO: For each DDL, we should commit the previous transaction and create a new transaction. 30 | switch x := e.stmt.(type) { 31 | case *ast.CreateDatabaseStmt: 32 | err = e.executeCreateDatabase(x) 33 | case *ast.CreateTableStmt: 34 | err = e.executeCreateTable(x) 35 | case *ast.DropTableStmt: 36 | err = e.executeDropTable(x) 37 | case *ast.CreateIndexStmt: 38 | err = e.executeCreateIndex(x) 39 | case *ast.AlterTableStmt: 40 | err = e.executeAlterTable(x) 41 | } 42 | return err 43 | } 44 | 45 | //Ony support add column for now. 
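// Only the first spec's first new column (s.Specs[0].NewColumns[0]) is handled, and
// an unqualified table name falls back to the session's CurrentDB. For example
// (hedged, assuming a table d.t already exists), `ALTER TABLE d.t ADD COLUMN c1 INT`
// would reach this path with dbName == "d" and tblName == "t".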
46 | func (e *DDLExec) executeAlterTable(s *ast.AlterTableStmt) error { 47 | tblName := s.Table.Name.L 48 | dbName := s.Table.Schema.L 49 | if dbName == "" { 50 | dbName = e.ctx.GetSessionVars().CurrentDB 51 | } 52 | colMeta := toAddCol(dbName, tblName, s.Specs[0].NewColumns[0]) 53 | return domain.GetOnlyDomain().DDL().AddColumn(mock.NewContext(), dbName, tblName, []*model.ColumnMeta{colMeta}) 54 | } 55 | 56 | func (e *DDLExec) executeCreateIndex(s *ast.CreateIndexStmt) error { 57 | idxMeta := toCreateIndex(s) 58 | currentDB := e.ctx.GetSessionVars().CurrentDB 59 | tbName := idxMeta.TableName 60 | err := domain.GetOnlyDomain().DDL().CreateIndex(mock.NewContext(), currentDB, tbName, idxMeta, s.IfNotExists) 61 | return err 62 | } 63 | 64 | func (e *DDLExec) executeCreateDatabase(s *ast.CreateDatabaseStmt) error { 65 | dbMeta := toCreateDatabase(s) 66 | err := domain.GetOnlyDomain().DDL().CreateSchema(mock.NewContext(), dbMeta) 67 | return err 68 | } 69 | 70 | func (e *DDLExec) executeCreateTable(s *ast.CreateTableStmt) error { 71 | currentDB := e.ctx.GetSessionVars().CurrentDB 72 | tbMeta := toCreateTable(s) 73 | tbMeta.Database = currentDB 74 | if len(tbMeta.Rules) != 0 { 75 | tbMeta.Rules[0].GroupID = "pd" 76 | tbMeta.Rules[0].ID = currentDB + ":" + tbMeta.TableName 77 | } 78 | ctx := mock.NewContext() 79 | err := domain.GetOnlyDomain().DDL().CreateTable(ctx, tbMeta, s.IfNotExists) 80 | return err 81 | } 82 | 83 | func (e *DDLExec) executeDropTable(s *ast.DropTableStmt) error { 84 | currentDB := e.ctx.GetSessionVars().CurrentDB 85 | for _, table := range s.Tables { 86 | err := domain.GetOnlyDomain().DDL().DropTable(mock.NewContext(), currentDB, table.Name.L, s.IfExists) 87 | if err != nil { 88 | return err 89 | } 90 | } 91 | return nil 92 | } 93 | -------------------------------------------------------------------------------- /tablestore/ddl/callback.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | // Copyright 2015 PingCAP, Inc. 15 | // 16 | // Licensed under the Apache License, Version 2.0 (the "License"); 17 | // you may not use this file except in compliance with the License. 18 | // You may obtain a copy of the License at 19 | // 20 | // http://www.apache.org/licenses/LICENSE-2.0 21 | // 22 | // Unless required by applicable law or agreed to in writing, software 23 | // distributed under the License is distributed on an "AS IS" BASIS, 24 | // See the License for the specific language governing permissions and 25 | // limitations under the License. 26 | 27 | package ddl 28 | 29 | import ( 30 | "context" 31 | 32 | "github.com/pingcap/tidb/sessionctx" 33 | "github.com/zhihu/zetta/pkg/model" 34 | "github.com/zhihu/zetta/tablestore/infoschema" 35 | ) 36 | 37 | // Interceptor is used for DDL. 38 | type Interceptor interface { 39 | // OnGetInfoSchema is an intercept which is called in the function ddl.GetInfoSchema(). It is used in the tests. 
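	// BaseInterceptor below provides the default pass-through implementation,
	// returning `is` unchanged.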
40 | OnGetInfoSchema(ctx sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema 41 | } 42 | 43 | // BaseInterceptor implements Interceptor. 44 | type BaseInterceptor struct{} 45 | 46 | // OnGetInfoSchema implements Interceptor.OnGetInfoSchema interface. 47 | func (bi *BaseInterceptor) OnGetInfoSchema(ctx sessionctx.Context, is infoschema.InfoSchema) infoschema.InfoSchema { 48 | return is 49 | } 50 | 51 | // Callback is used for DDL. 52 | type Callback interface { 53 | // OnChanged is called after schema is changed. 54 | OnChanged(err error) error 55 | // OnJobRunBefore is called before running job. 56 | OnJobRunBefore(job *model.Job) 57 | // OnJobUpdated is called after the running job is updated. 58 | OnJobUpdated(job *model.Job) 59 | // OnWatched is called after watching owner is completed. 60 | OnWatched(ctx context.Context) 61 | } 62 | 63 | // BaseCallback implements Callback.OnChanged interface. 64 | type BaseCallback struct { 65 | } 66 | 67 | // OnChanged implements Callback interface. 68 | func (c *BaseCallback) OnChanged(err error) error { 69 | return err 70 | } 71 | 72 | // OnJobRunBefore implements Callback.OnJobRunBefore interface. 73 | func (c *BaseCallback) OnJobRunBefore(job *model.Job) { 74 | // Nothing to do. 75 | } 76 | 77 | // OnJobUpdated implements Callback.OnJobUpdated interface. 78 | func (c *BaseCallback) OnJobUpdated(job *model.Job) { 79 | // Nothing to do. 80 | } 81 | 82 | // OnWatched implements Callback.OnWatched interface. 83 | func (c *BaseCallback) OnWatched(ctx context.Context) { 84 | // Nothing to do. 85 | } 86 | -------------------------------------------------------------------------------- /tablestore/domain/schema_checker_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | // Copyright 2018 PingCAP, Inc. 15 | // 16 | // Licensed under the Apache License, Version 2.0 (the "License"); 17 | // you may not use this file except in compliance with the License. 18 | // You may obtain a copy of the License at 19 | // 20 | // http://www.apache.org/licenses/LICENSE-2.0 21 | // 22 | // Unless required by applicable law or agreed to in writing, software 23 | // distributed under the License is distributed on an "AS IS" BASIS, 24 | // See the License for the specific language governing permissions and 25 | // limitations under the License. 26 | 27 | package domain 28 | 29 | import ( 30 | "time" 31 | 32 | . "github.com/pingcap/check" 33 | "github.com/pingcap/parser/terror" 34 | ) 35 | 36 | func (s *testSuite) TestSchemaCheckerSimple(c *C) { 37 | lease := 5 * time.Millisecond 38 | validator := NewSchemaValidator(lease) 39 | checker := &SchemaChecker{SchemaValidator: validator} 40 | 41 | // Add some schema versions and delta table IDs. 
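	// Each Update call below records one schema transition; the arguments are roughly
	// (lease-grant timestamp, old version, new version, IDs of tables changed by the
	// new version). Version 2 changes table 1 and version 4 changes table 2, which the
	// assertions below depend on.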
42 | ts := uint64(time.Now().UnixNano()) 43 | validator.Update(ts, 0, 2, []int64{1}) 44 | validator.Update(ts, 2, 4, []int64{2}) 45 | 46 | // checker's schema version is the same as the current schema version. 47 | checker.schemaVer = 4 48 | err := checker.Check(ts) 49 | c.Assert(err, IsNil) 50 | 51 | // checker's schema version is less than the current schema version, and it doesn't exist in validator's items. 52 | // checker's related table ID isn't in validator's changed table IDs. 53 | checker.schemaVer = 2 54 | checker.relatedTableIDs = []int64{3} 55 | err = checker.Check(ts) 56 | c.Assert(err, IsNil) 57 | // The checker's schema version isn't in validator's items. 58 | checker.schemaVer = 1 59 | checker.relatedTableIDs = []int64{3} 60 | err = checker.Check(ts) 61 | c.Assert(terror.ErrorEqual(err, ErrInfoSchemaChanged), IsTrue) 62 | // checker's related table ID is in validator's changed table IDs. 63 | checker.relatedTableIDs = []int64{2} 64 | err = checker.Check(ts) 65 | c.Assert(terror.ErrorEqual(err, ErrInfoSchemaChanged), IsTrue) 66 | 67 | // validator's latest schema version is expired. 68 | time.Sleep(lease + time.Microsecond) 69 | checker.schemaVer = 4 70 | checker.relatedTableIDs = []int64{3} 71 | err = checker.Check(ts) 72 | c.Assert(err, IsNil) 73 | nowTS := uint64(time.Now().UnixNano()) 74 | // Use checker.SchemaValidator.Check instead of checker.Check here because backoff make CI slow. 75 | result := checker.SchemaValidator.Check(nowTS, checker.schemaVer, checker.relatedTableIDs) 76 | c.Assert(result, Equals, ResultUnknown) 77 | } 78 | -------------------------------------------------------------------------------- /pkg/model/flags.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | // Copyright 2018 PingCAP, Inc. 15 | // 16 | // Licensed under the Apache License, Version 2.0 (the "License"); 17 | // you may not use this file except in compliance with the License. 18 | // You may obtain a copy of the License at 19 | // 20 | // http://www.apache.org/licenses/LICENSE-2.0 21 | // 22 | // Unless required by applicable law or agreed to in writing, software 23 | // distributed under the License is distributed on an "AS IS" BASIS, 24 | // See the License for the specific language governing permissions and 25 | // limitations under the License. 26 | 27 | package model 28 | 29 | // Flags are used by tipb.SelectRequest.Flags to handle execution mode, like how to handle truncate error. 30 | const ( 31 | // FlagIgnoreTruncate indicates if truncate error should be ignored. 32 | // Read-only statements should ignore truncate error, write statements should not ignore truncate error. 33 | FlagIgnoreTruncate uint64 = 1 34 | // FlagTruncateAsWarning indicates if truncate error should be returned as warning. 
35 | // This flag only matters if FlagIgnoreTruncate is not set, in strict sql mode, truncate error should 36 | // be returned as error, in non-strict sql mode, truncate error should be saved as warning. 37 | FlagTruncateAsWarning = 1 << 1 38 | // FlagPadCharToFullLength indicates if sql_mode 'PAD_CHAR_TO_FULL_LENGTH' is set. 39 | FlagPadCharToFullLength = 1 << 2 40 | // FlagInInsertStmt indicates if this is a INSERT statement. 41 | FlagInInsertStmt = 1 << 3 42 | // FlagInUpdateOrDeleteStmt indicates if this is a UPDATE statement or a DELETE statement. 43 | FlagInUpdateOrDeleteStmt = 1 << 4 44 | // FlagInSelectStmt indicates if this is a SELECT statement. 45 | FlagInSelectStmt = 1 << 5 46 | // FlagOverflowAsWarning indicates if overflow error should be returned as warning. 47 | // In strict sql mode, overflow error should be returned as error, 48 | // in non-strict sql mode, overflow error should be saved as warning. 49 | FlagOverflowAsWarning = 1 << 6 50 | // FlagIgnoreZeroInDate indicates if ZeroInDate error should be ignored. 51 | // Read-only statements should ignore ZeroInDate error. 52 | // Write statements should not ignore ZeroInDate error in strict sql mode. 53 | FlagIgnoreZeroInDate = 1 << 7 54 | // FlagDividedByZeroAsWarning indicates if DividedByZero should be returned as warning. 55 | FlagDividedByZeroAsWarning = 1 << 8 56 | // FlagInUnionStmt indicates if this is a UNION statement. 57 | FlagInUnionStmt = 1 << 9 58 | // FlagInLoadDataStmt indicates if this is a LOAD DATA statement. 59 | FlagInLoadDataStmt = 1 << 10 60 | ) 61 | -------------------------------------------------------------------------------- /tablestore/mysql/executor/aggfuncs/aggfuncs.go: -------------------------------------------------------------------------------- 1 | package aggfuncs 2 | 3 | import ( 4 | "unsafe" 5 | 6 | "github.com/pingcap/tidb/util/chunk" 7 | "github.com/zhihu/zetta/tablestore/mysql/expression" 8 | "github.com/zhihu/zetta/tablestore/mysql/sctx" 9 | ) 10 | 11 | // PartialResult represents data structure to store the partial result for the 12 | // aggregate functions. Here we use unsafe.Pointer to allow the partial result 13 | // to be any type. 14 | type PartialResult unsafe.Pointer 15 | 16 | // AggFunc is the interface to evaluate the aggregate functions. 17 | type AggFunc interface { 18 | // AllocPartialResult allocates a specific data structure to store the 19 | // partial result, initializes it, and converts it to PartialResult to 20 | // return back. Aggregate operator implementation, no matter it's a hash 21 | // or stream, should hold this allocated PartialResult for the further 22 | // operations like: "ResetPartialResult", "UpdatePartialResult". 23 | AllocPartialResult() PartialResult 24 | 25 | // ResetPartialResult resets the partial result to the original state for a 26 | // specific aggregate function. It converts the input PartialResult to the 27 | // specific data structure which stores the partial result and then reset 28 | // every field to the proper original state. 29 | ResetPartialResult(pr PartialResult) 30 | 31 | // UpdatePartialResult updates the specific partial result for an aggregate 32 | // function using the input rows which all belonging to the same data group. 33 | // It converts the PartialResult to the specific data structure which stores 34 | // the partial result and then iterates on the input rows and update that 35 | // partial result according to the functionality and the state of the 36 | // aggregate function. 
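	// Implementations convert pr back to their concrete partial-result struct
	// (PartialResult is an unsafe.Pointer) before folding rowsInGroup into it.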
37 | UpdatePartialResult(sctx sctx.Context, rowsInGroup []chunk.Row, pr PartialResult) error 38 | 39 | // MergePartialResult will be called in the final phase when parallelly 40 | // executing. It converts the PartialResult `src`, `dst` to the same specific 41 | // data structure which stores the partial results, and then evaluate the 42 | // final result using the partial results as input values. 43 | MergePartialResult(sctx sctx.Context, src, dst PartialResult) error 44 | 45 | // AppendFinalResult2Chunk finalizes the partial result and append the 46 | // final result to the input chunk. Like other operations, it converts the 47 | // input PartialResult to the specific data structure which stores the 48 | // partial result and then calculates the final result and append that 49 | // final result to the chunk provided. 50 | AppendFinalResult2Chunk(sctx sctx.Context, pr PartialResult, chk *chunk.Chunk) error 51 | } 52 | 53 | type baseAggFunc struct { 54 | // args stores the input arguments for an aggregate function, we should 55 | // call arg.EvalXXX to get the actual input data for this function. 56 | args []expression.Expression 57 | 58 | // ordinal stores the ordinal of the columns in the output chunk, which is 59 | // used to append the final result of this function. 60 | ordinal int 61 | } 62 | 63 | func (*baseAggFunc) MergePartialResult(sctx sctx.Context, src, dst PartialResult) error { 64 | return nil 65 | } 66 | -------------------------------------------------------------------------------- /tablestore/server/driver.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package server 15 | 16 | import ( 17 | "context" 18 | "fmt" 19 | 20 | "github.com/pingcap/tidb/util/chunk" 21 | "github.com/zhihu/zetta/tablestore/session" 22 | ) 23 | 24 | // IDriver opens IContext. 25 | type IDriver interface { 26 | // OpenCtx opens an IContext with connection id, client capability, collation, dbname and optionally the tls state. 27 | // OpenCtx(connID uint64, capability uint32, collation uint8, dbname string, tlsState *tls.ConnectionState) (QueryCtx, error) 28 | OpenCtx(connID uint64, dbname string) (QueryCtx, error) 29 | } 30 | 31 | // QueryCtx is the interface to execute command. 32 | type QueryCtx interface { 33 | // CurrentDB returns current DB. 34 | CurrentDB() string 35 | 36 | GetSession() session.Session 37 | 38 | // Execute(ctx context.Context, query interface{}) ([]ResultSet, error) 39 | 40 | // SetValue saves a value associated with this context for key. 41 | SetValue(key fmt.Stringer, value interface{}) 42 | 43 | // Close closes the QueryCtx. 44 | Close() error 45 | } 46 | 47 | // PreparedStatement is the interface to use a prepared statement. 48 | type PreparedStatement interface { 49 | // ID returns statement ID 50 | ID() int 51 | 52 | // Execute executes the statement. 
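	// The variadic arguments are presumably the values bound to the statement's
	// parameter markers; rows are then consumed through the ResultSet interface
	// declared below (NewChunk/Next).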
53 | Execute(context.Context, ...interface{}) (ResultSet, error) 54 | 55 | // AppendParam appends parameter to the statement. 56 | AppendParam(paramID int, data []byte) error 57 | 58 | // NumParams returns number of parameters. 59 | NumParams() int 60 | 61 | // BoundParams returns bound parameters. 62 | BoundParams() [][]byte 63 | 64 | // SetParamsType sets type for parameters. 65 | SetParamsType([]byte) 66 | 67 | // GetParamsType returns the type for parameters. 68 | GetParamsType() []byte 69 | 70 | // StoreResultSet stores ResultSet for subsequent stmt fetching 71 | StoreResultSet(rs ResultSet) 72 | 73 | // GetResultSet gets ResultSet associated this statement 74 | GetResultSet() ResultSet 75 | 76 | // Reset removes all bound parameters. 77 | Reset() 78 | 79 | // Close closes the statement. 80 | Close() error 81 | } 82 | 83 | // ResultSet is the result set of an query. 84 | type ResultSet interface { 85 | // Columns() []*ColumnInfo 86 | NewChunk() *chunk.Chunk 87 | Next(context.Context, *chunk.Chunk) error 88 | StoreFetchedRows(rows []chunk.Row) 89 | GetFetchedRows() []chunk.Row 90 | Close() error 91 | } 92 | 93 | // fetchNotifier represents notifier will be called in COM_FETCH. 94 | type fetchNotifier interface { 95 | // OnFetchReturned be called when COM_FETCH returns. 96 | // it will be used in server-side cursor. 97 | OnFetchReturned() 98 | } 99 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/aggregation/avg.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package aggregation 15 | 16 | import ( 17 | "github.com/cznic/mathutil" 18 | "github.com/pingcap/parser/mysql" 19 | "github.com/pingcap/parser/terror" 20 | "github.com/pingcap/tidb/sessionctx/stmtctx" 21 | "github.com/pingcap/tidb/types" 22 | "github.com/pingcap/tidb/util/chunk" 23 | ) 24 | 25 | type avgFunction struct { 26 | aggFunction 27 | } 28 | 29 | func (af *avgFunction) updateAvg(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext, row chunk.Row) error { 30 | a := af.Args[1] 31 | value, err := a.Eval(row) 32 | if err != nil { 33 | return err 34 | } 35 | if value.IsNull() { 36 | return nil 37 | } 38 | evalCtx.Value, err = calculateSum(sc, evalCtx.Value, value) 39 | if err != nil { 40 | return err 41 | } 42 | count, err := af.Args[0].Eval(row) 43 | if err != nil { 44 | return err 45 | } 46 | evalCtx.Count += count.GetInt64() 47 | return nil 48 | } 49 | 50 | func (af *avgFunction) ResetContext(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext) { 51 | if af.HasDistinct { 52 | evalCtx.DistinctChecker = createDistinctChecker(sc) 53 | } 54 | evalCtx.Value.SetNull() 55 | evalCtx.Count = 0 56 | } 57 | 58 | // Update implements Aggregation interface. 
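// In Partial1Mode and CompleteMode raw input values are accumulated via updateSum;
// in Partial2Mode and FinalMode partial (count, sum) pairs are merged via updateAvg,
// which reads the count from Args[0] and the partial sum from Args[1]; DedupMode is
// unsupported and panics.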
59 | func (af *avgFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) (err error) { 60 | switch af.Mode { 61 | case Partial1Mode, CompleteMode: 62 | err = af.updateSum(sc, evalCtx, row) 63 | case Partial2Mode, FinalMode: 64 | err = af.updateAvg(sc, evalCtx, row) 65 | case DedupMode: 66 | panic("DedupMode is not supported now.") 67 | } 68 | return err 69 | } 70 | 71 | // GetResult implements Aggregation interface. 72 | func (af *avgFunction) GetResult(evalCtx *AggEvaluateContext) (d types.Datum) { 73 | switch evalCtx.Value.Kind() { 74 | case types.KindFloat64: 75 | sum := evalCtx.Value.GetFloat64() 76 | d.SetFloat64(sum / float64(evalCtx.Count)) 77 | return 78 | case types.KindMysqlDecimal: 79 | x := evalCtx.Value.GetMysqlDecimal() 80 | y := types.NewDecFromInt(evalCtx.Count) 81 | to := new(types.MyDecimal) 82 | err := types.DecimalDiv(x, y, to, types.DivFracIncr) 83 | terror.Log(err) 84 | frac := af.RetTp.Decimal 85 | if frac == -1 { 86 | frac = mysql.MaxDecimalScale 87 | } 88 | err = to.Round(to, mathutil.Min(frac, mysql.MaxDecimalScale), types.ModeHalfEven) 89 | terror.Log(err) 90 | d.SetMysqlDecimal(to) 91 | } 92 | return 93 | } 94 | 95 | // GetPartialResult implements Aggregation interface. 96 | func (af *avgFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { 97 | return []types.Datum{types.NewIntDatum(evalCtx.Count), evalCtx.Value} 98 | } 99 | -------------------------------------------------------------------------------- /pkg/metrics/tables.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | 14 | package metrics 15 | 16 | import "github.com/prometheus/client_golang/prometheus" 17 | 18 | var ( 19 | FetchRowsCounter = prometheus.NewCounterVec( 20 | prometheus.CounterOpts{ 21 | Namespace: "zetta", 22 | Subsystem: "tables", 23 | Name: "fetch_rows_total", 24 | Help: "Counter of fetchRows.", 25 | }, []string{LblType}) 26 | 27 | FetchSparseCounter = prometheus.NewCounterVec( 28 | prometheus.CounterOpts{ 29 | Namespace: "zetta", 30 | Subsystem: "tables", 31 | Name: "fetch_sparse_total", 32 | Help: "Counter of fetchSparse.", 33 | }, []string{LblType}) 34 | 35 | FetchRowsDuration = prometheus.NewHistogramVec( 36 | prometheus.HistogramOpts{ 37 | Namespace: "zetta", 38 | Subsystem: "tables", 39 | Name: "fetch_rows_duration_seconds", 40 | Help: "Bucketed histogram of processing time (s) in running table read-store.", 41 | Buckets: prometheus.ExponentialBuckets(0.0001, 2, 22), // 100us ~ 419s 42 | }, []string{LblType}) 43 | 44 | FetchSparseDuration = prometheus.NewHistogramVec( 45 | prometheus.HistogramOpts{ 46 | Namespace: "zetta", 47 | Subsystem: "tables", 48 | Name: "fetch_sparse_duration_seconds", 49 | Help: "Bucketed histogram of processing time (s) in running mutate executor.", 50 | Buckets: prometheus.ExponentialBuckets(0.0001, 2, 22), // 100us ~ 419s 51 | }, []string{LblType}) 52 | 53 | BatchSparseCounter = prometheus.NewCounterVec( 54 | prometheus.CounterOpts{ 55 | Namespace: "zetta", 56 | Subsystem: "tables", 57 | Name: "batch_sparse_total", 58 | Help: "Counter of batchSparse.", 59 | }, []string{LblType}) 60 | 61 | ScanSparseCounter = prometheus.NewCounterVec( 62 | prometheus.CounterOpts{ 63 | Namespace: "zetta", 64 | Subsystem: "tables", 65 | Name: "scan_sparse_total", 66 | Help: "Counter of scanSparse.", 67 | }, []string{LblType}) 68 | 69 | BatchSparseDuration = prometheus.NewHistogramVec( 70 | prometheus.HistogramOpts{ 71 | Namespace: "zetta", 72 | Subsystem: "tables", 73 | Name: "batch_sparse_duration_seconds", 74 | Help: "Bucketed histogram of processing time (s) in running mutate executor.", 75 | Buckets: prometheus.ExponentialBuckets(0.0001, 2, 22), // 100us ~ 419s 76 | }, []string{LblType}) 77 | 78 | ScanSparseDuration = prometheus.NewHistogramVec( 79 | prometheus.HistogramOpts{ 80 | Namespace: "zetta", 81 | Subsystem: "tables", 82 | Name: "scan_sparse_duration_seconds", 83 | Help: "Bucketed histogram of processing time (s) in running mutate executor.", 84 | Buckets: prometheus.ExponentialBuckets(0.0001, 2, 22), // 100us ~ 419s 85 | }, []string{LblType}) 86 | ) 87 | -------------------------------------------------------------------------------- /pkg/metrics/grpc.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import "github.com/prometheus/client_golang/prometheus" 4 | 5 | var ( 6 | QueryTotalCounter = prometheus.NewCounterVec( 7 | prometheus.CounterOpts{ 8 | Namespace: "zetta", 9 | Subsystem: "grpc", 10 | Name: "query_total", 11 | Help: "Counter of queries.", 12 | }, []string{LblType, LblResult}) 13 | 14 | CreateSessionCounter = prometheus.NewCounterVec( 15 | prometheus.CounterOpts{ 16 | Namespace: "zetta", 17 | Subsystem: "grpc", 18 | Name: "create_session_total", 19 | Help: "Counter of create session api.", 20 | }, []string{LblGRPCType, LblType}) 21 | 22 | DeleteSessionCounter = prometheus.NewCounterVec( 23 | prometheus.CounterOpts{ 24 | Namespace: "zetta", 25 | Subsystem: "grpc", 26 | Name: "delete_session_total", 27 | Help: "Counter of delete session api.", 28 | }, []string{LblGRPCType, LblType}) 29 
| 30 | ExecuteErrorCounter = prometheus.NewCounterVec( 31 | prometheus.CounterOpts{ 32 | Namespace: "zetta", 33 | Subsystem: "grpc", 34 | Name: "execute_error_total", 35 | Help: "Counter of execute errors.", 36 | }, []string{LblGRPCType, LblType}) 37 | 38 | ReadCounter = prometheus.NewCounterVec( 39 | prometheus.CounterOpts{ 40 | Namespace: "zetta", 41 | Subsystem: "grpc", 42 | Name: "read_op_total", 43 | Help: "Counter of read api.", 44 | }, []string{LblGRPCType, LblType}) 45 | 46 | SparseReadCounter = prometheus.NewCounterVec( 47 | prometheus.CounterOpts{ 48 | Namespace: "zetta", 49 | Subsystem: "grpc", 50 | Name: "sparse_read_op_total", 51 | Help: "Counter of sparse-read api.", 52 | }, []string{LblGRPCType, LblType}) 53 | 54 | StreamReadCounter = prometheus.NewCounterVec( 55 | prometheus.CounterOpts{ 56 | Namespace: "zetta", 57 | Subsystem: "grpc", 58 | Name: "stream_read_op_total", 59 | Help: "Counter of stream read api.", 60 | }, []string{LblGRPCType, LblType}) 61 | 62 | MutateCounter = prometheus.NewCounterVec( 63 | prometheus.CounterOpts{ 64 | Namespace: "zetta", 65 | Subsystem: "grpc", 66 | Name: "mutate_op_total", 67 | Help: "Counter of mutate api.", 68 | }, []string{LblGRPCType, LblType}) 69 | 70 | CommitCounter = prometheus.NewCounterVec( 71 | prometheus.CounterOpts{ 72 | Namespace: "zetta", 73 | Subsystem: "grpc", 74 | Name: "commit_op_total", 75 | Help: "Counter of commit api errors.", 76 | }, []string{LblGRPCType, LblType}) 77 | 78 | ExecuteReadDuration = prometheus.NewHistogramVec( 79 | prometheus.HistogramOpts{ 80 | Namespace: "zetta", 81 | Subsystem: "grpc", 82 | Name: "execute_read_duration_seconds", 83 | Help: "Bucketed histogram of processing time (s) in running read executor.", 84 | Buckets: prometheus.ExponentialBuckets(0.0001, 2, 22), // 100us ~ 419s 85 | }, []string{LblGRPCType, LblType}) 86 | 87 | ExecuteMutateDuration = prometheus.NewHistogramVec( 88 | prometheus.HistogramOpts{ 89 | Namespace: "zetta", 90 | Subsystem: "grpc", 91 | Name: "execute_mutate_duration_seconds", 92 | Help: "Bucketed histogram of processing time (s) in running mutate executor.", 93 | Buckets: prometheus.ExponentialBuckets(0.0001, 2, 22), // 100us ~ 419s 94 | }, []string{LblGRPCType, LblType}) 95 | ) 96 | -------------------------------------------------------------------------------- /pkg/codec/decimal_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | // Copyright 2015 PingCAP, Inc. 15 | // 16 | // Licensed under the Apache License, Version 2.0 (the "License"); 17 | // you may not use this file except in compliance with the License. 
18 | // You may obtain a copy of the License at 19 | // 20 | // http://www.apache.org/licenses/LICENSE-2.0 21 | // 22 | // Unless required by applicable law or agreed to in writing, software 23 | // distributed under the License is distributed on an "AS IS" BASIS, 24 | // See the License for the specific language governing permissions and 25 | // limitations under the License. 26 | 27 | package codec 28 | 29 | import ( 30 | . "github.com/pingcap/check" 31 | "github.com/pingcap/tidb/types" 32 | "github.com/pingcap/tidb/util/testleak" 33 | ) 34 | 35 | var _ = Suite(&testDecimalSuite{}) 36 | 37 | type testDecimalSuite struct { 38 | } 39 | 40 | func (s *testDecimalSuite) TestDecimalCodec(c *C) { 41 | defer testleak.AfterTest(c)() 42 | inputs := []struct { 43 | Input float64 44 | }{ 45 | {float64(123400)}, 46 | {float64(1234)}, 47 | {float64(12.34)}, 48 | {float64(0.1234)}, 49 | {float64(0.01234)}, 50 | {float64(-0.1234)}, 51 | {float64(-0.01234)}, 52 | {float64(12.3400)}, 53 | {float64(-12.34)}, 54 | {float64(0.00000)}, 55 | {float64(0)}, 56 | {float64(-0.0)}, 57 | {float64(-0.000)}, 58 | } 59 | 60 | for _, input := range inputs { 61 | v := types.NewDecFromFloatForTest(input.Input) 62 | datum := types.NewDatum(v) 63 | 64 | b, err := EncodeDecimal([]byte{}, datum.GetMysqlDecimal(), datum.Length(), datum.Frac()) 65 | c.Assert(err, IsNil) 66 | _, d, prec, frac, err := DecodeDecimal(b) 67 | if datum.Length() != 0 { 68 | c.Assert(prec, Equals, datum.Length()) 69 | c.Assert(frac, Equals, datum.Frac()) 70 | } else { 71 | prec1, frac1 := datum.GetMysqlDecimal().PrecisionAndFrac() 72 | c.Assert(prec, Equals, prec1) 73 | c.Assert(frac, Equals, frac1) 74 | } 75 | c.Assert(err, IsNil) 76 | c.Assert(v.Compare(d), Equals, 0) 77 | } 78 | } 79 | 80 | func (s *testDecimalSuite) TestFrac(c *C) { 81 | defer testleak.AfterTest(c)() 82 | inputs := []struct { 83 | Input *types.MyDecimal 84 | }{ 85 | {types.NewDecFromInt(3)}, 86 | {types.NewDecFromFloatForTest(0.03)}, 87 | } 88 | for _, v := range inputs { 89 | testFrac(c, v.Input) 90 | } 91 | } 92 | 93 | func testFrac(c *C, v *types.MyDecimal) { 94 | var d1 types.Datum 95 | d1.SetMysqlDecimal(v) 96 | 97 | b, err := EncodeDecimal([]byte{}, d1.GetMysqlDecimal(), d1.Length(), d1.Frac()) 98 | c.Assert(err, IsNil) 99 | _, dec, _, _, err := DecodeDecimal(b) 100 | c.Assert(err, IsNil) 101 | c.Assert(dec.String(), Equals, v.String()) 102 | } 103 | -------------------------------------------------------------------------------- /tablestore/mysql/bexpression/column.go: -------------------------------------------------------------------------------- 1 | package expression 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/pingcap/tidb/types" 8 | "github.com/pingcap/tidb/util/chunk" 9 | "github.com/zhihu/zetta/pkg/model" 10 | "github.com/zhihu/zetta/tablestore/mysql/sctx" 11 | ) 12 | 13 | // Column represents a column. 14 | type Column struct { 15 | RetType *types.FieldType 16 | // ID is used to specify whether this column is ExtraHandleColumn or to access histogram. 17 | // We'll try to remove it in the future. 18 | ID int64 19 | // UniqueID is the unique id of this column. 20 | UniqueID int64 21 | 22 | // Index is used for execution, to tell the column's position in the given row. 
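	// Eval and the typed EvalInt/EvalString helpers below read the value at this
	// offset from the chunk.Row they are given.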
23 | Index int 24 | 25 | hashcode []byte 26 | 27 | // VirtualExpr is used to save expression for virtual column 28 | //VirtualExpr Expression 29 | 30 | OrigName string 31 | IsHidden bool 32 | 33 | // InOperand indicates whether this column is the inner operand of column equal condition converted 34 | // from `[not] in (subq)`. 35 | //InOperand bool 36 | 37 | //collationInfo 38 | Family string 39 | } 40 | 41 | const columnPrefix = "Column#" 42 | 43 | // String implements Stringer interface. 44 | func (col *Column) String() string { 45 | if col.OrigName != "" { 46 | return col.OrigName 47 | } 48 | var builder strings.Builder 49 | fmt.Fprintf(&builder, "%s%d", columnPrefix, col.UniqueID) 50 | return builder.String() 51 | } 52 | 53 | // GetType implements Expression interface. 54 | func (col *Column) GetType() *types.FieldType { 55 | return col.RetType 56 | } 57 | 58 | // Eval implements Expression interface. 59 | func (col *Column) Eval(row chunk.Row) (types.Datum, error) { 60 | return row.GetDatum(col.Index, col.RetType), nil 61 | } 62 | 63 | // EvalInt returns int representation of Column. 64 | func (col *Column) EvalInt(ctx sctx.Context, row chunk.Row) (int64, bool, error) { 65 | if col.GetType().Hybrid() { 66 | val := row.GetDatum(col.Index, col.RetType) 67 | if val.IsNull() { 68 | return 0, true, nil 69 | } 70 | if val.Kind() == types.KindMysqlBit { 71 | val, err := val.GetBinaryLiteral().ToInt(ctx.GetSessionVars().StmtCtx) 72 | return int64(val), err != nil, err 73 | } 74 | res, err := val.ToInt64(ctx.GetSessionVars().StmtCtx) 75 | return res, err != nil, err 76 | } 77 | if row.IsNull(col.Index) { 78 | return 0, true, nil 79 | } 80 | return row.GetInt64(col.Index), false, nil 81 | } 82 | 83 | // EvalString returns string representation of Column. 84 | func (col *Column) EvalString(ctx sctx.Context, row chunk.Row) (string, bool, error) { 85 | if row.IsNull(col.Index) { 86 | return "", true, nil 87 | } 88 | 89 | // Specially handle the ENUM/SET/BIT input value. 90 | if col.GetType().Hybrid() { 91 | val := row.GetDatum(col.Index, col.RetType) 92 | res, err := val.ToString() 93 | return res, err != nil, err 94 | } 95 | 96 | val := row.GetString(col.Index) 97 | return val, false, nil 98 | } 99 | 100 | // Equal implements Expression interface. 101 | func (col *Column) Equal(_ sctx.Context, expr Expression) bool { 102 | if newCol, ok := expr.(*Column); ok { 103 | return newCol.UniqueID == col.UniqueID 104 | } 105 | return false 106 | } 107 | 108 | // Clone implements Expression interface. 109 | func (col *Column) Clone() Expression { 110 | newCol := *col 111 | return &newCol 112 | } 113 | 114 | func (col *Column) ToColumnMeta() *model.ColumnMeta { 115 | cm := &model.ColumnMeta{} 116 | cm.ColumnMeta.Name = col.OrigName 117 | cm.FieldType = *col.RetType 118 | cm.Family = col.Family 119 | return cm 120 | } 121 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/vectorized.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 
5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package expression 15 | 16 | import ( 17 | "github.com/pingcap/errors" 18 | "github.com/pingcap/tidb/sessionctx" 19 | "github.com/pingcap/tidb/types" 20 | "github.com/pingcap/tidb/util/chunk" 21 | ) 22 | 23 | func genVecFromConstExpr(ctx sessionctx.Context, expr Expression, targetType types.EvalType, input *chunk.Chunk, result *chunk.Column) error { 24 | n := 1 25 | if input != nil { 26 | n = input.NumRows() 27 | if n == 0 { 28 | result.Reset(targetType) 29 | return nil 30 | } 31 | } 32 | switch targetType { 33 | case types.ETInt: 34 | v, isNull, err := expr.EvalInt(ctx, chunk.Row{}) 35 | if err != nil { 36 | return err 37 | } 38 | if isNull { 39 | result.ResizeInt64(n, true) 40 | return nil 41 | } 42 | result.ResizeInt64(n, false) 43 | i64s := result.Int64s() 44 | for i := range i64s { 45 | i64s[i] = v 46 | } 47 | case types.ETReal: 48 | v, isNull, err := expr.EvalReal(ctx, chunk.Row{}) 49 | if err != nil { 50 | return err 51 | } 52 | if isNull { 53 | result.ResizeFloat64(n, true) 54 | return nil 55 | } 56 | result.ResizeFloat64(n, false) 57 | f64s := result.Float64s() 58 | for i := range f64s { 59 | f64s[i] = v 60 | } 61 | case types.ETDecimal: 62 | v, isNull, err := expr.EvalDecimal(ctx, chunk.Row{}) 63 | if err != nil { 64 | return err 65 | } 66 | if isNull { 67 | result.ResizeDecimal(n, true) 68 | return nil 69 | } 70 | result.ResizeDecimal(n, false) 71 | ds := result.Decimals() 72 | for i := range ds { 73 | ds[i] = *v 74 | } 75 | case types.ETDatetime, types.ETTimestamp: 76 | v, isNull, err := expr.EvalTime(ctx, chunk.Row{}) 77 | if err != nil { 78 | return err 79 | } 80 | if isNull { 81 | result.ResizeTime(n, true) 82 | return nil 83 | } 84 | result.ResizeTime(n, false) 85 | ts := result.Times() 86 | for i := range ts { 87 | ts[i] = v 88 | } 89 | case types.ETDuration: 90 | v, isNull, err := expr.EvalDuration(ctx, chunk.Row{}) 91 | if err != nil { 92 | return err 93 | } 94 | if isNull { 95 | result.ResizeGoDuration(n, true) 96 | return nil 97 | } 98 | result.ResizeGoDuration(n, false) 99 | ds := result.GoDurations() 100 | for i := range ds { 101 | ds[i] = v.Duration 102 | } 103 | case types.ETJson: 104 | result.ReserveJSON(n) 105 | v, isNull, err := expr.EvalJSON(ctx, chunk.Row{}) 106 | if err != nil { 107 | return err 108 | } 109 | if isNull { 110 | for i := 0; i < n; i++ { 111 | result.AppendNull() 112 | } 113 | } else { 114 | for i := 0; i < n; i++ { 115 | result.AppendJSON(v) 116 | } 117 | } 118 | case types.ETString: 119 | result.ReserveString(n) 120 | v, isNull, err := expr.EvalString(ctx, chunk.Row{}) 121 | if err != nil { 122 | return err 123 | } 124 | if isNull { 125 | for i := 0; i < n; i++ { 126 | result.AppendNull() 127 | } 128 | } else { 129 | for i := 0; i < n; i++ { 130 | result.AppendString(v) 131 | } 132 | } 133 | default: 134 | return errors.Errorf("unsupported Constant type for vectorized evaluation") 135 | } 136 | return nil 137 | } 138 | -------------------------------------------------------------------------------- /tablestore/mysql/server/packetio.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "bufio" 5 | 
"io" 6 | "time" 7 | 8 | "github.com/pingcap/errors" 9 | "github.com/pingcap/parser/mysql" 10 | "github.com/pingcap/parser/terror" 11 | ) 12 | 13 | const defaultWriterSize = 16 * 1024 14 | 15 | // packetIO is a helper to read and write data in packet format. 16 | type packetIO struct { 17 | bufReadConn *bufferedReadConn 18 | bufWriter *bufio.Writer 19 | sequence uint8 20 | readTimeout time.Duration 21 | } 22 | 23 | func newPacketIO(bufReadConn *bufferedReadConn) *packetIO { 24 | p := &packetIO{sequence: 0} 25 | p.setBufferedReadConn(bufReadConn) 26 | return p 27 | } 28 | 29 | func (p *packetIO) setBufferedReadConn(bufReadConn *bufferedReadConn) { 30 | p.bufReadConn = bufReadConn 31 | p.bufWriter = bufio.NewWriterSize(bufReadConn, defaultWriterSize) 32 | } 33 | 34 | func (p *packetIO) setReadTimeout(timeout time.Duration) { 35 | p.readTimeout = timeout 36 | } 37 | 38 | func (p *packetIO) readOnePacket() ([]byte, error) { 39 | var header [4]byte 40 | if p.readTimeout > 0 { 41 | if err := p.bufReadConn.SetReadDeadline(time.Now().Add(p.readTimeout)); err != nil { 42 | return nil, err 43 | } 44 | } 45 | if _, err := io.ReadFull(p.bufReadConn, header[:]); err != nil { 46 | return nil, errors.Trace(err) 47 | } 48 | 49 | sequence := header[3] 50 | if sequence != p.sequence { 51 | return nil, errInvalidSequence.GenWithStack("invalid sequence %d != %d", sequence, p.sequence) 52 | } 53 | 54 | p.sequence++ 55 | 56 | length := int(uint32(header[0]) | uint32(header[1])<<8 | uint32(header[2])<<16) 57 | 58 | data := make([]byte, length) 59 | if p.readTimeout > 0 { 60 | if err := p.bufReadConn.SetReadDeadline(time.Now().Add(p.readTimeout)); err != nil { 61 | return nil, err 62 | } 63 | } 64 | if _, err := io.ReadFull(p.bufReadConn, data); err != nil { 65 | return nil, errors.Trace(err) 66 | } 67 | return data, nil 68 | } 69 | 70 | func (p *packetIO) readPacket() ([]byte, error) { 71 | data, err := p.readOnePacket() 72 | if err != nil { 73 | return nil, errors.Trace(err) 74 | } 75 | 76 | if len(data) < mysql.MaxPayloadLen { 77 | return data, nil 78 | } 79 | 80 | // handle multi-packet 81 | for { 82 | buf, err := p.readOnePacket() 83 | if err != nil { 84 | return nil, errors.Trace(err) 85 | } 86 | 87 | data = append(data, buf...) 
88 | 89 | if len(buf) < mysql.MaxPayloadLen { 90 | break 91 | } 92 | } 93 | 94 | return data, nil 95 | } 96 | 97 | // writePacket writes data that already have header 98 | func (p *packetIO) writePacket(data []byte) error { 99 | length := len(data) - 4 100 | 101 | for length >= mysql.MaxPayloadLen { 102 | data[0] = 0xff 103 | data[1] = 0xff 104 | data[2] = 0xff 105 | 106 | data[3] = p.sequence 107 | 108 | if n, err := p.bufWriter.Write(data[:4+mysql.MaxPayloadLen]); err != nil { 109 | return errors.Trace(mysql.ErrBadConn) 110 | } else if n != (4 + mysql.MaxPayloadLen) { 111 | return errors.Trace(mysql.ErrBadConn) 112 | } else { 113 | p.sequence++ 114 | length -= mysql.MaxPayloadLen 115 | data = data[mysql.MaxPayloadLen:] 116 | } 117 | } 118 | 119 | data[0] = byte(length) 120 | data[1] = byte(length >> 8) 121 | data[2] = byte(length >> 16) 122 | data[3] = p.sequence 123 | 124 | if n, err := p.bufWriter.Write(data); err != nil { 125 | terror.Log(errors.Trace(err)) 126 | return errors.Trace(mysql.ErrBadConn) 127 | } else if n != len(data) { 128 | return errors.Trace(mysql.ErrBadConn) 129 | } else { 130 | p.sequence++ 131 | return nil 132 | } 133 | } 134 | 135 | func (p *packetIO) flush() error { 136 | err := p.bufWriter.Flush() 137 | if err != nil { 138 | return errors.Trace(err) 139 | } 140 | return err 141 | } 142 | -------------------------------------------------------------------------------- /tablestore/ddl/delete_range_manager.go: -------------------------------------------------------------------------------- 1 | package ddl 2 | 3 | import ( 4 | "context" 5 | "encoding/hex" 6 | "fmt" 7 | 8 | "github.com/pingcap/errors" 9 | "github.com/pingcap/tidb/kv" 10 | "github.com/pingcap/tidb/util/chunk" 11 | "github.com/zhihu/zetta/pkg/model" 12 | "github.com/zhihu/zetta/pkg/tablecodec" 13 | ) 14 | 15 | const ( 16 | insertSQL = `INSERT INTO mysql.gc_delete_range VALUES(%d,%d,"%s","%s",%d);` 17 | selectSQL = `SELECT start_key, end_key FROM mysql.gc_delete_range WHERE ts<%d;` 18 | deleteSQL = `DELETE FROM mysql.gc_delete_range where start_key="%s" and end_key="%s";` 19 | ) 20 | 21 | type deleteRangeManager struct { 22 | store kv.Storage 23 | sessPool *sessionPool 24 | } 25 | 26 | func newDelRangeManager(store kv.Storage, sessPool *sessionPool) *deleteRangeManager { 27 | return &deleteRangeManager{ 28 | store: store, 29 | sessPool: sessPool, 30 | } 31 | } 32 | 33 | func (d *deleteRangeManager) clearRangeInfo(ctx context.Context, rg kv.KeyRange) error { 34 | startKeyEncoded := hex.EncodeToString(rg.StartKey) 35 | endKeyEncoded := hex.EncodeToString(rg.EndKey) 36 | sql := fmt.Sprintf(deleteSQL, startKeyEncoded, endKeyEncoded) 37 | se, err := d.sessPool.get() 38 | if err != nil { 39 | return errors.Trace(err) 40 | } 41 | defer d.sessPool.put(se) 42 | _, err = se.Execute(ctx, sql) 43 | return err 44 | } 45 | 46 | func (d *deleteRangeManager) getRangesToDelete(ctx context.Context, ts uint64) ([]kv.KeyRange, error) { 47 | ranges := make([]kv.KeyRange, 0) 48 | //sql := fmt.Sprintf(selectSQL, ts) 49 | sql := "SELECT start_key, end_key FROM mysql.gc_delete_range;" 50 | se, err := d.sessPool.get() 51 | if err != nil { 52 | return ranges, errors.Trace(err) 53 | } 54 | defer d.sessPool.put(se) 55 | recordSet, err := se.Execute(ctx, sql) 56 | if err != nil { 57 | return ranges, errors.Trace(err) 58 | } 59 | chk := recordSet.NewChunk() 60 | it := chunk.NewIterator4Chunk(chk) 61 | for { 62 | err = recordSet.Next(ctx, chk) 63 | if err != nil { 64 | return ranges, errors.Trace(err) 65 | } 66 | if chk.NumRows() == 0 
{ 67 | return ranges, nil 68 | } 69 | for row := it.Begin(); row != it.End(); row = it.Next() { 70 | startKey, err := hex.DecodeString(row.GetString(0)) 71 | if err != nil { 72 | return nil, errors.Trace(err) 73 | } 74 | endKey, err := hex.DecodeString(row.GetString(1)) 75 | if err != nil { 76 | return nil, errors.Trace(err) 77 | } 78 | rg := kv.KeyRange{ 79 | StartKey: kv.Key(startKey), 80 | EndKey: kv.Key(endKey), 81 | } 82 | ranges = append(ranges, rg) 83 | } 84 | } 85 | } 86 | 87 | func (d *deleteRangeManager) addDelRangeJob(job *model.Job) error { 88 | now, err := d.getNowTSO() 89 | if err != nil { 90 | return errors.Trace(err) 91 | } 92 | switch job.Type { 93 | case model.ActionDropTable, model.ActionTruncateTable: 94 | tableID := job.TableID 95 | startKey := tablecodec.EncodeTablePrefix(tableID) 96 | endKey := tablecodec.EncodeTablePrefix(tableID + 1) 97 | return d.doInsert(job.ID, tableID, startKey, endKey, now) 98 | } 99 | return nil 100 | } 101 | 102 | func (d *deleteRangeManager) doInsert(jobID, eleID int64, startKey, endKey kv.Key, ts uint64) error { 103 | se, err := d.sessPool.get() 104 | if err != nil { 105 | return err 106 | } 107 | defer d.sessPool.put(se) 108 | startKeyEncoded := hex.EncodeToString(startKey) 109 | endKeyEncoded := hex.EncodeToString(endKey) 110 | sql := fmt.Sprintf(insertSQL, jobID, eleID, startKeyEncoded, endKeyEncoded, ts) 111 | _, err = se.Execute(context.Background(), sql) 112 | return err 113 | } 114 | 115 | func (d *deleteRangeManager) getNowTSO() (uint64, error) { 116 | ver, err := d.store.CurrentVersion() 117 | if err != nil { 118 | return 0, err 119 | } 120 | return ver.Ver, nil 121 | } 122 | -------------------------------------------------------------------------------- /tablestore/rpc/mutation.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package rpc 15 | 16 | import ( 17 | "google.golang.org/grpc/codes" 18 | 19 | tspb "github.com/zhihu/zetta-proto/pkg/tablestore" 20 | ) 21 | 22 | // Op is the mutation operation. 23 | type OP int 24 | 25 | const ( 26 | // OpUnknown indicate unknown operation 27 | OpUnknown OP = iota 28 | // OpDelete removes a row from a table. Succeeds whether or not the 29 | // key was present. 30 | OpDelete 31 | // OpInsert inserts a row into a table. If the row already exists, the 32 | // write or transaction fails. 33 | OpInsert 34 | // OpInsertOrUpdate inserts a row into a table. If the row already 35 | // exists, it updates it instead. Any column values not explicitly 36 | // written are preserved. 37 | OpInsertOrUpdate 38 | // OpReplace inserts a row into a table, deleting any existing row. 39 | // Unlike InsertOrUpdate, this means any values not explicitly written 40 | // become NULL. 41 | OpReplace 42 | // OpUpdate updates a row in a table. If the row does not already 43 | // exist, the write or transaction fails. 
44 | OpUpdate 45 | ) 46 | 47 | // A Mutation describes a modification to one or more Cloud Spanner rows. The 48 | // mutation represents an insert, update, delete, etc on a table. 49 | type Mutation struct { 50 | // op is the operation type of the mutation. 51 | // See documentation for spanner.op for more details. 52 | Op OP 53 | // Table is the name of the target table to be modified. 54 | Table string 55 | // keySet is a set of primary keys that names the rows 56 | // in a delete operation. 57 | KeySet *tspb.KeySet 58 | // columns names the set of columns that are going to be 59 | // modified by Insert, InsertOrUpdate, Replace or Update 60 | // operations. 61 | Columns []string 62 | // values specifies the new values for the target columns 63 | // named by Columns. 64 | Values []interface{} 65 | ListValues []*tspb.ListValue 66 | } 67 | 68 | func (m *Mutation) FromProto(mutation *tspb.Mutation) error { 69 | switch mutation.GetOperation().(type) { 70 | case *tspb.Mutation_Insert: 71 | m.Op = OpInsert 72 | m.prepareMutationWrite(mutation.GetInsert()) 73 | case *tspb.Mutation_InsertOrUpdate: 74 | m.Op = OpInsertOrUpdate 75 | m.prepareMutationWrite(mutation.GetInsertOrUpdate()) 76 | case *tspb.Mutation_Update: 77 | m.Op = OpUpdate 78 | m.prepareMutationWrite(mutation.GetUpdate()) 79 | case *tspb.Mutation_Replace: 80 | m.Op = OpReplace 81 | m.prepareMutationWrite(mutation.GetReplace()) 82 | case *tspb.Mutation_Delete_: 83 | m.Op = OpDelete 84 | m.prepareMutationDelete(mutation.GetDelete()) 85 | default: 86 | m.Op = OpUnknown 87 | return ErrInvdMutationOp(m.Op) 88 | } 89 | return nil 90 | } 91 | 92 | func (m *Mutation) prepareMutationWrite(w *tspb.Mutation_Write) { 93 | m.Table = w.Table 94 | m.Columns = w.Columns 95 | m.ListValues = w.Values 96 | } 97 | 98 | func (m *Mutation) prepareMutationDelete(d *tspb.Mutation_Delete) { 99 | m.Table = d.GetTable() 100 | m.KeySet = d.GetKeySet() 101 | } 102 | 103 | // ErrInvdMutationOp returns error for unrecognized mutation operation. 104 | func ErrInvdMutationOp(op OP) error { 105 | return zettaErrorf(codes.InvalidArgument, "Unknown op type: %d", op) 106 | } 107 | -------------------------------------------------------------------------------- /tablestore/mysql/bexpression/expression.go: -------------------------------------------------------------------------------- 1 | package expression 2 | 3 | import ( 4 | "github.com/pingcap/parser/ast" 5 | "github.com/pingcap/tidb/types" 6 | "github.com/pingcap/tidb/util/chunk" 7 | "github.com/zhihu/zetta/tablestore/mysql/sctx" 8 | ) 9 | 10 | // Expression represents all scalar expression in SQL. 11 | type Expression interface { 12 | // Eval evaluates an expression through a row. 13 | Eval(row chunk.Row) (types.Datum, error) 14 | 15 | // EvalInt returns the int64 representation of expression. 16 | EvalInt(ctx sctx.Context, row chunk.Row) (val int64, isNull bool, err error) 17 | 18 | // EvalReal returns the float64 representation of expression. 19 | //EvalReal(ctx sessionctx.Context, row chunk.Row) (val float64, isNull bool, err error) 20 | 21 | // EvalString returns the string representation of expression. 22 | EvalString(ctx sctx.Context, row chunk.Row) (val string, isNull bool, err error) 23 | 24 | // GetType gets the type that the expression returns. 25 | GetType() *types.FieldType 26 | 27 | // Equal checks whether two expressions are equal. 28 | Equal(ctx sctx.Context, e Expression) bool 29 | 30 | // Clone copies an expression totally. 
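// The copy is expected to be deep, so a caller may mutate the clone without
// affecting the receiver (contrast CNFExprs.Clone, which clones element-wise,
// with CNFExprs.Shallow below).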
31 | Clone() Expression 32 | } 33 | 34 | // FindFieldName finds the column name from NameSlice. 35 | func FindFieldName(names types.NameSlice, astCol *ast.ColumnName) (int, error) { 36 | dbName, tblName, colName := astCol.Schema, astCol.Table, astCol.Name 37 | idx := -1 38 | for i, name := range names { 39 | if (dbName.L == "" || dbName.L == name.DBName.L) && 40 | (tblName.L == "" || tblName.L == name.TblName.L) && 41 | (colName.L == name.ColName.L) { 42 | if idx == -1 { 43 | idx = i 44 | } else { 45 | return -1, errNonUniq.GenWithStackByArgs(name.String(), "field list") 46 | } 47 | } 48 | } 49 | return idx, nil 50 | } 51 | 52 | // CNFExprs stands for a CNF expression. 53 | type CNFExprs []Expression 54 | 55 | // Clone clones itself. 56 | func (e CNFExprs) Clone() CNFExprs { 57 | cnf := make(CNFExprs, 0, len(e)) 58 | for _, expr := range e { 59 | cnf = append(cnf, expr.Clone()) 60 | } 61 | return cnf 62 | } 63 | 64 | // Shallow makes a shallow copy of itself. 65 | func (e CNFExprs) Shallow() CNFExprs { 66 | cnf := make(CNFExprs, 0, len(e)) 67 | cnf = append(cnf, e...) 68 | return cnf 69 | } 70 | 71 | // GetRowLen gets the length if the func is row, returns 1 if not row. 72 | func GetRowLen(e Expression) int { 73 | if f, ok := e.(*ScalarFunction); ok && f.FuncName.L == ast.RowFunc { 74 | return len(f.GetArgs()) 75 | } 76 | return 1 77 | } 78 | 79 | // DatumToConstant generates a Constant expression from a Datum. 80 | func DatumToConstant(d types.Datum, tp byte) *Constant { 81 | return &Constant{Value: d, RetType: types.NewFieldType(tp)} 82 | } 83 | 84 | // EvalBool evaluates expression list to a boolean value. The first returned value 85 | // indicates bool result of the expression list, the second returned value indicates 86 | // whether the result of the expression list is null, it can only be true when the 87 | // first returned values is false. 88 | func EvalBool(ctx sctx.Context, exprList CNFExprs, row chunk.Row) (bool, bool, error) { 89 | for _, expr := range exprList { 90 | data, err := expr.Eval(row) 91 | if err != nil { 92 | return false, false, err 93 | } 94 | if data.IsNull() { 95 | return false, false, nil 96 | } 97 | 98 | i, err := data.ToBool(ctx.GetSessionVars().StmtCtx) 99 | if err != nil { 100 | return false, false, err 101 | } 102 | if i == 0 { 103 | return false, false, nil 104 | } 105 | } 106 | return true, false, nil 107 | } 108 | 109 | type VarAssignment struct { 110 | Name string 111 | Expr Expression 112 | IsDefault bool 113 | IsGlobal bool 114 | IsSystem bool 115 | ExtendValue *Constant 116 | } 117 | -------------------------------------------------------------------------------- /tablestore/mysql/server/driver_base.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/pingcap/parser/ast" 9 | "github.com/pingcap/parser/auth" 10 | "github.com/pingcap/tidb/sessionctx/variable" 11 | "github.com/pingcap/tidb/util" 12 | ) 13 | 14 | type baseContext struct{} 15 | 16 | // Status implements QueryCtx Status method. 17 | func (tc *baseContext) Status() uint16 { 18 | return 1 19 | } 20 | 21 | func (tc *baseContext) GetHistorySQL() string { 22 | return "" 23 | } 24 | 25 | // LastInsertID implements QueryCtx LastInsertID method. 26 | func (tc *baseContext) LastInsertID() uint64 { 27 | return 0 28 | } 29 | 30 | // Value implements QueryCtx Value method. 
31 | func (tc *baseContext) Value(key fmt.Stringer) interface{} { 32 | return nil 33 | } 34 | 35 | // SetValue implements QueryCtx SetValue method. 36 | func (tc *baseContext) SetValue(key fmt.Stringer, value interface{}) { 37 | } 38 | 39 | // CommitTxn implements QueryCtx CommitTxn method. 40 | func (tc *baseContext) CommitTxn(ctx context.Context) error { 41 | return nil 42 | } 43 | 44 | // SetProcessInfo implements QueryCtx SetProcessInfo method. 45 | func (tc *baseContext) SetProcessInfo(sql string, t time.Time, command byte, maxExecutionTime uint64) { 46 | } 47 | 48 | // RollbackTxn implements QueryCtx RollbackTxn method. 49 | func (tc *baseContext) RollbackTxn() { 50 | } 51 | 52 | // AffectedRows implements QueryCtx AffectedRows method. 53 | func (tc *baseContext) AffectedRows() uint64 { 54 | return 0 55 | } 56 | 57 | // LastMessage implements QueryCtx LastMessage method. 58 | func (tc *baseContext) LastMessage() string { 59 | return "" 60 | } 61 | 62 | // CurrentDB implements QueryCtx CurrentDB method. 63 | func (tc *baseContext) CurrentDB() string { 64 | return "" 65 | } 66 | 67 | // WarningCount implements QueryCtx WarningCount method. 68 | func (tc *baseContext) WarningCount() uint16 { 69 | return 0 70 | } 71 | 72 | // ExecuteStmt implements QueryCtx interface. 73 | func (tc *baseContext) ExecuteStmt(ctx context.Context, stmt ast.StmtNode) (ResultSet, error) { 74 | return nil, nil 75 | } 76 | 77 | // Parse implements QueryCtx interface. 78 | func (tc *baseContext) Parse(ctx context.Context, sql string) ([]ast.StmtNode, error) { 79 | return nil, nil 80 | } 81 | 82 | // SetSessionManager implements the QueryCtx interface. 83 | func (tc *baseContext) SetSessionManager(sm util.SessionManager) { 84 | } 85 | 86 | // SetClientCapability implements QueryCtx SetClientCapability method. 87 | func (tc *baseContext) SetClientCapability(flags uint32) { 88 | } 89 | 90 | // Close implements QueryCtx Close method. 91 | func (tc *baseContext) Close() error { 92 | return nil 93 | } 94 | 95 | // Auth implements QueryCtx Auth method. 96 | func (tc *baseContext) Auth(user *auth.UserIdentity, auth []byte, salt []byte) bool { 97 | return false 98 | } 99 | 100 | // FieldList implements QueryCtx FieldList method. 101 | func (tc *baseContext) FieldList(table string) (columns []*ColumnInfo, err error) { 102 | return nil, nil 103 | } 104 | 105 | // GetStatement implements QueryCtx GetStatement method. 106 | func (tc *baseContext) GetStatement(stmtID int) PreparedStatement { 107 | return nil 108 | } 109 | 110 | // Prepare implements QueryCtx Prepare method. 111 | func (tc *baseContext) Prepare(sql string) (statement PreparedStatement, columns, params []*ColumnInfo, err error) { 112 | return 113 | } 114 | 115 | // ShowProcess implements QueryCtx ShowProcess method. 116 | func (tc *baseContext) ShowProcess() *util.ProcessInfo { 117 | return nil 118 | } 119 | 120 | // SetCommandValue implements QueryCtx SetCommandValue method. 121 | func (tc *baseContext) SetCommandValue(command byte) { 122 | } 123 | 124 | // GetSessionVars return SessionVars. 
125 | func (tc *baseContext) GetSessionVars() *variable.SessionVars { 126 | return nil 127 | } 128 | -------------------------------------------------------------------------------- /tablestore/zstore/scan.go: -------------------------------------------------------------------------------- 1 | package zstore 2 | 3 | import ( 4 | "github.com/pingcap/errors" 5 | "github.com/pingcap/tidb/kv" 6 | "github.com/pingcap/tidb/store/tikv" 7 | ) 8 | 9 | var ( 10 | scanBatchSize = 100 11 | batchGetSize = 5120 12 | ) 13 | 14 | type KvPair struct { 15 | Key []byte 16 | Value []byte 17 | Err error 18 | } 19 | type KvPairs struct { 20 | Keys [][]byte 21 | Vals [][]byte 22 | Err error 23 | } 24 | type Scanner struct { 25 | rawkv *tikv.RawKVClient 26 | batchSize int 27 | cache *KvPairs 28 | idx int 29 | nextStartKey []byte 30 | endKey []byte 31 | 32 | // Use for reverse scan. 33 | nextEndKey []byte 34 | reverse bool 35 | 36 | valid bool 37 | eof bool 38 | } 39 | 40 | func NewScanner(rawkv *tikv.RawKVClient, startKey []byte, endKey []byte, batchSize int, reverse bool) (*Scanner, error) { 41 | // It must be > 1. Otherwise scanner won't skipFirst. 42 | if batchSize <= 1 { 43 | batchSize = scanBatchSize 44 | } 45 | scanner := &Scanner{ 46 | rawkv: rawkv, 47 | batchSize: batchSize, 48 | valid: true, 49 | nextStartKey: startKey, 50 | endKey: endKey, 51 | reverse: reverse, 52 | nextEndKey: endKey, 53 | } 54 | err := scanner.Next() 55 | if kv.IsErrNotFound(err) { 56 | return scanner, nil 57 | } 58 | return scanner, errors.Trace(err) 59 | } 60 | 61 | // Valid return valid. 62 | func (s *Scanner) Valid() bool { 63 | return s.valid 64 | } 65 | 66 | // Key return key. 67 | func (s *Scanner) Key() kv.Key { 68 | if s.valid { 69 | return s.cache.Keys[s.idx] 70 | } 71 | return nil 72 | } 73 | 74 | // Value return value. 75 | func (s *Scanner) Value() []byte { 76 | if s.valid { 77 | return s.cache.Vals[s.idx] 78 | } 79 | return nil 80 | } 81 | 82 | // Next return next element. 83 | func (s *Scanner) Next() error { 84 | if !s.valid { 85 | return errors.New("scanner iterator is invalid") 86 | } 87 | var err error 88 | for { 89 | s.idx++ 90 | if s.cache == nil || s.idx >= len(s.cache.Keys) { 91 | if s.eof { 92 | s.Close() 93 | return nil 94 | } 95 | err = s.getData() 96 | if err != nil { 97 | s.Close() 98 | return errors.Trace(err) 99 | } 100 | if s.idx >= len(s.cache.Keys) { 101 | continue 102 | } 103 | } 104 | curKey := s.cache.Keys[s.idx] 105 | if (!s.reverse && (len(s.endKey) > 0 && kv.Key(curKey).Cmp(kv.Key(s.endKey)) >= 0)) || 106 | (s.reverse && len(s.nextStartKey) > 0 && kv.Key(curKey).Cmp(kv.Key(s.nextStartKey)) < 0) { 107 | s.eof = true 108 | s.Close() 109 | return nil 110 | } 111 | return nil 112 | } 113 | } 114 | 115 | // Close close iterator. 
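// After Close, Valid reports false, Key and Value return nil, and Next returns an error.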
116 | func (s *Scanner) Close() { 117 | s.valid = false 118 | } 119 | func (s *Scanner) getData() error { 120 | var ( 121 | reqEndKey, reqStartKey []byte 122 | keys, vals [][]byte 123 | err error 124 | ) 125 | for { 126 | if !s.reverse { 127 | reqStartKey = s.nextStartKey 128 | reqEndKey = s.endKey 129 | // if len(reqEndKey) >0 && bytes.Compare(reqEndKey, reqStartEnd) 130 | keys, vals, err = s.rawkv.Scan(reqStartKey, reqEndKey, s.batchSize) 131 | if err != nil { 132 | return errors.Trace(err) 133 | } 134 | } else { 135 | reqEndKey = s.nextEndKey 136 | keys, vals, err = s.rawkv.ReverseScan(reqEndKey, reqStartKey, s.batchSize) 137 | if err != nil { 138 | return errors.Trace(err) 139 | } 140 | } 141 | s.cache = &KvPairs{ 142 | Keys: keys, 143 | Vals: vals, 144 | } 145 | s.idx = 0 146 | if len(s.cache.Keys) < s.batchSize { 147 | // No more data in future 148 | s.eof = true 149 | return nil 150 | } 151 | lastKey := keys[len(keys)-1] 152 | if !s.reverse { 153 | s.nextStartKey = []byte(kv.Key(lastKey).Next()) 154 | } else { 155 | s.nextEndKey = lastKey 156 | } 157 | return nil 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /tablestore/ddl/schema_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package ddl 15 | 16 | import ( 17 | "context" 18 | "time" 19 | 20 | . "github.com/pingcap/check" 21 | "github.com/pingcap/tidb/kv" 22 | "github.com/pingcap/tidb/sessionctx" 23 | tspb "github.com/zhihu/zetta-proto/pkg/tablestore" 24 | "github.com/zhihu/zetta/pkg/meta" 25 | "github.com/zhihu/zetta/pkg/model" 26 | ) 27 | 28 | func isDDLJobDone(c *C, t *meta.Meta) bool { 29 | job, err := t.GetDDLJobByIdx(0) 30 | c.Assert(err, IsNil) 31 | // Cannot find in job queue. 
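	// A nil job from GetDDLJobByIdx(0) means the queue slot is empty, i.e. the job has
	// left the DDL job queue and can be treated as done.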
32 | if job == nil { 33 | return true 34 | } 35 | 36 | time.Sleep(testLease) 37 | return false 38 | } 39 | 40 | func testCheckSchemaState(c *C, d *ddl, dbInfo *model.DatabaseMeta, state model.SchemaState) { 41 | isDropped := true 42 | 43 | for { 44 | kv.RunInNewTxn(d.store, false, func(txn kv.Transaction) error { 45 | t := meta.NewMeta(txn) 46 | info, err := t.GetDatabase(dbInfo.Id) 47 | c.Assert(err, IsNil) 48 | 49 | if state == model.StateNone { 50 | isDropped = isDDLJobDone(c, t) 51 | if !isDropped { 52 | return nil 53 | } 54 | c.Assert(info, IsNil) 55 | return nil 56 | } 57 | 58 | c.Assert(info.Database, DeepEquals, dbInfo.Database) 59 | c.Assert(info.State, Equals, state) 60 | return nil 61 | }) 62 | 63 | if isDropped { 64 | break 65 | } 66 | } 67 | } 68 | 69 | func testSchemaInfo(c *C, d *ddl, name string) *model.DatabaseMeta { 70 | dbInfo := &model.DatabaseMeta{ 71 | DatabaseMeta: tspb.DatabaseMeta{ 72 | Database: name, 73 | }, 74 | } 75 | genIDs, err := d.genGlobalIDs(1) 76 | c.Assert(err, IsNil) 77 | dbInfo.Id = genIDs[0] 78 | return dbInfo 79 | } 80 | 81 | func buildDropSchemaJob(dbInfo *model.DatabaseMeta) *model.Job { 82 | return &model.Job{ 83 | SchemaID: dbInfo.Id, 84 | Type: model.ActionDropSchema, 85 | BinlogInfo: &model.HistoryInfo{}, 86 | } 87 | } 88 | 89 | func testDropSchema(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DatabaseMeta) (*model.Job, int64) { 90 | job := buildDropSchemaJob(dbInfo) 91 | err := d.doDDLJob(ctx, job) 92 | c.Assert(err, IsNil) 93 | ver := getSchemaVer(c, ctx) 94 | return job, ver 95 | } 96 | 97 | func testCreateSchema(c *C, ctx sessionctx.Context, d *ddl, dbInfo *model.DatabaseMeta) *model.Job { 98 | job := &model.Job{ 99 | SchemaID: dbInfo.Id, 100 | Type: model.ActionCreateSchema, 101 | BinlogInfo: &model.HistoryInfo{}, 102 | Args: []interface{}{dbInfo}, 103 | } 104 | err := d.doDDLJob(ctx, job) 105 | c.Assert(err, IsNil) 106 | 107 | v := getSchemaVer(c, ctx) 108 | dbInfo.State = model.StatePublic 109 | checkHistoryJobArgs(c, ctx, job.ID, &historyJobArgs{ver: v, db: dbInfo}) 110 | dbInfo.State = model.StateNone 111 | return job 112 | } 113 | 114 | func (s *testDDLSuite) TestCreateDatabase(c *C) { 115 | store := testCreateStore(c, "test_schema_ddl_job") 116 | defer store.Close() 117 | d := testNewDDL(context.Background(), nil, store, nil, nil, testLease) 118 | defer d.Stop() 119 | ctx := testNewContext(d) 120 | 121 | dbMeta := testSchemaInfo(c, d, "test") 122 | schemaJob := testCreateSchema(c, ctx, d, dbMeta) 123 | testCheckSchemaState(c, d, dbMeta, model.StatePublic) 124 | testCheckJobDone(c, d, schemaJob, true) 125 | 126 | job, _ := testDropSchema(c, ctx, d, dbMeta) 127 | testCheckSchemaState(c, d, dbMeta, model.StateNone) 128 | testCheckJobDone(c, d, job, false) 129 | } 130 | -------------------------------------------------------------------------------- /tablestore/mysql/executor/data_model_tranfs.go: -------------------------------------------------------------------------------- 1 | package executor 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "strings" 7 | 8 | "github.com/pingcap/parser/ast" 9 | "github.com/pingcap/parser/mysql" 10 | "github.com/pingcap/pd/v4/server/schedule/placement" 11 | "github.com/pingcap/tidb/ddl" 12 | "github.com/pingcap/tidb/util/mock" 13 | "github.com/zhihu/zetta/pkg/model" 14 | "github.com/zhihu/zetta/tablestore/mysql/expression" 15 | ) 16 | 17 | /* 18 | Transfer MySQL data model to TableStore data model. 
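Each helper below maps a parsed ast node (CREATE DATABASE, CREATE TABLE, CREATE INDEX,
or a column definition for ADD COLUMN) onto the corresponding zetta model.*Meta structure.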
19 | */ 20 | 21 | func toCreateDatabase(s *ast.CreateDatabaseStmt) *model.DatabaseMeta { 22 | dbMeta := &model.DatabaseMeta{} 23 | dbMeta.Database = s.Name 24 | return dbMeta 25 | } 26 | 27 | func toAddCol(dbName, tblName string, s *ast.ColumnDef) *model.ColumnMeta { 28 | cm := model.NewColumnMetaFromColumnDef(s) 29 | if s.Options != nil { 30 | for _, v := range s.Options { 31 | switch v.Tp { 32 | case ast.ColumnOptionNotNull: 33 | cm.Flag |= mysql.NotNullFlag 34 | case ast.ColumnOptionNull: 35 | cm.Flag &= ^mysql.NotNullFlag 36 | case ast.ColumnOptionDefaultValue: 37 | v, _ := expression.EvalAstExpr(mock.NewContext(), v.Expr) 38 | cm.DefaultValue, _ = v.ToString() 39 | } 40 | } 41 | } 42 | return cm 43 | } 44 | 45 | //func parseLabelsFromComment(comment string) 46 | 47 | func toCreateTable(s *ast.CreateTableStmt) *model.TableMeta { 48 | tbMeta := &model.TableMeta{} 49 | tbMeta.TableName = s.Table.Name.L 50 | tbMeta.Columns = make([]*model.ColumnMeta, len(s.Cols)) 51 | for i, col := range s.Cols { 52 | tbMeta.Columns[i] = model.NewColumnMetaFromColumnDef(col) 53 | tbMeta.Columns[i].ColumnType = model.FieldTypeToProtoType(col.Tp) 54 | } 55 | tbMeta.Indices = make([]*model.IndexMeta, 0) 56 | for _, cons := range s.Constraints { 57 | if cons.Tp == ast.ConstraintPrimaryKey { 58 | tbMeta.PrimaryKey = model.GetPrimaryKeysFromConstraints(cons) 59 | continue 60 | } 61 | tbMeta.Indices = append(tbMeta.Indices, model.NewIndexMetaFromConstraits(cons)) 62 | } 63 | for i, k := range tbMeta.PrimaryKey { 64 | tbMeta.PrimaryKey[i] = strings.ToLower(k) 65 | } 66 | 67 | tbInfo, _ := ddl.BuildTableInfoFromAST(s) 68 | tbMeta.TableInfo = *tbInfo 69 | for i, col := range tbInfo.Columns { 70 | tbMeta.Columns[i].ColumnInfo = *col 71 | } 72 | 73 | var rule *placement.Rule 74 | for _, opt := range s.Options { 75 | if opt.Tp == ast.TableOptionEngine { 76 | if tbMeta.Attributes == nil { 77 | tbMeta.Attributes = make(map[string]string) 78 | } 79 | tbMeta.Attributes["AccessMode"] = opt.StrValue 80 | } 81 | if opt.Tp == ast.TableOptionComment { 82 | rule = buildRuleFromComment(opt.StrValue, tbMeta) 83 | } 84 | } 85 | 86 | if rule != nil { 87 | tbMeta.Rules = append(tbMeta.Rules, rule) 88 | } 89 | 90 | return tbMeta 91 | } 92 | 93 | func buildRuleFromComment(comment string, tbMeta *model.TableMeta) *placement.Rule { 94 | fmt.Println("comment:", comment) 95 | if comment == "" { 96 | return nil 97 | } 98 | labelConstraint := placement.LabelConstraint{} 99 | err := json.Unmarshal([]byte(comment), &labelConstraint) 100 | if err != nil { 101 | fmt.Println("json decode err:", err) 102 | return nil 103 | } 104 | fmt.Println("label constraint:", labelConstraint) 105 | rule := &placement.Rule{ 106 | Index: 7, 107 | Override: true, 108 | Count: 3, 109 | Role: placement.Voter, 110 | } 111 | rule.LabelConstraints = append(rule.LabelConstraints, labelConstraint) 112 | return rule 113 | } 114 | 115 | func toCreateIndex(s *ast.CreateIndexStmt) *model.IndexMeta { 116 | idxMeta := &model.IndexMeta{} 117 | tbName := s.Table.Name.L 118 | idxName := s.IndexName 119 | defineCols := make([]string, len(s.IndexPartSpecifications)) 120 | for i, col := range s.IndexPartSpecifications { 121 | colName := col.Column.Name.L 122 | defineCols[i] = colName 123 | } 124 | idxMeta.TableName = tbName 125 | idxMeta.Name = idxName 126 | idxMeta.DefinedColumns = defineCols 127 | if s.KeyType == ast.IndexKeyTypeUnique { 128 | idxMeta.Unique = true 129 | } 130 | return idxMeta 131 | } 132 | 
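// Illustrative sketch, not part of the original file: with buildRuleFromComment above, a
// table comment carrying a JSON-encoded pd placement.LabelConstraint, e.g. (hypothetical
// label key and values)
//
//	CREATE TABLE t (...) COMMENT '{"key": "zone", "op": "in", "values": ["bj"]}';
//
// is unmarshalled into a LabelConstraint and attached to a 3-replica Voter rule
// (Index 7, Override true) on tbMeta.Rules.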
-------------------------------------------------------------------------------- /pkg/structure/type.go: -------------------------------------------------------------------------------- 1 | // Copyright 2015 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package structure 15 | 16 | import ( 17 | "bytes" 18 | 19 | "github.com/pingcap/errors" 20 | "github.com/pingcap/tidb/kv" 21 | "github.com/zhihu/zetta/pkg/codec" 22 | ) 23 | 24 | // TypeFlag is for data structure meta/data flag. 25 | type TypeFlag byte 26 | 27 | const ( 28 | // StringMeta is the flag for string meta. 29 | StringMeta TypeFlag = 'S' 30 | // StringData is the flag for string data. 31 | StringData TypeFlag = 's' 32 | // HashMeta is the flag for hash meta. 33 | HashMeta TypeFlag = 'H' 34 | // HashData is the flag for hash data. 35 | HashData TypeFlag = 'h' 36 | // ListMeta is the flag for list meta. 37 | ListMeta TypeFlag = 'L' 38 | // ListData is the flag for list data. 39 | ListData TypeFlag = 'l' 40 | ) 41 | 42 | func (t *TxStructure) encodeStringDataKey(key []byte) kv.Key { 43 | // for codec Encode, we may add extra bytes data, so here and following encode 44 | // we will use extra length like 4 for a little optimization. 45 | ek := make([]byte, 0, len(t.prefix)+len(key)+24) 46 | ek = append(ek, t.prefix...) 47 | ek = codec.EncodeBytes(ek, key) 48 | return codec.EncodeUint(ek, uint64(StringData)) 49 | } 50 | 51 | func (t *TxStructure) encodeHashMetaKey(key []byte) kv.Key { 52 | ek := make([]byte, 0, len(t.prefix)+len(key)+24) 53 | ek = append(ek, t.prefix...) 54 | ek = codec.EncodeBytes(ek, key) 55 | return codec.EncodeUint(ek, uint64(HashMeta)) 56 | } 57 | 58 | func (t *TxStructure) encodeHashDataKey(key []byte, field []byte) kv.Key { 59 | ek := make([]byte, 0, len(t.prefix)+len(key)+len(field)+30) 60 | ek = append(ek, t.prefix...) 61 | ek = codec.EncodeBytes(ek, key) 62 | ek = codec.EncodeUint(ek, uint64(HashData)) 63 | return codec.EncodeBytes(ek, field) 64 | } 65 | 66 | // EncodeHashDataKey exports for tests. 
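// The key layout produced here is prefix | EncodeBytes(key) | EncodeUint(HashData) | EncodeBytes(field);
// for example, with a hypothetical prefix "m", EncodeHashDataKey([]byte("DBs"), []byte("DB:1"))
// would yield "m" + EncodeBytes("DBs") + the 'h' flag + EncodeBytes("DB:1").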
67 | func (t *TxStructure) EncodeHashDataKey(key []byte, field []byte) kv.Key { 68 | return t.encodeHashDataKey(key, field) 69 | } 70 | 71 | func (t *TxStructure) decodeHashDataKey(ek kv.Key) ([]byte, []byte, error) { 72 | var ( 73 | key []byte 74 | field []byte 75 | err error 76 | tp uint64 77 | ) 78 | 79 | if !bytes.HasPrefix(ek, t.prefix) { 80 | return nil, nil, errors.New("invalid encoded hash data key prefix") 81 | } 82 | 83 | ek = ek[len(t.prefix):] 84 | 85 | ek, key, err = codec.DecodeBytes(ek, nil) 86 | if err != nil { 87 | return nil, nil, errors.Trace(err) 88 | } 89 | 90 | ek, tp, err = codec.DecodeUint(ek) 91 | if err != nil { 92 | return nil, nil, errors.Trace(err) 93 | } else if TypeFlag(tp) != HashData { 94 | return nil, nil, errInvalidHashKeyFlag.GenWithStack("invalid encoded hash data key flag %c", byte(tp)) 95 | } 96 | 97 | _, field, err = codec.DecodeBytes(ek, nil) 98 | return key, field, errors.Trace(err) 99 | } 100 | 101 | func (t *TxStructure) hashDataKeyPrefix(key []byte) kv.Key { 102 | ek := make([]byte, 0, len(t.prefix)+len(key)+24) 103 | ek = append(ek, t.prefix...) 104 | ek = codec.EncodeBytes(ek, key) 105 | return codec.EncodeUint(ek, uint64(HashData)) 106 | } 107 | 108 | func (t *TxStructure) encodeListMetaKey(key []byte) kv.Key { 109 | ek := make([]byte, 0, len(t.prefix)+len(key)+24) 110 | ek = append(ek, t.prefix...) 111 | ek = codec.EncodeBytes(ek, key) 112 | return codec.EncodeUint(ek, uint64(ListMeta)) 113 | } 114 | 115 | func (t *TxStructure) encodeListDataKey(key []byte, index int64) kv.Key { 116 | ek := make([]byte, 0, len(t.prefix)+len(key)+36) 117 | ek = append(ek, t.prefix...) 118 | ek = codec.EncodeBytes(ek, key) 119 | ek = codec.EncodeUint(ek, uint64(ListData)) 120 | return codec.EncodeInt(ek, index) 121 | } 122 | -------------------------------------------------------------------------------- /tablestore/mysql/planner/plan.go: -------------------------------------------------------------------------------- 1 | package planner 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/pingcap/tidb/types" 7 | "github.com/pingcap/tidb/util/stringutil" 8 | "github.com/zhihu/zetta/tablestore/mysql/expression" 9 | "github.com/zhihu/zetta/tablestore/mysql/sctx" 10 | ) 11 | 12 | type Plan interface { 13 | Schema() *expression.Schema 14 | Type() string 15 | // OutputNames returns the outputting names of each column. 16 | OutputNames() types.NameSlice 17 | 18 | // SetOutputNames sets the outputting name by the given slice. 19 | SetOutputNames(names types.NameSlice) 20 | ExplainID() fmt.Stringer 21 | } 22 | 23 | type LogicalPlan interface { 24 | Plan 25 | // Get all the children. 26 | Children() []LogicalPlan 27 | 28 | // SetChildren sets the children for the plan. 29 | SetChildren(...LogicalPlan) 30 | 31 | // SetChild sets the ith child for the plan. 32 | SetChild(i int, child LogicalPlan) 33 | 34 | // PruneColumns prunes the unused columns. 35 | PruneColumns([]*expression.Column) ([]*expression.Column, error) 36 | 37 | // Push the expression in where clause to datasource to use index. 38 | // Only selection need to implement this now. 39 | PushFilterDown([]expression.Expression) []expression.Expression 40 | 41 | PushLimitDown(uint64) 42 | } 43 | 44 | type PhysicalPlan interface { 45 | Plan 46 | // Get all the children. 47 | Children() []PhysicalPlan 48 | 49 | // SetChildren sets the children for the plan. 50 | SetChildren(...PhysicalPlan) 51 | 52 | // SetChild sets the ith child for the plan. 
53 | SetChild(i int, child PhysicalPlan) 54 | } 55 | 56 | type basePlan struct { 57 | ctx sctx.Context 58 | tp string 59 | schema *expression.Schema 60 | } 61 | 62 | func newBasePlan(ctx sctx.Context, tp string) basePlan { 63 | return basePlan{ 64 | tp: tp, 65 | ctx: ctx, 66 | } 67 | } 68 | 69 | func (bp *basePlan) Type() string { 70 | return bp.tp 71 | } 72 | 73 | func (p *basePlan) ExplainID() fmt.Stringer { 74 | return stringutil.MemoizeStr(func() string { 75 | return p.tp 76 | }) 77 | } 78 | 79 | func (bp *basePlan) Schema() *expression.Schema { 80 | return bp.schema 81 | } 82 | 83 | // OutputNames returns the outputting names of each column. 84 | func (p *basePlan) OutputNames() types.NameSlice { 85 | return nil 86 | } 87 | 88 | func (p *basePlan) SetOutputNames(names types.NameSlice) { 89 | } 90 | 91 | type baseLogicalPlan struct { 92 | basePlan 93 | self LogicalPlan 94 | children []LogicalPlan 95 | } 96 | 97 | func newBaseLogicalPlan(ctx sctx.Context, tp string, plan LogicalPlan) baseLogicalPlan { 98 | return baseLogicalPlan{ 99 | basePlan: newBasePlan(ctx, tp), 100 | self: plan, 101 | children: make([]LogicalPlan, 0), 102 | } 103 | } 104 | 105 | func (blp *baseLogicalPlan) Children() []LogicalPlan { 106 | return blp.children 107 | } 108 | 109 | func (blp *baseLogicalPlan) SetChildren(children ...LogicalPlan) { 110 | blp.children = children 111 | } 112 | 113 | func (blp *baseLogicalPlan) SetChild(i int, child LogicalPlan) { 114 | blp.children[i] = child 115 | } 116 | 117 | func (blp *baseLogicalPlan) PruneColumns(usedColumns []*expression.Column) ([]*expression.Column, error) { 118 | return usedColumns, nil 119 | } 120 | 121 | func (blp *baseLogicalPlan) PushFilterDown(exprs []expression.Expression) []expression.Expression { 122 | return nil 123 | } 124 | 125 | func (blp *baseLogicalPlan) PushLimitDown(limit uint64) { 126 | if len(blp.children) > 0 { 127 | blp.children[0].PushLimitDown(limit) 128 | } 129 | } 130 | 131 | type basePhysicalPlan struct { 132 | basePlan 133 | children []PhysicalPlan 134 | self PhysicalPlan 135 | } 136 | 137 | func newBasePhysicalPlan(ctx sctx.Context, tp string, plan PhysicalPlan) *basePhysicalPlan { 138 | return &basePhysicalPlan{ 139 | basePlan: newBasePlan(ctx, tp), 140 | self: plan, 141 | children: make([]PhysicalPlan, 0), 142 | } 143 | } 144 | 145 | func (bpp *basePhysicalPlan) Children() []PhysicalPlan { 146 | return bpp.children 147 | } 148 | func (bpp *basePhysicalPlan) SetChildren(children ...PhysicalPlan) { 149 | bpp.children = children 150 | } 151 | 152 | func (bpp *basePhysicalPlan) SetChild(i int, child PhysicalPlan) { 153 | bpp.children[i] = child 154 | } 155 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/aggregation/agg_to_pb.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | 14 | package aggregation 15 | 16 | import ( 17 | "github.com/pingcap/errors" 18 | "github.com/pingcap/parser/ast" 19 | //"github.com/pingcap/tidb/expression" 20 | "github.com/pingcap/tidb/kv" 21 | "github.com/pingcap/tidb/sessionctx" 22 | "github.com/pingcap/tidb/sessionctx/stmtctx" 23 | "github.com/pingcap/tidb/types" 24 | "github.com/pingcap/tipb/go-tipb" 25 | "github.com/zhihu/zetta/tablestore/mysql/expression" 26 | ) 27 | 28 | // AggFuncToPBExpr converts aggregate function to pb. 29 | func AggFuncToPBExpr(sc *stmtctx.StatementContext, client kv.Client, aggFunc *AggFuncDesc) *tipb.Expr { 30 | if aggFunc.HasDistinct { 31 | // do nothing and ignore aggFunc.HasDistinct 32 | } 33 | if len(aggFunc.OrderByItems) > 0 { 34 | return nil 35 | } 36 | pc := expression.NewPBConverter(client, sc) 37 | var tp tipb.ExprType 38 | switch aggFunc.Name { 39 | case ast.AggFuncCount: 40 | tp = tipb.ExprType_Count 41 | case ast.AggFuncApproxCountDistinct: 42 | tp = tipb.ExprType_ApproxCountDistinct 43 | case ast.AggFuncFirstRow: 44 | tp = tipb.ExprType_First 45 | case ast.AggFuncGroupConcat: 46 | tp = tipb.ExprType_GroupConcat 47 | case ast.AggFuncMax: 48 | tp = tipb.ExprType_Max 49 | case ast.AggFuncMin: 50 | tp = tipb.ExprType_Min 51 | case ast.AggFuncSum: 52 | tp = tipb.ExprType_Sum 53 | case ast.AggFuncAvg: 54 | tp = tipb.ExprType_Avg 55 | case ast.AggFuncBitOr: 56 | tp = tipb.ExprType_Agg_BitOr 57 | case ast.AggFuncBitXor: 58 | tp = tipb.ExprType_Agg_BitXor 59 | case ast.AggFuncBitAnd: 60 | tp = tipb.ExprType_Agg_BitAnd 61 | case ast.AggFuncVarPop: 62 | tp = tipb.ExprType_VarPop 63 | case ast.AggFuncJsonObjectAgg: 64 | tp = tipb.ExprType_JsonObjectAgg 65 | } 66 | if !client.IsRequestTypeSupported(kv.ReqTypeSelect, int64(tp)) { 67 | return nil 68 | } 69 | 70 | children := make([]*tipb.Expr, 0, len(aggFunc.Args)) 71 | for _, arg := range aggFunc.Args { 72 | pbArg := pc.ExprToPB(arg) 73 | if pbArg == nil { 74 | return nil 75 | } 76 | children = append(children, pbArg) 77 | } 78 | return &tipb.Expr{Tp: tp, Children: children, FieldType: expression.ToPBFieldType(aggFunc.RetTp)} 79 | } 80 | 81 | // PBExprToAggFuncDesc converts pb to aggregate function. 
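// It is the inverse of AggFuncToPBExpr above: the tipb.ExprType is mapped back to the ast
// aggregate name, the pb children are decoded into expressions, and the resulting descriptor
// is always built in Partial1Mode with HasDistinct set to false.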
82 | func PBExprToAggFuncDesc(ctx sessionctx.Context, aggFunc *tipb.Expr, fieldTps []*types.FieldType) (*AggFuncDesc, error) { 83 | var name string 84 | switch aggFunc.Tp { 85 | case tipb.ExprType_Count: 86 | name = ast.AggFuncCount 87 | case tipb.ExprType_ApproxCountDistinct: 88 | name = ast.AggFuncApproxCountDistinct 89 | case tipb.ExprType_First: 90 | name = ast.AggFuncFirstRow 91 | case tipb.ExprType_GroupConcat: 92 | name = ast.AggFuncGroupConcat 93 | case tipb.ExprType_Max: 94 | name = ast.AggFuncMax 95 | case tipb.ExprType_Min: 96 | name = ast.AggFuncMin 97 | case tipb.ExprType_Sum: 98 | name = ast.AggFuncSum 99 | case tipb.ExprType_Avg: 100 | name = ast.AggFuncAvg 101 | case tipb.ExprType_Agg_BitOr: 102 | name = ast.AggFuncBitOr 103 | case tipb.ExprType_Agg_BitXor: 104 | name = ast.AggFuncBitXor 105 | case tipb.ExprType_Agg_BitAnd: 106 | name = ast.AggFuncBitAnd 107 | default: 108 | return nil, errors.Errorf("unknown aggregation function type: %v", aggFunc.Tp) 109 | } 110 | 111 | args, err := expression.PBToExprs(aggFunc.Children, fieldTps, ctx.GetSessionVars().StmtCtx) 112 | if err != nil { 113 | return nil, err 114 | } 115 | base := baseFuncDesc{ 116 | Name: name, 117 | Args: args, 118 | RetTp: expression.FieldTypeFromPB(aggFunc.FieldType), 119 | } 120 | base.WrapCastForAggArgs(ctx) 121 | return &AggFuncDesc{ 122 | baseFuncDesc: base, 123 | Mode: Partial1Mode, 124 | HasDistinct: false, 125 | }, nil 126 | } 127 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/aggregation/concat.go: -------------------------------------------------------------------------------- 1 | // Copyright 2017 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package aggregation 15 | 16 | import ( 17 | "bytes" 18 | "fmt" 19 | 20 | "github.com/cznic/mathutil" 21 | "github.com/pingcap/errors" 22 | //"github.com/pingcap/tidb/expression" 23 | "github.com/zhihu/zetta/tablestore/mysql/expression" 24 | "github.com/pingcap/tidb/sessionctx/stmtctx" 25 | "github.com/pingcap/tidb/types" 26 | "github.com/pingcap/tidb/util/chunk" 27 | ) 28 | 29 | type concatFunction struct { 30 | aggFunction 31 | separator string 32 | maxLen uint64 33 | sepInited bool 34 | // truncated according to MySQL, a 'group_concat' function generates exactly one 'truncated' warning during its life time, no matter 35 | // how many group actually truncated. 'truncated' acts as a sentinel to indicate whether this warning has already been 36 | // generated. 
37 | truncated bool 38 | } 39 | 40 | func (cf *concatFunction) writeValue(evalCtx *AggEvaluateContext, val types.Datum) { 41 | if val.Kind() == types.KindBytes { 42 | evalCtx.Buffer.Write(val.GetBytes()) 43 | } else { 44 | evalCtx.Buffer.WriteString(fmt.Sprintf("%v", val.GetValue())) 45 | } 46 | } 47 | 48 | func (cf *concatFunction) initSeparator(sc *stmtctx.StatementContext, row chunk.Row) error { 49 | sepArg := cf.Args[len(cf.Args)-1] 50 | sepDatum, err := sepArg.Eval(row) 51 | if err != nil { 52 | return err 53 | } 54 | if sepDatum.IsNull() { 55 | return errors.Errorf("Invalid separator argument.") 56 | } 57 | cf.separator, err = sepDatum.ToString() 58 | return err 59 | } 60 | 61 | // Update implements Aggregation interface. 62 | func (cf *concatFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.StatementContext, row chunk.Row) error { 63 | datumBuf := make([]types.Datum, 0, len(cf.Args)) 64 | if !cf.sepInited { 65 | err := cf.initSeparator(sc, row) 66 | if err != nil { 67 | return err 68 | } 69 | cf.sepInited = true 70 | } 71 | 72 | // The last parameter is the concat separator, we only concat the first "len(cf.Args)-1" parameters. 73 | for i, length := 0, len(cf.Args)-1; i < length; i++ { 74 | value, err := cf.Args[i].Eval(row) 75 | if err != nil { 76 | return err 77 | } 78 | if value.IsNull() { 79 | return nil 80 | } 81 | datumBuf = append(datumBuf, value) 82 | } 83 | if cf.HasDistinct { 84 | d, err := evalCtx.DistinctChecker.Check(datumBuf) 85 | if err != nil { 86 | return err 87 | } 88 | if !d { 89 | return nil 90 | } 91 | } 92 | if evalCtx.Buffer == nil { 93 | evalCtx.Buffer = &bytes.Buffer{} 94 | } else { 95 | evalCtx.Buffer.WriteString(cf.separator) 96 | } 97 | for _, val := range datumBuf { 98 | cf.writeValue(evalCtx, val) 99 | } 100 | if cf.maxLen > 0 && uint64(evalCtx.Buffer.Len()) > cf.maxLen { 101 | i := mathutil.MaxInt 102 | if uint64(i) > cf.maxLen { 103 | i = int(cf.maxLen) 104 | } 105 | evalCtx.Buffer.Truncate(i) 106 | if !cf.truncated { 107 | sc.AppendWarning(expression.ErrCutValueGroupConcat.GenWithStackByArgs(cf.Args[0].String())) 108 | } 109 | cf.truncated = true 110 | } 111 | return nil 112 | } 113 | 114 | // GetResult implements Aggregation interface. 115 | func (cf *concatFunction) GetResult(evalCtx *AggEvaluateContext) (d types.Datum) { 116 | if evalCtx.Buffer != nil { 117 | d.SetString(evalCtx.Buffer.String(), cf.RetTp.Collate) 118 | } else { 119 | d.SetNull() 120 | } 121 | return d 122 | } 123 | 124 | func (cf *concatFunction) ResetContext(sc *stmtctx.StatementContext, evalCtx *AggEvaluateContext) { 125 | if cf.HasDistinct { 126 | evalCtx.DistinctChecker = createDistinctChecker(sc) 127 | } 128 | evalCtx.Buffer = nil 129 | } 130 | 131 | // GetPartialResult implements Aggregation interface. 
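// For group_concat the partial result is a single datum: the string concatenated so far,
// or NULL when no row has been appended yet.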
132 | func (cf *concatFunction) GetPartialResult(evalCtx *AggEvaluateContext) []types.Datum { 133 | return []types.Datum{cf.GetResult(evalCtx)} 134 | } 135 | -------------------------------------------------------------------------------- /tablestore/mysql/bexpression/aggregation/base_func.go: -------------------------------------------------------------------------------- 1 | package aggregation 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/cznic/mathutil" 7 | "github.com/pingcap/errors" 8 | "github.com/pingcap/parser/ast" 9 | "github.com/pingcap/parser/mysql" 10 | "github.com/pingcap/tidb/types" 11 | "github.com/zhihu/zetta/tablestore/mysql/expression" 12 | "github.com/zhihu/zetta/tablestore/mysql/sctx" 13 | ) 14 | 15 | // baseFuncDesc describes an function signature, only used in planner. 16 | type baseFuncDesc struct { 17 | // Name represents the function name. 18 | Name string 19 | // Args represents the arguments of the function. 20 | Args []expression.Expression 21 | // RetTp represents the return type of the function. 22 | RetTp *types.FieldType 23 | } 24 | 25 | func newBaseFuncDesc(ctx sctx.Context, name string, args []expression.Expression) (baseFuncDesc, error) { 26 | b := baseFuncDesc{Name: strings.ToLower(name), Args: args} 27 | err := b.typeInfer(ctx) 28 | return b, err 29 | } 30 | 31 | // typeInfer infers the arguments and return types of an function. 32 | func (a *baseFuncDesc) typeInfer(ctx sctx.Context) error { 33 | switch a.Name { 34 | case ast.AggFuncCount: 35 | a.typeInfer4Count() 36 | case ast.AggFuncSum: 37 | a.typeInfer4Sum() 38 | case ast.AggFuncAvg: 39 | a.typeInfer4Avg() 40 | case ast.AggFuncMax, ast.AggFuncMin, ast.AggFuncFirstRow: 41 | a.typeInfer4MaxMin() 42 | default: 43 | return errors.Errorf("unsupported agg function: %s", a.Name) 44 | } 45 | return nil 46 | } 47 | 48 | func (a *baseFuncDesc) typeInfer4Count() { 49 | a.RetTp = types.NewFieldType(mysql.TypeLonglong) 50 | a.RetTp.Flen = 21 51 | a.RetTp.Decimal = 0 52 | // count never returns null 53 | a.RetTp.Flag |= mysql.NotNullFlag 54 | types.SetBinChsClnFlag(a.RetTp) 55 | } 56 | 57 | // typeInfer4Sum should returns a "decimal", otherwise it returns a "double". 58 | // Because child returns integer or decimal type. 59 | func (a *baseFuncDesc) typeInfer4Sum() { 60 | switch a.Args[0].GetType().Tp { 61 | case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: 62 | a.RetTp = types.NewFieldType(mysql.TypeNewDecimal) 63 | a.RetTp.Flen, a.RetTp.Decimal = mysql.MaxDecimalWidth, 0 64 | case mysql.TypeNewDecimal: 65 | a.RetTp = types.NewFieldType(mysql.TypeNewDecimal) 66 | a.RetTp.Flen, a.RetTp.Decimal = mysql.MaxDecimalWidth, a.Args[0].GetType().Decimal 67 | if a.RetTp.Decimal < 0 || a.RetTp.Decimal > mysql.MaxDecimalScale { 68 | a.RetTp.Decimal = mysql.MaxDecimalScale 69 | } 70 | case mysql.TypeDouble, mysql.TypeFloat: 71 | a.RetTp = types.NewFieldType(mysql.TypeDouble) 72 | a.RetTp.Flen, a.RetTp.Decimal = mysql.MaxRealWidth, a.Args[0].GetType().Decimal 73 | default: 74 | a.RetTp = types.NewFieldType(mysql.TypeDouble) 75 | a.RetTp.Flen, a.RetTp.Decimal = mysql.MaxRealWidth, types.UnspecifiedLength 76 | } 77 | types.SetBinChsClnFlag(a.RetTp) 78 | } 79 | 80 | // typeInfer4Avg should returns a "decimal", otherwise it returns a "double". 81 | // Because child returns integer or decimal type. 
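// For example (hypothetical argument type): AVG over a DECIMAL(10,2) column infers
// DECIMAL(mysql.MaxDecimalWidth, 2+types.DivFracIncr) capped at mysql.MaxDecimalScale,
// while AVG over a DOUBLE keeps mysql.MaxRealWidth and the argument's own scale.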
82 | func (a *baseFuncDesc) typeInfer4Avg() { 83 | switch a.Args[0].GetType().Tp { 84 | case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong, mysql.TypeNewDecimal: 85 | a.RetTp = types.NewFieldType(mysql.TypeNewDecimal) 86 | if a.Args[0].GetType().Decimal < 0 { 87 | a.RetTp.Decimal = mysql.MaxDecimalScale 88 | } else { 89 | a.RetTp.Decimal = mathutil.Min(a.Args[0].GetType().Decimal+types.DivFracIncr, mysql.MaxDecimalScale) 90 | } 91 | a.RetTp.Flen = mysql.MaxDecimalWidth 92 | case mysql.TypeDouble, mysql.TypeFloat: 93 | a.RetTp = types.NewFieldType(mysql.TypeDouble) 94 | a.RetTp.Flen, a.RetTp.Decimal = mysql.MaxRealWidth, a.Args[0].GetType().Decimal 95 | default: 96 | a.RetTp = types.NewFieldType(mysql.TypeDouble) 97 | a.RetTp.Flen, a.RetTp.Decimal = mysql.MaxRealWidth, types.UnspecifiedLength 98 | } 99 | types.SetBinChsClnFlag(a.RetTp) 100 | } 101 | 102 | func (a *baseFuncDesc) typeInfer4MaxMin() { 103 | a.RetTp = a.Args[0].GetType() 104 | if (a.Name == ast.AggFuncMax || a.Name == ast.AggFuncMin) && a.RetTp.Tp != mysql.TypeBit { 105 | a.RetTp = a.Args[0].GetType().Clone() 106 | a.RetTp.Flag &^= mysql.NotNullFlag 107 | } 108 | // TODO: fix other aggFuncs for TypeEnum & TypeSet 109 | if (a.RetTp.Tp == mysql.TypeEnum || a.RetTp.Tp == mysql.TypeSet) && a.Name != ast.AggFuncFirstRow { 110 | a.RetTp = &types.FieldType{Tp: mysql.TypeString, Flen: mysql.MaxFieldCharLength} 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /tablestore/server/hthrift/thrift_server.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
13 | 14 | package hthrift 15 | 16 | import ( 17 | "errors" 18 | "fmt" 19 | "sync" 20 | 21 | "github.com/apache/thrift/lib/go/thrift" 22 | "github.com/ngaut/pools" 23 | 24 | "github.com/pingcap/tidb/util/logutil" 25 | "github.com/zhihu/zetta/tablestore/config" 26 | "github.com/zhihu/zetta/tablestore/server/hthrift/hbase" 27 | "go.uber.org/zap" 28 | ) 29 | 30 | type TServer struct { 31 | addr string 32 | cfg *config.Config 33 | ss *thrift.TSimpleServer 34 | sessPool *sessionPool 35 | } 36 | 37 | func NewTServer(cfg *config.Config, factory pools.Factory) (*TServer, error) { 38 | var ( 39 | protocolFactory thrift.TProtocolFactory 40 | transportFactory thrift.TTransportFactory 41 | capacity = 100 42 | ) 43 | 44 | switch cfg.HBase.ThriftProtocol { 45 | case "compact": 46 | protocolFactory = thrift.NewTCompactProtocolFactory() 47 | case "simplejson": 48 | protocolFactory = thrift.NewTSimpleJSONProtocolFactory() 49 | case "json": 50 | protocolFactory = thrift.NewTJSONProtocolFactory() 51 | case "binary", "": 52 | protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() 53 | default: 54 | protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() 55 | } 56 | 57 | switch cfg.HBase.ThriftTransport { 58 | case "buffered", "": 59 | transportFactory = thrift.NewTBufferedTransportFactory(8192) 60 | case "framed": 61 | transportFactory = thrift.NewTTransportFactory() 62 | transportFactory = thrift.NewTFramedTransportFactory(transportFactory) 63 | default: 64 | transportFactory = thrift.NewTBufferedTransportFactory(8192) 65 | } 66 | 67 | listenAddr := fmt.Sprintf("%v:%d", cfg.HBase.ThriftHost, cfg.HBase.ThriftPort) 68 | transport, err := thrift.NewTServerSocket(listenAddr) 69 | if err != nil { 70 | logutil.BgLogger().Error("create thrift-server socket error", zap.Error(err)) 71 | return nil, err 72 | } 73 | 74 | tserver := &TServer{ 75 | addr: listenAddr, 76 | cfg: cfg, 77 | sessPool: newSessionPool(capacity, factory), 78 | } 79 | handler := NewHBaseHandler(tserver) 80 | processor := hbase.NewHbaseProcessor(handler) 81 | simpleServer := thrift.NewTSimpleServer4(processor, transport, transportFactory, protocolFactory) 82 | tserver.ss = simpleServer 83 | return tserver, nil 84 | } 85 | 86 | func (ts *TServer) Run() { 87 | go func() { 88 | logutil.BgLogger().Info("hbase thrift server start", zap.String("addr", ts.addr)) 89 | if err := ts.ss.Serve(); err != nil { 90 | logutil.BgLogger().Error("thrift server serve error", zap.Error(err)) 91 | return 92 | } 93 | }() 94 | } 95 | 96 | func (ts *TServer) Close() { 97 | ts.ss.Stop() 98 | ts.sessPool.Close() 99 | } 100 | 101 | type sessionPool struct { 102 | resources chan pools.Resource 103 | factory pools.Factory 104 | mu struct { 105 | sync.RWMutex 106 | closed bool 107 | } 108 | } 109 | 110 | func newSessionPool(cap int, factory pools.Factory) *sessionPool { 111 | return &sessionPool{ 112 | resources: make(chan pools.Resource, cap), 113 | factory: factory, 114 | } 115 | } 116 | 117 | func (p *sessionPool) Get() (resource pools.Resource, err error) { 118 | var ok bool 119 | select { 120 | case resource, ok = <-p.resources: 121 | if !ok { 122 | err = errors.New("session pool closed") 123 | } 124 | default: 125 | resource, err = p.factory() 126 | } 127 | return 128 | } 129 | 130 | func (p *sessionPool) Put(resource pools.Resource) { 131 | p.mu.RLock() 132 | defer p.mu.RUnlock() 133 | if p.mu.closed { 134 | resource.Close() 135 | return 136 | } 137 | 138 | select { 139 | case p.resources <- resource: 140 | default: 141 | resource.Close() 142 | } 143 | } 144 | 
func (p *sessionPool) Close() { 145 | p.mu.Lock() 146 | if p.mu.closed { 147 | p.mu.Unlock() 148 | return 149 | } 150 | p.mu.closed = true 151 | close(p.resources) 152 | p.mu.Unlock() 153 | 154 | for r := range p.resources { 155 | r.Close() 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /pkg/codec/bytes.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 Zhizhesihai (Beijing) Technology Limited. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 13 | 14 | package codec 15 | 16 | import ( 17 | "encoding/binary" 18 | 19 | "github.com/pingcap/errors" 20 | ) 21 | 22 | const ( 23 | encGroupSize = 9 24 | encPad = 0x0 25 | ) 26 | 27 | /* 28 | This is the new algorithm. Similarly to the legacy format the input 29 | is split up into N-1 bytes and a flag byte is used as the Nth byte 30 | in the output. 31 | - If the previous segment needed any padding the flag is set to the 32 | number of bytes used (0..N-2). 0 is possible in the first segment 33 | if the input is 0 bytes long. 34 | - If no padding was used and there is no more data left in the input 35 | the flag is set to N-1 36 | - If no padding was used and there is still data left in the input the 37 | flag is set to N. 38 | For N=9, the following input values encode to the specified 39 | outout (where 'X' indicates a byte of the original input): 40 | - 0 bytes is encoded as 0 0 0 0 0 0 0 0 0 41 | - 1 byte is encoded as X 0 0 0 0 0 0 0 1 42 | - 2 bytes is encoded as X X 0 0 0 0 0 0 2 43 | - 7 bytes is encoded as X X X X X X X 0 7 44 | - 8 bytes is encoded as X X X X X X X X 8 45 | - 9 bytes is encoded as X X X X X X X X 9 X 0 0 0 0 0 0 0 1 46 | - 10 bytes is encoded as X X X X X X X X 9 X X 0 0 0 0 0 0 2 47 | */ 48 | func EncodeBytes(b []byte, data []byte) []byte { 49 | start := 0 50 | copyLen := encGroupSize - 1 51 | left := len(data) 52 | 53 | for { 54 | if left < encGroupSize-1 { 55 | copyLen = left 56 | } 57 | b = append(b, data[start:start+copyLen]...) 58 | left = left - copyLen 59 | if left == 0 { 60 | padding := make([]byte, encGroupSize-1-copyLen) 61 | b = append(b, padding...) 62 | b = append(b, byte(copyLen)) 63 | break 64 | } 65 | b = append(b, byte(encGroupSize)) 66 | start = start + encGroupSize - 1 67 | } 68 | return b 69 | } 70 | 71 | func decodeBytes(b, buf []byte) ([]byte, []byte, error) { 72 | if buf == nil { 73 | buf = make([]byte, 0, len(b)) 74 | } 75 | if len(b) < encGroupSize { 76 | return b, buf, errors.New("No enough bytes to decode") 77 | } 78 | buf = buf[:0] 79 | for { 80 | used := b[encGroupSize-1] 81 | if used > encGroupSize { 82 | return b, buf, errors.Errorf("Invalid flag, groupBytes: %q", b[:encGroupSize]) 83 | } 84 | if used <= encGroupSize-1 { 85 | for _, pad := range b[used : encGroupSize-1] { 86 | if pad != encPad { 87 | return b, buf, errors.Errorf("Pad bytes not 0x0, groupBytes: %q", b[:encGroupSize]) 88 | } 89 | } 90 | buf = append(buf, b[:used]...) 91 | break 92 | } 93 | buf = append(buf, b[:encGroupSize-1]...) 
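// Reaching here means the flag byte equals encGroupSize: the 9-byte group is full and
// more data follows, so keep its 8 data bytes and step past the group.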
94 | b = b[encGroupSize:] 95 | } 96 | return b[encGroupSize:], buf, nil 97 | } 98 | 99 | func DecodeBytes(b, buf []byte) ([]byte, []byte, error) { 100 | return decodeBytes(b, buf) 101 | } 102 | 103 | // EncodeCompactBytes joins bytes with its length into a byte slice. It is more 104 | // efficient in both space and time compare to EncodeBytes. Note that the encoded 105 | // result is not memcomparable. 106 | func EncodeCompactBytes(b []byte, data []byte) []byte { 107 | b = reallocBytes(b, binary.MaxVarintLen64+len(data)) 108 | b = EncodeVarint(b, int64(len(data))) 109 | return append(b, data...) 110 | } 111 | 112 | // DecodeCompactBytes decodes bytes which is encoded by EncodeCompactBytes before. 113 | func DecodeCompactBytes(b []byte) ([]byte, []byte, error) { 114 | b, n, err := DecodeVarint(b) 115 | if err != nil { 116 | return nil, nil, errors.Trace(err) 117 | } 118 | if int64(len(b)) < n { 119 | return nil, nil, errors.Errorf("insufficient bytes to decode value, expected length: %v", n) 120 | } 121 | return b[n:], b[:n], nil 122 | } 123 | 124 | // reallocBytes is like realloc. 125 | func reallocBytes(b []byte, n int) []byte { 126 | newSize := len(b) + n 127 | if cap(b) < newSize { 128 | bs := make([]byte, len(b), newSize) 129 | copy(bs, b) 130 | return bs 131 | } 132 | 133 | // slice b has capability to store n bytes 134 | return b 135 | } 136 | -------------------------------------------------------------------------------- /tablestore/mysql/expression/builtin_like_vec.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 PingCAP, Inc. 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // See the License for the specific language governing permissions and 12 | // limitations under the License. 
--------------------------------------------------------------------------------
/tablestore/mysql/expression/builtin_like_vec.go:
--------------------------------------------------------------------------------
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.

package expression

import (
	"regexp"

	"github.com/pingcap/errors"
	"github.com/pingcap/tidb/types"
	"github.com/pingcap/tidb/util/chunk"
)

func (b *builtinLikeSig) vectorized() bool {
	return true
}

func (b *builtinLikeSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
	n := input.NumRows()
	bufVal, err := b.bufAllocator.get(types.ETString, n)
	if err != nil {
		return err
	}
	defer b.bufAllocator.put(bufVal)
	if err = b.args[0].VecEvalString(b.ctx, input, bufVal); err != nil {
		return err
	}
	bufPattern, err := b.bufAllocator.get(types.ETString, n)
	if err != nil {
		return err
	}
	defer b.bufAllocator.put(bufPattern)
	if err = b.args[1].VecEvalString(b.ctx, input, bufPattern); err != nil {
		return err
	}

	bufEscape, err := b.bufAllocator.get(types.ETInt, n)
	if err != nil {
		return err
	}
	defer b.bufAllocator.put(bufEscape)
	if err = b.args[2].VecEvalInt(b.ctx, input, bufEscape); err != nil {
		return err
	}
	escapes := bufEscape.Int64s()

	// Must not use b.pattern here, to avoid a data race between concurrent evaluations.
	pattern := b.collator().Pattern()

	result.ResizeInt64(n, false)
	result.MergeNulls(bufVal, bufPattern, bufEscape)
	i64s := result.Int64s()
	for i := 0; i < n; i++ {
		if result.IsNull(i) {
			continue
		}
		pattern.Compile(bufPattern.GetString(i), byte(escapes[i]))
		match := pattern.DoMatch(bufVal.GetString(i))
		i64s[i] = boolToInt64(match)
	}

	return nil
}

func (b *builtinRegexpSig) vectorized() bool {
	return true
}

func (b *builtinRegexpUTF8Sig) vectorized() bool {
	return true
}

func (b *builtinRegexpSharedSig) isMemorizedRegexpInitialized() bool {
	return !(b.memorizedRegexp == nil && b.memorizedErr == nil)
}

func (b *builtinRegexpSharedSig) initMemoizedRegexp(patterns *chunk.Column, n int) {
	// Precondition: patterns is generated from a constant expression.
	if n == 0 {
		// With zero input rows there is nothing to match, so no regexp
		// compilation error should be reported.
		return
	}
	for i := 0; i < n; i++ {
		if patterns.IsNull(i) {
			continue
		}
		re, err := b.compile(patterns.GetString(i))
		b.memorizedRegexp = re
		b.memorizedErr = err
		break
	}
	if !b.isMemorizedRegexpInitialized() {
		b.memorizedErr = errors.New("no valid regexp pattern found")
	}
	if b.memorizedErr != nil {
		b.memorizedRegexp = nil
	}
}
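
// The function below is an illustrative sketch, not part of the upstream file:
// it shows why the constant-pattern path above memoizes the compiled regexp.
// Compiling once and reusing the same *regexp.Regexp (or the same compilation
// error) across all rows avoids recompiling per row, which is what the
// non-constant path has to do. The name exampleReuseCompiledRegexp and the
// pattern "^ab.*z$" are made up for illustration.
func exampleReuseCompiledRegexp(rows []string) (int, error) {
	re, err := regexp.Compile("^ab.*z$") // compiled once, like the memoized path
	if err != nil {
		return 0, err
	}
	matched := 0
	for _, row := range rows {
		if re.MatchString(row) {
			matched++
		}
	}
	return matched, nil
}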

func (b *builtinRegexpSharedSig) vecEvalInt(input *chunk.Chunk, result *chunk.Column) error {
	n := input.NumRows()
	bufExpr, err := b.bufAllocator.get(types.ETString, n)
	if err != nil {
		return err
	}
	defer b.bufAllocator.put(bufExpr)
	if err := b.args[0].VecEvalString(b.ctx, input, bufExpr); err != nil {
		return err
	}

	bufPat, err := b.bufAllocator.get(types.ETString, n)
	if err != nil {
		return err
	}
	defer b.bufAllocator.put(bufPat)
	if err := b.args[1].VecEvalString(b.ctx, input, bufPat); err != nil {
		return err
	}

	// A constant pattern is compiled once and reused for every row.
	if b.args[1].ConstItem(b.ctx.GetSessionVars().StmtCtx) && !b.isMemorizedRegexpInitialized() {
		b.initMemoizedRegexp(bufPat, n)
	}
	getRegexp := func(pat string) (*regexp.Regexp, error) {
		if b.isMemorizedRegexpInitialized() {
			return b.memorizedRegexp, b.memorizedErr
		}
		return b.compile(pat)
	}

	result.ResizeInt64(n, false)
	result.MergeNulls(bufExpr, bufPat)
	i64s := result.Int64s()
	for i := 0; i < n; i++ {
		if result.IsNull(i) {
			continue
		}
		re, err := getRegexp(bufPat.GetString(i))
		if err != nil {
			return err
		}
		i64s[i] = boolToInt64(re.MatchString(bufExpr.GetString(i)))
	}
	return nil
}

--------------------------------------------------------------------------------