├── tpch
├── output
│ ├── q19.out
│ ├── q17.out
│ ├── q6.out
│ ├── q14.out
│ ├── q8.out
│ ├── q12.out
│ ├── q15.out
│ ├── q22.out
│ ├── q4.out
│ ├── q5.out
│ ├── q7.out
│ ├── q3.out
│ ├── q13.out
│ ├── q1.out
│ ├── q18.out
│ ├── answer_gen.py
│ ├── q21.out
│ ├── q10.out
│ ├── q9.out
│ └── q20.out
├── dbgen
│ ├── README.md
│ ├── copyright
│ ├── types.go
│ ├── part_psupp.go
│ ├── order_line.go
│ ├── region.go
│ ├── nation.go
│ ├── misc_test.go
│ ├── part_supp.go
│ ├── dist
│ │ └── loaddist_gen.py
│ ├── cust.go
│ ├── lineitem.go
│ ├── dist.go
│ ├── part.go
│ ├── driver.go
│ ├── supp.go
│ ├── order.go
│ ├── misc.go
│ └── rand.go
├── check.go
├── ddl.go
└── loader.go
├── .gitignore
├── pkg
├── sink
│ ├── sink.go
│ ├── type_util.go
│ ├── csv_test.go
│ ├── sql_test.go
│ ├── csv.go
│ ├── sql.go
│ └── concurrent.go
├── util
│ ├── file.go
│ ├── hack.go
│ ├── alloc_test.go
│ ├── explain.go
│ ├── alloc.go
│ ├── version.go
│ ├── output.go
│ └── version_test.go
├── measurement
│ ├── hist_test.go
│ ├── hist.go
│ └── measure.go
├── workload
│ ├── workload.go
│ └── base.go
└── plan-replayer
│ └── replayer.go
├── .github
└── workflows
│ ├── workflow.yaml
│ └── release.yaml
├── .goreleaser.yml
├── cmd
└── go-tpc
│ ├── versioninfo.go
│ ├── rawsql.go
│ ├── tpcc.go
│ ├── misc.go
│ ├── tpch.go
│ └── ch_benchmark.go
├── Dockerfile
├── Makefile
├── tpcc
├── stock_level.go
├── metrics.go
├── prepare.go
├── ddl_test.go
├── order_status.go
├── rand.go
├── delivery.go
└── payment.go
├── ch
└── ddl.go
├── install.sh
├── go.mod
├── rawsql
└── workload.go
├── README.md
└── docs
└── tidb-lightning.toml
/tpch/output/q19.out:
--------------------------------------------------------------------------------
1 | revenue
2 | 4289833.6171
3 |
--------------------------------------------------------------------------------
/tpch/output/q17.out:
--------------------------------------------------------------------------------
1 | avg_yearly
2 | 382688.837143
3 |
--------------------------------------------------------------------------------
/tpch/output/q6.out:
--------------------------------------------------------------------------------
1 | revenue
2 | 123141078.2283
3 |
--------------------------------------------------------------------------------
/tpch/output/q14.out:
--------------------------------------------------------------------------------
1 | promo_revenue
2 | 16.5793993672
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | bin
2 | .idea/
3 | dist/
4 | .vscode/
5 | .DS_Store
6 | vendor/
7 |
--------------------------------------------------------------------------------
/tpch/output/q8.out:
--------------------------------------------------------------------------------
1 | o_year|mkt_share
2 | 1995|0.04039139
3 | 1996|0.04570844
4 |
--------------------------------------------------------------------------------
/tpch/output/q12.out:
--------------------------------------------------------------------------------
1 | l_shipmode|high_line_count|low_line_count
2 | FOB|6335|9242
3 | RAIL|6365|9340
4 |
--------------------------------------------------------------------------------
/tpch/output/q15.out:
--------------------------------------------------------------------------------
1 | s_suppkey|s_name|s_address|s_phone|total_revenue
2 | 5902|Supplier#000005902|rb4HvSpgYH|14-930-257-5773|1932307.3183
3 |
--------------------------------------------------------------------------------
/tpch/output/q22.out:
--------------------------------------------------------------------------------
1 | cntrycode|numcust|totacctbal
2 | 20|916|6824676.02
3 | 21|955|7235832.66
4 | 22|893|6631741.43
5 | 30|910|6813438.36
6 |
--------------------------------------------------------------------------------
/tpch/output/q4.out:
--------------------------------------------------------------------------------
1 | o_orderpriority|order_count
2 | 1-URGENT|10332
3 | 2-HIGH|10407
4 | 3-MEDIUM|10312
5 | 4-NOT SPECIFIED|10409
6 | 5-LOW|10379
7 |
--------------------------------------------------------------------------------
/tpch/output/q5.out:
--------------------------------------------------------------------------------
1 | n_name|revenue
2 | IRAQ|58232553.2776
3 | SAUDI ARABIA|53335013.3675
4 | EGYPT|53293463.6888
5 | IRAN|50487778.1438
6 | JORDAN|48801457.3985
7 |
--------------------------------------------------------------------------------
/tpch/dbgen/README.md:
--------------------------------------------------------------------------------
1 | dbgen
2 | ========
3 |
4 | This dbgen was ported from TPC-H dbgen v2.4.0.
5 |
6 | Official TPC-H benchmark - [http://www.tpc.org/tpch](http://www.tpc.org/tpch)
--------------------------------------------------------------------------------
/tpch/dbgen/copyright:
--------------------------------------------------------------------------------
1 | TPC, TPC Benchmark and TPC-C are trademarks of the Transaction Processing Performance Council.
2 | All other materials are ©2015-2016 TPC. All rights reserved.
3 |
--------------------------------------------------------------------------------
/tpch/output/q7.out:
--------------------------------------------------------------------------------
1 | supp_nation|cust_nation|l_year|revenue
2 | INDIA|JAPAN|1995|54055321.9218
3 | INDIA|JAPAN|1996|58259567.0850
4 | JAPAN|INDIA|1995|50588811.3080
5 | JAPAN|INDIA|1996|51211533.1035
6 |
--------------------------------------------------------------------------------
/pkg/sink/sink.go:
--------------------------------------------------------------------------------
1 | package sink
2 |
3 | import (
4 | "context"
5 | )
6 |
// Sink is the destination for generated rows. WriteRow appends one row
// (implementations may buffer it), Flush drains any buffered rows to the
// underlying destination, and Close finalizes the sink.
type Sink interface {
	WriteRow(ctx context.Context, values ...interface{}) error
	Flush(ctx context.Context) error
	Close(ctx context.Context) error
}
12 |
--------------------------------------------------------------------------------
/tpch/dbgen/types.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
// Table identifiers for the TPC-H relations produced by dbgen.
// TNone is -1 so that TPart starts at 0 and the values can be used as
// indexes into tDefs. TOrderLine and TPartPsupp are combined pseudo-tables
// whose loaders fan out to the underlying pair (see order_line.go and
// part_psupp.go).
const (
	TNone Table = iota - 1
	TPart
	TPsupp
	TSupp
	TCust
	TOrder
	TLine
	TOrderLine
	TPartPsupp
	TNation
	TRegion
)
16 |
// Loader persists generated rows. Load accepts one generated item (the
// concrete type depends on the table) and Flush forces any buffered
// output to its destination.
type Loader interface {
	Load(item interface{}) error
	Flush() error
}
21 |
--------------------------------------------------------------------------------
/pkg/sink/type_util.go:
--------------------------------------------------------------------------------
1 | package sink
2 |
3 | import (
4 | "reflect"
5 | )
6 |
// Pre-computed reflect.Types for the scalar base kinds handled by the
// sinks; presumably used to normalize named types (e.g. dbgen's dssHuge)
// to their underlying kind when formatting values — see csv.go/sql.go.
var (
	typeUInt64  = reflect.TypeOf((*uint64)(nil)).Elem()
	typeInt64   = reflect.TypeOf((*int64)(nil)).Elem()
	typeFloat64 = reflect.TypeOf((*float64)(nil)).Elem()
	typeString  = reflect.TypeOf((*string)(nil)).Elem()
)
13 |
--------------------------------------------------------------------------------
/pkg/util/file.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | )
7 |
// CreateFile creates (or truncates) the file at path for writing with
// permissions 0644. On failure it reports the error and terminates the
// process, so callers always receive a usable *os.File.
func CreateFile(path string) *os.File {
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		// Diagnostics belong on stderr, not stdout, and need a trailing
		// newline so the message is not glued to subsequent output.
		fmt.Fprintf(os.Stderr, "failed to create file %s, error %v\n", path, err)
		os.Exit(1)
	}
	return f
}
16 |
--------------------------------------------------------------------------------
/tpch/output/q3.out:
--------------------------------------------------------------------------------
1 | l_orderkey|revenue|o_orderdate|o_shippriority
2 | 4791171|452497.4729|1995-02-23|0
3 | 4163074|437267.7799|1995-02-13|0
4 | 4676933|412072.0035|1995-02-07|0
5 | 3778628|399682.3516|1995-02-25|0
6 | 2773540|398691.5039|1995-02-27|0
7 | 2692902|394554.8742|1995-03-06|0
8 | 181414|393083.4426|1995-03-08|0
9 | 4178471|390099.4832|1995-02-28|0
10 | 4739141|385442.5444|1995-03-07|0
11 | 1310209|381336.0532|1995-02-23|0
12 |
--------------------------------------------------------------------------------
/pkg/measurement/hist_test.go:
--------------------------------------------------------------------------------
1 | package measurement
2 |
3 | import (
4 | "math/rand"
5 | "testing"
6 | "time"
7 | )
8 |
9 | func TestHist(t *testing.T) {
10 | h := NewHistogram(1*time.Millisecond, 20*time.Minute, 1)
11 | for i := 0; i < 10000; i++ {
12 | n := rand.Intn(15020)
13 | h.Measure(time.Millisecond * time.Duration(n))
14 | }
15 | h.Measure(time.Minute * 9)
16 | h.Measure(time.Minute * 8)
17 | t.Logf("%+v", h.Summary())
18 | }
19 |
--------------------------------------------------------------------------------
/.github/workflows/workflow.yaml:
--------------------------------------------------------------------------------
1 | name: "workflow"
2 | on: [push, pull_request]
3 |
4 | jobs:
5 | build:
6 | runs-on: ubuntu-latest
7 | steps:
8 | - uses: actions/setup-go@v3
9 | with:
10 | go-version: 1.25.x
11 | - uses: actions/checkout@v2
12 |
13 | - name: "format"
14 | run: make format
15 |
16 | - name: "test"
17 | run: make test
18 |
19 | - name: "build"
20 | run: make build
21 |
--------------------------------------------------------------------------------
/tpch/output/q13.out:
--------------------------------------------------------------------------------
1 | c_count|custdist
2 | 0|50004
3 | 10|6635
4 | 9|6515
5 | 11|6094
6 | 8|5952
7 | 12|5525
8 | 13|5039
9 | 19|4723
10 | 7|4644
11 | 18|4584
12 | 17|4514
13 | 14|4496
14 | 20|4489
15 | 15|4458
16 | 16|4356
17 | 21|4240
18 | 22|3724
19 | 6|3314
20 | 23|3132
21 | 24|2694
22 | 25|2116
23 | 5|1951
24 | 26|1646
25 | 27|1190
26 | 4|1006
27 | 28|843
28 | 29|607
29 | 3|409
30 | 30|379
31 | 31|230
32 | 32|148
33 | 2|132
34 | 33|68
35 | 34|53
36 | 35|31
37 | 1|22
38 | 36|17
39 | 37|8
40 | 38|5
41 | 40|3
42 | 39|3
43 | 41|1
44 |
--------------------------------------------------------------------------------
/.goreleaser.yml:
--------------------------------------------------------------------------------
1 | ---
2 | env:
3 | - GO111MODULE=on
4 | - CGO_ENABLED=0
5 | - GOEXPERIMENT=jsonv2
6 | before:
7 | hooks:
8 | - go mod tidy
9 | builds:
10 | - binary: go-tpc
11 | id: go-tpc
12 | main: ./cmd/go-tpc/
13 | goarch:
14 | - amd64
15 | - arm64
16 | goos:
17 | - linux
18 | - darwin
19 | mod_timestamp: '{{ .CommitTimestamp }}'
20 |
21 | archives:
22 | - files:
23 | - none*
24 | wrap_in_directory: false
25 |
26 | checksum:
27 | name_template: 'checksums.txt'
28 |
--------------------------------------------------------------------------------
/tpch/output/q1.out:
--------------------------------------------------------------------------------
1 | l_returnflag|l_linestatus|sum_qty|sum_base_price|sum_disc_price|sum_charge|avg_qty|avg_price|avg_disc|count_order
2 | A|F|37734107.00|56586554400.73|53758257134.8700|55909065222.827692|25.522006|38273.129735|0.049985|1478493
3 | N|F|991417.00|1487504710.38|1413082168.0541|1469649223.194375|25.516472|38284.467761|0.050093|38854
4 | N|O|73533166.00|110287596362.18|104774847005.9449|108969626230.358561|25.502145|38249.003129|0.049996|2883411
5 | R|F|37719753.00|56568041380.90|53741292684.6040|55889619119.831932|25.505794|38250.854626|0.050009|1478870
6 |
--------------------------------------------------------------------------------
/cmd/go-tpc/versioninfo.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/spf13/cobra"
7 | )
8 |
// Build metadata shown by the `version` subcommand. The defaults apply
// to ad-hoc builds; release builds override them at link time via
// -ldflags (see the Makefile: main.version, main.commit, main.date).
var (
	version = "dev"
	commit  = "none"
	date    = "unknown"
)
14 |
15 | func printVersion() {
16 | fmt.Println("Git Commit Hash:", commit)
17 | fmt.Println("UTC Build Time:", date)
18 | fmt.Println("Release version:", version)
19 | }
20 |
21 | func registerVersionInfo(root *cobra.Command) {
22 | cmd := &cobra.Command{
23 | Use: "version",
24 | Run: func(cmd *cobra.Command, args []string) {
25 | printVersion()
26 | },
27 | }
28 | root.AddCommand(cmd)
29 | }
30 |
--------------------------------------------------------------------------------
/pkg/sink/csv_test.go:
--------------------------------------------------------------------------------
1 | package sink
2 |
3 | import (
4 | "database/sql"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/require"
8 | )
9 |
10 | func TestBuildColumns(t *testing.T) {
11 | v := buildColumns([]interface{}{nil, "a", 123, 456.123})
12 | require.Equal(t, []string{"NULL", "a", "123", "456.123000"}, v)
13 |
14 | v = buildColumns([]interface{}{sql.NullInt64{}})
15 | require.Equal(t, []string{"NULL"}, v)
16 |
17 | v = buildColumns([]interface{}{sql.NullInt64{Valid: true}})
18 | require.Equal(t, []string{"0"}, v)
19 |
20 | type dssHuge int
21 |
22 | v = buildColumns([]interface{}{dssHuge(5)})
23 | require.Equal(t, []string{"5"}, v)
24 | }
25 |
--------------------------------------------------------------------------------
/tpch/dbgen/part_psupp.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
3 | type partPsuppLoader struct{}
4 |
5 | func (p partPsuppLoader) Load(item interface{}) error {
6 | if err := tDefs[TPart].loader.Load(item); err != nil {
7 | return err
8 | }
9 | if err := tDefs[TPsupp].loader.Load(item); err != nil {
10 | return err
11 | }
12 | return nil
13 | }
14 |
15 | func (p partPsuppLoader) Flush() error {
16 | if err := tDefs[TPart].loader.Flush(); err != nil {
17 | return err
18 | }
19 | if err := tDefs[TPsupp].loader.Flush(); err != nil {
20 | return err
21 | }
22 | return nil
23 | }
24 |
25 | func newPartPsuppLoader() partPsuppLoader {
26 | return partPsuppLoader{}
27 | }
28 |
--------------------------------------------------------------------------------
/tpch/output/q18.out:
--------------------------------------------------------------------------------
1 | c_name|c_custkey|o_orderkey|o_orderdate|o_totalprice|sum(l_quantity)
2 | Customer#000128120|128120|4722021|1994-04-07|544089.09|323.00
3 | Customer#000144617|144617|3043270|1997-02-12|530604.44|317.00
4 | Customer#000066790|66790|2199712|1996-09-30|515531.82|327.00
5 | Customer#000015619|15619|3767271|1996-08-07|480083.96|318.00
6 | Customer#000147197|147197|1263015|1997-02-02|467149.67|320.00
7 | Customer#000117919|117919|2869152|1996-06-20|456815.92|317.00
8 | Customer#000126865|126865|4702759|1994-11-07|447606.65|320.00
9 | Customer#000036619|36619|4806726|1995-01-17|446704.09|328.00
10 | Customer#000119989|119989|1544643|1997-09-20|434568.25|320.00
11 |
--------------------------------------------------------------------------------
/pkg/sink/sql_test.go:
--------------------------------------------------------------------------------
1 | package sink
2 |
3 | import (
4 | "database/sql"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/require"
8 | )
9 |
10 | func TestBuildSQLRow(t *testing.T) {
11 | v := buildSQLRow([]interface{}{nil, "a", 123, 456.123})
12 | require.Equal(t, `(NULL,'a',123,456.123000)`, v)
13 |
14 | v = buildSQLRow([]interface{}{sql.NullInt64{}})
15 | require.Equal(t, `(NULL)`, v)
16 |
17 | v = buildSQLRow([]interface{}{sql.NullInt64{Valid: true}})
18 | require.Equal(t, `(0)`, v)
19 |
20 | type dssHuge int
21 |
22 | v = buildSQLRow([]interface{}{dssHuge(5)})
23 | require.Equal(t, `(5)`, v)
24 |
25 | v = buildSQLRow([]interface{}{})
26 | require.Equal(t, `()`, v)
27 | }
28 |
--------------------------------------------------------------------------------
/tpch/dbgen/order_line.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
3 | type OrderLine struct {
4 | order Order
5 | }
6 |
7 | type orderLineLoader struct {
8 | }
9 |
10 | func (o orderLineLoader) Load(item interface{}) error {
11 | if err := tDefs[TOrder].loader.Load(item); err != nil {
12 | return err
13 | }
14 | if err := tDefs[TLine].loader.Load(item); err != nil {
15 | return err
16 | }
17 | return nil
18 | }
19 |
20 | func (o orderLineLoader) Flush() error {
21 | if err := tDefs[TOrder].loader.Flush(); err != nil {
22 | return nil
23 | }
24 | if err := tDefs[TLine].loader.Flush(); err != nil {
25 | return err
26 | }
27 | return nil
28 | }
29 |
30 | func newOrderLineLoader() orderLineLoader {
31 | return orderLineLoader{}
32 | }
33 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
# Build the go-tpc binary
# NOTE: the Makefile builds with GOEXPERIMENT=jsonv2, which is only
# recognized by Go 1.25+, so the builder image must track that version
# (golang:1.21 fails with "unknown GOEXPERIMENT jsonv2").
FROM golang:1.25 AS builder

WORKDIR /workspace
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download

# Copy the source
COPY . .

# Build
ARG TARGETOS TARGETARCH
RUN GOOS=$TARGETOS GOARCH=$TARGETARCH make build

FROM alpine

RUN apk add --no-cache \
    dumb-init \
    tzdata \
    # help to setup or teardown database schemas
    mariadb-client

COPY --from=builder /workspace/bin/go-tpc /go-tpc

ENTRYPOINT [ "/usr/bin/dumb-init" ]
CMD ["/go-tpc"]
30 |
--------------------------------------------------------------------------------
/pkg/workload/workload.go:
--------------------------------------------------------------------------------
1 | package workload
2 |
3 | import (
4 | "context"
5 | )
6 |
// Workloader is the interface for running customized workload
type Workloader interface {
	// Name identifies the workload (e.g. "tpcc", "tpch").
	Name() string
	// InitThread and CleanupThread bracket a worker thread's lifetime;
	// the returned context carries the thread's state.
	InitThread(ctx context.Context, threadID int) context.Context
	CleanupThread(ctx context.Context, threadID int)
	// Prepare sets the workload up; CheckPrepare validates the result.
	Prepare(ctx context.Context, threadID int) error
	CheckPrepare(ctx context.Context, threadID int) error
	// Run executes the workload for the given thread.
	Run(ctx context.Context, threadID int) error
	// Cleanup tears the workload down; Check validates consistency.
	Cleanup(ctx context.Context, threadID int) error
	Check(ctx context.Context, threadID int) error
	// OutputStats reports measurements; ifSummaryReport selects the
	// final summary form instead of a periodic report.
	OutputStats(ifSummaryReport bool)
	// DBName returns the database the workload operates on.
	DBName() string

	// Plan-replayer hooks: when enabled, optimizer plans are dumped
	// around execution (see pkg/plan-replayer).
	IsPlanReplayerDumpEnabled() bool
	PreparePlanReplayerDump() error
	FinishPlanReplayerDump() error
	// Exec executes a raw SQL statement against the workload's database.
	Exec(sql string) error
}
25 |
--------------------------------------------------------------------------------
/pkg/util/hack.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import (
4 | "reflect"
5 | "unsafe"
6 | )
7 |
// String converts slice to string without copy.
// The result aliases b's storage, so b must not be mutated while the
// string is in use. Use at your own risk.
func String(b []byte) (s string) {
	if len(b) == 0 {
		return ""
	}
	// unsafe.String (Go 1.20+) replaces the deprecated
	// reflect.SliceHeader/StringHeader manipulation and is vet-clean.
	return unsafe.String(unsafe.SliceData(b), len(b))
}
20 |
// Slice converts string to slice without copy.
// Use at your own risk: the returned bytes alias the string's storage,
// and writing to them violates Go's string-immutability guarantee.
// NOTE(review): reflect.SliceHeader/StringHeader are deprecated since
// Go 1.21; consider unsafe.Slice(unsafe.StringData(s), len(s)) instead.
func Slice(s string) (b []byte) {
	pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
	pbytes.Data = pstring.Data
	pbytes.Len = pstring.Len
	// Cap equals Len: appending to the result reallocates rather than
	// scribbling past the string's storage.
	pbytes.Cap = pstring.Len
	return
}
31 |
--------------------------------------------------------------------------------
/pkg/util/alloc_test.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import "testing"
4 |
5 | func TestBufAlloc(t *testing.T) {
6 | b := NewBufAllocator()
7 | orgBuf := b.buf
8 |
9 | buf1 := b.Alloc(100)
10 | buf1[99] = 'a'
11 |
12 | if orgBuf[99] != 'a' {
13 | t.Fatalf("expect a, but got %c", orgBuf[99])
14 | }
15 |
16 | b.Reset()
17 |
18 | buf1 = b.Alloc(100)
19 | if buf1[99] != 'a' {
20 | t.Fatalf("expect a, but got %c", buf1[99])
21 | }
22 |
23 | buf2 := b.Alloc(100)
24 | buf2[99] = 'b'
25 |
26 | orgBuf[299] = 'd'
27 | buf3 := b.Alloc(1025)
28 | buf3[99] = 'c'
29 |
30 | if orgBuf[99] != 'a' {
31 | t.Fatalf("expect a, but got %c", orgBuf[99])
32 | }
33 |
34 | if orgBuf[199] != 'b' {
35 | t.Fatalf("expect b, but got %c", orgBuf[199])
36 | }
37 |
38 | if orgBuf[299] != 'd' {
39 | t.Fatalf("expect d, but got %d", orgBuf[299])
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/pkg/util/explain.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import (
4 | "database/sql"
5 | "github.com/jedib0t/go-pretty/table"
6 | )
7 |
8 | func RenderExplainAnalyze(rows *sql.Rows) (text string, err error) {
9 | table := table.NewWriter()
10 |
11 | cols, err := rows.Columns()
12 | if err != nil {
13 | return "", err
14 | }
15 | columns := make([]interface{}, len(cols))
16 | for idx, column := range cols {
17 | columns[idx] = column
18 | }
19 | table.AppendHeader(columns)
20 |
21 | for rows.Next() {
22 | rawResult := make([][]byte, len(cols))
23 | row := make([]interface{}, len(cols))
24 | dest := make([]interface{}, len(cols))
25 |
26 | for i := range rawResult {
27 | dest[i] = &rawResult[i]
28 | }
29 |
30 | if err := rows.Scan(dest...); err != nil {
31 | return "", err
32 | }
33 |
34 | for i, raw := range rawResult {
35 | row[i] = string(raw)
36 | }
37 | table.AppendRow(row)
38 | }
39 | return table.Render(), nil
40 | }
41 |
--------------------------------------------------------------------------------
/pkg/workload/base.go:
--------------------------------------------------------------------------------
1 | package workload
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | "math/rand"
7 | "time"
8 |
9 | "github.com/pingcap/go-tpc/pkg/util"
10 | )
11 |
// TpcState saves state for each thread
type TpcState struct {
	// DB is the shared connection pool; Conn is this thread's dedicated
	// connection (nil when no database is configured — see NewTpcState).
	DB   *sql.DB
	Conn *sql.Conn

	// R is this thread's private RNG, seeded from the wall clock.
	R *rand.Rand

	// Buf is a reusable chunk allocator (see util.BufAllocator).
	Buf *util.BufAllocator
}
21 |
22 | func (t *TpcState) RefreshConn(ctx context.Context) error {
23 | if t.Conn != nil {
24 | t.Conn.Close()
25 | }
26 | conn, err := t.DB.Conn(ctx)
27 | if err != nil {
28 | return err
29 | }
30 | t.Conn = conn
31 | return nil
32 | }
33 |
34 | // NewTpcState creates a base TpcState
35 | func NewTpcState(ctx context.Context, db *sql.DB) *TpcState {
36 | var conn *sql.Conn
37 | var err error
38 | if db != nil {
39 | conn, err = db.Conn(ctx)
40 | if err != nil {
41 | panic(err.Error())
42 | }
43 | }
44 |
45 | r := rand.New(rand.NewSource(time.Now().UnixNano()))
46 |
47 | s := &TpcState{
48 | DB: db,
49 | Conn: conn,
50 | R: r,
51 | Buf: util.NewBufAllocator(),
52 | }
53 | return s
54 | }
55 |
--------------------------------------------------------------------------------
/pkg/util/alloc.go:
--------------------------------------------------------------------------------
1 | package util
2 |
// BufAllocator helps you manage multi chunks in one []byte.
// Chunks are carved sequentially out of a single backing buffer; Reset
// rewinds the cursor so the storage can be reused.
type BufAllocator struct {
	buf    []byte
	offset int
}

const defaultBufSize = 1024

// NewBufAllocator creates a NewBufAllocator
func NewBufAllocator() *BufAllocator {
	return &BufAllocator{buf: make([]byte, defaultBufSize)}
}

// grow replaces the backing buffer with a fresh one big enough for n
// bytes (at least double the remaining space, never below the default),
// discarding the allocator's view of previously handed-out chunks.
func (b *BufAllocator) grow(n int) {
	size := 2 * (len(b.buf) - b.offset)
	if size < n {
		size = n
	}
	if size < defaultBufSize {
		size = defaultBufSize
	}
	b.buf = make([]byte, size)
	b.offset = 0
}

// Alloc allocates a new chunk with the specified size n.
func (b *BufAllocator) Alloc(n int) []byte {
	if b.offset+n > len(b.buf) {
		b.grow(n)
	}
	chunk := b.buf[b.offset : b.offset+n]
	b.offset += n
	return chunk
}

// Reset resets the buffer to later reuse
func (b *BufAllocator) Reset() {
	b.offset = 0
}
50 |
--------------------------------------------------------------------------------
/tpch/dbgen/region.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
3 | import (
4 | "context"
5 | "io"
6 |
7 | "github.com/pingcap/go-tpc/pkg/sink"
8 | )
9 |
// Region comment-text generation parameters: rCmntSd is the RNG
// seed/stream id and rCmntLen the text length handed to makeText
// (see makeRegion).
const (
	rCmntSd  = 42
	rCmntLen = 72
)
14 |
// Region is one row of the TPC-H region table.
type Region struct {
	Code    dssHuge // region key (0-based; idx-1 in makeRegion)
	Text    string  // region name from the static distribution
	Join    long    // always set to 0 by makeRegion; not written by the loader
	Comment string  // generated filler text
}
21 |
22 | type regionLoader struct {
23 | *sink.CSVSink
24 | }
25 |
26 | func (r regionLoader) Load(item interface{}) error {
27 | region := item.(*Region)
28 | if err := r.WriteRow(context.TODO(),
29 | region.Code,
30 | region.Text,
31 | region.Comment); err != nil {
32 | return err
33 | }
34 | return nil
35 | }
36 |
37 | func (r regionLoader) Flush() error {
38 | return r.CSVSink.Flush(context.TODO())
39 | }
40 |
41 | func NewRegionLoader(w io.Writer) regionLoader {
42 | return regionLoader{sink.NewCSVSinkWithDelimiter(w, '|')}
43 | }
44 |
45 | func makeRegion(idx dssHuge) *Region {
46 | region := &Region{}
47 |
48 | region.Code = idx - 1
49 | region.Text = regions.members[idx-1].text
50 | region.Join = 0
51 | region.Comment = makeText(rCmntLen, rCmntSd)
52 | return region
53 | }
54 |
--------------------------------------------------------------------------------
/tpch/dbgen/nation.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
3 | import (
4 | "context"
5 | "io"
6 |
7 | "github.com/pingcap/go-tpc/pkg/sink"
8 | )
9 |
// Nation comment-text generation parameters: nCmntSd is the RNG
// seed/stream id and nCmntLen the text length handed to makeText (see
// makeNation). nationsMax caps the nation count; its use is outside
// this file — TODO confirm against driver.go.
const (
	nCmntSd    = 41
	nCmntLen   = 72
	nationsMax = 90
)
15 |
// Nation is one row of the TPC-H nation table.
type Nation struct {
	Code    dssHuge // nation key (0-based; idx-1 in makeNation)
	Text    string  // nation name from the static distribution
	Join    long    // the distribution member's weight (region link)
	Comment string  // generated filler text
}
22 |
23 | func makeNation(idx dssHuge) *Nation {
24 | nation := &Nation{}
25 | nation.Code = idx - 1
26 | nation.Text = nations.members[idx-1].text
27 | nation.Join = nations.members[idx-1].weight
28 | nation.Comment = makeText(nCmntLen, nCmntSd)
29 |
30 | return nation
31 | }
32 |
33 | type nationLoader struct {
34 | *sink.CSVSink
35 | }
36 |
37 | func (n nationLoader) Load(item interface{}) error {
38 | nation := item.(*Nation)
39 | if err := n.WriteRow(context.TODO(),
40 | nation.Code,
41 | nation.Text,
42 | nation.Join,
43 | nation.Comment); err != nil {
44 | return err
45 | }
46 | return nil
47 | }
48 |
49 | func (n nationLoader) Flush() error {
50 | return n.CSVSink.Flush(context.TODO())
51 | }
52 |
53 | func NewNationLoader(w io.Writer) nationLoader {
54 | return nationLoader{sink.NewCSVSinkWithDelimiter(w, '|')}
55 | }
56 |
--------------------------------------------------------------------------------
/tpch/dbgen/misc_test.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func TestTextPool(t *testing.T) {
8 | expect := "furiously special foxes haggle furiously blithely ironic deposits. slyly final theodolites boost slyly even asymptotes. carefully final foxes wake furiously around the furiously bold deposits. foxes around the pending, special theodolites believe about the special accounts. furiously special packages wake about the slyly ironic accounts. ironic accounts sleep. blithely pending ideas sleep blithely. carefully bold attainments unwind along the even foxes. blithely regular accounts haggle blithely above the quick pinto beans. requests cajole slyly across the slyly pending ideas. evenly even deposits hinder bold deposits. quick, careful packages could have to use slyly ideas. instructions about the foxes detect across the quickly regular requests. furiously final orbits across the fluffily special dependencies boost slyly about the express theodolites. evenly bold excuses need to wake. slyly even pinto beans use blithely according to the special packages. quickly regular dependencies sleep"
9 |
10 | if string(szTextPool[:1000]) != expect {
11 | t.Errorf("expect %s", expect)
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/tpch/dbgen/part_supp.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
3 | import (
4 | "context"
5 | "io"
6 |
7 | "github.com/pingcap/go-tpc/pkg/sink"
8 | )
9 |
// PartSupp is one row of the TPC-H partsupp table.
type PartSupp struct {
	PartKey dssHuge
	SuppKey dssHuge
	Qty     dssHuge // available quantity
	SCost   dssHuge // supply cost, stored as an integer and formatted via FmtMoney on output
	Comment string
}
17 |
// sdPsupp advances the partsupp RNG streams past skipCount rows (once
// per supplier slot) so generation can start at an offset. The comment
// stream is skipped at twice the rate — presumably it consumes two
// random values per row; confirm against the comment generator.
// NOTE(review): the child parameter is unused here; it appears to match
// a common seed-advance function signature — confirm against driver.go.
func sdPsupp(child Table, skipCount dssHuge) {
	for j := 0; j < suppPerPart; j++ {
		advanceStream(psQtySd, skipCount, false)
		advanceStream(psScstSd, skipCount, false)
		advanceStream(psCmntSd, skipCount*2, false)
	}
}
25 |
26 | type partSuppLoader struct {
27 | *sink.CSVSink
28 | }
29 |
30 | func (p partSuppLoader) Load(item interface{}) error {
31 | pSupp := item.(*Part)
32 | for i := 0; i < suppPerPart; i++ {
33 | supp := pSupp.S[i]
34 | if err := p.WriteRow(context.TODO(),
35 | supp.PartKey,
36 | supp.SuppKey,
37 | supp.Qty,
38 | FmtMoney(supp.SCost),
39 | supp.Comment); err != nil {
40 | return err
41 | }
42 | }
43 | return nil
44 | }
45 |
46 | func (p partSuppLoader) Flush() error {
47 | return p.CSVSink.Flush(context.TODO())
48 | }
49 |
50 | func NewPartSuppLoader(w io.Writer) partSuppLoader {
51 | return partSuppLoader{sink.NewCSVSinkWithDelimiter(w, '|')}
52 | }
53 |
--------------------------------------------------------------------------------
/tpch/output/answer_gen.py:
--------------------------------------------------------------------------------
# Generates the Go answer tables for TPC-H: reads output/q{1..22}.out
# (a header line followed by '|'-separated rows, terminated by a blank
# line) and prints a `package tpch` source file declaring q{i}a
# [][]string variables, the ans lookup map, and maxColumns.
if __name__ == "__main__":
    output = """package tpch

"""
    max_columns = 1

    # NOTE(review): this expects q1.out..q22.out to all exist; the
    # output directory shown alongside this script lacks q2/q11/q16 —
    # presumably they are produced separately before running this.
    for i in range(1, 23):
        output += f"""var q{i}a = [][]string{{"""
        with open(f"output/q{i}.out", "r") as infile:
            line = infile.readline()  # skip the header row
            line = infile.readline().rstrip('\r\n')
            while line:  # the file ends with a blank line
                # Wrap each column in Go raw-string backquotes so the
                # values need no escaping.
                columns = list(map(lambda column: f"`{column}`", line.split("|")))
                max_columns = max(max_columns, len(columns))

                l = ", ".join(columns)
                output = f"""{output}
{{{l}}},"""
                line = infile.readline().rstrip('\r\n')
        # Close this query's [][]string literal.
        output = f"""{output}}}
"""

    output += f"""
var ans = map[string][][]string{{
	"q1": q1a,
	"q2": q2a,
	"q3": q3a,
	"q4": q4a,
	"q5": q5a,
	"q6": q6a,
	"q7": q7a,
	"q8": q8a,
	"q9": q9a,
	"q10": q10a,
	"q11": q11a,
	"q12": q12a,
	"q13": q13a,
	"q14": q14a,
	"q15": q15a,
	"q16": q16a,
	"q17": q17a,
	"q18": q18a,
	"q19": q19a,
	"q20": q20a,
	"q21": q21a,
	"q22": q22a,
}}

const maxColumns = {max_columns}
"""
    # The generated Go source goes to stdout; redirect into a .go file.
    print(output)
52 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
# Target architecture defaults to amd64; override with GOARCH=arm64 etc.
GOARCH := $(if $(GOARCH),$(GOARCH),amd64)
# Common go invocation: CGO off (static binaries), module mode, jsonv2
# experiment enabled (requires Go 1.25+).
GO=GOEXPERIMENT=jsonv2 CGO_ENABLED=0 GOARCH=$(GOARCH) GO111MODULE=on go

# Packages under test: everything except cmd.
PACKAGE_LIST := go list ./...| grep -vE "cmd"
PACKAGES := $$($(PACKAGE_LIST))
# NOTE(review): `-path` is missing its pattern argument, so find treats
# "-prune" as the pattern (which matches no path) and every *.go file is
# printed; presumably `-path ./vendor -prune` was intended — confirm.
FILES_TO_FMT := $(shell find . -path -prune -o -name '*.go' -print)

# Version metadata consumed by cmd/go-tpc/versioninfo.go at link time.
LDFLAGS += -X "main.version=$(shell git describe --tags --dirty --always)"
LDFLAGS += -X "main.commit=$(shell git rev-parse HEAD)"
LDFLAGS += -X "main.date=$(shell date -u '+%Y-%m-%d %I:%M:%S')"

GOBUILD=$(GO) build -ldflags '$(LDFLAGS)'

# Image URL to use all building/pushing image targets
IMG ?= go-tpc:latest
PLATFORM ?= linux/amd64,linux/arm64

all: format test build

format: vet fmt

# Reformat all Go files, then fail if anything changed (CI guard).
fmt:
	@echo "gofmt"
	@gofmt -w ${FILES_TO_FMT}
	@git diff --exit-code .

test:
	$(GO) test ./... -cover $(PACKAGES)

build: mod
	$(GOBUILD) -o ./bin/go-tpc cmd/go-tpc/*

vet:
	$(GO) vet ./...

# Tidy the module files, then fail if they drifted (CI guard).
mod:
	@echo "go mod tidy"
	GO111MODULE=on go mod tidy
	@git diff --exit-code -- go.sum go.mod

docker-build: test
	docker build . -t ${IMG}

docker-push: docker-build
	docker push ${IMG}

# Create multiarch driver if not exists:
# docker buildx create --name multiarch --driver docker-container --use
docker-multiarch: test
	docker buildx build --platform ${PLATFORM} . -t ${IMG} --push
51 |
--------------------------------------------------------------------------------
/tpch/dbgen/dist/loaddist_gen.py:
--------------------------------------------------------------------------------
import re

# Generator for the Go `dist` package data: parses the stock TPC-H
# distribution file (dists.dss) and prints Go source that registers every
# distribution in a package-level Maps variable.
if __name__ == "__main__":
    # Distribution name -> list of [text, weight] pairs, in file order.
    records = dict()
    # List currently being filled, or None while outside a BEGIN/END pair.
    record = None

    with open(f"dists.dss", "r") as infile:
        while True:
            line = infile.readline().rstrip('\r\n')
            # NOTE(review): this stops at the first empty line, not at EOF --
            # assumes dists.dss contains no interior blank lines; verify.
            if not line:
                break
            # re.match anchors at the start, so this skips lines that begin
            # with "count" (e.g. "COUNT|25") or consist solely of digits.
            if re.match(r"^count|[0-9]+$", line, re.IGNORECASE):
                continue
            # "BEGIN <name>" opens a new distribution.
            if re.match(r"^begin \w+$", line, re.IGNORECASE):
                n = line.split(" ")[1]
                record = []
                records[n] = record
                continue
            # "END <name>" closes the current distribution.
            if re.match(r"^end \w+$", line, re.IGNORECASE):
                record = None
                continue
            if record is None:
                continue
            # Data rows look like "<text>|<weight>".
            record.append(line.split("|"))

    # print(records)

    # Emit the Go source. The indentation embedded in the literals below is
    # deliberate -- it shapes the generated file -- so do not reformat it.
    output = """package dist

type Item struct {
	Text string
	Weight int32
}

type Dist []Item

var Maps = make(map[string]Dist)

var _ = func() error {"""

    for key, value in records.items():
        output += f"""
	Maps["{key}"] = []Item{{"""

        for v in value:
            output += f"""
		{{
			"{v[0]}", {v[1]},
		}},"""
        output += """
	}"""

    output += """
	return nil
}()"""

    print(output)
58 |
--------------------------------------------------------------------------------
/tpcc/stock_level.go:
--------------------------------------------------------------------------------
1 | package tpcc
2 |
3 | import (
4 | "context"
5 | )
6 |
// stockLevelCount counts the distinct items -- among those ordered by the
// district's 20 most recent orders -- whose stock quantity is below the
// given threshold. The TIDB_INLJ hint asks TiDB to use an index nested-loop
// join between order_line and stock.
const stockLevelCount = `SELECT /*+ TIDB_INLJ(order_line,stock) */ COUNT(DISTINCT (s_i_id)) stock_count FROM order_line, stock
WHERE ol_w_id = ? AND ol_d_id = ? AND ol_o_id < ? AND ol_o_id >= ? - 20 AND s_w_id = ? AND s_i_id = ol_i_id AND s_quantity < ?`
// stockLevelSelectDistrict fetches the district's next order id, which
// bounds the "20 most recent orders" window used above.
const stockLevelSelectDistrict = `SELECT d_next_o_id FROM district WHERE d_w_id = ? AND d_id = ?`
10 |
11 | func (w *Workloader) runStockLevel(ctx context.Context, thread int) error {
12 | s := getTPCCState(ctx)
13 |
14 | tx, err := w.beginTx(ctx)
15 | if err != nil {
16 | return err
17 | }
18 | defer tx.Rollback()
19 |
20 | wID := randInt(s.R, 1, w.cfg.Warehouses)
21 | dID := randInt(s.R, 1, 10)
22 | threshold := randInt(s.R, 10, 20)
23 |
24 | // SELECT d_next_o_id INTO :o_id FROM district WHERE d_w_id=:w_id AND d_id=:d_id;
25 |
26 | var oID int
27 | if err := s.stockLevelStmt[stockLevelSelectDistrict].QueryRowContext(ctx, wID, dID).Scan(&oID); err != nil {
28 | return err
29 | }
30 |
31 | // SELECT COUNT(DISTINCT (s_i_id)) INTO :stock_count FROM order_line, stock
32 | // WHERE ol_w_id=:w_id AND ol_d_id=:d_id AND ol_o_id<:o_id AND ol_o_id>=:o_id-20
33 | // AND s_w_id=:w_id AND s_i_id=ol_i_id AND s_quantity < :threshold;
34 | var stockCount int
35 | if err := s.stockLevelStmt[stockLevelCount].QueryRowContext(ctx, wID, dID, oID, oID, wID, threshold).Scan(&stockCount); err != nil {
36 | return err
37 | }
38 |
39 | return tx.Commit()
40 | }
41 |
--------------------------------------------------------------------------------
/pkg/util/version.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import (
4 | "strconv"
5 | "strings"
6 | )
7 |
// SemVersion is a semantic version number: major.minor.patch.
type SemVersion struct {
	Major int
	Minor int
	Patch int
}

// NewTiDBSemVersion extracts a SemVersion from the `SELECT VERSION()`
// output of TiDB (e.g. "5.7.25-TiDB-v6.5.0"). It reports false when the
// string does not identify a TiDB server or does not carry a parsable
// "-vX.Y.Z" component.
func NewTiDBSemVersion(version string) (SemVersion, bool) {
	if !strings.Contains(strings.ToLower(version), "tidb") {
		return SemVersion{}, false
	}

	// The semantic version follows the first "-v" and runs until the next
	// "-" (which may introduce a pre-release suffix such as "-beta").
	_, tail, found := strings.Cut(version, "-v")
	if !found {
		return SemVersion{}, false
	}
	semver, _, _ := strings.Cut(tail, "-")

	parts := strings.Split(semver, ".")
	if len(parts) < 3 {
		return SemVersion{}, false
	}

	var nums [3]int
	for i := 0; i < 3; i++ {
		n, err := strconv.Atoi(parts[i])
		if err != nil {
			return SemVersion{}, false
		}
		nums[i] = n
	}

	return SemVersion{
		Major: nums[0],
		Minor: nums[1],
		Patch: nums[2],
	}, true
}
52 |
53 | func (s SemVersion) String() string {
54 | return strconv.Itoa(s.Major) + "." + strconv.Itoa(s.Minor) + "." + strconv.Itoa(s.Patch)
55 | }
56 |
57 | func (s SemVersion) Compare(other SemVersion) int {
58 | signum := func(x int) int {
59 | if x > 0 {
60 | return 1
61 | }
62 | if x < 0 {
63 | return -1
64 | }
65 | return 0
66 | }
67 |
68 | if diff := s.Major - other.Major; diff != 0 {
69 | return signum(diff)
70 | }
71 | if diff := s.Minor - other.Minor; diff != 0 {
72 | return signum(diff)
73 | }
74 | if diff := s.Patch - other.Patch; diff != 0 {
75 | return signum(diff)
76 | }
77 | return 0
78 | }
79 |
--------------------------------------------------------------------------------
/ch/ddl.go:
--------------------------------------------------------------------------------
1 | package ch
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | )
7 |
// allTables lists every table in the CH-benCHmark schema: the TPC-C tables
// plus the TPC-H-style dimension tables (region, nation, supplier).
// Initialized directly rather than in init() -- there is no ordering
// requirement that would justify an init function.
var allTables = []string{"customer", "district", "history", "item", "new_order", "order_line", "orders", "region", "warehouse",
	"nation", "stock", "supplier"}
14 |
15 | func (w *Workloader) createTableDDL(ctx context.Context, query string, tableName string, action string) error {
16 | s := w.getState(ctx)
17 | fmt.Printf("%s %s\n", action, tableName)
18 | if _, err := s.Conn.ExecContext(ctx, query); err != nil {
19 | return err
20 | }
21 | return nil
22 | }
23 |
24 | // createTables creates tables schema.
25 | func (w *Workloader) createTables(ctx context.Context) error {
26 | query := `
27 | CREATE TABLE IF NOT EXISTS nation (
28 | N_NATIONKEY BIGINT NOT NULL,
29 | N_NAME CHAR(25) NOT NULL,
30 | N_REGIONKEY BIGINT NOT NULL,
31 | N_COMMENT VARCHAR(152),
32 | PRIMARY KEY (N_NATIONKEY)
33 | )`
34 |
35 | if err := w.createTableDDL(ctx, query, "nation", "creating"); err != nil {
36 | return err
37 | }
38 |
39 | query = `
40 | CREATE TABLE IF NOT EXISTS region (
41 | R_REGIONKEY BIGINT NOT NULL,
42 | R_NAME CHAR(25) NOT NULL,
43 | R_COMMENT VARCHAR(152),
44 | PRIMARY KEY (R_REGIONKEY)
45 | )`
46 | if err := w.createTableDDL(ctx, query, "region", "creating"); err != nil {
47 | return err
48 | }
49 |
50 | query = `
51 | CREATE TABLE IF NOT EXISTS supplier (
52 | S_SUPPKEY BIGINT NOT NULL,
53 | S_NAME CHAR(25) NOT NULL,
54 | S_ADDRESS VARCHAR(40) NOT NULL,
55 | S_NATIONKEY BIGINT NOT NULL,
56 | S_PHONE CHAR(15) NOT NULL,
57 | S_ACCTBAL DECIMAL(15, 2) NOT NULL,
58 | S_COMMENT VARCHAR(101) NOT NULL,
59 | PRIMARY KEY (S_SUPPKEY)
60 | )`
61 | if err := w.createTableDDL(ctx, query, "supplier", "creating"); err != nil {
62 | return err
63 | }
64 |
65 | return nil
66 | }
67 |
--------------------------------------------------------------------------------
/tpch/dbgen/cust.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "io"
7 |
8 | "github.com/pingcap/go-tpc/pkg/sink"
9 | )
10 |
// Customer-generation constants: the *Sd values are ids of the random
// streams passed to advanceStream/random/vStr/genPhone/pickStr/makeText,
// the *Len values are generated-text lengths, and cAbalMin/cAbalMax bound
// the account balance (formatted through FmtMoney -- presumably cents;
// verify against FmtMoney).
const (
	cPhneSd = 28
	cAbalSd = 29
	cMsegSd = 30
	cAddrLen = 25
	cCmntLen = 73
	cAddrSd = 26
	cCmntSd = 31
	cAbalMin = -99999
	cAbalMax = 999999
	lNtrgSd = 27
)
23 |
// Cust is one generated row of the TPC-H customer table.
type Cust struct {
	CustKey dssHuge
	Name string
	Address string
	NationCode dssHuge
	Phone string
	Acctbal dssHuge
	MktSegment string
	Comment string
}
34 |
// custLoader writes customer rows through a '|'-delimited CSV sink.
type custLoader struct {
	*sink.CSVSink
}
38 |
39 | func (c custLoader) Load(item interface{}) error {
40 | cust := item.(*Cust)
41 | if err := c.WriteRow(context.TODO(),
42 | cust.CustKey,
43 | cust.Name,
44 | cust.Address,
45 | cust.NationCode,
46 | cust.Phone,
47 | FmtMoney(cust.Acctbal),
48 | cust.MktSegment,
49 | cust.Comment); err != nil {
50 | return err
51 | }
52 | return nil
53 | }
54 |
// Flush forces any buffered rows out to the underlying writer.
func (c custLoader) Flush() error {
	return c.CSVSink.Flush(context.TODO())
}
58 |
// NewCustLoader builds a customer loader that writes '|'-delimited rows to w.
func NewCustLoader(w io.Writer) custLoader {
	return custLoader{sink.NewCSVSinkWithDelimiter(w, '|')}
}
62 |
// sdCust advances the random streams consumed per customer row so that
// generation can skip ahead by skipCount rows without materializing them.
// The multipliers mirror how many values makeCust draws from each stream
// (presumably 9 per address via vStr, 3 per phone via genPhone, 2 per
// comment via makeText -- verify against rand.go). child is unused here;
// it appears to be kept for a common table-driver signature -- confirm.
func sdCust(child Table, skipCount dssHuge) {
	advanceStream(cAddrSd, skipCount*9, false)
	advanceStream(cCmntSd, skipCount*2, false)
	advanceStream(lNtrgSd, skipCount, false)
	advanceStream(cPhneSd, skipCount*3, false)
	advanceStream(cAbalSd, skipCount, false)
	advanceStream(cMsegSd, skipCount, false)
}
71 |
// makeCust generates the customer row with the given key. The calls below
// consume per-column random streams in a fixed sequence; reordering them
// would change the generated data, so keep this in sync with sdCust's
// stream bookkeeping.
func makeCust(idx dssHuge) *Cust {
	cust := &Cust{}
	cust.CustKey = idx
	cust.Name = fmt.Sprintf("Customer#%09d", idx)
	cust.Address = vStr(cAddrLen, cAddrSd)
	// Nation index drawn uniformly over the loaded nations distribution;
	// it also seeds the country code of the phone number.
	i := random(0, dssHuge(nations.count-1), lNtrgSd)
	cust.NationCode = i
	cust.Phone = genPhone(i, cPhneSd)
	cust.Acctbal = random(cAbalMin, cAbalMax, cAbalSd)
	pickStr(&cMsegSet, cMsegSd, &cust.MktSegment)
	cust.Comment = makeText(cCmntLen, cCmntSd)

	return cust
}
86 |
--------------------------------------------------------------------------------
/pkg/util/output.go:
--------------------------------------------------------------------------------
1 | package util
2 |
import (
	"bytes"
	json "encoding/json/v2"
	"fmt"
	"log"
	"os"
	"strconv"
	"strings"

	"github.com/olekukonko/tablewriter"
)
13 |
// Supported output styles for rendering benchmark summaries.
const (
	OutputStylePlain = "plain"
	OutputStyleTable = "table"
	OutputStyleJson = "json"
)
20 |
// StdErrLogger writes unprefixed lines to standard error. It is
// goroutine-safe (log.Logger serializes writes internally). Initialized
// directly instead of via init() -- there is no ordering requirement.
var StdErrLogger = log.New(os.Stderr, "", 0)
27 |
// RenderString prints values to stdout using format. With no headers, each
// row's columns are passed directly as format arguments. With headers, the
// first two columns are passed positionally and the remaining columns are
// rendered as "header: value" pairs joined by ", " -- this path assumes
// each row has at least len(headers) columns and len(headers) >= 2.
// No-op when there are no value rows.
func RenderString(format string, headers []string, values [][]string) {
	if len(values) == 0 {
		return
	}
	if len(headers) == 0 {
		for _, value := range values {
			args := make([]interface{}, len(value))
			for i, v := range value {
				args[i] = v
			}
			fmt.Printf(format, args...)
		}
		return
	}

	var buf bytes.Buffer
	for _, value := range values {
		// Guard the header tail explicitly: the original
		// make([]string, len(headers)-2) panicked outright when exactly
		// one header was supplied.
		var fields []string
		if len(headers) > 2 {
			fields = make([]string, 0, len(headers)-2)
			for i, header := range headers[2:] {
				fields = append(fields, header+": "+value[i+2])
			}
		}
		fmt.Fprintf(&buf, format, value[0], value[1], strings.Join(fields, ", "))
	}
	fmt.Print(buf.String())
}
53 |
// RenderTable prints headers and values to stdout as an ASCII table.
// No-op when there are no value rows.
func RenderTable(headers []string, values [][]string) {
	if len(values) == 0 {
		return
	}
	tb := tablewriter.NewWriter(os.Stdout)
	tb.Header(headers)
	tb.Bulk(values)
	tb.Render()
}
63 |
64 | func RenderJson(headers []string, values [][]string) {
65 | if len(values) == 0 {
66 | return
67 | }
68 | data := make([]map[string]string, 0, len(values))
69 | for _, value := range values {
70 | line := make(map[string]string, 0)
71 | for i, header := range headers {
72 | line[header] = value[i]
73 | }
74 | data = append(data, line)
75 | }
76 | outStr, err := json.Marshal(data)
77 | if err != nil {
78 | fmt.Println(err)
79 | return
80 | }
81 | fmt.Println(string(outStr))
82 | }
83 |
// IntToString formats i in base 10. strconv.FormatInt avoids the boxing
// and reflection overhead of fmt.Sprintf for this trivial conversion.
func IntToString(i int64) string {
	return strconv.FormatInt(i, 10)
}
87 |
// FloatToOneString formats f with one digit after the decimal point
// (equivalent to %.1f, but without fmt's boxing/reflection overhead).
func FloatToOneString(f float64) string {
	return strconv.FormatFloat(f, 'f', 1, 64)
}
91 |
// FloatToTwoString formats f with two digits after the decimal point
// (equivalent to %.2f, but without fmt's boxing/reflection overhead).
func FloatToTwoString(f float64) string {
	return strconv.FormatFloat(f, 'f', 2, 64)
}
95 |
--------------------------------------------------------------------------------
/install.sh:
--------------------------------------------------------------------------------
#!/bin/sh

version=1.0.12

# Detect the operating system; bail out on anything unsupported.
case $(uname -s) in
    Linux|linux) os=linux ;;
    Darwin|darwin) os=darwin ;;
    *) os= ;;
esac

if [ -z "$os" ]; then
    echo "OS $(uname -s) not supported." >&2
    exit 1
fi

# Detect the CPU architecture. The original script computed $arch but never
# used it -- the download URL was built before this check and hard-coded
# amd64, so arm64 hosts silently received the wrong binary.
case $(uname -m) in
    amd64|x86_64) arch=amd64 ;;
    arm64|aarch64) arch=arm64 ;;
    *) arch= ;;
esac

if [ -z "$arch" ]; then
    echo "Architecture $(uname -m) not supported." >&2
    exit 1
fi

# Build the download URL only after both OS and architecture are known.
binary_url="https://github.com/pingcap/go-tpc/releases/download/v${version}/go-tpc_${version}_${os}_${arch}.tar.gz"

if [ -z "$GO_TPC_HOME" ]; then
    GO_TPC_HOME=$HOME/.go-tpc
fi
bin_dir=$GO_TPC_HOME/bin
mkdir -p "$bin_dir"

# Download the release archive to /tmp, unpack into $bin_dir, then clean up.
install_binary() {
    archive="/tmp/go-tpc_${version}_${os}_${arch}.tar.gz"
    curl -L "$binary_url" -o "$archive" || return 1
    tar -zxf "$archive" -C "$bin_dir" || return 1
    rm "$archive"
    return 0
}

if ! install_binary; then
    echo "Failed to download and/or extract go-tpc archive."
    exit 1
fi

chmod 755 "$bin_dir/go-tpc"


bold=$(tput bold 2>/dev/null)
sgr0=$(tput sgr0 2>/dev/null)

# Reference: https://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux-unix
shell=$(echo $SHELL | awk 'BEGIN {FS="/";} { print $NF }')
echo "Detected shell: ${bold}$shell${sgr0}"
if [ -f "${HOME}/.${shell}_profile" ]; then
    PROFILE=${HOME}/.${shell}_profile
elif [ -f "${HOME}/.${shell}_login" ]; then
    PROFILE=${HOME}/.${shell}_login
elif [ -f "${HOME}/.${shell}rc" ]; then
    PROFILE=${HOME}/.${shell}rc
else
    PROFILE=${HOME}/.profile
fi
echo "Shell profile: ${bold}$PROFILE${sgr0}"

case :$PATH: in
    *:$bin_dir:*) : "PATH already contains $bin_dir" ;;
    *) printf 'export PATH=%s:$PATH\n' "$bin_dir" >> "$PROFILE"
       echo "$PROFILE has been modified to add go-tpc to PATH"
       echo "open a new terminal or ${bold}source ${PROFILE}${sgr0} to use it"
       ;;
esac

echo "Installed path: ${bold}$bin_dir/go-tpc${sgr0}"
echo "==============================================="
echo "Have a try: ${bold}go-tpc tpcc ${sgr0}"
echo "==============================================="
74 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/pingcap/go-tpc
2 |
3 | go 1.25
4 |
5 | require (
6 | github.com/HdrHistogram/hdrhistogram-go v1.2.0
7 | github.com/go-sql-driver/mysql v1.9.3
8 | github.com/jedib0t/go-pretty v4.3.0+incompatible
9 | github.com/lib/pq v1.10.9
10 | github.com/olekukonko/tablewriter v1.1.2
11 | github.com/prometheus/client_golang v1.23.2
12 | github.com/spf13/cobra v1.10.2
13 | github.com/stretchr/testify v1.11.1
14 | go.uber.org/atomic v1.11.0
15 | go.uber.org/automaxprocs v1.6.0
16 | golang.org/x/sync v0.19.0
17 | )
18 |
19 | require (
20 | filippo.io/edwards25519 v1.1.0 // indirect
21 | github.com/beorn7/perks v1.0.1 // indirect
22 | github.com/cespare/xxhash/v2 v2.3.0 // indirect
23 | github.com/clipperhouse/displaywidth v0.6.2 // indirect
24 | github.com/clipperhouse/stringish v0.1.1 // indirect
25 | github.com/clipperhouse/uax29/v2 v2.3.0 // indirect
26 | github.com/davecgh/go-spew v1.1.1 // indirect
27 | github.com/fatih/color v1.18.0 // indirect
28 | github.com/go-openapi/errors v0.22.5 // indirect
29 | github.com/go-openapi/strfmt v0.25.0 // indirect
30 | github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
31 | github.com/google/uuid v1.6.0 // indirect
32 | github.com/inconshreveable/mousetrap v1.1.0 // indirect
33 | github.com/kr/text v0.2.0 // indirect
34 | github.com/mattn/go-colorable v0.1.14 // indirect
35 | github.com/mattn/go-isatty v0.0.20 // indirect
36 | github.com/mattn/go-runewidth v0.0.19 // indirect
37 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
38 | github.com/oklog/ulid v1.3.1 // indirect
39 | github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 // indirect
40 | github.com/olekukonko/errors v1.1.0 // indirect
41 | github.com/olekukonko/ll v0.1.3 // indirect
42 | github.com/pmezard/go-difflib v1.0.0 // indirect
43 | github.com/prometheus/client_model v0.6.2 // indirect
44 | github.com/prometheus/common v0.67.4 // indirect
45 | github.com/prometheus/procfs v0.19.2 // indirect
46 | github.com/spf13/pflag v1.0.10 // indirect
47 | go.mongodb.org/mongo-driver v1.17.6 // indirect
48 | go.yaml.in/yaml/v2 v2.4.3 // indirect
49 | golang.org/x/net v0.48.0 // indirect
50 | golang.org/x/sys v0.39.0 // indirect
51 | golang.org/x/text v0.32.0 // indirect
52 | google.golang.org/protobuf v1.36.11 // indirect
53 | gopkg.in/yaml.v3 v3.0.1 // indirect
54 | )
55 |
--------------------------------------------------------------------------------
/tpch/dbgen/lineitem.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
3 | import (
4 | "context"
5 | "io"
6 |
7 | "github.com/pingcap/go-tpc/pkg/sink"
8 | )
9 |
// Value ranges used when generating lineitem rows: quantities, tax and
// discount (presumably percentage points -- verify against makeOrder),
// part-key lower bound, and day offsets for the ship/commit/receipt dates.
const (
	lQtyMin = 1
	lQtyMax = 50
	lTaxMin = 0
	lTaxMax = 8
	lDcntMin = 0
	lDcntMax = 10
	lPkeyMin = 1
	lSdteMin = 1
	lSdteMax = 121
	lCdteMin = 30
	lCdteMax = 90
	lRdteMin = 1
	lRdteMax = 30
)
25 |
var (
	// LPkeyMax is the upper bound for lineitem part keys; set by
	// initLineItem to tDefs[TPart].base * scale once the scale factor is
	// known.
	LPkeyMax dssHuge
)
29 |
// LineItem is one generated row of the TPC-H lineitem table. Money-valued
// fields (EPrice, Discount, Tax) are stored as dssHuge and formatted via
// FmtMoney when written out.
type LineItem struct {
	OKey dssHuge
	PartKey dssHuge
	SuppKey dssHuge
	LCnt dssHuge
	Quantity dssHuge
	EPrice dssHuge
	Discount dssHuge
	Tax dssHuge
	RFlag string
	LStatus string
	CDate string
	SDate string
	RDate string
	ShipInstruct string
	ShipMode string
	Comment string
}
48 |
// lineItemLoader writes lineitem rows through a '|'-delimited CSV sink.
type lineItemLoader struct {
	*sink.CSVSink
}
52 |
53 | func (l lineItemLoader) Load(item interface{}) error {
54 | o := item.(*Order)
55 | for _, line := range o.Lines {
56 | if err := l.WriteRow(context.TODO(),
57 | line.OKey,
58 | line.PartKey,
59 | line.SuppKey,
60 | line.LCnt,
61 | line.Quantity,
62 | FmtMoney(line.EPrice),
63 | FmtMoney(line.Discount),
64 | FmtMoney(line.Tax),
65 | line.RFlag,
66 | line.LStatus,
67 | line.SDate,
68 | line.CDate,
69 | line.RDate,
70 | line.ShipInstruct,
71 | line.ShipMode,
72 | line.Comment,
73 | ); err != nil {
74 | return err
75 | }
76 | }
77 | return nil
78 | }
79 |
// Flush forces any buffered rows out to the underlying writer.
func (l lineItemLoader) Flush() error {
	return l.CSVSink.Flush(context.TODO())
}
83 |
// NewLineItemLoader builds a lineitem loader that writes '|'-delimited rows to w.
func NewLineItemLoader(w io.Writer) lineItemLoader {
	return lineItemLoader{sink.NewCSVSinkWithDelimiter(w, '|')}
}
87 |
// sdLineItem advances the random streams consumed while generating line
// items so generation can skip ahead by skipCount rows without
// materializing them. For each of the up-to-oLcntMax line slots it advances
// every stream in [lQtySd, lRflgSd] once and the comment stream twice,
// mirroring the per-line draws of the generator. When invoked as a child
// of TPsupp it also advances the order-date and line-count streams.
func sdLineItem(child Table, skipCount dssHuge) {
	for j := 0; j < oLcntMax; j++ {
		for i := lQtySd; i <= lRflgSd; i++ {
			advanceStream(i, skipCount, false)
		}
		advanceStream(lCmntSd, skipCount*2, false)
	}
	if child == TPsupp {
		advanceStream(oOdateSd, skipCount, false)
		advanceStream(oLcntSd, skipCount, false)
	}
}
100 |
// initLineItem derives the scale-dependent maximum part key: the part
// table's base cardinality multiplied by the scale factor.
func initLineItem() {
	LPkeyMax = tDefs[TPart].base * scale
}
104 |
--------------------------------------------------------------------------------
/.github/workflows/release.yaml:
--------------------------------------------------------------------------------
1 | name: "release"
2 | on:
3 | push:
4 | tags: ['v*']
5 | branches: [ master ]
6 |
7 | jobs:
8 | release:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - uses: actions/setup-go@v3
12 | with:
13 | go-version: 1.25.x
14 | - uses: actions/checkout@v2
15 | with:
16 | fetch-depth: 0
17 |
18 | - name: Release
19 | if: github.event.ref != 'refs/heads/master'
20 | uses: goreleaser/goreleaser-action@v1
21 | with:
22 | version: latest
23 | args: release --clean
24 | env:
25 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
26 |
27 | - name: Build
28 | if: github.event.ref == 'refs/heads/master'
29 | uses: goreleaser/goreleaser-action@v1
30 | with:
31 | version: latest
32 | args: build --snapshot --clean
33 | env:
34 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
35 | - name: Package
36 | if: github.event.ref == 'refs/heads/master'
37 | run: |
38 | tar -zcf go-tpc_latest_linux_amd64.tar.gz -C dist/go-tpc_linux_amd64_v1 go-tpc
39 | tar -zcf go-tpc_latest_linux_arm64.tar.gz -C dist/go-tpc_linux_arm64_v8.0 go-tpc
40 | tar -zcf go-tpc_latest_darwin_amd64.tar.gz -C dist/go-tpc_darwin_amd64_v1 go-tpc
41 | tar -zcf go-tpc_latest_darwin_arm64.tar.gz -C dist/go-tpc_darwin_arm64_v8.0 go-tpc
42 |
43 | - name: Release latest build
44 | uses: softprops/action-gh-release@v1
45 | if: github.event.ref == 'refs/heads/master'
46 | with:
47 | name: Latest Build
48 | tag_name: latest-${{ github.sha }}
49 | files: |
50 | *.tar.gz
51 | - name: Clean legacy latest releases
52 | uses: actions/github-script@v6
53 | if: github.event.ref == 'refs/heads/master'
54 | with:
55 | script: |
56 | const { owner, repo } = context.repo;
57 | const releases = (await github.rest.repos.listReleases({ owner, repo })).data.filter(r => r.draft && r.tag_name.startsWith('latest'));
58 | for (const r of releases) { await github.rest.repos.deleteRelease({ owner, repo, release_id: r.id }).catch(_ => {}); }
59 | - name: Clean legacy latest tags
60 | if: github.event.ref == 'refs/heads/master'
61 | run: |
62 | git tag -l | grep latest | grep -v latest-${{ github.sha }} | xargs -I{} git push -d origin {} || true
63 |
--------------------------------------------------------------------------------
/tpch/dbgen/dist.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
3 | import "github.com/pingcap/go-tpc/tpch/dbgen/dist"
4 |
// Package-level distributions populated from the embedded dist tables by
// initDists: TPC-H nation/region/column-value sets plus the grammar parts
// (nouns, verbs, ...) used for pseudo-text generation.
var (
	nations distribution
	nations2 distribution
	regions distribution
	oPrioritySet distribution
	lInstructSet distribution
	lSmodeSet distribution
	lCategorySet distribution
	lRflagSet distribution
	cMsegSet distribution
	colors distribution
	pTypesSet distribution
	pCntrSet distribution
	articles distribution
	nouns distribution
	adjectives distribution
	adverbs distribution
	prepositions distribution
	verbs distribution
	terminators distribution
	auxillaries distribution
	np distribution
	vp distribution
	grammar distribution
)
30 |
// setMember is one entry of a distribution. weight holds the cumulative
// weight up to and including this entry (see readDist), so a uniform draw
// below d.max can be mapped to a member by threshold comparison.
type setMember struct {
	weight long
	text string
}
35 |
// distribution is a weighted string set: count entries, max is the total
// weight, members carry cumulative weights, and permute is an optional
// index permutation rebuilt by permuteDist.
type distribution struct {
	count int
	max int32
	members []setMember
	permute []long
}
42 |
43 | func readDist(name string, d *distribution) {
44 | dist := dist.Maps[name]
45 | d.count = len(dist)
46 | for _, item := range dist {
47 | d.max += item.Weight
48 | d.members = append(d.members, setMember{text: item.Text, weight: long(d.max)})
49 | }
50 | }
51 |
52 | func permute(permute []long, count int, stream long) {
53 | for i := 0; i < count; i++ {
54 | source := random(dssHuge(i), dssHuge(count-1), stream)
55 | permute[source], permute[i] = permute[i], permute[source]
56 | }
57 | }
58 |
59 | func permuteDist(dist *distribution, stream long) {
60 | if len(dist.permute) == 0 {
61 | dist.permute = make([]long, dist.count)
62 | }
63 | for i := 0; i < dist.count; i++ {
64 | dist.permute[i] = long(i)
65 | }
66 | permute(dist.permute, dist.count, stream)
67 | }
68 |
69 | func initDists() {
70 | readDist("p_cntr", &pCntrSet)
71 | readDist("colors", &colors)
72 | readDist("p_types", &pTypesSet)
73 | readDist("nations", &nations)
74 | readDist("regions", ®ions)
75 | readDist("o_oprio", &oPrioritySet)
76 | readDist("instruct", &lInstructSet)
77 | readDist("smode", &lSmodeSet)
78 | readDist("category", &lCategorySet)
79 | readDist("rflag", &lRflagSet)
80 | readDist("msegmnt", &cMsegSet)
81 | readDist("nouns", &nouns)
82 | readDist("verbs", &verbs)
83 | readDist("adjectives", &adjectives)
84 | readDist("adverbs", &adverbs)
85 | readDist("auxillaries", &auxillaries)
86 | readDist("terminators", &terminators)
87 | readDist("articles", &articles)
88 | readDist("prepositions", &prepositions)
89 | readDist("grammar", &grammar)
90 | readDist("np", &np)
91 | readDist("vp", &vp)
92 | }
93 |
--------------------------------------------------------------------------------
/tpcc/metrics.go:
--------------------------------------------------------------------------------
1 | package tpcc
2 |
3 | import "github.com/prometheus/client_golang/prometheus"
4 |
5 | var (
6 | elapsedVec = prometheus.NewGaugeVec(
7 | prometheus.GaugeOpts{
8 | Namespace: "tpc",
9 | Subsystem: "tpcc",
10 | Name: "elapsed",
11 | Help: "The real elapsed time per interval",
12 | }, []string{"op"},
13 | )
14 | sumVec = prometheus.NewGaugeVec(
15 | prometheus.GaugeOpts{
16 | Namespace: "tpc",
17 | Subsystem: "tpcc",
18 | Name: "sum",
19 | Help: "The total latency per interval",
20 | }, []string{"op"},
21 | )
22 | countVec = prometheus.NewGaugeVec(
23 | prometheus.GaugeOpts{
24 | Namespace: "tpc",
25 | Subsystem: "tpcc",
26 | Name: "count",
27 | Help: "The total count of transactions",
28 | }, []string{"op"},
29 | )
30 | opsVec = prometheus.NewGaugeVec(
31 | prometheus.GaugeOpts{
32 | Namespace: "tpc",
33 | Subsystem: "tpcc",
34 | Name: "ops",
35 | Help: "The number of op per second",
36 | }, []string{"op"},
37 | )
38 | avgVec = prometheus.NewGaugeVec(
39 | prometheus.GaugeOpts{
40 | Namespace: "tpc",
41 | Subsystem: "tpcc",
42 | Name: "avg",
43 | Help: "The avarge latency",
44 | }, []string{"op"},
45 | )
46 | p50Vec = prometheus.NewGaugeVec(
47 | prometheus.GaugeOpts{
48 | Namespace: "tpc",
49 | Subsystem: "tpcc",
50 | Name: "p50",
51 | Help: "P50 latency",
52 | }, []string{"op"},
53 | )
54 | p90Vec = prometheus.NewGaugeVec(
55 | prometheus.GaugeOpts{
56 | Namespace: "tpc",
57 | Subsystem: "tpcc",
58 | Name: "p90",
59 | Help: "P90 latency",
60 | }, []string{"op"},
61 | )
62 | p95Vec = prometheus.NewGaugeVec(
63 | prometheus.GaugeOpts{
64 | Namespace: "tpc",
65 | Subsystem: "tpcc",
66 | Name: "p95",
67 | Help: "P95 latency",
68 | }, []string{"op"},
69 | )
70 | p99Vec = prometheus.NewGaugeVec(
71 | prometheus.GaugeOpts{
72 | Namespace: "tpc",
73 | Subsystem: "tpcc",
74 | Name: "p99",
75 | Help: "P99 latency",
76 | }, []string{"op"},
77 | )
78 | p999Vec = prometheus.NewGaugeVec(
79 | prometheus.GaugeOpts{
80 | Namespace: "tpc",
81 | Subsystem: "tpcc",
82 | Name: "p999",
83 | Help: "p999 latency",
84 | }, []string{"op"},
85 | )
86 | maxVec = prometheus.NewGaugeVec(
87 | prometheus.GaugeOpts{
88 | Namespace: "tpc",
89 | Subsystem: "tpcc",
90 | Name: "max",
91 | Help: "Max latency",
92 | }, []string{"op"},
93 | )
94 | )
95 |
// Register every TPC-C gauge vector with the default Prometheus registry
// at package load time; MustRegister panics on duplicate registration.
func init() {
	prometheus.MustRegister(elapsedVec, sumVec, countVec, opsVec, avgVec, p50Vec, p90Vec, p95Vec, p99Vec, p999Vec, maxVec)
}
99 |
--------------------------------------------------------------------------------
/tpch/output/q21.out:
--------------------------------------------------------------------------------
1 | s_name|numwait
2 | Supplier#000000699|19
3 | Supplier#000001543|19
4 | Supplier#000007696|19
5 | Supplier#000009689|19
6 | Supplier#000002701|17
7 | Supplier#000005892|17
8 | Supplier#000005912|17
9 | Supplier#000009367|17
10 | Supplier#000000966|16
11 | Supplier#000000967|16
12 | Supplier#000003408|16
13 | Supplier#000004069|16
14 | Supplier#000004809|16
15 | Supplier#000005081|16
16 | Supplier#000006966|16
17 | Supplier#000007317|16
18 | Supplier#000007624|16
19 | Supplier#000008182|16
20 | Supplier#000008963|16
21 | Supplier#000009774|16
22 | Supplier#000009946|16
23 | Supplier#000001513|15
24 | Supplier#000001846|15
25 | Supplier#000002284|15
26 | Supplier#000003206|15
27 | Supplier#000004012|15
28 | Supplier#000005111|15
29 | Supplier#000005417|15
30 | Supplier#000005958|15
31 | Supplier#000006063|15
32 | Supplier#000006663|15
33 | Supplier#000007331|15
34 | Supplier#000008425|15
35 | Supplier#000009500|15
36 | Supplier#000000133|14
37 | Supplier#000000160|14
38 | Supplier#000001374|14
39 | Supplier#000001485|14
40 | Supplier#000001535|14
41 | Supplier#000001796|14
42 | Supplier#000002016|14
43 | Supplier#000002255|14
44 | Supplier#000002360|14
45 | Supplier#000002884|14
46 | Supplier#000003945|14
47 | Supplier#000004264|14
48 | Supplier#000005437|14
49 | Supplier#000006134|14
50 | Supplier#000006743|14
51 | Supplier#000006837|14
52 | Supplier#000006988|14
53 | Supplier#000007831|14
54 | Supplier#000008129|14
55 | Supplier#000008439|14
56 | Supplier#000008653|14
57 | Supplier#000009269|14
58 | Supplier#000009595|14
59 | Supplier#000009674|14
60 | Supplier#000000208|13
61 | Supplier#000000994|13
62 | Supplier#000001155|13
63 | Supplier#000002276|13
64 | Supplier#000002619|13
65 | Supplier#000002984|13
66 | Supplier#000003762|13
67 | Supplier#000004378|13
68 | Supplier#000004423|13
69 | Supplier#000004439|13
70 | Supplier#000004559|13
71 | Supplier#000005512|13
72 | Supplier#000005902|13
73 | Supplier#000006060|13
74 | Supplier#000006818|13
75 | Supplier#000007076|13
76 | Supplier#000007256|13
77 | Supplier#000007485|13
78 | Supplier#000008558|13
79 | Supplier#000009280|13
80 | Supplier#000009533|13
81 | Supplier#000000022|12
82 | Supplier#000000111|12
83 | Supplier#000000227|12
84 | Supplier#000000502|12
85 | Supplier#000000655|12
86 | Supplier#000000856|12
87 | Supplier#000001041|12
88 | Supplier#000001116|12
89 | Supplier#000002067|12
90 | Supplier#000002120|12
91 | Supplier#000002376|12
92 | Supplier#000002424|12
93 | Supplier#000003074|12
94 | Supplier#000003186|12
95 | Supplier#000003334|12
96 | Supplier#000003344|12
97 | Supplier#000003409|12
98 | Supplier#000003760|12
99 | Supplier#000004084|12
100 | Supplier#000004444|12
101 | Supplier#000004688|12
102 |
--------------------------------------------------------------------------------
/pkg/sink/csv.go:
--------------------------------------------------------------------------------
1 | package sink
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | "encoding/csv"
7 | "fmt"
8 | "io"
9 | "reflect"
10 | )
11 |
// CSVSink writes values to a file in CSV format. The raw writer is kept
// alongside the csv.Writer so Close can close the destination when it is
// an io.WriteCloser.
type CSVSink struct {
	writer *csv.Writer
	underlying io.Writer
}

// Compile-time check that CSVSink satisfies the Sink interface.
var _ Sink = &CSVSink{}
19 |
// NewCSVSink creates a sink that writes values to an io.Writer in CSV
// format. Rows are buffered by the csv.Writer; call Flush (or Close) to
// push them out to w.
func NewCSVSink(w io.Writer) *CSVSink {
	return &CSVSink{
		writer: csv.NewWriter(w),
		underlying: w,
	}
}
27 |
28 | // NewCSVSinkWithDelimiter creates a sink that writes values to an io.Writer in CSV format, using a customized delimiter.
29 | func NewCSVSinkWithDelimiter(w io.Writer, delimiter rune) *CSVSink {
30 | sink := NewCSVSink(w)
31 | sink.writer.Comma = delimiter
32 | return sink
33 | }
34 |
// buildColumns renders each value into its textual CSV column form.
func buildColumns(values []interface{}) []string {
	columns := make([]string, len(values))
	for i, v := range values {
		columns[i] = formatColumn(v)
	}
	return columns
}

// formatColumn converts a single value: untyped nil becomes "NULL",
// string/integer/float kinds are formatted directly, database/sql nullable
// wrappers render their payload or "NULL", and anything else panics.
func formatColumn(v interface{}) string {
	ty := reflect.TypeOf(v)
	if ty == nil {
		return "NULL"
	}
	switch ty.Kind() {
	case reflect.String:
		return fmt.Sprintf("%s", v)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return fmt.Sprintf("%d", v)
	case reflect.Float32, reflect.Float64:
		return fmt.Sprintf("%f", v)
	}
	switch val := v.(type) {
	case sql.NullString:
		if val.Valid {
			return val.String
		}
		return "NULL"
	case sql.NullInt64:
		if val.Valid {
			return fmt.Sprintf("%d", val.Int64)
		}
		return "NULL"
	case sql.NullFloat64:
		if val.Valid {
			return fmt.Sprintf("%f", val.Float64)
		}
		return "NULL"
	default:
		panic(fmt.Sprintf("unsupported type: %T", v))
	}
}
79 |
80 | // WriteRow writes a row to the underlying io.Writer. The writing attempt may be deferred until reaching a batch.
81 | func (s *CSVSink) WriteRow(ctx context.Context, values ...interface{}) error {
82 | columns := buildColumns(values)
83 | return s.writer.Write(columns)
84 | }
85 |
// Flush writes any buffered data to the underlying io.Writer and reports
// any error that occurred during previous writes or the flush itself.
func (s *CSVSink) Flush(ctx context.Context) error {
	s.writer.Flush()
	return s.writer.Error()
}
91 |
// Close flushes buffered data and then closes the underlying io.Writer if it
// is an io.WriteCloser; plain writers are left open for the caller to manage.
func (s *CSVSink) Close(ctx context.Context) error {
	if err := s.Flush(ctx); err != nil {
		return err
	}
	if wc, ok := s.underlying.(io.WriteCloser); ok {
		return wc.Close()
	}
	return nil
}
102 |
--------------------------------------------------------------------------------
/pkg/measurement/hist.go:
--------------------------------------------------------------------------------
1 | package measurement
2 |
3 | import (
4 | "fmt"
5 | "sync"
6 | "time"
7 |
8 | "github.com/HdrHistogram/hdrhistogram-go"
9 | "github.com/pingcap/go-tpc/pkg/util"
10 | )
11 |
// Histogram wraps an HDR histogram with a lock, a running sum of raw
// (unclamped) latencies, and the creation time used to compute elapsed
// time and throughput.
type Histogram struct {
	*hdrhistogram.Histogram
	m         sync.RWMutex // guards the embedded histogram and sum
	sum       int64        // total of raw latencies, nanoseconds (not clamped to histogram bounds)
	startTime time.Time    // set at construction; basis for HistInfo.Elapsed / Ops
}
18 |
// HistInfo is a point-in-time snapshot of a Histogram, as produced by
// GetInfo. Latency fields (Sum, Avg, P50..Max) are in milliseconds,
// Elapsed is in seconds, and Ops is operations per second.
type HistInfo struct {
	Elapsed float64 // seconds since the histogram was created
	Sum     float64 // total measured latency, ms
	Count   int64   // number of recorded samples
	Ops     float64 // Count / Elapsed
	Avg     float64 // mean latency, ms
	P50     float64
	P90     float64
	P95     float64
	P99     float64
	P999    float64
	Max     float64 // value at quantile 100, ms
}
32 |
// NewHistogram creates a Histogram tracking latencies in [minLat, maxLat]
// with sf significant figures, recording the creation time so throughput
// can be derived later.
func NewHistogram(minLat, maxLat time.Duration, sf int) *Histogram {
	return &Histogram{Histogram: hdrhistogram.New(minLat.Nanoseconds(), maxLat.Nanoseconds(), sf), startTime: time.Now()}
}
36 |
// Measure records one latency sample. The value stored in the histogram is
// clamped to the trackable range, while sum accumulates the raw, unclamped
// latency. A RecordValue failure is treated as a programmer error and
// panics (deliberately outside the lock).
func (h *Histogram) Measure(rawLatency time.Duration) {
	latency := rawLatency
	if latency < time.Duration(h.LowestTrackableValue()) {
		latency = time.Duration(h.LowestTrackableValue())
	} else if latency > time.Duration(h.HighestTrackableValue()) {
		latency = time.Duration(h.HighestTrackableValue())
	}
	h.m.Lock()
	err := h.RecordValue(latency.Nanoseconds())
	h.sum += rawLatency.Nanoseconds()
	h.m.Unlock()
	if err != nil {
		panic(fmt.Sprintf(`recording value error: %s`, err))
	}
}
52 |
53 | func (h *Histogram) Empty() bool {
54 | h.m.Lock()
55 | defer h.m.Unlock()
56 | return h.TotalCount() == 0
57 | }
58 |
// Summary renders a snapshot of the histogram as display strings, in the
// fixed column order consumed by the output helpers: elapsed seconds,
// sample count, per-minute rate (Ops * 60), total and average latency,
// then the p50/p90/p95/p99/p999 quantiles and the max (latencies in ms).
func (h *Histogram) Summary() []string {
	res := h.GetInfo()

	return []string{
		util.FloatToOneString(res.Elapsed),
		util.IntToString(res.Count),
		util.FloatToOneString(res.Ops * 60),
		util.FloatToOneString(res.Sum),
		util.FloatToOneString(res.Avg),
		util.FloatToOneString(res.P50),
		util.FloatToOneString(res.P90),
		util.FloatToOneString(res.P95),
		util.FloatToOneString(res.P99),
		util.FloatToOneString(res.P999),
		util.FloatToOneString(res.Max),
	}
}
76 |
77 | func (h *Histogram) GetInfo() HistInfo {
78 | h.m.RLock()
79 | defer h.m.RUnlock()
80 | sum := time.Duration(h.sum).Seconds() * 1000
81 | avg := time.Duration(h.Mean()).Seconds() * 1000
82 | elapsed := time.Now().Sub(h.startTime).Seconds()
83 | count := h.TotalCount()
84 | ops := float64(count) / elapsed
85 | info := HistInfo{
86 | Elapsed: elapsed,
87 | Sum: sum,
88 | Count: count,
89 | Ops: ops,
90 | Avg: avg,
91 | P50: time.Duration(h.ValueAtQuantile(50)).Seconds() * 1000,
92 | P90: time.Duration(h.ValueAtQuantile(90)).Seconds() * 1000,
93 | P95: time.Duration(h.ValueAtQuantile(95)).Seconds() * 1000,
94 | P99: time.Duration(h.ValueAtQuantile(99)).Seconds() * 1000,
95 | P999: time.Duration(h.ValueAtQuantile(99.9)).Seconds() * 1000,
96 | Max: time.Duration(h.ValueAtQuantile(100)).Seconds() * 1000,
97 | }
98 | return info
99 | }
100 |
--------------------------------------------------------------------------------
/tpch/dbgen/part.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "io"
7 |
8 | "github.com/pingcap/go-tpc/pkg/sink"
9 | )
10 |
// Scale-independent generation parameters for the PART / PARTSUPP tables:
// value ranges (*Min/*Max), fixed text lengths (*Len), and the ids of the
// random streams (*Sd) that keep column generation deterministic and
// skippable (see sdPart / advanceStream).
const (
	pNameScl    = 5
	pMfgMin     = 1
	pMfgMax     = 5
	pBrndMin    = 1
	pBrndMax    = 5
	pSizeMin    = 1
	pSizeMax    = 50
	psQtyMin    = 1
	psQtyMax    = 9999
	psScstMin   = 100
	psScstMax   = 100000
	pMfgSd      = 0
	pBrndSd     = 1
	pTypeSd     = 2
	pSizeSd     = 3
	pCntrSd     = 4
	psQtySd     = 7
	psScstSd    = 8
	pNameSd     = 37
	pCmntLen    = 14
	psCmntLen   = 124
	pCmntSd     = 6
	psCmntSd    = 9
	suppPerPart = 4 // PARTSUPP rows generated per PART row
)
37 |
// Part is one generated row of the PART table together with its
// suppPerPart associated PARTSUPP rows in S.
type Part struct {
	PartKey     dssHuge
	Name        string
	Mfgr        string
	Brand       string
	Type        string
	Size        dssHuge
	Container   string
	RetailPrice dssHuge // fixed-point money value; formatted by FmtMoney when loaded
	Comment     string
	S           []PartSupp
}
50 |
// sdPart advances the part-generation random streams past skipCount rows so
// generation can resume at an arbitrary row offset. The multipliers reflect
// how many values each stream is consumed per row (x2 for the comment
// stream, x92 for the name stream).
func sdPart(child Table, skipCount dssHuge) {
	// Streams pMfgSd..pCntrSd (contiguous ids 0..4) consume one value per row.
	for i := pMfgSd; i <= pCntrSd; i++ {
		advanceStream(i, skipCount, false)
	}
	advanceStream(pCmntSd, skipCount*2, false)
	advanceStream(pNameSd, skipCount*92, false)
}
58 |
// partSuppBridge maps part key p and supplier slot s (0..suppPerPart-1) to a
// supplier key in [1, totScnt], spreading each part's suppliers across the
// whole supplier table. The formula mirrors dbgen's PART_SUPP_BRIDGE macro;
// totScnt is the total supplier count at the current scale factor.
func partSuppBridge(p, s dssHuge) dssHuge {
	totScnt := tDefs[TSupp].base * scale
	return (p+s*(totScnt/suppPerPart+((p-1)/totScnt)))%totScnt + 1
}
63 |
// partLoader streams Part rows to a '|'-delimited CSV sink.
type partLoader struct {
	*sink.CSVSink
}
67 |
68 | func (p partLoader) Load(item interface{}) error {
69 | part := item.(*Part)
70 | if err := p.WriteRow(context.TODO(),
71 | part.PartKey,
72 | part.Name,
73 | part.Mfgr,
74 | part.Brand,
75 | part.Type,
76 | part.Size,
77 | part.Container,
78 | FmtMoney(part.RetailPrice),
79 | part.Comment); err != nil {
80 | return err
81 | }
82 | return nil
83 | }
84 |
// Flush forces any buffered CSV rows out to the underlying writer.
func (p partLoader) Flush() error {
	return p.CSVSink.Flush(context.TODO())
}
88 |
// NewPartLoader creates a partLoader that writes '|'-separated rows to w.
func NewPartLoader(w io.Writer) partLoader {
	return partLoader{sink.NewCSVSinkWithDelimiter(w, '|')}
}
92 |
// makePart generates the PART row with key idx together with its
// suppPerPart PARTSUPP rows. All columns are drawn from dedicated random
// streams (the *Sd constants), so output is deterministic for a given idx
// and stream state; do not reorder the random() calls.
func makePart(idx dssHuge) *Part {
	part := &Part{}
	part.PartKey = idx
	part.Name = aggStr(&colors, pNameScl, pNameSd)
	tmp := random(pMfgMin, pMfgMax, pMfgSd)
	part.Mfgr = fmt.Sprintf("Manufacturer#%d", tmp)
	brnd := random(pBrndMin, pBrndMax, pBrndSd)
	// Brand numbers are derived from the manufacturer: Manufacturer#m owns
	// Brand#m1..Brand#m5.
	part.Brand = fmt.Sprintf("Brand#%02d", tmp*10+brnd)
	pickStr(&pTypesSet, pTypeSd, &part.Type)
	part.Size = random(pSizeMin, pSizeMax, pSizeSd)
	pickStr(&pCntrSet, pCntrSd, &part.Container)
	part.RetailPrice = rpbRoutine(idx)
	part.Comment = makeText(pCmntLen, pCmntSd)

	// Each part gets suppPerPart supplier links; partSuppBridge spreads the
	// supplier keys across the whole supplier table.
	for snum := 0; snum < suppPerPart; snum++ {
		ps := PartSupp{}
		ps.PartKey = part.PartKey
		ps.SuppKey = partSuppBridge(idx, dssHuge(snum))
		ps.Qty = random(psQtyMin, psQtyMax, psQtySd)
		ps.SCost = random(psScstMin, psScstMax, psScstSd)
		ps.Comment = makeText(psCmntLen, psCmntSd)
		part.S = append(part.S, ps)
	}

	return part
}
119 |
--------------------------------------------------------------------------------
/cmd/go-tpc/rawsql.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "io/ioutil"
7 | "os"
8 | "path"
9 | "strings"
10 | "time"
11 |
12 | "github.com/pingcap/go-tpc/pkg/util"
13 | "github.com/pingcap/go-tpc/rawsql"
14 | "github.com/spf13/cobra"
15 | )
16 |
var (
	// rawsqlConfig collects flag values for the rawsql workload.
	rawsqlConfig rawsql.Config
	// queryFiles is the comma-separated list of SQL file paths (--query-files).
	queryFiles string
	// refreshConnWait is how long to wait before refreshing the SQL connection.
	refreshConnWait time.Duration
)
22 |
// registerRawsql wires the `rawsql run` subcommand and its flags into the
// root command. Running with an empty --query-files exits the process.
func registerRawsql(root *cobra.Command) {
	cmd := &cobra.Command{
		Use: "rawsql",
	}

	cmdRun := &cobra.Command{
		Use:   "run",
		Short: "Run workload",
		Run: func(cmd *cobra.Command, args []string) {
			// At least one SQL file is required; there is nothing to run otherwise.
			if len(queryFiles) == 0 {
				util.StdErrLogger.Printf("empty query files")
				os.Exit(1)
			}

			execRawsql("run")
		},
	}

	cmdRun.PersistentFlags().BoolVar(&rawsqlConfig.EnablePlanReplayer,
		"use-plan-replayer",
		false,
		"Use Plan Replayer to dump stats and variables before running queries")

	cmdRun.PersistentFlags().StringVar(&rawsqlConfig.PlanReplayerConfig.PlanReplayerDir,
		"plan-replayer-dir",
		"",
		"Dir of Plan Replayer file dumps")

	cmdRun.PersistentFlags().StringVar(&rawsqlConfig.PlanReplayerConfig.PlanReplayerFileName,
		"plan-replayer-file",
		"",
		"Name of plan Replayer file dumps")

	cmdRun.PersistentFlags().StringVar(&queryFiles,
		"query-files",
		"",
		"path of query files")

	cmdRun.PersistentFlags().BoolVar(&rawsqlConfig.ExecExplainAnalyze,
		"use-explain",
		false,
		"execute explain analyze")

	cmdRun.PersistentFlags().DurationVar(&refreshConnWait, "refresh-conn-wait", 5*time.Second, "duration to wait before refreshing sql connection")

	cmd.AddCommand(cmdRun)
	root.AddCommand(cmd)
}
71 |
// execRawsql connects to the target database, reads every file listed in
// the comma-separated queryFiles flag (a query's name is its file name
// without extension), then runs the rawsql workload for totalTime and
// prints the summary stats. Any setup failure exits the process.
func execRawsql(action string) {
	openDB()
	defer closeDB()

	// globalDB stays nil when the connection could not be established
	// (see openDB, defined elsewhere in this package).
	if globalDB == nil {
		util.StdErrLogger.Printf("cannot connect to the database")
		os.Exit(1)
	}

	rawsqlConfig.OutputStyle = outputStyle
	rawsqlConfig.DBName = dbName
	rawsqlConfig.QueryNames = strings.Split(queryFiles, ",")
	rawsqlConfig.Queries = make(map[string]string, len(rawsqlConfig.QueryNames))
	rawsqlConfig.RefreshWait = refreshConnWait
	rawsqlConfig.PlanReplayerConfig.Host = hosts[0]
	rawsqlConfig.PlanReplayerConfig.StatusPort = statusPort

	for i, filename := range rawsqlConfig.QueryNames {
		queryData, err := ioutil.ReadFile(filename)
		if err != nil {
			util.StdErrLogger.Printf("read file: %s, err: %v\n", filename, err)
			os.Exit(1)
		}

		// QueryNames initially holds file paths; replace each entry with the
		// bare query name (base name minus extension) used as the map key.
		baseName := path.Base(filename)
		queryName := strings.TrimSuffix(baseName, path.Ext(baseName))
		rawsqlConfig.QueryNames[i] = queryName
		rawsqlConfig.Queries[queryName] = string(queryData)
	}

	w := rawsql.NewWorkloader(globalDB, &rawsqlConfig)

	timeoutCtx, cancel := context.WithTimeout(globalCtx, totalTime)
	defer cancel()
	executeWorkload(timeoutCtx, w, threads, action)
	fmt.Println("Finished")
	w.OutputStats(true)
}
111 |
--------------------------------------------------------------------------------
/tpch/dbgen/driver.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
3 | import (
4 | "fmt"
5 | )
6 |
// Table identifies one of the TPC-H tables; values index into tDefs.
type Table int
// dssHuge mirrors dbgen's DSS_HUGE 64-bit integer type.
type dssHuge int64
// long mirrors dbgen's C long type.
type long int64

var (
	// scale is the TPC-H scale factor; set once by InitDbGen.
	scale dssHuge
)
14 |
// tDef describes how one table is generated: output file name, a
// human-readable comment, the base row count at scale factor 1, the Loader
// that persists rows, the seed-advance function for skipping ahead, and the
// child table produced alongside (TNone if none).
type tDef struct {
	name    string
	comment string
	base    dssHuge // rows at scale factor 1; multiplied by scale for business tables
	loader  Loader
	genSeed func(Table, dssHuge)
	child   Table
	vTotal  dssHuge // not referenced in this chunk — purpose unverified here
}

// tDefs is indexed by Table; populated by initTDefs, loaders set by DbGen.
var tDefs []tDef
26 |
27 | func genTbl(tnum Table, start, count dssHuge) error {
28 | loader := tDefs[tnum].loader
29 | defer loader.Flush()
30 |
31 | for i := start; i < start+count; i++ {
32 | rowStart(tnum)
33 | switch tnum {
34 | case TLine:
35 | fallthrough
36 | case TOrder:
37 | fallthrough
38 | case TOrderLine:
39 | order := makeOrder(i)
40 | if err := loader.Load(order); err != nil {
41 | return err
42 | }
43 | case TSupp:
44 | supp := makeSupp(i)
45 | if err := loader.Load(supp); err != nil {
46 | return err
47 | }
48 | case TCust:
49 | cust := makeCust(i)
50 | if err := loader.Load(cust); err != nil {
51 | return err
52 | }
53 | case TPsupp:
54 | fallthrough
55 | case TPart:
56 | fallthrough
57 | case TPartPsupp:
58 | part := makePart(i)
59 | if err := loader.Load(part); err != nil {
60 | return err
61 | }
62 | case TNation:
63 | nation := makeNation(i)
64 | if err := loader.Load(nation); err != nil {
65 | return err
66 | }
67 | case TRegion:
68 | region := makeRegion(i)
69 | if err := loader.Load(region); err != nil {
70 | return err
71 | }
72 | }
73 | rowStop(tnum)
74 | }
75 | return nil
76 | }
77 |
// initTDefs builds the table-definition array. The slice order must match
// the Table constant values (declared elsewhere) because tDefs is indexed by
// Table. Most loaders are nil here and are installed later by DbGen; only
// the two combined entries (orders/lineitem, part/partsupp) construct their
// loaders immediately.
func initTDefs() {
	tDefs = []tDef{
		{"part.tbl", "part table", 200000, nil, sdPart, TPsupp, 0},
		{"partsupp.tbl", "partsupplier table", 200000, nil, sdPsupp, TNone, 0},
		{"supplier.tbl", "suppliers table", 10000, nil, sdSupp, TNone, 0},
		{"customer.tbl", "customers table", 150000, nil, sdCust, TNone, 0},
		{"orders.tbl", "order table", 150000 * ordersPerCust, nil, sdOrder, TLine, 0},
		{"lineitem.tbl", "lineitem table", 150000 * ordersPerCust, nil, sdLineItem, TNone, 0},
		{"orders.tbl", "orders/lineitem tables", 150000 * ordersPerCust, newOrderLineLoader(), sdOrder, TLine, 0},
		{"part.tbl", "part/partsupplier tables", 200000, newPartPsuppLoader(), sdPart, TPsupp, 0},
		{"nation.tbl", "nation table", dssHuge(nations.count), nil, sdNull, TNone, 0},
		{"region.tbl", "region table", dssHuge(regions.count), nil, sdNull, TNone, 0},
	}
}
92 |
// InitDbGen initializes global dbgen state for scale factor sc: random
// seeds, distributions, the shared text pool, the table definitions, and
// order/lineitem specific state. It must be called before DbGen.
func InitDbGen(sc int64) {
	scale = dssHuge(sc)
	initSeeds()
	initDists()
	initTextPool()

	initTDefs()
	initOrder()
	initLineItem()
}
103 |
104 | func DbGen(loaders map[Table]Loader, tables []Table) error {
105 | for table, loader := range loaders {
106 | tDefs[table].loader = loader
107 | }
108 |
109 | for _, i := range tables {
110 | fmt.Printf("generating %s\n", tDefs[i].comment)
111 | rowCnt := tDefs[i].base
112 | if i < TNation {
113 | rowCnt *= scale
114 | }
115 | if err := genTbl(i, 1, rowCnt); err != nil {
116 | return fmt.Errorf("fail to generate %s, err: %v", tDefs[i].name, err)
117 | }
118 | fmt.Printf("generate %s done\n", tDefs[i].comment)
119 | }
120 | return nil
121 | }
122 |
--------------------------------------------------------------------------------
/tpch/dbgen/supp.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "io"
7 |
8 | "github.com/pingcap/go-tpc/pkg/sink"
9 | )
10 |
// Generation parameters for the SUPPLIER table: random-stream ids (*Sd),
// text lengths, the account-balance range, and the bbb* constants used to
// splice the "Customer Complaints" / "Customer Recommends" phrases into a
// small fraction of supplier comments (see makeSupp).
const (
	sNtrgSd      = 33
	sPhneSd      = 34
	sAbalSd      = 35
	sAddrLen     = 25
	sAbalMin     = -99999
	sAbalMax     = 999999
	sCmntLen     = 63
	sAddrSd      = 32
	sCmntSd      = 36
	sCmntBbb     = 10 // out of 10000: fraction of suppliers that get bad-press text
	bbbJnkSd     = 44
	bbbTypeSd    = 45
	bbbCmntSd    = 46
	bbbOffsetSd  = 47
	bbbDeadbeats = 50
	bbbBase      = "Customer "
	bbbComplain  = "Complaints"
	bbbCommend   = "Recommends"
	bbbCmntLen   = 19
	bbbBaseLen   = 9
)
33 |
// Supp is one generated row of the SUPPLIER table.
type Supp struct {
	SuppKey    dssHuge
	Name       string
	Address    string
	NationCode dssHuge
	Phone      string
	Acctbal    dssHuge // fixed-point money value; formatted by FmtMoney when loaded
	Comment    string
}
43 |
// suppLoader streams Supp rows to a '|'-delimited CSV sink.
type suppLoader struct {
	*sink.CSVSink
}
47 |
48 | func (s suppLoader) Load(item interface{}) error {
49 | supp := item.(*Supp)
50 | if err := s.WriteRow(context.TODO(),
51 | supp.SuppKey,
52 | supp.Name,
53 | supp.Address,
54 | supp.NationCode,
55 | supp.Phone,
56 | FmtMoney(supp.Acctbal),
57 | supp.Comment); err != nil {
58 | return err
59 | }
60 | return nil
61 | }
62 |
// Flush forces any buffered CSV rows out to the underlying writer.
func (s suppLoader) Flush() error {
	return s.CSVSink.Flush(context.TODO())
}
66 |
// NewSuppLoader creates a suppLoader that writes '|'-separated rows to w.
func NewSuppLoader(w io.Writer) suppLoader {
	return suppLoader{sink.NewCSVSinkWithDelimiter(w, '|')}
}
70 |
// makeSupp generates the SUPPLIER row with key idx. About 10 in 10000
// suppliers (badPress <= sCmntBbb) get a "Customer Complaints" or
// "Customer Recommends" phrase spliced into their comment, with a random
// amount of original text ("noise") left between the two words. The
// random() calls are made unconditionally so stream positions stay aligned
// whether or not the splice happens; do not reorder them.
func makeSupp(idx dssHuge) *Supp {
	supp := &Supp{}
	supp.SuppKey = idx
	supp.Name = fmt.Sprintf("Supplier#%09d", idx)
	supp.Address = vStr(sAddrLen, sAddrSd)
	i := random(0, dssHuge(nations.count-1), sNtrgSd)
	supp.NationCode = i
	supp.Phone = genPhone(i, sPhneSd)
	supp.Acctbal = random(sAbalMin, sAbalMax, sAbalSd)
	supp.Comment = makeText(sCmntLen, sCmntSd)

	badPress := random(1, 10000, bbbCmntSd)
	types := random(0, 100, bbbTypeSd)
	noise := random(0, dssHuge(len(supp.Comment)-bbbCmntLen), bbbJnkSd)
	offset := random(0, dssHuge(len(supp.Comment))-(bbbCmntLen+noise), bbbOffsetSd)

	if badPress <= sCmntBbb {
		// types < bbbDeadbeats selects the "Complaints" variant, otherwise
		// "Recommends".
		if types < bbbDeadbeats {
			types = 0
		} else {
			types = 1
		}
		// Overwrite "Customer " at offset, then the complaint/commendation
		// word noise bytes further on, preserving the comment's length.
		supp.Comment = supp.Comment[:offset] + bbbBase + supp.Comment[offset+dssHuge(len(bbbBase)):]
		if types == 0 {
			supp.Comment = supp.Comment[:bbbBaseLen+offset+noise] +
				bbbComplain +
				supp.Comment[bbbBaseLen+offset+noise+dssHuge(len(bbbComplain)):]
		} else {
			supp.Comment = supp.Comment[:bbbBaseLen+offset+noise] +
				bbbCommend +
				supp.Comment[bbbBaseLen+offset+noise+dssHuge(len(bbbCommend)):]
		}
	}

	return supp
}
107 |
// sdSupp advances the supplier random streams past skipCount rows so
// generation can resume at an arbitrary offset. The multipliers (x3 phone,
// x9 address, x2 comment) mirror the per-row consumption of each stream.
func sdSupp(child Table, skipCount dssHuge) {
	advanceStream(sNtrgSd, skipCount, false)
	advanceStream(sPhneSd, skipCount*3, false)
	advanceStream(sAbalSd, skipCount, false)
	advanceStream(sAddrSd, skipCount*9, false)
	advanceStream(sCmntSd, skipCount*2, false)
	advanceStream(bbbCmntSd, skipCount, false)
	advanceStream(bbbJnkSd, skipCount, false)
	advanceStream(bbbOffsetSd, skipCount, false)
	advanceStream(bbbTypeSd, skipCount, false)
}
119 |
--------------------------------------------------------------------------------
/tpch/output/q10.out:
--------------------------------------------------------------------------------
1 | c_custkey|c_name|revenue|c_acctbal|n_name|c_address|c_phone|c_comment
2 | 90994|Customer#000090994|730322.8364|7964.20|JORDAN|mbcZQRERKupszUDzRMayQwDEX8P2epkh5|23-191-141-7416|endencies affix furiously according t
3 | 146756|Customer#000146756|703786.0197|107.25|EGYPT|WWH fjsDkKovu8jiTYZmxC3HMLIYEDnDT|14-321-449-8957|xes believe blithely platelets. even, final accounts nag carefully against the
4 | 36376|Customer#000036376|690104.3892|8597.08|EGYPT|wSVoQsr2bzDnLFCak94dBAaFWg|14-523-414-8389|blithely pending instructions haggle slyly final fox
5 | 137710|Customer#000137710|654322.3938|512.19|RUSSIA|PgiBYM2Y5hDMvxNKzwvV3Y5vDiGvkYvZuahwrC|32-925-523-8947| ironic, unusual packages are fluffily. quickly express accounts
6 | 84055|Customer#000084055|641329.6081|4318.55|MOROCCO|sRYQ4pRu5T0ftDC1ndlMeBkCT5k|25-505-122-1521|pecial foxes above the slyly even ideas are blithely along the slyly final packages. quickly ironic depo
7 | 125341|Customer#000125341|634427.6468|4983.51|GERMANY|S29ODD6bceU8QSuuEJznkNaK|17-582-695-5962|arefully even depths. blithely even excuses sleep furiously. foxes use except the dependencies. ca
8 | 91972|Customer#000091972|626473.9908|5606.88|JAPAN|pHQJB6ypmme07bk|22-975-188-8726|ong the furiously regular instructions are carefully quickly final packages. blithely iro
9 | 138763|Customer#000138763|610154.3430|9858.75|ALGERIA|uPDjizkE4zHPf,ovWhk9qmoLH4rxal v8fZ1|10-841-461-4873| detect. carefully bold ideas against the final ideas are blithely a
10 | 64298|Customer#000064298|609078.1864|7003.85|PERU|,y0LFOyyvAI8vDYhp 8jkz|27-265-117-6068|y final requests. ironic packages wake carefully. fi
11 | 148126|Customer#000148126|601322.9515|6193.01|BRAZIL|hevm3Xlpx6E9e6Ha55JlnJbTS46Ue3u4|12-508-455-5443|o beans haggle blithely. even courts cajole slyly thinly ironic foxes. slyly regular requests boost quickly
12 | 148015|Customer#000148015|598849.0336|264.75|SAUDI ARABIA|Y4T GZ1q2xh1V1,5iwGdxPCFrhTc,lbya|30-919-347-1671|ct furiously ironic, ironic asymptotes. special foxes about the
13 | 36985|Customer#000036985|598730.6070|9233.98|ARGENTINA|gWKSR2kLaF8EplJEfZ0Rd0WpU65Ll6nP75VT|11-601-462-5744|ve the sly, unusual packages. slyly ironic foxes sleep. final dependencies need
14 | 53806|Customer#000053806|595638.9198|1026.26|ETHIOPIA|7b4rt2hZ JbYCrKJMLp,xyeo6Be6i4EywxH2lDDj|15-204-530-5443|ely final ideas. fluffily ironic ideas sleep slyly ironic theod
15 | 122599|Customer#000122599|594108.8847|9369.27|KENYA|BNLnvWiOdp5MFpSq6ZIQL04fWvOwBL0K|24-516-880-5086|sits sleep furiously ironic packages. carefully unusual ideas i
16 | 119503|Customer#000119503|593134.2513|6581.07|CHINA|UGx2sFT9srBiXCZ2OP9GtC48xmMj|28-562-176-7568|onic excuses. final theodolites wake blithely ironic requests. bli
17 | 120412|Customer#000120412|586708.4167|3816.86|MOZAMBIQUE|TUCXXTqfjkbbnR8 4Q,WrZZ9SkiVTuhZ|26-942-947-6036|ously ironic packages. regular ac
18 | 41326|Customer#000041326|584253.2308|6150.39|ARGENTINA|e8scpD1KJhadg,7T6GTOi3q0DtSgU21we|11-827-403-2570|eas. furiously regular requests nag fluffily fluffily pending accounts. b
19 | 15127|Customer#000015127|577445.1836|7061.58|VIETNAM|zQUuuAQrB5scfoLJE8ZHH41iMDwQnN8|31-903-342-2961| accounts are blithely final requests. carefully p
20 | 61858|Customer#000061858|571051.0927|-574.88|BRAZIL|ynwNnff2igX6G9L3mFnXpB3MElZjDhnq|12-390-479-1121|ly final theodolites nag blithely blithely even foxes. accou
21 | 134533|Customer#000134533|570965.5790|3241.99|CANADA|L3Lyp3wAVP6|13-127-698-5142|egular foxes. furiously ironic theodolites wake along the blithely final asymptotes? slyl
22 |
--------------------------------------------------------------------------------
/pkg/plan-replayer/replayer.go:
--------------------------------------------------------------------------------
1 | package plan_replayer
2 |
3 | import (
4 | "archive/zip"
5 | "context"
6 | "database/sql"
7 | "encoding/base64"
8 | "fmt"
9 | "io/ioutil"
10 | "math/rand"
11 | "net/http"
12 | "os"
13 | "path/filepath"
14 | "sync"
15 | "time"
16 | )
17 |
// PlanReplayerConfig configures where plan-replayer dumps are fetched from
// (TiDB status endpoint) and where the resulting zip archive is written.
type PlanReplayerConfig struct {
	Host                 string // TiDB host serving the status API
	StatusPort           int    // TiDB status port (plan_replayer HTTP endpoint)
	WorkloadName         string // used in the default output file name
	PlanReplayerDir      string // output directory; defaults to CWD in Prepare
	PlanReplayerFileName string // output file name (without .zip); defaulted in Prepare
}
25 |
// PlanReplayerRunner collects plan-replayer dumps for individual queries
// into a single zip file. All methods serialize on the embedded mutex,
// since the zip writer is shared.
type PlanReplayerRunner struct {
	sync.Mutex
	prepared bool // Prepare already ran successfully
	finished bool // Finish already ran successfully
	Config   PlanReplayerConfig
	zf       *os.File // the output .zip file created by Prepare
	// zw wraps the zip writer; the extra struct level carries no state of
	// its own — presumably kept for upstream parity, TODO confirm.
	zw struct {
		writer *zip.Writer
	}
}
36 |
// Prepare lazily creates the output zip file and its writer. The directory
// defaults to the current working directory and the file name to
// "plan_replayer_<workload>_<timestamp>.zip". Safe to call repeatedly; only
// the first successful call does any work.
func (r *PlanReplayerRunner) Prepare() error {
	r.Lock()
	defer r.Unlock()
	if r.prepared {
		return nil
	}
	if r.Config.PlanReplayerDir == "" {
		dir, err := os.Getwd()
		if err != nil {
			return err
		}
		r.Config.PlanReplayerDir = dir
	}
	if r.Config.PlanReplayerFileName == "" {
		r.Config.PlanReplayerFileName = fmt.Sprintf("plan_replayer_%s_%s",
			r.Config.WorkloadName, time.Now().Format("2006-01-02-15:04:05"))
	}

	fileName := fmt.Sprintf("%s.zip", r.Config.PlanReplayerFileName)
	zf, err := os.Create(filepath.Join(r.Config.PlanReplayerDir, fileName))
	if err != nil {
		return err
	}
	r.zf = zf
	// Create zip writer
	r.zw.writer = zip.NewWriter(zf)
	r.prepared = true
	return nil
}
66 |
67 | func (r *PlanReplayerRunner) Finish() error {
68 | r.Lock()
69 | defer r.Unlock()
70 | if r.finished {
71 | return nil
72 | }
73 | err := r.zw.writer.Close()
74 | if err != nil {
75 | return err
76 | }
77 | r.finished = true
78 | return r.zf.Close()
79 | }
80 |
81 | func (r *PlanReplayerRunner) Dump(ctx context.Context, conn *sql.Conn, query, queryName string) error {
82 | r.Lock()
83 | defer r.Unlock()
84 | rows, err := conn.QueryContext(ctx, query)
85 | if err != nil {
86 | return fmt.Errorf("execute query %s failed %v", query, err)
87 | }
88 | defer rows.Close()
89 | var token string
90 | for rows.Next() {
91 | err := rows.Scan(&token)
92 | if err != nil {
93 | return fmt.Errorf("execute query %s failed %v", query, err)
94 | }
95 | }
96 | // TODO: support tls
97 | resp, err := http.Get(fmt.Sprintf("http://%s:%v/plan_replayer/dump/%s", r.Config.Host, r.Config.StatusPort, token))
98 | if err != nil {
99 | return fmt.Errorf("get plan replayer for query %s failed %v", queryName, err)
100 | }
101 | defer resp.Body.Close()
102 | b, err := ioutil.ReadAll(resp.Body)
103 | if err != nil {
104 | return fmt.Errorf("get plan replayer for query %s failed %v", queryName, err)
105 | }
106 | err = r.writeDataIntoZW(b, queryName)
107 | if err != nil {
108 | return fmt.Errorf("dump plan replayer for %s failed %v", queryName, err)
109 | }
110 | return nil
111 | }
112 |
113 | // writeDataIntoZW will dump query stats information by following format in zip
114 | /*
115 | |-q1_time.zip
116 | |-q2_time.zip
117 | |-q3_time.zip
118 | |-...
119 | */
120 | func (r *PlanReplayerRunner) writeDataIntoZW(b []byte, queryName string) error {
121 | k := make([]byte, 16)
122 | //nolint: gosec
123 | _, err := rand.Read(k)
124 | if err != nil {
125 | return err
126 | }
127 | key := base64.URLEncoding.EncodeToString(k)
128 | wr, err := r.zw.writer.Create(fmt.Sprintf("%v_%v_%v.zip",
129 | queryName, time.Now().Format("2006-01-02-15:04:05"), key))
130 | if err != nil {
131 | return err
132 | }
133 | _, err = wr.Write(b)
134 | if err != nil {
135 | return err
136 | }
137 | return nil
138 | }
139 |
--------------------------------------------------------------------------------
/tpcc/prepare.go:
--------------------------------------------------------------------------------
1 | package tpcc
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | )
7 |
// tpccLoader abstracts the per-table initial-data loading steps used by
// prepareWorkload. loadOrder returns the per-order line counts (O_OL_CNT)
// that loadOrderLine subsequently consumes.
type tpccLoader interface {
	loadItem(ctx context.Context) error
	loadWarehouse(ctx context.Context, warehouse int) error
	loadStock(ctx context.Context, warehouse int) error
	loadDistrict(ctx context.Context, warehouse int) error
	loadCustomer(ctx context.Context, warehouse int, district int) error
	loadHistory(ctx context.Context, warehouse int, district int) error
	loadOrder(ctx context.Context, warehouse int, district int) ([]int, error)
	loadNewOrder(ctx context.Context, warehouse int, district int) error
	loadOrderLine(ctx context.Context, warehouse int, district int, olCnts []int) error
}
19 |
20 | func prepareWorkload(ctx context.Context, w tpccLoader, threads, warehouses, threadID int) error {
21 | // - 100,1000 rows in the ITEM table
22 | // - 1 row in the WAREHOUSE table for each configured warehouse
23 | // For each row in the WAREHOUSE table
24 | // + 100,000 rows in the STOCK table
25 | // + 10 rows in the DISTRICT table
26 | // For each row in the DISTRICT table
27 | // * 3,000 rows in the CUSTOMER table
28 | // For each row in the CUSTOMER table
29 | // - 1 row in the HISTORY table
30 | // * 3,000 rows in the ORDER table
31 | // For each row in the ORDER table
32 | // - A number of rows in the ORDER-LINE table equal to O_OL_CNT,
33 | // generated according to the rules for input data generation
34 | // of the New-Order transaction
35 | // * 900 rows in the NEW-ORDER table corresponding to the last 900 rows
36 | // in the ORDER table for that district
37 |
38 | if threadID == 0 {
39 | // load items
40 | if err := w.loadItem(ctx); err != nil {
41 | return fmt.Errorf("load item faield %v", err)
42 | }
43 | }
44 |
45 | for i := threadID % threads; i < warehouses; i += threads {
46 | warehouse := i%warehouses + 1
47 |
48 | // load warehouse
49 | if err := w.loadWarehouse(ctx, warehouse); err != nil {
50 | return fmt.Errorf("load warehouse in %d failed %v", warehouse, err)
51 | }
52 | // load stock
53 | if err := w.loadStock(ctx, warehouse); err != nil {
54 | return fmt.Errorf("load stock at warehouse %d failed %v", warehouse, err)
55 | }
56 |
57 | // load district
58 | if err := w.loadDistrict(ctx, warehouse); err != nil {
59 | return fmt.Errorf("load district at wareshouse %d failed %v", warehouse, err)
60 | }
61 | }
62 |
63 | districts := warehouses * districtPerWarehouse
64 | var err error
65 | for i := threadID % threads; i < districts; i += threads {
66 | warehouse := (i/districtPerWarehouse)%warehouses + 1
67 | district := i%districtPerWarehouse + 1
68 |
69 | // load customer
70 | if err = w.loadCustomer(ctx, warehouse, district); err != nil {
71 | return fmt.Errorf("load customer at warehouse %d district %d failed %v", warehouse, district, err)
72 | }
73 | // load history
74 | if err = w.loadHistory(ctx, warehouse, district); err != nil {
75 | return fmt.Errorf("load history at warehouse %d district %d failed %v", warehouse, district, err)
76 | }
77 | // load orders
78 | var olCnts []int
79 | if olCnts, err = w.loadOrder(ctx, warehouse, district); err != nil {
80 | return fmt.Errorf("load orders at warehouse %d district %d failed %v", warehouse, district, err)
81 | }
82 | // loader new-order
83 | if err = w.loadNewOrder(ctx, warehouse, district); err != nil {
84 | return fmt.Errorf("load new_order at warehouse %d district %d failed %v", warehouse, district, err)
85 | }
86 | // load order-line
87 | if err = w.loadOrderLine(ctx, warehouse, district, olCnts); err != nil {
88 | return fmt.Errorf("load order_line at warehouse %d district %d failed %v", warehouse, district, err)
89 | }
90 | }
91 |
92 | return nil
93 | }
94 |
--------------------------------------------------------------------------------
/pkg/measurement/measure.go:
--------------------------------------------------------------------------------
1 | package measurement
2 |
3 | import (
4 | "fmt"
5 | "sync"
6 | "sync/atomic"
7 | "time"
8 | )
9 |
const (
	// sigFigs is the histogram precision (significant figures).
	sigFigs = 1
	// defaultMinLatency is the default lower bound of the trackable range.
	defaultMinLatency = 1 * time.Millisecond
	// DefaultMaxLatency is the default upper bound of the trackable range.
	DefaultMaxLatency = 16 * time.Second
)
15 |
// Measurement aggregates per-operation latency histograms: a cumulative set
// (OpSumMeasurement) and a current-window set (OpCurMeasurement) that is
// swapped out on every periodic report. Map keys are the operation name, or
// "<op>_ERR" for failed operations.
type Measurement struct {
	warmUp int32 // use as bool, 1 means in warmup progress, 0 means warmup finished.
	sync.RWMutex // guards the two histogram maps

	MinLatency       time.Duration // lower bound passed to new histograms
	MaxLatency       time.Duration // upper bound passed to new histograms
	SigFigs          int           // histogram precision
	OpCurMeasurement map[string]*Histogram // current reporting window
	OpSumMeasurement map[string]*Histogram // cumulative since start
}
26 |
27 | func (m *Measurement) getHist(op string, err error, current bool) *Histogram {
28 | opMeasurement := m.OpSumMeasurement
29 | if current {
30 | opMeasurement = m.OpCurMeasurement
31 | }
32 |
33 | // Create hist of {op} and {op}_ERR at the same time, or else the TPM would be incorrect
34 | opPairedKey := fmt.Sprintf("%s_ERR", op)
35 | if err != nil {
36 | op, opPairedKey = opPairedKey, op
37 | }
38 |
39 | m.RLock()
40 | opM, ok := opMeasurement[op]
41 | m.RUnlock()
42 | if !ok {
43 | opM = NewHistogram(m.MinLatency, m.MaxLatency, m.SigFigs)
44 | opPairedM := NewHistogram(m.MinLatency, m.MaxLatency, m.SigFigs)
45 | m.Lock()
46 | opMeasurement[op] = opM
47 | opMeasurement[opPairedKey] = opPairedM
48 | m.Unlock()
49 | }
50 | return opM
51 | }
52 |
// measure records lan into both the current-window and the cumulative
// histogram for op (or "<op>_ERR" when err is non-nil).
func (m *Measurement) measure(op string, err error, lan time.Duration) {
	m.getHist(op, err, true).Measure(lan)
	m.getHist(op, err, false).Measure(lan)
}
57 |
58 | func (m *Measurement) takeCurMeasurement() (ret map[string]*Histogram) {
59 | m.RLock()
60 | defer m.RUnlock()
61 | ret, m.OpCurMeasurement = m.OpCurMeasurement, make(map[string]*Histogram, 16)
62 | return
63 | }
64 |
65 | func (m *Measurement) getOpName() []string {
66 | m.RLock()
67 | defer m.RUnlock()
68 |
69 | res := make([]string, 0, len(m.OpSumMeasurement))
70 | for op := range m.OpSumMeasurement {
71 | res = append(res, op)
72 | }
73 | return res
74 | }
75 |
// Output prints the measurement summary. For a summary report the cumulative
// histograms are printed under the read lock; otherwise the current-window
// histograms are swapped out (resetting the window) and printed.
// takeCurMeasurement releases its own lock before returning, so re-locking
// here is safe.
func (m *Measurement) Output(ifSummaryReport bool, outputStyle string, outputFunc func(string, string, map[string]*Histogram)) {
	if ifSummaryReport {
		m.RLock()
		defer m.RUnlock()
		outputFunc(outputStyle, "[Summary] ", m.OpSumMeasurement)
		return
	}
	// Clear current measure data every time
	var opCurMeasurement = m.takeCurMeasurement()
	m.RLock()
	defer m.RUnlock()
	outputFunc(outputStyle, "[Current] ", opCurMeasurement)
}
90 |
91 | // EnableWarmUp sets whether to enable warm-up.
92 | func (m *Measurement) EnableWarmUp(b bool) {
93 | if b {
94 | atomic.StoreInt32(&m.warmUp, 1)
95 | } else {
96 | atomic.StoreInt32(&m.warmUp, 0)
97 | }
98 | }
99 |
// IsWarmUpFinished returns whether warm-up is finished or not (warmUp == 0).
func (m *Measurement) IsWarmUpFinished() bool {
	return atomic.LoadInt32(&m.warmUp) == 0
}
104 |
// Measure records one sample for op; samples taken while warm-up is still
// in progress are discarded.
func (m *Measurement) Measure(op string, lan time.Duration, err error) {
	if !m.IsWarmUpFinished() {
		return
	}
	m.measure(op, err, lan)
}
112 |
113 | func NewMeasurement(opts ...func(*Measurement)) *Measurement {
114 | m := &Measurement{
115 | warmUp: 0,
116 | RWMutex: sync.RWMutex{},
117 | MinLatency: defaultMinLatency,
118 | MaxLatency: DefaultMaxLatency,
119 | SigFigs: sigFigs,
120 | OpCurMeasurement: make(map[string]*Histogram, 16),
121 | OpSumMeasurement: make(map[string]*Histogram, 16),
122 | }
123 | for _, opt := range opts {
124 | if opt != nil {
125 | opt(m)
126 | }
127 | }
128 | return m
129 | }
130 |
--------------------------------------------------------------------------------
/pkg/sink/sql.go:
--------------------------------------------------------------------------------
1 | package sink
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "database/sql"
7 | "fmt"
8 | "reflect"
9 | "strings"
10 | "time"
11 | )
12 |
// SQLSink inserts values to a database in batch: rows are accumulated into a
// single multi-value INSERT statement and executed once maxBatchRows rows
// have been buffered (or on Flush).
type SQLSink struct {
	maxBatchRows int // rows per INSERT batch (1024 by default)

	insertHint string // the "INSERT INTO ... VALUES" prefix of each batch
	db         *sql.DB

	buf          bytes.Buffer // the statement being accumulated
	bufferedRows int          // rows currently in buf

	retryCount    int
	retryInterval time.Duration
}

// Compile-time check that *SQLSink satisfies the Sink interface.
var _ Sink = &SQLSink{}
28 |
// NewSQLSink creates a sink that inserts values to a database in batches of
// 1024 rows. hint is the INSERT statement prefix (e.g. "INSERT INTO t
// VALUES"); failed batches are retried retryCount times, waiting
// retryInterval between attempts.
func NewSQLSink(db *sql.DB, hint string, retryCount int, retryInterval time.Duration) *SQLSink {
	return &SQLSink{
		maxBatchRows:  1024,
		insertHint:    hint,
		db:            db,
		retryCount:    retryCount,
		retryInterval: retryInterval,
	}
}
39 |
// buildSQLRow renders one row of values as a parenthesized SQL tuple,
// e.g. `(1,'abc',NULL,0.500000)`.
//
// Supported values: nil (NULL), string kinds, signed/unsigned integer
// kinds, float kinds, and sql.NullString/NullInt64/NullFloat64; any
// other type panics. String contents are escaped (MySQL-style) so a
// quote or backslash in the data cannot break out of the SQL literal.
func buildSQLRow(values []interface{}) string {
	var buf bytes.Buffer
	buf.WriteString("(")
	for i, v := range values {
		if i > 0 {
			buf.WriteString(",")
		}
		ty := reflect.TypeOf(v)
		if ty == nil {
			buf.WriteString("NULL")
			continue
		}
		switch ty.Kind() {
		case reflect.String:
			_, _ = fmt.Fprintf(&buf, "'%s'", escapeSQLString(reflect.ValueOf(v).String()))
			continue
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			_, _ = fmt.Fprintf(&buf, "%d", v)
			continue
		case reflect.Float32, reflect.Float64:
			_, _ = fmt.Fprintf(&buf, "%f", v)
			continue
		}
		switch v := v.(type) {
		case sql.NullString:
			if v.Valid {
				_, _ = fmt.Fprintf(&buf, "'%s'", escapeSQLString(v.String))
			} else {
				buf.WriteString("NULL")
			}
		case sql.NullInt64:
			if v.Valid {
				_, _ = fmt.Fprintf(&buf, "%d", v.Int64)
			} else {
				buf.WriteString("NULL")
			}
		case sql.NullFloat64:
			if v.Valid {
				_, _ = fmt.Fprintf(&buf, "%f", v.Float64)
			} else {
				buf.WriteString("NULL")
			}
		default:
			panic(fmt.Sprintf("unsupported type: %T", v))
		}
	}
	buf.WriteString(")")
	return buf.String()
}

// escapeSQLString escapes backslashes and single quotes so the result
// can be embedded inside a single-quoted MySQL string literal.
func escapeSQLString(s string) string {
	s = strings.ReplaceAll(s, `\`, `\\`)
	return strings.ReplaceAll(s, `'`, `\'`)
}
90 |
91 | // WriteRow writes a row to the database. The writing attempt may be deferred until reaching a batch.
92 | func (s *SQLSink) WriteRow(ctx context.Context, values ...interface{}) error {
93 | row := buildSQLRow(values)
94 |
95 | if s.bufferedRows == 0 {
96 | s.buf.WriteString(s.insertHint)
97 | s.buf.WriteString(" ")
98 | s.buf.WriteString(row)
99 | } else {
100 | s.buf.WriteString(", ")
101 | s.buf.WriteString(row)
102 | }
103 |
104 | s.bufferedRows++
105 | if s.bufferedRows >= s.maxBatchRows {
106 | return s.Flush(ctx)
107 | }
108 |
109 | return nil
110 | }
111 |
112 | // Flush writes any buffered data to the db.
113 | func (s *SQLSink) Flush(ctx context.Context) error {
114 | if s.buf.Len() == 0 {
115 | return nil
116 | }
117 |
118 | var err error
119 | for i := 0; i < 1+s.retryCount; i++ {
120 | _, err = s.db.ExecContext(ctx, s.buf.String())
121 | if err == nil {
122 | break
123 | }
124 | if strings.Contains(err.Error(), "Error 1062: Duplicate entry") {
125 | if i == 0 {
126 | return fmt.Errorf("exec statement error: %v", err)
127 | }
128 | break
129 | }
130 | if i < s.retryCount {
131 | fmt.Printf("exec statement error: %v, try again later...\n", err)
132 | time.Sleep(s.retryInterval)
133 | }
134 | }
135 |
136 | s.bufferedRows = 0
137 | s.buf.Reset()
138 |
139 | return nil
140 | }
141 |
// Close flushes any remaining buffered rows. It does not close the
// underlying *sql.DB, which is owned by the caller.
func (s *SQLSink) Close(ctx context.Context) error {
	return s.Flush(ctx)
}
145 |
--------------------------------------------------------------------------------
/tpcc/ddl_test.go:
--------------------------------------------------------------------------------
1 | package tpcc
2 |
3 | import "testing"
4 |
// TestAppendPartition checks the partition clause generated by
// appendPartition for each partition type (hash, range, list-as-hash,
// list-as-range), including warehouse counts that do not divide evenly
// across the partitions.
func TestAppendPartition(t *testing.T) {
	// Hash partitioning: 4 partitions over 4 warehouses.
	ddl := newDDLManager(4, false, 4, PartitionTypeHash, true)
	// NOTE(review): this string literal appears to span two physical
	// lines, which is not valid Go in a double-quoted string — possibly
	// an extraction artifact; confirm against the repository (the other
	// cases pass "").
	s := ddl.appendPartition("
", "Id")
	expected := `
PARTITION BY HASH(Id)
PARTITIONS 4`
	if s != expected {
		t.Errorf("got '%s' expected '%s'", s, expected)
	}

	// Range partitioning: 4 warehouses split evenly into 4 ranges.
	ddl = newDDLManager(4, false, 4, PartitionTypeRange, true)
	s = ddl.appendPartition("", "Id")
	expected = `
PARTITION BY RANGE (Id)
(PARTITION p0 VALUES LESS THAN (2),
PARTITION p1 VALUES LESS THAN (3),
PARTITION p2 VALUES LESS THAN (4),
PARTITION p3 VALUES LESS THAN (5))`
	if s != expected {
		t.Errorf("got '%s' expected '%s'", s, expected)
	}

	// Range partitioning with 23 warehouses: ranges of width 6 cover
	// the remainder.
	ddl = newDDLManager(4, false, 23, PartitionTypeRange, true)
	s = ddl.appendPartition("", "Id")
	expected = `
PARTITION BY RANGE (Id)
(PARTITION p0 VALUES LESS THAN (7),
PARTITION p1 VALUES LESS THAN (13),
PARTITION p2 VALUES LESS THAN (19),
PARTITION p3 VALUES LESS THAN (25))`
	if s != expected {
		t.Errorf("got '%s' expected '%s'", s, expected)
	}

	// List-as-hash: warehouse ids are dealt round-robin into the lists.
	ddl = newDDLManager(4, false, 12, PartitionTypeListAsHash, true)
	s = ddl.appendPartition("", "Id")
	expected = `
PARTITION BY LIST (Id)
(PARTITION p0 VALUES IN (1,5,9),
PARTITION p1 VALUES IN (2,6,10),
PARTITION p2 VALUES IN (3,7,11),
PARTITION p3 VALUES IN (4,8,12))`
	if s != expected {
		t.Errorf("got '%s' expected '%s'", s, expected)
	}

	// List-as-hash with more partitions than an even split allows:
	// the extra warehouse lands in the first list.
	ddl = newDDLManager(3, false, 4, PartitionTypeListAsHash, true)
	s = ddl.appendPartition("", "Id")
	expected = `
PARTITION BY LIST (Id)
(PARTITION p0 VALUES IN (1,4),
PARTITION p1 VALUES IN (2),
PARTITION p2 VALUES IN (3))`
	if s != expected {
		t.Errorf("got '%s' expected '%s'", s, expected)
	}

	// List-as-hash, 23 warehouses over 4 partitions: last list is short.
	ddl = newDDLManager(4, false, 23, PartitionTypeListAsHash, true)
	s = ddl.appendPartition("", "Id")
	expected = `
PARTITION BY LIST (Id)
(PARTITION p0 VALUES IN (1,5,9,13,17,21),
PARTITION p1 VALUES IN (2,6,10,14,18,22),
PARTITION p2 VALUES IN (3,7,11,15,19,23),
PARTITION p3 VALUES IN (4,8,12,16,20))`
	if s != expected {
		t.Errorf("got '%s' expected '%s'", s, expected)
	}

	// List-as-range: warehouse ids are assigned in contiguous blocks.
	ddl = newDDLManager(4, false, 12, PartitionTypeListAsRange, true)
	s = ddl.appendPartition("", "Id")
	expected = `
PARTITION BY LIST (Id)
(PARTITION p0 VALUES IN (1,2,3),
PARTITION p1 VALUES IN (4,5,6),
PARTITION p2 VALUES IN (7,8,9),
PARTITION p3 VALUES IN (10,11,12))`
	if s != expected {
		t.Errorf("got '%s' expected '%s'", s, expected)
	}

	// List-as-range with a remainder: the first block absorbs it.
	ddl = newDDLManager(3, false, 4, PartitionTypeListAsRange, true)
	s = ddl.appendPartition("", "Id")
	expected = `
PARTITION BY LIST (Id)
(PARTITION p0 VALUES IN (1,2),
PARTITION p1 VALUES IN (3),
PARTITION p2 VALUES IN (4))`
	if s != expected {
		t.Errorf("got '%s' expected '%s'", s, expected)
	}

	// List-as-range, 23 warehouses over 4 partitions: last block short.
	ddl = newDDLManager(4, false, 23, PartitionTypeListAsRange, true)
	s = ddl.appendPartition("", "Id")
	expected = `
PARTITION BY LIST (Id)
(PARTITION p0 VALUES IN (1,2,3,4,5,6),
PARTITION p1 VALUES IN (7,8,9,10,11,12),
PARTITION p2 VALUES IN (13,14,15,16,17,18),
PARTITION p3 VALUES IN (19,20,21,22,23))`
	if s != expected {
		t.Errorf("got '%s' expected '%s'", s, expected)
	}
}
109 |
--------------------------------------------------------------------------------
/pkg/sink/concurrent.go:
--------------------------------------------------------------------------------
1 | package sink
2 |
3 | import (
4 | "context"
5 | "sync"
6 |
7 | "go.uber.org/atomic"
8 | "golang.org/x/sync/errgroup"
9 | )
10 |
// ConcurrentSink inserts values to one of the downstream sinks.
// The insert will be blocked if all downstream sinks are working.
//
// WARN: Although this sink can transform serial Writes to multiple down stream sinks, this sink itself is not
// concurrent safe. You must not call WriteRow and Flush concurrently.
type ConcurrentSink struct {
	allSinks []Sink // one downstream sink per consumer goroutine

	writeCh       chan writeRowOp // pending rows, consumed by runConsumerLoop
	writeResultCh chan error      // first pending async write error (capacity 1)
	writeWg       sync.WaitGroup  // counts in-flight write ops

	concurrentGuard atomic.Int32 // Used to check whether this struct is used concurrently
}

// writeRowOp is one buffered WriteRow request: the caller's context and
// the row values to forward to a downstream sink.
type writeRowOp struct {
	ctx    context.Context
	values []interface{}
}

var _ Sink = &ConcurrentSink{}
32 |
33 | func NewConcurrentSink(downStreamBuilder func(idx int) Sink, concurrency int) *ConcurrentSink {
34 | sinks := make([]Sink, concurrency)
35 | for i := 0; i < concurrency; i++ {
36 | sinks[i] = downStreamBuilder(i)
37 | }
38 |
39 | cs := &ConcurrentSink{
40 | allSinks: sinks,
41 | writeCh: make(chan writeRowOp, concurrency),
42 | writeResultCh: make(chan error, 1),
43 | }
44 | for i := 0; i < concurrency; i++ {
45 | go cs.runConsumerLoop(i)
46 | }
47 | return cs
48 | }
49 |
50 | func (c *ConcurrentSink) runConsumerLoop(downStreamIdx int) {
51 | sink := c.allSinks[downStreamIdx]
52 |
53 | for {
54 | select {
55 | case op, ok := <-c.writeCh:
56 | if !ok {
57 | // Channel close
58 | return
59 | }
60 | err := sink.WriteRow(op.ctx, op.values...)
61 | c.writeWg.Add(-1)
62 | if err != nil {
63 | select {
64 | case c.writeResultCh <- err:
65 | default:
66 | }
67 | }
68 | }
69 | }
70 | }
71 |
72 | func (c *ConcurrentSink) WriteRow(ctx context.Context, values ...interface{}) error {
73 | v := c.concurrentGuard.Inc()
74 | if v > 1 {
75 | panic("ConcurrentSink cannot be called concurrently")
76 | }
77 | defer c.concurrentGuard.Dec()
78 |
79 | c.writeWg.Add(1)
80 | c.writeCh <- writeRowOp{
81 | ctx: ctx,
82 | values: values,
83 | }
84 | select {
85 | case err := <-c.writeResultCh:
86 | return err
87 | default:
88 | return nil
89 | }
90 | }
91 |
92 | // Flush flushes all downstream sinks concurrently, wait all sinks to be flushed and returns the first error
93 | // encountered.
94 | //
95 | // WARN: Flush() will wait until all existing write ops are finished.
96 | func (c *ConcurrentSink) Flush(ctx context.Context) error {
97 | v := c.concurrentGuard.Inc()
98 | if v > 1 {
99 | panic("ConcurrentSink cannot be called concurrently")
100 | }
101 | defer c.concurrentGuard.Dec()
102 |
103 | // Wait all writes to finish.
104 | c.writeWg.Wait()
105 |
106 | // At this time there is no running write ops, so we are safe to call sink.Flush() for each sink.
107 | g, ctx := errgroup.WithContext(ctx)
108 | for _, sink_ := range c.allSinks {
109 | sink := sink_
110 | g.Go(func() error {
111 | return sink.Flush(ctx)
112 | })
113 | }
114 | return g.Wait()
115 | }
116 |
117 | // Close closes all downstream sinks concurrently, wait all sinks to be closed and returns the first error
118 | // encountered.
119 | //
120 | // WARN: Close() will wait until all existing write ops are finished.
121 | func (c *ConcurrentSink) Close(ctx context.Context) error {
122 | v := c.concurrentGuard.Inc()
123 | if v > 1 {
124 | panic("ConcurrentSink cannot be called concurrently")
125 | }
126 | defer c.concurrentGuard.Dec()
127 |
128 | // Wait all writes to finish.
129 | c.writeWg.Wait()
130 |
131 | g := new(errgroup.Group)
132 | for _, sink_ := range c.allSinks {
133 | sink := sink_
134 | g.Go(func() error {
135 | return sink.Close(ctx)
136 | })
137 | }
138 | return g.Wait()
139 | }
140 |
--------------------------------------------------------------------------------
/tpcc/order_status.go:
--------------------------------------------------------------------------------
1 | package tpcc
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | "fmt"
7 | )
8 |
// SQL text for the Order-Status business transaction (TPC-C clause
// 2.6). The statements are looked up by these keys in
// state.orderStatusStmts.
const (
	// count of customers in a district sharing a last name
	orderStatusSelectCustomerCntByLast = `SELECT count(c_id) namecnt FROM customer WHERE c_w_id = ? AND c_d_id = ? AND c_last = ?`
	// customers with a given last name, ordered by first name
	orderStatusSelectCustomerByLast = `SELECT c_balance, c_first, c_middle, c_id FROM customer WHERE c_w_id = ? AND c_d_id = ? AND c_last = ? ORDER BY c_first`
	// a single customer looked up by id
	orderStatusSelectCustomerByID = `SELECT c_balance, c_first, c_middle, c_last FROM customer WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?`
	// the customer's most recent order
	orderStatusSelectLatestOrder = `SELECT o_id, o_carrier_id, o_entry_d FROM orders WHERE o_w_id = ? AND o_d_id = ? AND o_c_id = ? ORDER BY o_id DESC LIMIT 1`
	// every order line belonging to that order
	orderStatusSelectOrderLine = `SELECT ol_i_id, ol_supply_w_id, ol_quantity, ol_amount, ol_delivery_d FROM order_line WHERE ol_w_id = ? AND ol_d_id = ? AND ol_o_id = ?`
)
16 |
// orderStatusData carries the inputs and query results of one
// Order-Status transaction.
type orderStatusData struct {
	wID int // warehouse id
	dID int // district id

	cID      int    // customer id; 0 until resolved when selecting by last name
	cLast    string // customer last name (set when selecting by name)
	cBalance float64
	cFirst   string
	cMiddle  string

	oID        int
	oEntryD    string
	oCarrierID sql.NullInt64 // carrier id; nullable in the schema
}
31 |
32 | func (w *Workloader) runOrderStatus(ctx context.Context, thread int) error {
33 | s := getTPCCState(ctx)
34 | d := orderStatusData{
35 | wID: randInt(s.R, 1, w.cfg.Warehouses),
36 | dID: randInt(s.R, 1, districtPerWarehouse),
37 | }
38 |
39 | // refer 2.6.1.2
40 | if s.R.Intn(100) < 60 {
41 | d.cLast = randCLast(s.R, s.Buf)
42 | } else {
43 | d.cID = randCustomerID(s.R)
44 | }
45 |
46 | tx, err := w.beginTx(ctx)
47 | if err != nil {
48 | return err
49 | }
50 | defer tx.Rollback()
51 |
52 | if d.cID == 0 {
53 | // by name
54 | // SELECT count(c_id) INTO :namecnt FROM customer
55 | // WHERE c_last=:c_last AND c_d_id=:d_id AND c_w_id=:w_id
56 | var nameCnt int
57 | if err := s.orderStatusStmts[orderStatusSelectCustomerCntByLast].QueryRowContext(ctx, d.wID, d.dID, d.cLast).Scan(&nameCnt); err != nil {
58 | return fmt.Errorf("exec %s failed %v", orderStatusSelectCustomerCntByLast, err)
59 | }
60 | if nameCnt%2 == 1 {
61 | nameCnt++
62 | }
63 |
64 | rows, err := s.orderStatusStmts[orderStatusSelectCustomerByLast].QueryContext(ctx, d.wID, d.dID, d.cLast)
65 | if err != nil {
66 | return fmt.Errorf("exec %s failed %v", orderStatusSelectCustomerByLast, err)
67 | }
68 | for i := 0; i < nameCnt/2 && rows.Next(); i++ {
69 | if err := rows.Scan(&d.cBalance, &d.cFirst, &d.cMiddle, &d.cID); err != nil {
70 | return err
71 | }
72 | }
73 |
74 | rows.Close()
75 | if err := rows.Err(); err != nil {
76 | return err
77 | }
78 | } else {
79 | if err := s.orderStatusStmts[orderStatusSelectCustomerByID].QueryRowContext(ctx, d.wID, d.dID, d.cID).Scan(&d.cBalance, &d.cFirst, &d.cMiddle, &d.cLast); err != nil {
80 | return fmt.Errorf("exec %s failed %v", orderStatusSelectCustomerByID, err)
81 | }
82 | }
83 |
84 | // SELECT o_id, o_carrier_id, o_entry_d
85 | // INTO :o_id, :o_carrier_id, :entdate FROM orders
86 | // ORDER BY o_id DESC;
87 |
88 | // refer 2.6.2.2 - select the latest order
89 | if err := s.orderStatusStmts[orderStatusSelectLatestOrder].QueryRowContext(ctx, d.wID, d.dID, d.cID).Scan(&d.oID, &d.oCarrierID, &d.oEntryD); err != nil {
90 | return fmt.Errorf("exec %s failed %v", orderStatusSelectLatestOrder, err)
91 | }
92 |
93 | // SQL DECLARE c_line CURSOR FOR SELECT ol_i_id, ol_supply_w_id, ol_quantity,
94 | // ol_amount, ol_delivery_d
95 | // FROM order_line
96 | // WHERE ol_o_id=:o_id AND ol_d_id=:d_id AND ol_w_id=:w_id;
97 | // OPEN c_line;
98 | rows, err := s.orderStatusStmts[orderStatusSelectOrderLine].QueryContext(ctx, d.wID, d.dID, d.oID)
99 | if err != nil {
100 | return fmt.Errorf("exec %s failed %v", orderStatusSelectOrderLine, err)
101 | }
102 | defer rows.Close()
103 |
104 | items := make([]orderItem, 0, 4)
105 | for rows.Next() {
106 | var item orderItem
107 | if err := rows.Scan(&item.olIID, &item.olSupplyWID, &item.olQuantity, &item.olAmount, &item.olDeliveryD); err != nil {
108 | return err
109 | }
110 | items = append(items, item)
111 | }
112 | if err := rows.Err(); err != nil {
113 | return err
114 | }
115 |
116 | return tx.Commit()
117 | }
118 |
--------------------------------------------------------------------------------
/pkg/util/version_test.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/assert"
7 | )
8 |
9 | func TestNewTiDBSemVersion(t *testing.T) {
10 | testCases := []struct {
11 | name string
12 | input string
13 | expected SemVersion
14 | ok bool
15 | }{
16 | {
17 | name: "normal case with addition",
18 | input: "5.7.25-TiDB-v7.1.0-alpha",
19 | expected: SemVersion{Major: 7, Minor: 1, Patch: 0},
20 | ok: true,
21 | },
22 | {
23 | name: "version without addition",
24 | input: "5.7.25-TiDB-v7.4.1",
25 | expected: SemVersion{Major: 7, Minor: 4, Patch: 1},
26 | ok: true,
27 | },
28 | {
29 | name: "multi-part addition",
30 | input: "5.7.25-TiDB-v6.5.3-beta.2",
31 | expected: SemVersion{Major: 6, Minor: 5, Patch: 3},
32 | ok: true,
33 | },
34 | {
35 | name: "empty addition due to trailing hyphen",
36 | input: "5.7.25-TiDB-v7.1.0-",
37 | expected: SemVersion{Major: 7, Minor: 1, Patch: 0},
38 | ok: true,
39 | },
40 | {
41 | name: "non-tidb database",
42 | input: "MySQL 8.0.35",
43 | expected: SemVersion{},
44 | ok: false,
45 | },
46 | {
47 | name: "missing version prefix",
48 | input: "TiDB-7.2.0",
49 | expected: SemVersion{},
50 | ok: false,
51 | },
52 | {
53 | name: "invalid patch version",
54 | input: "5.7.25-TiDB-v7.1.x",
55 | expected: SemVersion{},
56 | ok: false,
57 | },
58 | {
59 | name: "insufficient version parts",
60 | input: "5.7.25-TiDB-v7.1",
61 | expected: SemVersion{},
62 | ok: false,
63 | },
64 | }
65 |
66 | for _, tc := range testCases {
67 | t.Run(tc.name, func(t *testing.T) {
68 | actual, ok := NewTiDBSemVersion(tc.input)
69 | assert.Equal(t, tc.ok, ok, "ok mismatch")
70 | if tc.ok {
71 | assert.Equal(t, tc.expected, actual, "version mismatch")
72 | }
73 | })
74 | }
75 | }
76 |
77 | func TestSemVersionCompare(t *testing.T) {
78 | testCases := []struct {
79 | name string
80 | version1 SemVersion
81 | version2 SemVersion
82 | expected int
83 | }{
84 | {
85 | name: "major version greater",
86 | version1: SemVersion{Major: 8, Minor: 0, Patch: 0},
87 | version2: SemVersion{Major: 7, Minor: 5, Patch: 10},
88 | expected: 1,
89 | },
90 | {
91 | name: "major version less",
92 | version1: SemVersion{Major: 6, Minor: 9, Patch: 9},
93 | version2: SemVersion{Major: 7, Minor: 0, Patch: 0},
94 | expected: -1,
95 | },
96 | {
97 | name: "major same, minor greater",
98 | version1: SemVersion{Major: 7, Minor: 2, Patch: 0},
99 | version2: SemVersion{Major: 7, Minor: 1, Patch: 5},
100 | expected: 1,
101 | },
102 | {
103 | name: "major same, minor less",
104 | version1: SemVersion{Major: 7, Minor: 1, Patch: 10},
105 | version2: SemVersion{Major: 7, Minor: 2, Patch: 0},
106 | expected: -1,
107 | },
108 | {
109 | name: "major and minor same, patch greater",
110 | version1: SemVersion{Major: 7, Minor: 1, Patch: 5},
111 | version2: SemVersion{Major: 7, Minor: 1, Patch: 0},
112 | expected: 1,
113 | },
114 | {
115 | name: "major and minor same, patch less",
116 | version1: SemVersion{Major: 7, Minor: 1, Patch: 0},
117 | version2: SemVersion{Major: 7, Minor: 1, Patch: 1},
118 | expected: -1,
119 | },
120 | {
121 | name: "identical versions",
122 | version1: SemVersion{Major: 7, Minor: 1, Patch: 0},
123 | version2: SemVersion{Major: 7, Minor: 1, Patch: 0},
124 | expected: 0,
125 | },
126 | {
127 | name: "extreme version differences",
128 | version1: SemVersion{Major: 10, Minor: 0, Patch: 0},
129 | version2: SemVersion{Major: 1, Minor: 99, Patch: 99},
130 | expected: 1,
131 | },
132 | }
133 |
134 | for _, tc := range testCases {
135 | t.Run(tc.name, func(t *testing.T) {
136 | result := tc.version1.Compare(tc.version2)
137 | if result != tc.expected {
138 | t.Errorf("Expected %v.Compare(%v) = %v, got %v",
139 | tc.version1, tc.version2, tc.expected, result)
140 | }
141 |
142 | reverseResult := tc.version2.Compare(tc.version1)
143 | expectedReverse := -tc.expected
144 | if reverseResult != expectedReverse {
145 | t.Errorf("Expected %v.Compare(%v) = %v, got %v",
146 | tc.version2, tc.version1, expectedReverse, reverseResult)
147 | }
148 | })
149 | }
150 | }
151 |
--------------------------------------------------------------------------------
/tpcc/rand.go:
--------------------------------------------------------------------------------
1 | package tpcc
2 |
3 | import (
4 | "fmt"
5 | "math/rand"
6 | "strings"
7 | "time"
8 |
9 | "github.com/pingcap/go-tpc/pkg/util"
10 | )
11 |
// convertToPQ rewrites MySQL-style `?` placeholders into the positional
// `$1`, `$2`, ... form required by the pq Postgres driver; see
// https://go.dev/doc/database/querying. Queries for any other driver
// are returned unchanged.
//
// NOTE(review): a literal '?' inside a quoted string would also be
// rewritten; assumes the workload's statements contain none — confirm.
func convertToPQ(query string, driver string) string {
	if driver != "postgres" {
		return query
	}
	// Single pass over the query; avoids the quadratic repeated-Replace
	// pattern.
	var b strings.Builder
	b.Grow(len(query) + 8)
	n := 1
	for {
		i := strings.IndexByte(query, '?')
		if i < 0 {
			b.WriteString(query)
			return b.String()
		}
		b.WriteString(query[:i])
		fmt.Fprintf(&b, "$%d", n)
		n++
		query = query[i+1:]
	}
}
31 |
32 | // randInt return a random int in [min, max]
33 | // refer 4.3.2.5
34 | func randInt(r *rand.Rand, min, max int) int {
35 | if min == max {
36 | return min
37 | }
38 | return r.Intn(max-min+1) + min
39 | }
40 |
// Character sources for the random-string helpers below, with their
// lengths precomputed so callers avoid len() on every draw.
const (
	characters = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890`
	letters    = `ABCDEFGHIJKLMNOPQRSTUVWXYZ`
	numbers    = `1234567890`

	lenCharacters = 62
	lenLetters    = 26
	lenNumbers    = 10
)
49 |
50 | func randBuffer(r *rand.Rand, b *util.BufAllocator, source string, min, max int, num int) []byte {
51 | buf := b.Alloc(randInt(r, min, max))
52 | for i := range buf {
53 | buf[i] = source[r.Intn(num)]
54 | }
55 | return buf
56 | }
57 |
58 | // refer 4.3.2.2
59 | func randChars(r *rand.Rand, b *util.BufAllocator, min, max int) string {
60 | return util.String(randBuffer(r, b, characters, min, max, lenCharacters))
61 | }
62 |
63 | // refer 4.3.2.2
64 | func randLetters(r *rand.Rand, b *util.BufAllocator, min, max int) string {
65 | return util.String(randBuffer(r, b, letters, min, max, lenLetters))
66 | }
67 |
68 | // refer 4.3.2.2
69 | func randNumbers(r *rand.Rand, b *util.BufAllocator, min, max int) string {
70 | return util.String(randBuffer(r, b, numbers, min, max, lenNumbers))
71 | }
72 |
73 | // refer 4.3.2.7
74 | func randZip(r *rand.Rand, b *util.BufAllocator) string {
75 | buf := randBuffer(r, b, numbers, 9, 9, lenNumbers)
76 | copy(buf[4:], `11111`)
77 | return util.String(buf)
78 | }
79 |
80 | func randState(r *rand.Rand, b *util.BufAllocator) string {
81 | buf := randBuffer(r, b, letters, 2, 2, lenLetters)
82 | return util.String(buf)
83 | }
84 |
85 | func randTax(r *rand.Rand) float64 {
86 | return float64(randInt(r, 0, 2000)) / 10000.0
87 | }
88 |
89 | const originalString = "ORIGINAL"
90 |
91 | // refer 4.3.3.1
92 | // random a-string [26 .. 50]. For 10% of the rows, selected at random,
93 | // the string "ORIGINAL" must be held by 8 consecutive characters starting at a random position within buf
94 | func randOriginalString(r *rand.Rand, b *util.BufAllocator) string {
95 | if r.Intn(10) == 0 {
96 | buf := randBuffer(r, b, characters, 26, 50, lenCharacters)
97 | index := r.Intn(len(buf) - 8)
98 | copy(buf[index:], originalString)
99 | return util.String(buf)
100 | }
101 |
102 | return randChars(r, b, 26, 50)
103 | }
104 |
// Per-process NURand run-time constants (TPC-C clause 2.1.6), chosen
// once in init below and mixed into the non-uniform id generators.
var (
	cLoad       int // C constant used by randCLast
	cCustomerID int // C constant used by randCustomerID
	cItemID     int // C constant used by randItemID
)
110 |
111 | var cLastTokens = [...]string{
112 | "BAR", "OUGHT", "ABLE", "PRI", "PRES",
113 | "ESE", "ANTI", "CALLY", "ATION", "EING"}
114 |
115 | func randCLastSyllables(n int, b *util.BufAllocator) string {
116 | // 3 tokens * max len
117 | buf := b.Alloc(3 * 5)
118 | buf = buf[:0]
119 | buf = append(buf, cLastTokens[n/100]...)
120 | n = n % 100
121 | buf = append(buf, cLastTokens[n/10]...)
122 | n = n % 10
123 | buf = append(buf, cLastTokens[n]...)
124 | return util.String(buf)
125 | }
126 |
127 | func init() {
128 | r := rand.New(rand.NewSource(time.Now().UnixNano()))
129 | cLoad = r.Intn(256)
130 | cItemID = r.Intn(1024)
131 | cCustomerID = r.Intn(8192)
132 | }
133 |
134 | // refer 4.3.2.3 and 2.1.6
135 | func randCLast(r *rand.Rand, b *util.BufAllocator) string {
136 | return randCLastSyllables(((r.Intn(256)|r.Intn(1000))+cLoad)%1000, b)
137 | }
138 |
139 | // refer 2.1.6
140 | func randCustomerID(r *rand.Rand) int {
141 | return ((r.Intn(1024) | (r.Intn(3000) + 1) + cCustomerID) % 3000) + 1
142 | }
143 |
144 | // refer 2.1.6
145 | func randItemID(r *rand.Rand) int {
146 | return ((r.Intn(8190) | (r.Intn(100000) + 1) + cItemID) % 100000) + 1
147 | }
148 |
--------------------------------------------------------------------------------
/tpch/check.go:
--------------------------------------------------------------------------------
1 | package tpch
2 |
3 | import (
4 | "database/sql"
5 | "fmt"
6 | "math"
7 | "strconv"
8 | )
9 |
// precision classifies a result column of a TPC-H validation query and
// selects which comparison rule checkOutput applies (TPC-H 2.1.3.5).
type precision int

const (
	str precision = iota // opaque string: exact match required
	sum                  // SUM aggregate: within $100 of the reference
	avg                  // AVG aggregate: within 1% after rounding
	cnt                  // COUNT aggregate: exact match required
	num                  // numeric singleton: exact match required
	rat                  // ratio: within 1% after rounding
)
20 |
// queryColPrecisions maps each TPC-H query to the per-column comparison
// rules used when validating its output against the reference answers.
var queryColPrecisions = map[string][]precision{
	// Comment 4: In cases where validation output data is from the aggregate SUM(l_quantity) (e.g. queries 1 and 18),
	// the precision for this validation output data must exactly match the query validation data.
	"q1": {str, str, str, sum, sum, sum, avg, avg, avg, cnt},
	"q2": {num, str, str, str, str, str, str, str},
	"q3": {str, sum, str, str},
	"q4": {str, cnt},
	"q5": {str, sum},
	"q6": {sum},
	"q7": {str, str, str, sum},
	"q8": {str, rat},
	"q9": {str, str, sum},
	"q10": {str, str, sum, num, str, str, str, str},
	"q11": {str, sum},
	// Comment 2: In cases where validation output data resembles a row count operation by summing up 0 and 1 using a
	// SUM aggregate (e.g. query 12), the precision for this validation output data must adhere to bullet a) above.
	"q12": {str, cnt, cnt},
	"q13": {cnt, cnt},
	"q14": {rat},
	// Comment 3: In cases were validation output data is selected from views without any further computation (e.g. total
	// revenue in Query 15), the precision for this validation output data must adhere to bullet c) above.
	"q15": {str, str, str, str, sum},
	"q16": {str, str, num, cnt},
	"q17": {avg},
	// Comment 4: In cases where validation output data is from the aggregate SUM(l_quantity) (e.g. queries 1 and 18),
	// the precision for this validation output data must exactly match the query validation data.
	"q18": {str, str, str, str, num, str},
	"q19": {sum},
	"q20": {str, str},
	"q21": {str, cnt},
	"q22": {num, cnt, sum},
}
53 |
54 | func (w *Workloader) scanQueryResult(queryName string, rows *sql.Rows) error {
55 | var got [][]string
56 |
57 | cols, err := rows.Columns()
58 | if err != nil {
59 | return err
60 | }
61 |
62 | for rows.Next() {
63 | rawResult := make([][]byte, len(cols))
64 | row := make([]string, len(cols))
65 | dest := make([]interface{}, len(cols))
66 |
67 | for i := range rawResult {
68 | dest[i] = &rawResult[i]
69 | }
70 |
71 | if err := rows.Scan(dest...); err != nil {
72 | return fmt.Errorf("scan %s failed %v", queryName, err)
73 | }
74 |
75 | for i, raw := range rawResult {
76 | if raw == nil {
77 | row[i] = "\\N"
78 | } else {
79 | row[i] = string(raw)
80 | }
81 | }
82 | got = append(got, row)
83 | }
84 | if w.cfg.ScaleFactor == 1 && w.cfg.EnableOutputCheck {
85 | return checkOutput(queryColPrecisions[queryName], ans[queryName], got)
86 | }
87 | return nil
88 | }
89 |
90 | func checkOutput(colPrecisions []precision, expect [][]string, got [][]string) (ret error) {
91 | if len(expect) != len(got) {
92 | return fmt.Errorf("expect %d rows, got %d rows", len(expect), len(got))
93 | }
94 | if len(expect) > 0 {
95 | if len(expect[0]) != len(got[0]) {
96 | return fmt.Errorf("expect %d columns, got %d columns", len(expect[0]), len(got[0]))
97 | }
98 | }
99 | for i, row := range got {
100 | for j, column := range row {
101 | expectStr := expect[i][j]
102 | ret = fmt.Errorf("expect %s at row %d column %d, got %s", expectStr, i, j, column)
103 |
104 | // 2.1.3.5
105 | switch colPrecisions[j] {
106 | case cnt:
107 | // For singleton column values and results from COUNT aggregates, the values must exactly match the query
108 | // validation output data.
109 | fallthrough
110 | case num:
111 | fallthrough
112 | case str:
113 | if expectStr != column {
114 | return
115 | }
116 | continue
117 | }
118 |
119 | expectFloat, err := strconv.ParseFloat(expectStr, 64)
120 | if err != nil {
121 | return
122 | }
123 | gotFloat, err := strconv.ParseFloat(column, 64)
124 | if err != nil {
125 | return
126 | }
127 |
128 | switch colPrecisions[j] {
129 | case sum:
130 | // For results from SUM aggregates, the resulting values must be within $100 of the query validation output
131 | // data
132 | if math.Abs(expectFloat-gotFloat) > 100.0 {
133 | return
134 | }
135 | case avg:
136 | // For results from AVG aggregates, the resulting values r must be within 1% of the query validation output
137 | // data when rounded to the nearest 1/100th. That is, 0.99*v<=round(r,2)<=1.01*v.
138 | fallthrough
139 | case rat:
140 | // For ratios, results r must be within 1% of the query validation output data v when rounded to the nearest
141 | // 1/100th. That is, 0.99*v<=round(r,2)<=1.01*v
142 | if math.Abs(math.Round(gotFloat*1000)/1000-math.Round(expectFloat*1000)/1000) > 0.01 {
143 | return
144 | }
145 | default:
146 | panic("unreachable")
147 | }
148 | }
149 | }
150 |
151 | return nil
152 | }
153 |
--------------------------------------------------------------------------------
/tpch/ddl.go:
--------------------------------------------------------------------------------
1 | package tpch
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | )
7 |
// allTables lists every TPC-H table; dropTable iterates this slice in
// order. Initialized directly rather than via init(), since the value
// is a static literal.
var allTables = []string{"lineitem", "partsupp", "supplier", "part", "orders", "customer", "region", "nation"}
13 |
14 | func (w *Workloader) createTableDDL(ctx context.Context, query string, tableName string, action string) error {
15 | s := w.getState(ctx)
16 | fmt.Printf("%s %s\n", action, tableName)
17 | if _, err := s.Conn.ExecContext(ctx, query); err != nil {
18 | return err
19 | }
20 | if w.cfg.TiFlashReplica != 0 {
21 | fmt.Printf("creating tiflash replica for %s\n", tableName)
22 | replicaSQL := fmt.Sprintf("ALTER TABLE %s SET TIFLASH REPLICA %d", tableName, w.cfg.TiFlashReplica)
23 | if _, err := s.Conn.ExecContext(ctx, replicaSQL); err != nil {
24 | return err
25 | }
26 | }
27 | return nil
28 | }
29 |
30 | // createTables creates tables schema.
31 | func (w *Workloader) createTables(ctx context.Context) error {
32 | query := `
33 | CREATE TABLE IF NOT EXISTS nation (
34 | N_NATIONKEY BIGINT NOT NULL,
35 | N_NAME CHAR(25) NOT NULL,
36 | N_REGIONKEY BIGINT NOT NULL,
37 | N_COMMENT VARCHAR(152),
38 | PRIMARY KEY (N_NATIONKEY)
39 | )`
40 |
41 | if err := w.createTableDDL(ctx, query, "nation", "creating"); err != nil {
42 | return err
43 | }
44 |
45 | query = `
46 | CREATE TABLE IF NOT EXISTS region (
47 | R_REGIONKEY BIGINT NOT NULL,
48 | R_NAME CHAR(25) NOT NULL,
49 | R_COMMENT VARCHAR(152),
50 | PRIMARY KEY (R_REGIONKEY)
51 | )`
52 | if err := w.createTableDDL(ctx, query, "region", "creating"); err != nil {
53 | return err
54 | }
55 |
56 | query = `
57 | CREATE TABLE IF NOT EXISTS part (
58 | P_PARTKEY BIGINT NOT NULL,
59 | P_NAME VARCHAR(55) NOT NULL,
60 | P_MFGR CHAR(25) NOT NULL,
61 | P_BRAND CHAR(10) NOT NULL,
62 | P_TYPE VARCHAR(25) NOT NULL,
63 | P_SIZE BIGINT NOT NULL,
64 | P_CONTAINER CHAR(10) NOT NULL,
65 | P_RETAILPRICE DECIMAL(15, 2) NOT NULL,
66 | P_COMMENT VARCHAR(23) NOT NULL,
67 | PRIMARY KEY (P_PARTKEY)
68 | )`
69 | if err := w.createTableDDL(ctx, query, "part", "creating"); err != nil {
70 | return err
71 | }
72 |
73 | query = `
74 | CREATE TABLE IF NOT EXISTS supplier (
75 | S_SUPPKEY BIGINT NOT NULL,
76 | S_NAME CHAR(25) NOT NULL,
77 | S_ADDRESS VARCHAR(40) NOT NULL,
78 | S_NATIONKEY BIGINT NOT NULL,
79 | S_PHONE CHAR(15) NOT NULL,
80 | S_ACCTBAL DECIMAL(15, 2) NOT NULL,
81 | S_COMMENT VARCHAR(101) NOT NULL,
82 | PRIMARY KEY (S_SUPPKEY)
83 | )`
84 | if err := w.createTableDDL(ctx, query, "supplier", "creating"); err != nil {
85 | return err
86 | }
87 |
88 | query = `
89 | CREATE TABLE IF NOT EXISTS partsupp (
90 | PS_PARTKEY BIGINT NOT NULL,
91 | PS_SUPPKEY BIGINT NOT NULL,
92 | PS_AVAILQTY BIGINT NOT NULL,
93 | PS_SUPPLYCOST DECIMAL(15, 2) NOT NULL,
94 | PS_COMMENT VARCHAR(199) NOT NULL,
95 | PRIMARY KEY (PS_PARTKEY, PS_SUPPKEY)
96 | )`
97 | if err := w.createTableDDL(ctx, query, "partsupp", "creating"); err != nil {
98 | return err
99 | }
100 |
101 | query = `
102 | CREATE TABLE IF NOT EXISTS customer (
103 | C_CUSTKEY BIGINT NOT NULL,
104 | C_NAME VARCHAR(25) NOT NULL,
105 | C_ADDRESS VARCHAR(40) NOT NULL,
106 | C_NATIONKEY BIGINT NOT NULL,
107 | C_PHONE CHAR(15) NOT NULL,
108 | C_ACCTBAL DECIMAL(15, 2) NOT NULL,
109 | C_MKTSEGMENT CHAR(10) NOT NULL,
110 | C_COMMENT VARCHAR(117) NOT NULL,
111 | PRIMARY KEY (C_CUSTKEY)
112 | )`
113 | if err := w.createTableDDL(ctx, query, "customer", "creating"); err != nil {
114 | return err
115 | }
116 |
117 | query = `
118 | CREATE TABLE IF NOT EXISTS orders (
119 | O_ORDERKEY BIGINT NOT NULL,
120 | O_CUSTKEY BIGINT NOT NULL,
121 | O_ORDERSTATUS CHAR(1) NOT NULL,
122 | O_TOTALPRICE DECIMAL(15, 2) NOT NULL,
123 | O_ORDERDATE DATE NOT NULL,
124 | O_ORDERPRIORITY CHAR(15) NOT NULL,
125 | O_CLERK CHAR(15) NOT NULL,
126 | O_SHIPPRIORITY BIGINT NOT NULL,
127 | O_COMMENT VARCHAR(79) NOT NULL,
128 | PRIMARY KEY (O_ORDERKEY)
129 | )`
130 | if err := w.createTableDDL(ctx, query, "orders", "creating"); err != nil {
131 | return err
132 | }
133 |
134 | query = `
135 | CREATE TABLE IF NOT EXISTS lineitem (
136 | L_ORDERKEY BIGINT NOT NULL,
137 | L_PARTKEY BIGINT NOT NULL,
138 | L_SUPPKEY BIGINT NOT NULL,
139 | L_LINENUMBER BIGINT NOT NULL,
140 | L_QUANTITY DECIMAL(15, 2) NOT NULL,
141 | L_EXTENDEDPRICE DECIMAL(15, 2) NOT NULL,
142 | L_DISCOUNT DECIMAL(15, 2) NOT NULL,
143 | L_TAX DECIMAL(15, 2) NOT NULL,
144 | L_RETURNFLAG CHAR(1) NOT NULL,
145 | L_LINESTATUS CHAR(1) NOT NULL,
146 | L_SHIPDATE DATE NOT NULL,
147 | L_COMMITDATE DATE NOT NULL,
148 | L_RECEIPTDATE DATE NOT NULL,
149 | L_SHIPINSTRUCT CHAR(25) NOT NULL,
150 | L_SHIPMODE CHAR(10) NOT NULL,
151 | L_COMMENT VARCHAR(44) NOT NULL,
152 | PRIMARY KEY (L_ORDERKEY, L_LINENUMBER)
153 | )
154 | `
155 | if err := w.createTableDDL(ctx, query, "lineitem", "creating"); err != nil {
156 | return err
157 | }
158 | return nil
159 | }
160 |
161 | func (w *Workloader) dropTable(ctx context.Context) error {
162 | s := w.getState(ctx)
163 |
164 | for _, tbl := range allTables {
165 | fmt.Printf("DROP TABLE IF EXISTS %s\n", tbl)
166 | if _, err := s.Conn.ExecContext(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", tbl)); err != nil {
167 | return err
168 | }
169 | }
170 | return nil
171 | }
172 |
--------------------------------------------------------------------------------
/tpch/dbgen/order.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
3 | import (
4 | "context"
5 | "io"
6 |
7 | "github.com/pingcap/go-tpc/pkg/sink"
8 | )
9 |
// Random-stream ids and generation parameters for the ORDERS and LINEITEM
// generators. The *Sd constants are seed-stream indices (one independent
// random stream per generated column), mirroring the seed table of the
// reference C dbgen so output stays reproducible.
const (
	oLcntMin = 1 // minimum lineitems per order
	oLcntMax = 7 // maximum lineitems per order
	oSuppSd  = 10
	oClrkSd  = 11
	oOdateSd = 13
	oPrioSd  = 38
	oCkeySd  = 40
	oLcntSd  = 43
	oCmntLen = 49 // average order comment length passed to makeText
	oCmntSd  = 12
	oClrkScl = 1000 // clerks per scale-factor unit (see pickClerk)

	lQtySd   = 14
	lDcntSd  = 15
	lTaxSd   = 16
	lShipSd  = 17
	lSmodeSd = 18
	lPkeySd  = 19
	lSkeySd  = 20
	lSdteSd  = 21
	lCdteSd  = 22
	lRdteSd  = 23
	lRflgSd  = 24
	lCmntLen = 27 // average lineitem comment length passed to makeText
	lCmntSd  = 25
	pennies  = 100 // money values are kept in pennies (integer math)
	ordersPerCust = 10
)
39 |
var (
	// Customer-key and order-date ranges used by makeOrder; populated by
	// initOrder before generation starts.
	ockeyMin dssHuge
	ockeyMax dssHuge
	odateMin dssHuge
	odateMax dssHuge
	// ascDate[i] is the "YYYY-MM-DD" string for encoded date startDate+i.
	ascDate []string
)
47 |
// Order is one generated ORDERS row together with its LINEITEM rows.
type Order struct {
	OKey    dssHuge
	CustKey dssHuge
	// Status is "O" (no lines shipped), "P" (some shipped) or "F" (all
	// shipped); see makeOrder.
	Status string
	// TotalPrice is in pennies; format with FmtMoney for output.
	TotalPrice    dssHuge
	Date          string
	OrderPriority string
	Clerk         string
	ShipPriority  int64
	Comment       string
	// Lines holds the generated lineitems belonging to this order.
	Lines []LineItem
}
60 |
// orderLoader writes Order rows to a '|'-delimited CSV sink.
type orderLoader struct {
	*sink.CSVSink
}
64 |
65 | func (o orderLoader) Load(item interface{}) error {
66 | order := item.(*Order)
67 | if err := o.WriteRow(context.TODO(),
68 | order.OKey,
69 | order.CustKey,
70 | order.Status,
71 | FmtMoney(order.TotalPrice),
72 | order.Date,
73 | order.OrderPriority,
74 | order.Clerk,
75 | order.ShipPriority,
76 | order.Comment); err != nil {
77 | return err
78 | }
79 | return nil
80 | }
81 |
// Flush forces any buffered CSV rows out to the underlying writer.
func (o orderLoader) Flush() error {
	return o.CSVSink.Flush(context.TODO())
}
85 |
// NewOrderLoader returns a loader that emits orders as '|'-delimited CSV to w.
func NewOrderLoader(w io.Writer) orderLoader {
	return orderLoader{sink.NewCSVSinkWithDelimiter(w, '|')}
}
89 |
// sdOrder fast-forwards the random streams consumed per order so generation
// can be partitioned or resumed deterministically. skipCount is the number
// of orders being skipped. The comment stream advances twice per order
// because makeText draws both an offset and a length from the same stream.
func sdOrder(child Table, skipCount dssHuge) {
	advanceStream(oLcntSd, skipCount, false)
	advanceStream(oCkeySd, skipCount, false)
	advanceStream(oCmntSd, skipCount*2, false)
	advanceStream(oSuppSd, skipCount, false)
	advanceStream(oClrkSd, skipCount, false)
	advanceStream(oPrioSd, skipCount, false)
	advanceStream(oOdateSd, skipCount, false)
}
99 |
// makeOrder generates the order whose key is derived from idx, together
// with a random number (1-7) of lineitem rows. Every random value comes
// from a fixed per-column stream, so the call order below is part of the
// output contract — do not reorder.
func makeOrder(idx dssHuge) *Order {
	delta := 1
	order := &Order{}
	order.OKey = makeSparse(idx)
	// Use the 64-bit stream once the customer key space gets large.
	if scale >= 30000 {
		order.CustKey = random64(ockeyMin, ockeyMax, oCkeySd)
	} else {
		order.CustKey = random(ockeyMin, ockeyMax, oCkeySd)
	}

	// Comment: Orders are not present for all customers.
	// In fact, one-third of the customers do not have any order in the database.
	// The orders are assigned at random to two-thirds of the customers
	for order.CustKey%3 == 0 {
		// Nudge away from keys divisible by 3, alternating direction and
		// clamping at the top of the key range.
		order.CustKey += dssHuge(delta)
		order.CustKey = min(order.CustKey, ockeyMax)
		delta *= -1
	}
	tmpDate := random(odateMin, odateMax, oOdateSd)
	order.Date = ascDate[tmpDate-startDate]
	pickStr(&oPrioritySet, oPrioSd, &order.OrderPriority)
	order.Clerk = pickClerk()
	order.Comment = makeText(oCmntLen, oCmntSd)
	order.ShipPriority = 0
	order.TotalPrice = 0
	order.Status = "O"
	// oCnt counts lineitems already shipped ("F"); it decides the order status.
	oCnt := 0
	lineCount := random(oLcntMin, oLcntMax, oLcntSd)

	for lCnt := dssHuge(0); lCnt < lineCount; lCnt++ {
		line := LineItem{}
		line.OKey = order.OKey
		line.LCnt = lCnt + 1
		line.Quantity = random(lQtyMin, lQtyMax, lQtySd)
		line.Discount = random(lDcntMin, lDcntMax, lDcntSd)
		line.Tax = random(lTaxMin, lTaxMax, lTaxSd)

		pickStr(&lInstructSet, lShipSd, &line.ShipInstruct)
		pickStr(&lSmodeSet, lSmodeSd, &line.ShipMode)
		line.Comment = makeText(lCmntLen, lCmntSd)

		// NOTE(review): this uses `>` while the customer-key branch above
		// uses `>=` — presumably matching the reference dbgen; confirm
		// before unifying.
		if scale > 30000 {
			line.PartKey = random64(lPkeyMin, LPkeyMax, lPkeySd)
		} else {
			line.PartKey = random(lPkeyMin, LPkeyMax, lPkeySd)
		}

		// Retail price is derived deterministically from the part key.
		rPrice := rpbRoutine(line.PartKey)
		suppNum := random(0, 3, lSkeySd)
		line.SuppKey = partSuppBridge(line.PartKey, suppNum)
		line.EPrice = rPrice * line.Quantity

		// Apply discount then tax in integer penny arithmetic.
		order.TotalPrice += ((line.EPrice * (100 - line.Discount)) / pennies) *
			(100 + line.Tax) / pennies

		// Ship/commit dates are offsets from the order date; the receipt
		// date is an offset from the ship date.
		sDate := random(lSdteMin, lSdteMax, lSdteSd)
		sDate += tmpDate

		cDate := random(lCdteMin, lCdteMax, lCdteSd)
		cDate += tmpDate

		rDate := random(lRdteMin, lRdteMax, lRdteSd)
		rDate += sDate
		line.SDate = ascDate[sDate-startDate]
		line.CDate = ascDate[cDate-startDate]
		line.RDate = ascDate[rDate-startDate]

		// Items received on or before "today" get a random return flag,
		// otherwise "N" (not yet returned).
		if julian(int(rDate)) <= currentDate {
			var tmpStr string
			pickStr(&lRflagSet, lRflgSd, &tmpStr)
			line.RFlag = tmpStr[0:1]
		} else {
			line.RFlag = "N"
		}

		// Items shipped on or before "today" are fulfilled ("F"), else open.
		if julian(int(sDate)) <= currentDate {
			oCnt++
			line.LStatus = "F"
		} else {
			line.LStatus = "O"
		}

		order.Lines = append(order.Lines, line)
	}
	// Order status: "P" if some lines shipped, upgraded to "F" if all did.
	if oCnt > 0 {
		order.Status = "P"
	}
	if oCnt == len(order.Lines) {
		order.Status = "F"
	}

	return order
}
193 |
// initOrder initializes the customer-key and order-date ranges used by
// makeOrder. The latest order date leaves room for the maximum ship plus
// receipt lead times so every derived lineitem date stays inside ascDate.
func initOrder() {
	ockeyMin = 1
	ockeyMax = tDefs[TCust].base * scale
	ascDate = makeAscDate()
	odateMin = startDate
	odateMax = startDate + totDate - (lSdteMax + lRdteMax) - 1
}
201 |
--------------------------------------------------------------------------------
/tpch/output/q9.out:
--------------------------------------------------------------------------------
1 | nation|o_year|sum_profit
2 | ALGERIA|1998|30411405.2909
3 | ALGERIA|1997|50920199.8042
4 | ALGERIA|1996|49039923.0686
5 | ALGERIA|1995|51884030.2886
6 | ALGERIA|1994|48375614.9251
7 | ALGERIA|1993|47093993.4593
8 | ALGERIA|1992|54189272.7819
9 | ARGENTINA|1998|28117946.0073
10 | ARGENTINA|1997|47274792.7901
11 | ARGENTINA|1996|46827577.7003
12 | ARGENTINA|1995|49483021.5026
13 | ARGENTINA|1994|48382755.5697
14 | ARGENTINA|1993|46818287.7908
15 | ARGENTINA|1992|48128704.6694
16 | BRAZIL|1998|28061196.1327
17 | BRAZIL|1997|46100279.7551
18 | BRAZIL|1996|48491505.5492
19 | BRAZIL|1995|45010513.9922
20 | BRAZIL|1994|45670109.6911
21 | BRAZIL|1993|46887926.0256
22 | BRAZIL|1992|46214013.1314
23 | CANADA|1998|28042036.3354
24 | CANADA|1997|49647958.9157
25 | CANADA|1996|50880780.3269
26 | CANADA|1995|48622679.3229
27 | CANADA|1994|46942190.3085
28 | CANADA|1993|49895113.2787
29 | CANADA|1992|48135100.1622
30 | CHINA|1998|28326737.5405
31 | CHINA|1997|46930101.9867
32 | CHINA|1996|46611136.9511
33 | CHINA|1995|46869592.4939
34 | CHINA|1994|46583766.1824
35 | CHINA|1993|45788331.4336
36 | CHINA|1992|46172326.2572
37 | EGYPT|1998|28441754.1016
38 | EGYPT|1997|48995146.0915
39 | EGYPT|1996|47883450.0547
40 | EGYPT|1995|49866046.2162
41 | EGYPT|1994|48405461.0665
42 | EGYPT|1993|50339405.0996
43 | EGYPT|1992|48496544.1132
44 | ETHIOPIA|1998|26370687.0763
45 | ETHIOPIA|1997|46030773.8394
46 | ETHIOPIA|1996|44253006.3398
47 | ETHIOPIA|1995|45357002.7973
48 | ETHIOPIA|1994|46568596.3615
49 | ETHIOPIA|1993|45196075.0466
50 | ETHIOPIA|1992|44453685.3079
51 | FRANCE|1998|25212152.1322
52 | FRANCE|1997|45103832.3389
53 | FRANCE|1996|43816903.3292
54 | FRANCE|1995|43997808.6208
55 | FRANCE|1994|45505537.4793
56 | FRANCE|1993|43981576.0657
57 | FRANCE|1992|42743391.2255
58 | GERMANY|1998|28546155.8431
59 | GERMANY|1997|49412975.2986
60 | GERMANY|1996|49050322.7571
61 | GERMANY|1995|50491789.9335
62 | GERMANY|1994|46286075.5502
63 | GERMANY|1993|46644120.8157
64 | GERMANY|1992|49476315.1308
65 | INDIA|1998|27447654.2631
66 | INDIA|1997|46505226.0482
67 | INDIA|1996|46433724.3849
68 | INDIA|1995|46971164.0624
69 | INDIA|1994|46395680.2986
70 | INDIA|1993|45607622.4012
71 | INDIA|1992|44634573.5553
72 | INDONESIA|1998|28927829.4979
73 | INDONESIA|1997|49671081.5152
74 | INDONESIA|1996|50249744.2153
75 | INDONESIA|1995|48658239.8559
76 | INDONESIA|1994|48663344.3550
77 | INDONESIA|1993|48402000.9330
78 | INDONESIA|1992|47550789.6593
79 | IRAN|1998|26867024.6090
80 | IRAN|1997|42199725.6375
81 | IRAN|1996|49299597.7210
82 | IRAN|1995|46636820.0794
83 | IRAN|1994|47660772.7274
84 | IRAN|1993|43899016.9055
85 | IRAN|1992|42879751.3387
86 | IRAQ|1998|29623280.0569
87 | IRAQ|1997|50724554.2913
88 | IRAQ|1996|48593456.7214
89 | IRAQ|1995|51615752.5364
90 | IRAQ|1994|49581010.8548
91 | IRAQ|1993|49573214.1192
92 | IRAQ|1992|51463079.1282
93 | JAPAN|1998|23193427.3718
94 | JAPAN|1997|43239656.3904
95 | JAPAN|1996|43030305.2158
96 | JAPAN|1995|44605584.2926
97 | JAPAN|1994|43070665.8748
98 | JAPAN|1993|43456587.9263
99 | JAPAN|1992|41908504.9588
100 | JORDAN|1998|25496977.7040
101 | JORDAN|1997|42468386.2927
102 | JORDAN|1996|43467160.8761
103 | JORDAN|1995|39824935.8308
104 | JORDAN|1994|41834677.7222
105 | JORDAN|1993|43316895.8224
106 | JORDAN|1992|43204460.3602
107 | KENYA|1998|26401818.6783
108 | KENYA|1997|43461638.4777
109 | KENYA|1996|42233389.3355
110 | KENYA|1995|45234185.6640
111 | KENYA|1994|43000519.3725
112 | KENYA|1993|42957341.8955
113 | KENYA|1992|44464888.5046
114 | MOROCCO|1998|26996931.7180
115 | MOROCCO|1997|46459640.8289
116 | MOROCCO|1996|43154531.9307
117 | MOROCCO|1995|48378387.6744
118 | MOROCCO|1994|43871290.6096
119 | MOROCCO|1993|46678534.6352
120 | MOROCCO|1992|44919453.6860
121 | MOZAMBIQUE|1998|31048586.5760
122 | MOZAMBIQUE|1997|52360539.6074
123 | MOZAMBIQUE|1996|50832568.1480
124 | MOZAMBIQUE|1995|52085430.1542
125 | MOZAMBIQUE|1994|52490635.1191
126 | MOZAMBIQUE|1993|49590971.2331
127 | MOZAMBIQUE|1992|51486367.6753
128 | PERU|1998|28877192.6049
129 | PERU|1997|46270525.9704
130 | PERU|1996|47902499.2576
131 | PERU|1995|46639332.0945
132 | PERU|1994|46843504.3639
133 | PERU|1993|49682400.5684
134 | PERU|1992|46524818.1461
135 | ROMANIA|1998|28384750.8220
136 | ROMANIA|1997|42347930.5059
137 | ROMANIA|1996|48293899.8559
138 | ROMANIA|1995|48736999.0932
139 | ROMANIA|1994|45841889.9648
140 | ROMANIA|1993|44704680.4615
141 | ROMANIA|1992|46955146.8349
142 | RUSSIA|1998|29799258.6562
143 | RUSSIA|1997|47387251.8098
144 | RUSSIA|1996|43690543.2927
145 | RUSSIA|1995|47996400.0451
146 | RUSSIA|1994|49363517.8123
147 | RUSSIA|1993|46622068.5866
148 | RUSSIA|1992|47980955.3738
149 | SAUDI ARABIA|1998|27262825.0196
150 | SAUDI ARABIA|1997|46172963.5616
151 | SAUDI ARABIA|1996|48980465.8828
152 | SAUDI ARABIA|1995|48886049.9633
153 | SAUDI ARABIA|1994|49415728.2778
154 | SAUDI ARABIA|1993|46827296.7773
155 | SAUDI ARABIA|1992|48383481.2893
156 | UNITED KINGDOM|1998|26994101.5471
157 | UNITED KINGDOM|1997|46468024.0865
158 | UNITED KINGDOM|1996|45843437.5804
159 | UNITED KINGDOM|1995|48756111.0554
160 | UNITED KINGDOM|1994|47981811.5416
161 | UNITED KINGDOM|1993|47712028.5537
162 | UNITED KINGDOM|1992|46965863.4021
163 | UNITED STATES|1998|27087511.9971
164 | UNITED STATES|1997|47932046.1443
165 | UNITED STATES|1996|47281076.9954
166 | UNITED STATES|1995|48170715.2533
167 | UNITED STATES|1994|48171361.0151
168 | UNITED STATES|1993|49211733.3695
169 | UNITED STATES|1992|45930064.1889
170 | VIETNAM|1998|27940943.4298
171 | VIETNAM|1997|47619898.2300
172 | VIETNAM|1996|49276741.1515
173 | VIETNAM|1995|45133943.1397
174 | VIETNAM|1994|44912311.8521
175 | VIETNAM|1993|49605403.2867
176 | VIETNAM|1992|48921345.2469
177 |
--------------------------------------------------------------------------------
/tpch/dbgen/misc.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "strings"
7 | "time"
8 | )
9 |
const (
	// Dates are encoded as year*1000 + day-of-year (e.g. 92001 = 1992-01-01).
	startDate   = 92001 // first generated date: 1992-01-01
	currentDate = 95168 // "today": used to decide returned/shipped status in makeOrder
	totDate     = 2557  // number of consecutive days generated from 1992-01-01
	// Size of the shared random-text pool built by initTextPool.
	textPoolSize = 300 * 1024 * 1024
)
16 |
// szTextPool is the shared pool of generated sentence text from which
// makeText slices comment strings; populated once by initTextPool.
var szTextPool []byte
18 |
19 | func makeAscDate() []string {
20 | var res []string
21 | date := time.Date(1992, 1, 1, 0, 0, 0, 0, time.UTC)
22 | for i := 0; i < totDate; i++ {
23 | newDate := date.AddDate(0, 0, i)
24 | ascDate := fmt.Sprintf("%4d-%02d-%02d", newDate.Year(), newDate.Month(), newDate.Day())
25 | res = append(res, ascDate)
26 | }
27 | return res
28 | }
29 |
// makeSparse spreads order keys sparsely over the key space by inserting
// two extra bits above the low three bits of idx.
// NOTE(review): the `0 & 0x0003` term is always zero; it presumably keeps
// parity with the reference C dbgen, where that slot carries the refresh
// (update-set) number — confirm before simplifying.
func makeSparse(idx dssHuge) dssHuge {
	return ((((idx >> 3) << 2) | (0 & 0x0003)) << 3) | (idx & 0x0007)
}
33 |
// pickStr picks a weighted random member of dist using random stream c,
// stores its text in *target and returns the chosen index. Member weights
// are cumulative, so the last member holds the total and the first member
// whose weight reaches the drawn value j wins.
func pickStr(dist *distribution, c int, target *string) (pos int) {
	j := long(random(1, dssHuge(dist.members[len(dist.members)-1].weight), long(c)))
	for pos = 0; dist.members[pos].weight < j; pos++ {
	}
	*target = dist.members[pos].text
	return
}
41 |
// pickClerk returns a random clerk id formatted "Clerk#NNNNNNNNN". The
// clerk pool grows with the scale factor (oClrkScl clerks per unit) with a
// floor of oClrkScl for sub-unit scales.
func pickClerk() string {
	clkNum := random(1, max(scale*oClrkScl, oClrkScl), oClrkSd)
	return fmt.Sprintf("Clerk#%09d", clkNum)
}
46 |
// txtVp expands a random verb-phrase template drawn from the vp grammar
// using random stream sd. Each template token picks a word class
// ('D' adverb, 'V' verb, 'X' auxiliary); a second character on the token,
// if present, is appended verbatim as punctuation. Every word is followed
// by a space, including the last one.
func txtVp(sd int) string {
	var src *distribution
	var syntax string
	var buf bytes.Buffer
	pickStr(&vp, sd, &syntax)

	for _, item := range strings.Split(syntax, " ") {
		switch item[0] {
		case 'D':
			src = &adverbs
		case 'V':
			src = &verbs
		case 'X':
			src = &auxillaries
		default:
			panic("unreachable")
		}
		var tmp string
		pickStr(src, sd, &tmp)
		buf.WriteString(tmp)
		if len(item) > 1 {
			// Carry trailing punctuation over from the template token.
			buf.Write([]byte{item[1]})
		}

		buf.WriteString(" ")
	}

	return buf.String()
}
76 |
// txtNp expands a random noun-phrase template drawn from the np grammar
// using random stream sd. Template tokens pick a word class ('A' article,
// 'J' adjective, 'D' adverb, 'N' noun); a second character on the token, if
// present, is appended verbatim as punctuation. Every word is followed by a
// space, including the last one.
func txtNp(sd int) string {
	var src *distribution
	var syntax string
	var buf bytes.Buffer
	pickStr(&np, sd, &syntax)

	for _, item := range strings.Split(syntax, " ") {
		switch item[0] {
		case 'A':
			src = &articles
		case 'J':
			src = &adjectives
		case 'D':
			src = &adverbs
		case 'N':
			src = &nouns
		default:
			panic("unreachable")
		}
		var tmp string
		pickStr(src, sd, &tmp)
		buf.WriteString(tmp)
		if len(item) > 1 {
			// Carry trailing punctuation over from the template token.
			buf.Write([]byte{item[1]})
		}
		buf.WriteString(" ")
	}

	return buf.String()
}
107 |
// txtSentence expands a random sentence template from the grammar
// distribution using random stream sd. Template tokens: 'V' verb phrase,
// 'N' noun phrase, 'P' preposition + "the" + noun phrase, 'T' terminator
// (which replaces the phrase's trailing space).
func txtSentence(sd int) string {
	var syntax string
	var buf bytes.Buffer
	pickStr(&grammar, sd, &syntax)

	for _, item := range strings.Split(syntax, " ") {
		switch item[0] {
		case 'V':
			buf.WriteString(txtVp(sd))
		case 'N':
			buf.WriteString(txtNp(sd))
		case 'P':
			var tmp string
			pickStr(&prepositions, sd, &tmp)
			buf.WriteString(tmp)
			buf.WriteString(" the ")
			buf.WriteString(txtNp(sd))
		case 'T':
			// Drop the trailing space left by the previous phrase before
			// appending the terminator.
			sentence := buf.String()
			sentence = sentence[0 : len(sentence)-1]
			buf.Reset()
			buf.WriteString(sentence)

			var tmp string
			pickStr(&terminators, sd, &tmp)
			buf.WriteString(tmp)
		default:
			panic("unreachable")
		}
		if len(item) > 1 {
			// Carry trailing punctuation over from the template token.
			buf.Write([]byte{item[1]})
		}
	}
	return buf.String()
}
143 |
// makeText returns a pseudo-random text of roughly avg characters (between
// avg*vStrLow and avg*vStrHgh) by slicing a random window out of the shared
// text pool. It draws two values (offset, then length) from stream sd,
// which is why sdOrder advances the comment stream twice per row.
func makeText(avg, sd int) string {
	// NOTE: these locals shadow the package-level min/max helpers.
	min := int(float64(avg) * vStrLow)
	max := int(float64(avg) * vStrHgh)

	hgOffset := random(0, dssHuge(textPoolSize-max), long(sd))
	hgLength := random(dssHuge(min), dssHuge(max), long(sd))

	return string(szTextPool[hgOffset : hgOffset+hgLength])
}
153 |
154 | func aggStr(set *distribution, count, col long) string {
155 | var buf bytes.Buffer
156 | permuteDist(set, col)
157 |
158 | for i := long(0); i < count; i++ {
159 | buf.WriteString(set.members[set.permute[i]].text)
160 | buf.WriteString(" ")
161 | }
162 |
163 | tmp := buf.String()
164 | return tmp[:len(tmp)-1]
165 | }
166 |
167 | func rpbRoutine(p dssHuge) dssHuge {
168 | price := dssHuge(90000)
169 | price += (p / 10) % 20001
170 | price += (p % 1000) * 100
171 | return price
172 | }
173 |
174 | func min(a, b dssHuge) dssHuge {
175 | if a < b {
176 | return a
177 | }
178 | return b
179 | }
180 | func max(a, b dssHuge) dssHuge {
181 | if a > b {
182 | return a
183 | }
184 | return b
185 | }
186 |
// yeap reports whether year is a leap year, returning 1 if so and 0
// otherwise.
//
// The previous version used the truncated rule (divisible by 4 and not by
// 100), omitting the Gregorian century exception (divisible by 400). For
// the year values julian actually passes (92-98, from dates in 1992-1998)
// the two rules agree, so this fix is backward-compatible while being
// correct for arbitrary years.
func yeap(year int) int {
	if year%4 == 0 && (year%100 != 0 || year%400 == 0) {
		return 1
	}
	return 0
}
193 |
194 | func julian(date int) int {
195 | offset := date - startDate
196 | result := startDate
197 |
198 | for true {
199 | yr := result / 1000
200 | yend := yr*1000 + 365 + yeap(yr)
201 |
202 | if result+offset > yend {
203 | offset -= yend - result + 1
204 | result += 1000
205 | continue
206 | } else {
207 | break
208 | }
209 | }
210 | return result + offset
211 | }
212 |
213 | func FmtMoney(m dssHuge) string {
214 | sign := ""
215 | if m < 0 {
216 | sign = "-"
217 | m = -m
218 | }
219 | return fmt.Sprintf("%s%d.%02d", sign, m/100, m%100)
220 | }
221 |
// sdNull is a no-op seed-advance hook for tables whose generator consumes
// no random streams.
func sdNull(_ Table, _ dssHuge) {
}
224 |
225 | func initTextPool() {
226 | var buffer bytes.Buffer
227 |
228 | for buffer.Len() < textPoolSize {
229 | sentence := txtSentence(5)
230 | len := len(sentence)
231 |
232 | needed := textPoolSize - buffer.Len()
233 | if needed >= len+1 {
234 | buffer.WriteString(sentence + " ")
235 | } else {
236 | buffer.WriteString(sentence[0:needed])
237 | }
238 | }
239 |
240 | szTextPool = buffer.Bytes()
241 | }
242 |
--------------------------------------------------------------------------------
/tpcc/delivery.go:
--------------------------------------------------------------------------------
1 | package tpcc
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | "fmt"
7 | "time"
8 | )
9 |
// deliveryData carries the per-transaction inputs of the TPC-C Delivery
// transaction.
type deliveryData struct {
	wID        int // warehouse id
	oCarrierID int // carrier id, uniformly random in [1, 10]
	// olDeliveryD is unused by runDelivery, which stamps ol_delivery_d
	// with time.Now() instead — NOTE(review): confirm before removing.
	olDeliveryD string
}
15 |
// SQL statements for the Delivery transaction. The batched statements each
// list exactly ten (w_id, d_id, o_id) tuples — one per district of the
// warehouse — so callers must always supply 30 tuple arguments.
const (
	// Oldest undelivered new-order per district, locked for update.
	deliverySelectNewOrder = "SELECT no_o_id FROM new_order WHERE no_w_id = ? AND no_d_id = ? ORDER BY no_o_id ASC LIMIT 1 FOR UPDATE"
	deliveryDeleteNewOrder = `DELETE FROM new_order WHERE (no_w_id, no_d_id, no_o_id) IN (
		(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?)
	)`
	deliveryUpdateOrder = `UPDATE orders SET o_carrier_id = ? WHERE (o_w_id, o_d_id, o_id) IN (
		(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?)
	)`
	deliverySelectOrders = `SELECT o_d_id, o_c_id FROM orders WHERE (o_w_id, o_d_id, o_id) IN (
		(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?)
	)`
	deliveryUpdateOrderLine = `UPDATE order_line SET ol_delivery_d = ? WHERE (ol_w_id, ol_d_id, ol_o_id) IN (
		(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?)
	)`
	deliverySelectSumAmount = `SELECT ol_d_id, SUM(ol_amount) FROM order_line WHERE (ol_w_id, ol_d_id, ol_o_id) IN (
		(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?),(?,?,?)
	) GROUP BY ol_d_id`
	// Credit the delivered amount to the customer's balance.
	deliveryUpdateCustomer = `UPDATE customer SET c_balance = c_balance + ?, c_delivery_cnt = c_delivery_cnt + 1 WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?`
)
35 |
36 | func (w *Workloader) runDelivery(ctx context.Context, thread int) error {
37 | s := getTPCCState(ctx)
38 |
39 | d := deliveryData{
40 | wID: randInt(s.R, 1, w.cfg.Warehouses),
41 | oCarrierID: randInt(s.R, 1, 10),
42 | }
43 |
44 | tx, err := w.beginTx(ctx)
45 | if err != nil {
46 | return err
47 | }
48 | defer tx.Rollback()
49 | type deliveryOrder struct {
50 | oID int
51 | cID int
52 | amount float64
53 | }
54 | orders := make([]deliveryOrder, 10)
55 | for i := 0; i < districtPerWarehouse; i++ {
56 | if err = s.deliveryStmts[deliverySelectNewOrder].QueryRowContext(ctx, d.wID, i+1).Scan(&orders[i].oID); err == sql.ErrNoRows {
57 | continue
58 | } else if err != nil {
59 | return fmt.Errorf("exec %s failed %v", deliverySelectNewOrder, err)
60 | }
61 | }
62 |
63 | if _, err = s.deliveryStmts[deliveryDeleteNewOrder].ExecContext(ctx,
64 | d.wID, 1, orders[0].oID,
65 | d.wID, 2, orders[1].oID,
66 | d.wID, 3, orders[2].oID,
67 | d.wID, 4, orders[3].oID,
68 | d.wID, 5, orders[4].oID,
69 | d.wID, 6, orders[5].oID,
70 | d.wID, 7, orders[6].oID,
71 | d.wID, 8, orders[7].oID,
72 | d.wID, 9, orders[8].oID,
73 | d.wID, 10, orders[9].oID,
74 | ); err != nil {
75 | return fmt.Errorf("exec %s failed %v", deliveryDeleteNewOrder, err)
76 | }
77 |
78 | if _, err = s.deliveryStmts[deliveryUpdateOrder].ExecContext(ctx, d.oCarrierID,
79 | d.wID, 1, orders[0].oID,
80 | d.wID, 2, orders[1].oID,
81 | d.wID, 3, orders[2].oID,
82 | d.wID, 4, orders[3].oID,
83 | d.wID, 5, orders[4].oID,
84 | d.wID, 6, orders[5].oID,
85 | d.wID, 7, orders[6].oID,
86 | d.wID, 8, orders[7].oID,
87 | d.wID, 9, orders[8].oID,
88 | d.wID, 10, orders[9].oID,
89 | ); err != nil {
90 | return fmt.Errorf("exec %s failed %v", deliveryUpdateOrder, err)
91 | }
92 |
93 | if rows, err := s.deliveryStmts[deliverySelectOrders].QueryContext(ctx,
94 | d.wID, 1, orders[0].oID,
95 | d.wID, 2, orders[1].oID,
96 | d.wID, 3, orders[2].oID,
97 | d.wID, 4, orders[3].oID,
98 | d.wID, 5, orders[4].oID,
99 | d.wID, 6, orders[5].oID,
100 | d.wID, 7, orders[6].oID,
101 | d.wID, 8, orders[7].oID,
102 | d.wID, 9, orders[8].oID,
103 | d.wID, 10, orders[9].oID,
104 | ); err != nil {
105 | return fmt.Errorf("exec %s failed %v", deliverySelectOrders, err)
106 | } else {
107 | for rows.Next() {
108 | var dID, cID int
109 | if err = rows.Scan(&dID, &cID); err != nil {
110 | return fmt.Errorf("exec %s failed %v", deliverySelectOrders, err)
111 | }
112 | orders[dID-1].cID = cID
113 | }
114 | }
115 |
116 | if _, err = s.deliveryStmts[deliveryUpdateOrderLine].ExecContext(ctx, time.Now().Format(timeFormat),
117 | d.wID, 1, orders[0].oID,
118 | d.wID, 2, orders[1].oID,
119 | d.wID, 3, orders[2].oID,
120 | d.wID, 4, orders[3].oID,
121 | d.wID, 5, orders[4].oID,
122 | d.wID, 6, orders[5].oID,
123 | d.wID, 7, orders[6].oID,
124 | d.wID, 8, orders[7].oID,
125 | d.wID, 9, orders[8].oID,
126 | d.wID, 10, orders[9].oID,
127 | ); err != nil {
128 | return fmt.Errorf("exec %s failed %v", deliveryUpdateOrderLine, err)
129 | }
130 |
131 | if rows, err := s.deliveryStmts[deliverySelectSumAmount].QueryContext(ctx,
132 | d.wID, 1, orders[0].oID,
133 | d.wID, 2, orders[1].oID,
134 | d.wID, 3, orders[2].oID,
135 | d.wID, 4, orders[3].oID,
136 | d.wID, 5, orders[4].oID,
137 | d.wID, 6, orders[5].oID,
138 | d.wID, 7, orders[6].oID,
139 | d.wID, 8, orders[7].oID,
140 | d.wID, 9, orders[8].oID,
141 | d.wID, 10, orders[9].oID,
142 | ); err != nil {
143 | return fmt.Errorf("exec %s failed %v", deliverySelectSumAmount, err)
144 | } else {
145 | for rows.Next() {
146 | var dID int
147 | var amount float64
148 | if err = rows.Scan(&dID, &amount); err != nil {
149 | return fmt.Errorf("exec %s failed %v", deliverySelectOrders, err)
150 | }
151 | orders[dID-1].amount = amount
152 | }
153 | }
154 |
155 | for i := 0; i < districtPerWarehouse; i++ {
156 | order := &orders[i]
157 | if order.oID == 0 {
158 | continue
159 | }
160 | if _, err = s.deliveryStmts[deliveryUpdateCustomer].ExecContext(ctx, order.amount, d.wID, i+1, order.cID); err != nil {
161 | return fmt.Errorf("exec %s failed %v", deliveryUpdateCustomer, err)
162 | }
163 | }
164 | return tx.Commit()
165 | }
166 |
--------------------------------------------------------------------------------
/cmd/go-tpc/tpcc.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "net/http"
7 | _ "net/http/pprof"
8 | "os"
9 | "runtime"
10 | "time"
11 |
12 | "github.com/pingcap/go-tpc/pkg/measurement"
13 | "github.com/pingcap/go-tpc/pkg/workload"
14 | "github.com/pingcap/go-tpc/tpcc"
15 | "github.com/prometheus/client_golang/prometheus/promhttp"
16 | "github.com/spf13/cobra"
17 | )
18 |
// tpccConfig holds the TPC-C settings shared by every tpcc subcommand;
// cobra flags registered in registerTpcc write directly into its fields.
var tpccConfig tpcc.Config
20 |
// executeTpcc copies the global CLI flags into tpccConfig, builds the
// appropriate TPC-C workloader (CSV generator or database-backed) and runs
// the given action ("prepare", "run", "cleanup" or "check") under the
// global time limit. Fatal setup errors exit the process.
func executeTpcc(action string) {
	// Optional pprof endpoint for profiling a running benchmark.
	if pprofAddr != "" {
		go func() {
			if err := http.ListenAndServe(pprofAddr, http.DefaultServeMux); err != nil {
				fmt.Printf("Failed to listen pprofAddr: %v\n", err)
				os.Exit(1)
			}
		}()
	}
	// Optional Prometheus metrics endpoint.
	if metricsAddr != "" {
		go func() {
			s := http.Server{
				Addr:    metricsAddr,
				Handler: promhttp.Handler(),
			}
			if err := s.ListenAndServe(); err != nil {
				fmt.Printf("Failed to listen metricsAddr: %v\n", err)
				os.Exit(1)
			}
		}()
	}
	if maxProcs != 0 {
		runtime.GOMAXPROCS(maxProcs)
	}

	openDB()
	defer closeDB()

	// Propagate the shared CLI flags into the TPC-C config.
	tpccConfig.OutputStyle = outputStyle
	tpccConfig.Driver = driver
	tpccConfig.DBName = dbName
	tpccConfig.Threads = threads
	tpccConfig.Isolation = isolationLevel
	var (
		w   workload.Workloader
		err error
	)
	switch tpccConfig.OutputType {
	case "csv", "CSV":
		if tpccConfig.OutputDir == "" {
			fmt.Printf("Output Directory cannot be empty when generating files")
			os.Exit(1)
		}
		w, err = tpcc.NewCSVWorkloader(globalDB, &tpccConfig)
	default:
		// Set a reasonable connection max lifetime when auto-refresh is enabled
		// This ensures connections are actually closed and not just returned to pool
		if tpccConfig.ConnRefreshInterval > 0 {
			globalDB.SetConnMaxLifetime(tpccConfig.ConnRefreshInterval)
			fmt.Printf("Auto-setting connection max lifetime to %v (refresh interval)\n", tpccConfig.ConnRefreshInterval)
		}

		w, err = tpcc.NewWorkloader(globalDB, &tpccConfig)
	}

	if err != nil {
		fmt.Printf("Failed to init work loader: %v\n", err)
		os.Exit(1)
	}

	// Bound the whole action by the configured total time.
	timeoutCtx, cancel := context.WithTimeout(globalCtx, totalTime)
	defer cancel()

	executeWorkload(timeoutCtx, w, threads, action)

	fmt.Println("Finished")
	w.OutputStats(true)
}
89 |
// registerTpcc attaches the "tpcc" command tree (prepare / run / cleanup /
// check) to root and binds all TPC-C flags to tpccConfig.
func registerTpcc(root *cobra.Command) {
	cmd := &cobra.Command{
		Use: "tpcc",
	}

	// Flags shared by every tpcc subcommand.
	cmd.PersistentFlags().IntVar(&tpccConfig.Parts, "parts", 1, "Number to partition warehouses")
	cmd.PersistentFlags().IntVar(&tpccConfig.PartitionType, "partition-type", 1, "Partition type (1 - HASH, 2 - RANGE, 3 - LIST (like HASH), 4 - LIST (like RANGE)")
	cmd.PersistentFlags().IntVar(&tpccConfig.Warehouses, "warehouses", 10, "Number of warehouses")
	cmd.PersistentFlags().BoolVar(&tpccConfig.CheckAll, "check-all", false, "Run all consistency checks")
	// prepare: load (or generate) the initial data set.
	var cmdPrepare = &cobra.Command{
		Use:   "prepare",
		Short: "Prepare data for TPCC",
		Run: func(cmd *cobra.Command, _ []string) {
			executeTpcc("prepare")
		},
	}
	cmdPrepare.PersistentFlags().BoolVar(&tpccConfig.NoCheck, "no-check", false, "TPCC prepare check, default false")
	cmdPrepare.PersistentFlags().BoolVar(&tpccConfig.UseFK, "use-fk", false, "TPCC using foreign key, default false")
	cmdPrepare.PersistentFlags().BoolVar(&tpccConfig.UseClusteredIndex, "use-clustered-index", true, "TPCC use clustered index, default true")
	cmdPrepare.PersistentFlags().StringVar(&tpccConfig.OutputType, "output-type", "", "Output file type."+
		" If empty, then load data to db. Current only support csv")
	cmdPrepare.PersistentFlags().StringVar(&tpccConfig.OutputDir, "output-dir", "", "Output directory for generating file if specified")
	cmdPrepare.PersistentFlags().StringVar(&tpccConfig.SpecifiedTables, "tables", "", "Specified tables for "+
		"generating file, separated by ','. Valid only if output is set. If this flag is not set, generate all tables by default")
	cmdPrepare.PersistentFlags().IntVar(&tpccConfig.PrepareRetryCount, "retry-count", 50, "Retry count when errors occur")
	cmdPrepare.PersistentFlags().DurationVar(&tpccConfig.PrepareRetryInterval, "retry-interval", 10*time.Second, "The interval for each retry")

	// run: execute the benchmark mix.
	var cmdRun = &cobra.Command{
		Use:   "run",
		Short: "Run workload",
		Run: func(cmd *cobra.Command, _ []string) {
			executeTpcc("run")
		},
	}
	cmdRun.PersistentFlags().BoolVar(&tpccConfig.Wait, "wait", false, "including keying & thinking time described on TPC-C Standard Specification")
	cmdRun.PersistentFlags().DurationVar(&tpccConfig.MaxMeasureLatency, "max-measure-latency", measurement.DefaultMaxLatency, "max measure latency in millisecond")
	cmdRun.PersistentFlags().IntSliceVar(&tpccConfig.Weight, "weight", []int{45, 43, 4, 4, 4}, "Weight for NewOrder, Payment, OrderStatus, Delivery, StockLevel")
	cmdRun.Flags().DurationVar(&tpccConfig.ConnRefreshInterval, "conn-refresh-interval", 0, "automatically refresh database connections at specified intervals to balance traffic across new replicas (0 = disabled, e.g., 10s)")

	// cleanup: drop the generated tables.
	var cmdCleanup = &cobra.Command{
		Use:   "cleanup",
		Short: "Cleanup data for the workload",
		Run: func(cmd *cobra.Command, _ []string) {
			executeTpcc("cleanup")
		},
	}

	// check: run consistency checks against prepared data.
	var cmdCheck = &cobra.Command{
		Use:   "check",
		Short: "Check data consistency for the workload",
		Run: func(cmd *cobra.Command, _ []string) {
			executeTpcc("check")
		},
	}

	cmd.AddCommand(cmdRun, cmdPrepare, cmdCleanup, cmdCheck)

	root.AddCommand(cmd)
}
149 |
--------------------------------------------------------------------------------
/cmd/go-tpc/misc.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "sync"
7 | "time"
8 |
9 | "github.com/pingcap/go-tpc/pkg/workload"
10 | )
11 |
12 | func checkPrepare(ctx context.Context, w workload.Workloader) {
13 | // skip preparation check in csv case
14 | if w.Name() == "tpcc-csv" {
15 | fmt.Println("Skip preparing checking. Please load CSV data into database and check later.")
16 | return
17 | }
18 | if w.Name() == "tpcc" && tpccConfig.NoCheck {
19 | return
20 | }
21 |
22 | var wg sync.WaitGroup
23 | wg.Add(threads)
24 | for i := 0; i < threads; i++ {
25 | go func(index int) {
26 | defer wg.Done()
27 |
28 | ctx = w.InitThread(ctx, index)
29 | defer w.CleanupThread(ctx, index)
30 |
31 | if err := w.CheckPrepare(ctx, index); err != nil {
32 | fmt.Printf("check prepare failed, err %v\n", err)
33 | return
34 | }
35 | }(i)
36 | }
37 | wg.Wait()
38 | }
39 |
// execute runs one worker's share of the given action. "prepare", "cleanup"
// and "check" get a background-derived context so maintenance work is not
// cut off by the run time limit; only "run" iterations observe timeoutCtx.
// count <= 0 means loop until the context expires. With ignoreError set,
// run errors are logged and the loop continues.
func execute(timeoutCtx context.Context, w workload.Workloader, action string, threads, index int) error {
	// Per-worker share of the requested total operations (0 = unlimited).
	count := totalCount / threads

	// For prepare, cleanup and check operations, use background context to avoid timeout constraints
	// Only run phases should be limited by timeout
	var ctx context.Context
	if action == "prepare" || action == "cleanup" || action == "check" {
		ctx = w.InitThread(context.Background(), index)
	} else {
		ctx = w.InitThread(timeoutCtx, index)
	}
	defer w.CleanupThread(ctx, index)

	switch action {
	case "prepare":
		// Do cleanup only if dropData is set and not generate csv data.
		if dropData {
			if err := w.Cleanup(ctx, index); err != nil {
				return err
			}
		}
		return w.Prepare(ctx, index)
	case "cleanup":
		return w.Cleanup(ctx, index)
	case "check":
		return w.Check(ctx, index)
	}

	// This loop is only reached for "run" action since other actions return earlier
	for i := 0; i < count || count <= 0; i++ {
		// Check if timeout has occurred before starting next query
		select {
		case <-ctx.Done():
			if !silence {
				fmt.Printf("[%s] %s worker %d stopped due to timeout after %d iterations\n",
					time.Now().Format("2006-01-02 15:04:05"), action, index, i)
			}
			return nil
		default:
		}

		err := w.Run(ctx, index)
		if err != nil {
			// Check if the error is due to timeout/cancellation
			if ctx.Err() != nil {
				if !silence {
					fmt.Printf("[%s] %s worker %d stopped due to timeout: %v\n",
						time.Now().Format("2006-01-02 15:04:05"), action, index, err)
				}
				return nil // Don't treat timeout as an error
			}

			if !silence {
				fmt.Printf("[%s] execute %s failed, err %v\n", time.Now().Format("2006-01-02 15:04:05"), action, err)
			}
			if !ignoreError {
				return err
			}
		}
	}

	return nil
}
103 |
// executeWorkload fans the given action out to `threads` workers and blocks
// until they finish. A side goroutine prints intermediate stats every
// outputInterval; the buffered channel `ch` lets executeWorkload wait for
// that goroutine's final pass before returning. For tpch/ch "run", helper
// views required by the revenue query are (re)created up front, because CSV
// ingestion skips the prepare stage that would normally create them.
func executeWorkload(ctx context.Context, w workload.Workloader, threads int, action string) {
	var wg sync.WaitGroup
	wg.Add(threads)

	outputCtx, outputCancel := context.WithCancel(ctx)
	// Buffered so the reporter goroutine's completion signal never blocks.
	ch := make(chan struct{}, 1)
	go func() {
		ticker := time.NewTicker(outputInterval)
		defer ticker.Stop()

		for {
			select {
			case <-outputCtx.Done():
				ch <- struct{}{}
				return
			case <-ticker.C:
				w.OutputStats(false)
			}
		}
	}()
	// TPC-H Q15 depends on the revenue0 view; create it before running.
	if w.Name() == "tpch" && action == "run" {
		err := w.Exec(`create or replace view revenue0 (supplier_no, total_revenue) as
	select
		l_suppkey,
		sum(l_extendedprice * (1 - l_discount))
	from
		lineitem
	where
		l_shipdate >= '1997-07-01'
		and l_shipdate < date_add('1997-07-01', interval '3' month)
	group by
		l_suppkey;`)
		if err != nil {
			panic(fmt.Sprintf("a fatal occurred when preparing view data: %v", err))
		}
	}
	// CH benchmark requires the revenue1 view for analytical queries.
	// During normal prepare flow, this view is created in prepareView() method.
	// However, when using CSV data ingestion, the prepare stage is skipped and
	// the view won't exist. So we create it here when action is "run" to ensure
	// the view is available regardless of how data was loaded.
	if w.Name() == "ch" && action == "run" {
		err := w.Exec(`create or replace view revenue1 (supplier_no, total_revenue) as (
		select	mod((s_w_id * s_i_id),10000) as supplier_no,
			sum(ol_amount) as total_revenue
		from	order_line, stock
		where	ol_i_id = s_i_id and ol_supply_w_id = s_w_id
			and ol_delivery_d >= '2007-01-02 00:00:00.000000'
		group by mod((s_w_id * s_i_id),10000));`)
		if err != nil {
			panic(fmt.Sprintf("a fatal occurred when preparing view data: %v", err))
		}
	}
	// Optionally wrap the whole run in a plan-replayer dump session.
	enabledDumpPlanReplayer := w.IsPlanReplayerDumpEnabled()
	if enabledDumpPlanReplayer {
		err := w.PreparePlanReplayerDump()
		if err != nil {
			fmt.Printf("[%s] prepare plan replayer failed, err%v\n",
				time.Now().Format("2006-01-02 15:04:05"), err)
		}
		defer func() {
			err = w.FinishPlanReplayerDump()
			if err != nil {
				fmt.Printf("[%s] dump plan replayer failed, err%v\n",
					time.Now().Format("2006-01-02 15:04:05"), err)
			}
		}()
	}

	for i := 0; i < threads; i++ {
		go func(index int) {
			defer wg.Done()
			// A prepare failure is fatal (panic aborts the whole process);
			// other actions just log the error.
			if err := execute(ctx, w, action, threads, index); err != nil {
				if action == "prepare" {
					panic(fmt.Sprintf("a fatal occurred when preparing data: %v", err))
				}
				fmt.Printf("execute %s failed, err %v\n", action, err)
				return
			}
		}(i)
	}

	wg.Wait()

	if action == "prepare" {
		// For prepare, we must check the data consistency after all prepare finished
		checkPrepare(ctx, w)
	}
	outputCancel()

	// Wait for the stats reporter to observe cancellation and exit.
	<-ch
}
196 |
--------------------------------------------------------------------------------
/rawsql/workload.go:
--------------------------------------------------------------------------------
1 | package rawsql
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | "fmt"
7 | "sort"
8 | "strings"
9 | "time"
10 |
11 | "github.com/pingcap/go-tpc/pkg/measurement"
12 | replayer "github.com/pingcap/go-tpc/pkg/plan-replayer"
13 | "github.com/pingcap/go-tpc/pkg/util"
14 | "github.com/pingcap/go-tpc/pkg/workload"
15 | )
16 |
// contextKey is a private type for context values, preventing collisions
// with keys defined by other packages.
type contextKey string

// stateKey stores the per-thread *rawsqlState in the request context.
const stateKey = contextKey("rawsql")

// Config holds the rawsql workload configuration.
type Config struct {
	DBName string
	Queries map[string]string // query name: query SQL
	QueryNames []string
	ExecExplainAnalyze bool
	RefreshWait time.Duration // back-off before refreshing a dead connection

	// output style
	OutputStyle string
	EnablePlanReplayer bool
	PlanReplayerConfig replayer.PlanReplayerConfig
}

// rawsqlState is the per-thread execution state; queryIdx is the cursor into
// cfg.QueryNames for round-robin query selection.
type rawsqlState struct {
	queryIdx int
	*workload.TpcState
}

// Workloader executes user-supplied SQL files as a benchmark workload.
type Workloader struct {
	cfg *Config
	db *sql.DB

	measurement *measurement.Measurement

	PlanReplayerRunner *replayer.PlanReplayerRunner
}

// Compile-time check that Workloader satisfies workload.Workloader.
var _ workload.Workloader = &Workloader{}
49 |
// NewWorkloader builds a rawsql Workloader over db, with latency measurement
// configured for the range [100µs, 20min] at 3 significant figures.
func NewWorkloader(db *sql.DB, cfg *Config) workload.Workloader {
	return &Workloader{
		db: db,
		cfg: cfg,
		measurement: measurement.NewMeasurement(func(m *measurement.Measurement) {
			m.MinLatency = 100 * time.Microsecond
			m.MaxLatency = 20 * time.Minute
			m.SigFigs = 3
		}),
	}
}
61 |
// Name returns the workload identifier used for CLI dispatch and logging.
func (w *Workloader) Name() string {
	return "rawsql"
}
65 |
// InitThread attaches fresh per-thread state to the context. Each thread
// starts its query cursor at its own threadID so concurrent workers begin on
// different queries.
func (w *Workloader) InitThread(ctx context.Context, threadID int) context.Context {
	s := &rawsqlState{
		queryIdx: threadID,
		TpcState: workload.NewTpcState(ctx, w.db),
	}

	ctx = context.WithValue(ctx, stateKey, s)
	return ctx
}
75 |
76 | func (w *Workloader) getState(ctx context.Context) *rawsqlState {
77 | s := ctx.Value(stateKey).(*rawsqlState)
78 | return s
79 | }
80 |
81 | func (w *Workloader) updateState(ctx context.Context) {
82 | s := w.getState(ctx)
83 | s.queryIdx++
84 | }
85 |
// CleanupThread releases the thread's database connection.
// NOTE(review): the Close error is ignored; acceptable during teardown.
func (w *Workloader) CleanupThread(ctx context.Context, threadID int) {
	s := w.getState(ctx)
	s.Conn.Close()
}
90 |
91 | func (w *Workloader) Run(ctx context.Context, threadID int) error {
92 | s := w.getState(ctx)
93 | defer w.updateState(ctx)
94 |
95 | if err := s.Conn.PingContext(ctx); err != nil {
96 | time.Sleep(w.cfg.RefreshWait) // I feel it silly to sleep, but don't come up with better idea
97 | if err := s.RefreshConn(ctx); err != nil {
98 | return err
99 | }
100 | }
101 |
102 | queryName := w.cfg.QueryNames[s.queryIdx%len(w.cfg.QueryNames)]
103 | query := w.cfg.Queries[queryName]
104 |
105 | if w.cfg.EnablePlanReplayer {
106 | w.dumpPlanReplayer(ctx, s, query, queryName)
107 | }
108 |
109 | if w.cfg.ExecExplainAnalyze {
110 | query = "explain analyze\n" + query
111 | }
112 |
113 | start := time.Now()
114 | rows, err := s.Conn.QueryContext(ctx, query)
115 | w.measurement.Measure(queryName, time.Since(start), err)
116 | if err != nil {
117 | return fmt.Errorf("execute query %s failed %v", queryName, err)
118 | }
119 | if w.cfg.ExecExplainAnalyze {
120 | table, err := util.RenderExplainAnalyze(rows)
121 | if err != nil {
122 | return err
123 | }
124 | util.StdErrLogger.Printf("explain analyze result of query %s:\n%s\n", queryName, table)
125 | return nil
126 | }
127 |
128 | defer rows.Close()
129 | return nil
130 | }
131 |
132 | func outputMeasurement(outputStyle string, prefix string, opMeasurement map[string]*measurement.Histogram) {
133 | keys := make([]string, len(opMeasurement))
134 | var i = 0
135 | for k := range opMeasurement {
136 | keys[i] = k
137 | i += 1
138 | }
139 | sort.Strings(keys)
140 |
141 | lines := [][]string{}
142 | for _, op := range keys {
143 | hist := opMeasurement[op]
144 | if !hist.Empty() {
145 | lines = append(lines, []string{prefix, strings.ToUpper(op), util.FloatToTwoString(float64(hist.GetInfo().Avg)/1000) + "s"})
146 | }
147 | }
148 |
149 | switch outputStyle {
150 | case util.OutputStylePlain:
151 | util.RenderString("%s%s: %s\n", nil, lines)
152 | case util.OutputStyleTable:
153 | util.RenderTable([]string{"Prefix", "Operation", "Avg(s)"}, lines)
154 | case util.OutputStyleJson:
155 | util.RenderJson([]string{"Prefix", "Operation", "Avg(s)"}, lines)
156 | }
157 | }
158 |
// OutputStats prints collected latency statistics; ifSummaryReport selects
// the final summary format instead of the periodic one.
func (w *Workloader) OutputStats(ifSummaryReport bool) {
	w.measurement.Output(ifSummaryReport, w.cfg.OutputStyle, outputMeasurement)
}
162 |
// DBName returns the configured target database name.
func (w *Workloader) DBName() string {
	return w.cfg.DBName
}
166 |
// Prepare is unsupported for rawsql: there is no canonical data set to load.
func (w *Workloader) Prepare(ctx context.Context, threadID int) error {
	// how to prepare data is undecided
	panic("not implemented") // TODO: Implement
}

// CheckPrepare is unsupported for rawsql (no prepared data to verify).
func (w *Workloader) CheckPrepare(ctx context.Context, threadID int) error {
	panic("not implemented") // TODO: Implement
}

// Cleanup is unsupported for rawsql (the workload owns no schema).
func (w *Workloader) Cleanup(ctx context.Context, threadID int) error {
	panic("not implemented") // TODO: Implement
}

// Check is unsupported for rawsql (no consistency rules apply).
func (w *Workloader) Check(ctx context.Context, threadID int) error {
	panic("not implemented") // TODO: Implement
}
183 |
184 | func (w *Workloader) dumpPlanReplayer(ctx context.Context, s *rawsqlState, query, queryName string) {
185 | query = "plan replayer dump explain " + query
186 | err := w.PlanReplayerRunner.Dump(ctx, s.Conn, query, queryName)
187 | if err != nil {
188 | fmt.Printf("dump query %s plan replayer failed %v", queryName, err)
189 | }
190 | }
191 |
// IsPlanReplayerDumpEnabled reports whether plan replayer dumping was
// requested in the configuration.
func (w *Workloader) IsPlanReplayerDumpEnabled() bool {
	return w.cfg.EnablePlanReplayer
}
195 |
// PreparePlanReplayerDump lazily constructs the PlanReplayerRunner and
// prepares its output destination. The workload name is stamped into the
// config so dump files are attributed to "rawsql".
func (w *Workloader) PreparePlanReplayerDump() error {
	w.cfg.PlanReplayerConfig.WorkloadName = w.Name()
	if w.PlanReplayerRunner == nil {
		w.PlanReplayerRunner = &replayer.PlanReplayerRunner{
			Config: w.cfg.PlanReplayerConfig,
		}
	}
	return w.PlanReplayerRunner.Prepare()
}
205 |
// FinishPlanReplayerDump finalizes and closes the plan replayer dump session.
func (w *Workloader) FinishPlanReplayerDump() error {
	return w.PlanReplayerRunner.Finish()
}
209 |
// Exec is a deliberate no-op for rawsql: the driver uses Exec to create
// helper views for tpch/ch workloads, and rawsql has none.
func (w *Workloader) Exec(sql string) error {
	return nil
}
213 |
--------------------------------------------------------------------------------
/tpch/dbgen/rand.go:
--------------------------------------------------------------------------------
1 | package dbgen
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "math"
7 | )
8 |
const (
	// maxStream is the highest RNG stream index; dbgen uses streams 0..47.
	maxStream = 47
	// rngA / rngC are the multiplier and increment of the 64-bit LCG
	// (nextRand64 / advanceRand64).
	rngA dssHuge = 6364136223846793005
	rngC dssHuge = 1
	maxLong = math.MaxInt32
	// dM is the modulus of the 32-bit Lehmer generator, as a float divisor.
	dM = 2147483647.0
	// alphaNum is the 64-character alphabet used by aRand for text fields.
	alphaNum = "0123456789abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ,"
	// vStrLow / vStrHgh bound variable-length strings to [0.4, 1.6] of the mean.
	vStrLow = 0.4
	vStrHgh = 1.6
)

var (
	// multiplier / modulus define the 32-bit Lehmer generator used by
	// nextRand and nthElement.
	multiplier = dssHuge(16807)
	modulus = dssHuge(2147483647)
)

// Seed is the state of one RNG stream.
type Seed struct {
	Table // owning table of this stream
	value dssHuge // current seed value
	usage dssHuge // values consumed while generating the current row
	boundary dssHuge // maximum values a single row may consume
}

// seeds holds all RNG streams, indexed 0..maxStream.
var seeds [maxStream + 1]Seed
33 |
// nextRand advances a 32-bit Lehmer (multiplicative congruential) generator:
// X' = 16807 * X mod (2^31 - 1), matching the classic dbgen RNG.
func nextRand(nSeed dssHuge) dssHuge {
	return (nSeed * 16807) % 2147483647
}
37 |
38 | func nextRand64(nSeed dssHuge) dssHuge {
39 | a := rngA
40 | c := rngC
41 | return nSeed*a + c
42 | }
43 |
44 | func unifInt(nLow dssHuge, nHigh dssHuge, nStream long) dssHuge {
45 | var dRange float64
46 | var nTemp dssHuge
47 | nLow32 := int32(nLow)
48 | nHigh32 := int32(nHigh)
49 |
50 | if nStream < 0 || nStream > maxStream {
51 | nStream = 0
52 | }
53 | if (nHigh == maxLong) && (nLow == 0) {
54 | dRange = float64(nHigh32 - nLow32 + 1)
55 | _ = dssHuge(nHigh32 - nLow32 + 1)
56 | } else {
57 | dRange = float64(nHigh - nLow + 1)
58 | _ = nHigh - nLow + 1
59 | }
60 | seeds[nStream].value = nextRand(seeds[nStream].value)
61 | nTemp = dssHuge(float64(seeds[nStream].value) / dM * dRange)
62 | return nLow + nTemp
63 | }
64 |
// random64 draws an integer in [lower, upper] from stream nStream using the
// 64-bit LCG, swapping the bounds if they arrive reversed. An out-of-range
// stream index falls back to stream 0.
func random64(lower, upper dssHuge, nStream long) dssHuge {

	if nStream < 0 || nStream > maxStream {
		nStream = 0
	}
	if lower > upper {
		lower, upper = upper, lower
	}
	seeds[nStream].value = nextRand64(seeds[nStream].value)

	// Take the absolute value, then reduce into the range; the slight modulo
	// bias matches the reference dbgen implementation.
	nTemp := seeds[nStream].value
	if nTemp < 0 {
		nTemp = -nTemp
	}
	nTemp %= upper - lower + 1
	seeds[nStream].usage += 1
	return lower + nTemp
}
83 |
// random draws a uniform integer in [lower, upper] from stream nStream and
// records one use of the stream (rowStop uses the count to resync seeds).
// NOTE(review): seeds[nStream] is indexed before any range check (unifInt
// clamps internally) — an out-of-range stream would panic here; confirm all
// callers pass valid stream indices.
func random(lower, upper dssHuge, nStream long) dssHuge {
	seeds[nStream].usage += 1
	return unifInt(lower, upper, nStream)
}
88 |
// advanceRand64 jumps the 64-bit LCG forward by nCount steps in O(log n),
// combining a^nCount and the geometric increment sum via binary
// decomposition of nCount (square-and-multiply).
func advanceRand64(nSeed, nCount dssHuge) dssHuge {
	a := rngA
	c := rngC
	var nBit int
	aPow := a
	dSum := c
	if nCount == 0 {
		return nSeed
	}

	// Locate the highest set bit of nCount (loop exits when nCount>>nBit == 1).
	for nBit = 0; (nCount >> nBit) != rngC; nBit++ {
	}
	for {
		nBit -= 1
		if nBit < 0 {
			break
		}
		// Accumulate the power and the sum for this bit position.
		dSum *= aPow + 1
		aPow = aPow * aPow
		if (nCount>>nBit)%2 == 1 {
			dSum += aPow
			aPow *= a
		}
	}
	nSeed = nSeed*aPow + dSum*c
	return nSeed
}
116 |
117 | func nthElement(n dssHuge, startSeed *dssHuge) {
118 | var z, mult dssHuge
119 |
120 | mult = multiplier
121 | z = *startSeed
122 | for n > 0 {
123 | if n%2 != 0 {
124 | z = (mult * z) % modulus
125 | }
126 | n = n / 2
127 | mult = (mult * mult) % modulus
128 | }
129 | *startSeed = z
130 | }
131 |
// advanceStream jumps stream nStream forward by nCalls values, selecting the
// 64-bit or 32-bit generator to match how the stream is normally consumed.
func advanceStream(nStream int, nCalls dssHuge, bUse64Bit bool) {
	if bUse64Bit {
		seeds[nStream].value = advanceRand64(seeds[nStream].value, nCalls)
	} else {
		nthElement(nCalls, &seeds[nStream].value)
	}
}
139 |
140 | func rowStart(_ Table) {
141 | for i := 0; i < maxStream; i++ {
142 | seeds[i].usage = 0
143 | }
144 | }
145 | func rowStop(t Table) {
146 | if t == TOrderLine {
147 | t = TOrder
148 | }
149 | if t == TPartPsupp {
150 | t = TPart
151 | }
152 |
153 | for i := 0; i < maxStream; i++ {
154 | if seeds[i].Table == t || seeds[i].Table == tDefs[t].child {
155 | nthElement(seeds[i].boundary-seeds[i].usage, &seeds[i].value)
156 | }
157 | }
158 | }
159 |
160 | func aRand(min, max, column int) string {
161 | var buf bytes.Buffer
162 | var charInt dssHuge
163 | len := random(dssHuge(min), dssHuge(max), long(column))
164 | for i := dssHuge(0); i < len; i++ {
165 | if i%5 == 0 {
166 | charInt = random(0, maxLong, long(column))
167 | }
168 | buf.Write([]byte{alphaNum[charInt&0o77]})
169 | charInt >>= 6
170 | }
171 | return buf.String()
172 | }
173 |
174 | func vStr(avg, sd int) string {
175 | return aRand((int)(float64(avg)*vStrLow), (int)(float64(avg)*vStrHgh), sd)
176 | }
177 |
// genPhone builds a TPC-H style phone number "CC-AAA-EEE-NNNN": the country
// code is 10 + (idx mod nationsMax) and the remaining groups are drawn from
// RNG stream sd.
func genPhone(idx dssHuge, sd int) string {
	aCode := random(100, 999, long(sd))
	exChg := random(100, 999, long(sd))
	number := random(1000, 9999, long(sd))

	return fmt.Sprintf("%02d-%03d-%03d-%04d",
		10+(idx%nationsMax),
		aCode,
		exChg,
		number)
}
189 |
// initSeeds (re)initializes all 48 RNG streams with the canonical dbgen seed
// values. Each entry records the owning table, the initial seed, a zeroed
// usage counter, and the per-row boundary (maximum values one row consumes).
func initSeeds() {
	seeds = [maxStream + 1]Seed{
		{TPart, 1, 0, 1},
		{TPart, 46831694, 0, 1},
		{TPart, 1841581359, 0, 1},
		{TPart, 1193163244, 0, 1},
		{TPart, 727633698, 0, 1},
		{TNone, 933588178, 0, 1},
		{TPart, 804159733, 0, 2},
		{TPsupp, 1671059989, 0, suppPerPart},
		{TPsupp, 1051288424, 0, suppPerPart},
		{TPsupp, 1961692154, 0, suppPerPart * 2},
		{TOrder, 1227283347, 0, 1},
		{TOrder, 1171034773, 0, 1},
		{TOrder, 276090261, 0, 2},
		{TOrder, 1066728069, 0, 1},
		{TLine, 209208115, 0, oLcntMax},
		{TLine, 554590007, 0, oLcntMax},
		{TLine, 721958466, 0, oLcntMax},
		{TLine, 1371272478, 0, oLcntMax},
		{TLine, 675466456, 0, oLcntMax},
		{TLine, 1808217256, 0, oLcntMax},
		{TLine, 2095021727, 0, oLcntMax},
		{TLine, 1769349045, 0, oLcntMax},
		{TLine, 904914315, 0, oLcntMax},
		{TLine, 373135028, 0, oLcntMax},
		{TLine, 717419739, 0, oLcntMax},
		{TLine, 1095462486, 0, oLcntMax * 2},
		{TCust, 881155353, 0, 9},
		{TCust, 1489529863, 0, 1},
		{TCust, 1521138112, 0, 3},
		{TCust, 298370230, 0, 1},
		{TCust, 1140279430, 0, 1},
		{TCust, 1335826707, 0, 2},
		{TSupp, 706178559, 0, 9},
		{TSupp, 110356601, 0, 1},
		{TSupp, 884434366, 0, 3},
		{TSupp, 962338209, 0, 1},
		{TSupp, 1341315363, 0, 2},
		{TPart, 709314158, 0, 92},
		{TOrder, 591449447, 0, 1},
		{TLine, 431918286, 0, 1},
		{TOrder, 851767375, 0, 1},
		{TNation, 606179079, 0, 2},
		{TRegion, 1500869201, 0, 2},
		{TOrder, 1434868289, 0, 1},
		{TSupp, 263032577, 0, 1},
		{TSupp, 753643799, 0, 1},
		{TSupp, 202794285, 0, 1},
		{TSupp, 715851524, 0, 1},
	}
}
242 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Go TPC
2 |
A toolbox to benchmark [TPC](http://www.tpc.org/) workloads against TiDB, most MySQL-compatible databases, and PostgreSQL-compatible databases such as PostgreSQL / CockroachDB / AlloyDB / Yugabyte.
4 |
5 | ## Install
6 |
7 | You can use one of the three approaches
8 |
### Install using the script (recommended)
10 |
11 | ```bash
12 | curl --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/pingcap/go-tpc/master/install.sh | sh
13 | ```
14 |
15 | And then open a new terminal to try `go-tpc`
16 |
17 | ### Download binary
18 |
19 | You can download the pre-built binary [here](https://github.com/pingcap/go-tpc/releases) and then gunzip it
20 |
21 | ### Build from source
22 |
23 | ```bash
24 | git clone https://github.com/pingcap/go-tpc.git
25 | cd go-tpc
26 | make build
27 | ```
28 |
29 | Then you can find the `go-tpc` binary file in the `./bin` directory.
30 |
31 | ## Usage
32 |
If you have `go-tpc` in your PATH, replace `./bin/go-tpc` with `go-tpc` in the commands below
34 |
By default, go-tpc uses `root::@tcp(127.0.0.1:4000)/test` as the dsn address; you can override it with the flags below:
36 |
37 | ```bash
38 | -D, --db string Database name (default "test")
39 | -H, --host string Database host (default "127.0.0.1")
40 | -p, --password string Database password
41 | -P, --port int Database port (default 4000)
42 | -U, --user string Database user (default "root")
43 |
44 | ```
45 |
46 | > **Note:**
47 | >
48 | > When exporting csv files to a directory, `go-tpc` will also create the necessary tables for further data input if
49 | > the provided database address is accessible.
50 |
51 | For example:
52 |
53 | ```bash
54 | ./bin/go-tpc -H 127.0.0.1 -P 3306 -D tpcc ...
55 | ```
56 |
57 | ### TPC-C
58 |
59 | #### Prepare
60 |
61 | ##### TiDB & MySQL
62 |
63 | ```bash
64 | # Create 4 warehouses with 4 threads
65 | ./bin/go-tpc tpcc --warehouses 4 prepare -T 4
66 | ```
67 |
68 | ##### PostgreSQL & CockroachDB & AlloyDB & Yugabyte
69 |
70 |
71 | ```
72 | ./bin/go-tpc tpcc prepare -d postgres -U myuser -p '12345678' -D test -H 127.0.0.1 -P 5432 --conn-params sslmode=disable
73 | ```
74 |
75 | #### Run
76 |
77 | ##### TiDB & MySQL
78 |
79 | ```bash
# Run TPCC workloads; you can just run, or add the --wait option to include wait times
81 | ./bin/go-tpc tpcc --warehouses 4 run -T 4
# Run TPCC including wait times (keying & thinking time) on every transaction
83 | ./bin/go-tpc tpcc --warehouses 4 run -T 4 --wait
84 | ```
85 |
86 | ##### PostgreSQL & CockroachDB & AlloyDB & Yugabyte
87 |
88 | ```
89 | ./bin/go-tpc tpcc run -d postgres -U myuser -p '12345678' -D test -H 127.0.0.1 -P 5432 --conn-params sslmode=disable
90 | ```
91 |
92 | #### Check
93 |
94 | ```bash
# Check consistency. You can check after prepare or after run
96 | ./bin/go-tpc tpcc --warehouses 4 check
97 | ```
98 |
99 | #### Clean up
100 |
101 | ```bash
102 | # Cleanup
103 | ./bin/go-tpc tpcc --warehouses 4 cleanup
104 | ```
105 |
106 | #### Other usages
107 |
108 | ```bash
109 | # Generate csv files (split to 100 files each table)
110 | ./bin/go-tpc tpcc --warehouses 4 prepare -T 100 --output-type csv --output-dir data
111 | # Specified tables when generating csv files
112 | ./bin/go-tpc tpcc --warehouses 4 prepare -T 100 --output-type csv --output-dir data --tables history,orders
113 | # Start pprof
114 | ./bin/go-tpc tpcc --warehouses 4 prepare --output-type csv --output-dir data --pprof :10111
115 | ```
116 |
117 | If you want to import tpcc data into TiDB, please refer to [import-to-tidb](docs/import-to-tidb.md).
118 |
119 | ### TPC-H
120 |
121 | #### Prepare
122 |
123 | ##### TiDB & MySQL
124 |
125 | ```bash
126 | # Prepare data with scale factor 1
127 | ./bin/go-tpc tpch --sf=1 prepare
128 | # Prepare data with scale factor 1, create tiflash replica, and analyze table after data loaded
129 | ./bin/go-tpc tpch --sf 1 --analyze --tiflash-replica 1 prepare
130 | ```
131 |
132 | ##### PostgreSQL & CockroachDB & AlloyDB & Yugabyte
133 |
134 | ```
135 | ./bin/go-tpc tpch prepare -d postgres -U myuser -p '12345678' -D test -H 127.0.0.1 -P 5432 --conn-params sslmode=disable
136 | ```
137 |
138 | #### Run
139 | ##### TiDB & MySQL
140 |
141 | ```bash
142 | # Run TPCH workloads with result checking
143 | ./bin/go-tpc tpch --sf=1 --check=true run
144 | # Run TPCH workloads without result checking
145 | ./bin/go-tpc tpch --sf=1 run
146 | ```
147 |
148 | ##### PostgreSQL & CockroachDB & AlloyDB & Yugabyte
149 |
150 | ```
151 | ./bin/go-tpc tpch run -d postgres -U myuser -p '12345678' -D test -H 127.0.0.1 -P 5432 --conn-params sslmode=disable
152 | ```
153 | #### Clean up
154 |
155 | ```bash
156 | # Cleanup
157 | ./bin/go-tpc tpch cleanup
158 | ```
159 |
160 | ### CH-benCHmark
161 |
162 | #### Prepare
163 |
164 | 1. First please refer to the above instruction(`go-tpc tpcc --warehouses $warehouses prepare`) to prepare the TP part schema and populate data
165 |
166 | 2. Then uses `go-tpc ch prepare` to prepare the AP part schema and data
167 |
A detailed example of running the CH workload on TiDB can be found in the [TiDB Doc](https://docs.pingcap.com/tidb/dev/benchmark-tidb-using-ch)
169 |
170 | ##### TiDB & MySQL
171 | ```bash
172 | # Prepare TP data
173 | ./bin/go-tpc tpcc --warehouses 10 prepare -T 4 -D test -H 127.0.0.1 -P 4000
174 | # Prepare AP data, create tiflash replica, and analyze table after data loaded
175 | ./bin/go-tpc ch --analyze --tiflash-replica 1 prepare -D test -H 127.0.0.1 -P 4000
176 | ```
177 | ##### PostgreSQL & CockroachDB & AlloyDB & Yugabyte
178 |
179 | ``` bash
180 | # Prepare TP data
181 | ./bin/go-tpc tpcc prepare -d postgres -U myuser -p '12345678' -D test -H 127.0.0.1 -P 5432 --conn-params sslmode=disable -T 4
182 | # Prepare AP data
183 | ./bin/go-tpc ch prepare -d postgres -U myuser -p '12345678' -D test -H 127.0.0.1 -P 5432 --conn-params sslmode=disable
184 | ```
185 |
186 | #### Run
187 |
188 | ##### TiDB & MySQL
189 | ```bash
190 | ./bin/go-tpc ch --warehouses $warehouses -T $tpWorkers -t $apWorkers --time $measurement-time run
191 | ```
192 | ##### PostgreSQL & CockroachDB & AlloyDB & Yugabyte
193 |
194 | ```
195 | ./bin/go-tpc ch run -d postgres -U myuser -p '12345678' -D test -H 127.0.0.1 -P 5432 --conn-params sslmode=disable
196 | ```
197 |
198 | ### Raw SQL
199 | `rawsql` command is used to execute sql from given sql files.
200 |
201 | #### Run
202 | ```bash
203 | ./bin/go-tpc rawsql run --query-files $path-to-query-files
204 | ```
205 |
--------------------------------------------------------------------------------
/tpch/loader.go:
--------------------------------------------------------------------------------
1 | package tpch
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 |
7 | "github.com/pingcap/go-tpc/pkg/sink"
8 | "github.com/pingcap/go-tpc/tpch/dbgen"
9 | )
10 |
// sqlLoader adapts a ConcurrentSink to the context-free dbgen loader
// callbacks by binding a fixed context to the sink's methods.
// NOTE(review): storing a context in a struct is normally discouraged; here
// it only bridges the dbgen interface, which takes no context.
type sqlLoader struct {
	*sink.ConcurrentSink
	context.Context
}

// WriteRow forwards one row to the sink using the stored context.
func (s *sqlLoader) WriteRow(values ...interface{}) error {
	return s.ConcurrentSink.WriteRow(s.Context, values...)
}

// Flush writes any buffered rows to the database.
func (s *sqlLoader) Flush() error {
	return s.ConcurrentSink.Flush(s.Context)
}
23 |
// orderLoader writes dbgen orders into the `orders` table.
type orderLoader struct {
	sqlLoader
}

// Load writes one *dbgen.Order row; monetary values are formatted with
// dbgen.FmtMoney to match the reference output.
func (o *orderLoader) Load(item interface{}) error {
	order := item.(*dbgen.Order)
	return o.WriteRow(
		order.OKey,
		order.CustKey,
		order.Status,
		dbgen.FmtMoney(order.TotalPrice),
		order.Date,
		order.OrderPriority,
		order.Clerk,
		order.ShipPriority,
		order.Comment,
	)
}
42 |
// custLoader writes dbgen customers into the `customer` table.
type custLoader struct {
	sqlLoader
}

// Load writes one *dbgen.Cust row.
func (c *custLoader) Load(item interface{}) error {
	cust := item.(*dbgen.Cust)
	return c.WriteRow(
		cust.CustKey,
		cust.Name,
		cust.Address,
		cust.NationCode,
		cust.Phone,
		dbgen.FmtMoney(cust.Acctbal),
		cust.MktSegment,
		cust.Comment,
	)
}
60 |
61 | type lineItemloader struct {
62 | sqlLoader
63 | }
64 |
65 | func (l *lineItemloader) Load(item interface{}) error {
66 | order := item.(*dbgen.Order)
67 | for _, line := range order.Lines {
68 | if err := l.WriteRow(
69 | line.OKey,
70 | line.PartKey,
71 | line.SuppKey,
72 | line.LCnt,
73 | line.Quantity,
74 | dbgen.FmtMoney(line.EPrice),
75 | dbgen.FmtMoney(line.Discount),
76 | dbgen.FmtMoney(line.Tax),
77 | line.RFlag,
78 | line.LStatus,
79 | line.SDate,
80 | line.CDate,
81 | line.RDate,
82 | line.ShipInstruct,
83 | line.ShipMode,
84 | line.Comment,
85 | ); err != nil {
86 | return nil
87 | }
88 | }
89 | return nil
90 | }
91 |
// nationLoader writes dbgen nations into the `nation` table.
type nationLoader struct {
	sqlLoader
}

// Load writes one *dbgen.Nation row.
func (n *nationLoader) Load(item interface{}) error {
	nation := item.(*dbgen.Nation)
	return n.WriteRow(
		nation.Code,
		nation.Text,
		nation.Join,
		nation.Comment,
	)
}
105 |
// partLoader writes dbgen parts into the `part` table.
type partLoader struct {
	sqlLoader
}

// Load writes one *dbgen.Part row.
func (p *partLoader) Load(item interface{}) error {
	part := item.(*dbgen.Part)
	return p.WriteRow(
		part.PartKey,
		part.Name,
		part.Mfgr,
		part.Brand,
		part.Type,
		part.Size,
		part.Container,
		dbgen.FmtMoney(part.RetailPrice),
		part.Comment,
	)
}
124 |
// partSuppLoader writes the supplier entries of dbgen parts into the
// `partsupp` table, one row per (part, supplier) pair.
type partSuppLoader struct {
	sqlLoader
}

// Load flattens a *dbgen.Part's S slice into individual rows.
func (p *partSuppLoader) Load(item interface{}) error {
	part := item.(*dbgen.Part)
	for _, supp := range part.S {
		if err := p.WriteRow(
			supp.PartKey,
			supp.SuppKey,
			supp.Qty,
			dbgen.FmtMoney(supp.SCost),
			supp.Comment,
		); err != nil {
			return err
		}
	}
	return nil
}
144 |
// suppLoader writes dbgen suppliers into the `supplier` table.
type suppLoader struct {
	sqlLoader
}

// Load writes one *dbgen.Supp row.
func (s *suppLoader) Load(item interface{}) error {
	supp := item.(*dbgen.Supp)
	return s.WriteRow(
		supp.SuppKey,
		supp.Name,
		supp.Address,
		supp.NationCode,
		supp.Phone,
		dbgen.FmtMoney(supp.Acctbal),
		supp.Comment,
	)
}
161 |
// regionLoader writes dbgen regions into the `region` table.
type regionLoader struct {
	sqlLoader
}

// Load writes one *dbgen.Region row.
func (r *regionLoader) Load(item interface{}) error {
	region := item.(*dbgen.Region)
	return r.WriteRow(
		region.Code,
		region.Text,
		region.Comment,
	)
}
174 |
// The New*Loader constructors below each build a loader backed by a
// ConcurrentSink of `concurrency` SQL sinks, batching INSERTs into the
// corresponding TPC-H table.

// NewOrderLoader returns a loader for the orders table.
func NewOrderLoader(ctx context.Context, db *sql.DB, concurrency int) *orderLoader {
	return &orderLoader{sqlLoader{
		sink.NewConcurrentSink(func(idx int) sink.Sink {
			return sink.NewSQLSink(db,
				`INSERT INTO orders (O_ORDERKEY, O_CUSTKEY, O_ORDERSTATUS, O_TOTALPRICE, O_ORDERDATE, O_ORDERPRIORITY, O_CLERK, O_SHIPPRIORITY, O_COMMENT) VALUES `, 0, 0)
		}, concurrency), ctx}}
}

// NewLineItemLoader returns a loader for the lineitem table.
func NewLineItemLoader(ctx context.Context, db *sql.DB, concurrency int) *lineItemloader {
	return &lineItemloader{sqlLoader{
		sink.NewConcurrentSink(func(idx int) sink.Sink {
			return sink.NewSQLSink(db,
				`INSERT INTO lineitem (L_ORDERKEY, L_PARTKEY, L_SUPPKEY, L_LINENUMBER, L_QUANTITY, L_EXTENDEDPRICE, L_DISCOUNT, L_TAX, L_RETURNFLAG, L_LINESTATUS, L_SHIPDATE, L_COMMITDATE, L_RECEIPTDATE, L_SHIPINSTRUCT, L_SHIPMODE, L_COMMENT) VALUES `, 0, 0)
		}, concurrency), ctx}}
}

// NewCustLoader returns a loader for the customer table.
func NewCustLoader(ctx context.Context, db *sql.DB, concurrency int) *custLoader {
	return &custLoader{sqlLoader{
		sink.NewConcurrentSink(func(idx int) sink.Sink {
			return sink.NewSQLSink(db,
				`INSERT INTO customer (C_CUSTKEY, C_NAME, C_ADDRESS, C_NATIONKEY, C_PHONE, C_ACCTBAL, C_MKTSEGMENT, C_COMMENT) VALUES `, 0, 0)
		}, concurrency), ctx}}
}

// NewPartLoader returns a loader for the part table.
func NewPartLoader(ctx context.Context, db *sql.DB, concurrency int) *partLoader {
	return &partLoader{sqlLoader{
		sink.NewConcurrentSink(func(idx int) sink.Sink {
			return sink.NewSQLSink(db,
				`INSERT INTO part (P_PARTKEY, P_NAME, P_MFGR, P_BRAND, P_TYPE, P_SIZE, P_CONTAINER, P_RETAILPRICE, P_COMMENT) VALUES `, 0, 0)
		}, concurrency), ctx}}
}

// NewPartSuppLoader returns a loader for the partsupp table.
func NewPartSuppLoader(ctx context.Context, db *sql.DB, concurrency int) *partSuppLoader {
	return &partSuppLoader{sqlLoader{
		sink.NewConcurrentSink(func(idx int) sink.Sink {
			return sink.NewSQLSink(db,
				`INSERT INTO partsupp (PS_PARTKEY, PS_SUPPKEY, PS_AVAILQTY, PS_SUPPLYCOST, PS_COMMENT) VALUES `, 0, 0)
		}, concurrency), ctx}}
}

// NewSuppLoader returns a loader for the supplier table.
func NewSuppLoader(ctx context.Context, db *sql.DB, concurrency int) *suppLoader {
	return &suppLoader{sqlLoader{
		sink.NewConcurrentSink(func(idx int) sink.Sink {
			return sink.NewSQLSink(db,
				`INSERT INTO supplier (S_SUPPKEY, S_NAME, S_ADDRESS, S_NATIONKEY, S_PHONE, S_ACCTBAL, S_COMMENT) VALUES `, 0, 0)
		}, concurrency), ctx}}
}

// NewNationLoader returns a loader for the nation table.
func NewNationLoader(ctx context.Context, db *sql.DB, concurrency int) *nationLoader {
	return &nationLoader{sqlLoader{
		sink.NewConcurrentSink(func(idx int) sink.Sink {
			return sink.NewSQLSink(db,
				`INSERT INTO nation (N_NATIONKEY, N_NAME, N_REGIONKEY, N_COMMENT) VALUES `, 0, 0)
		}, concurrency), ctx}}
}

// NewRegionLoader returns a loader for the region table.
func NewRegionLoader(ctx context.Context, db *sql.DB, concurrency int) *regionLoader {
	return &regionLoader{sqlLoader{
		sink.NewConcurrentSink(func(idx int) sink.Sink {
			return sink.NewSQLSink(db,
				`INSERT INTO region (R_REGIONKEY, R_NAME, R_COMMENT) VALUES `, 0, 0)
		}, concurrency), ctx}}
}
231 |
--------------------------------------------------------------------------------
/tpcc/payment.go:
--------------------------------------------------------------------------------
1 | package tpcc
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "time"
7 | )
8 |
// SQL statements used by the Payment transaction (TPC-C clause 2.5).
const (
	paymentUpdateDistrict = `UPDATE district SET d_ytd = d_ytd + ? WHERE d_w_id = ? AND d_id = ?`
	paymentSelectDistrict = `SELECT d_street_1, d_street_2, d_city, d_state, d_zip, d_name FROM district WHERE d_w_id = ? AND d_id = ?`
	paymentUpdateWarehouse = `UPDATE warehouse SET w_ytd = w_ytd + ? WHERE w_id = ?`
	paymentSelectWarehouse = `SELECT w_street_1, w_street_2, w_city, w_state, w_zip, w_name FROM warehouse WHERE w_id = ?`
	// Customer selected by last name: ordered by c_first so the caller can
	// pick the median row.
	paymentSelectCustomerListByLast = `SELECT c_id FROM customer WHERE c_w_id = ? AND c_d_id = ? AND c_last = ? ORDER BY c_first`
	paymentSelectCustomerForUpdate = `SELECT c_first, c_middle, c_last, c_street_1, c_street_2, c_city, c_state, c_zip, c_phone, 
c_credit, c_credit_lim, c_discount, c_balance, c_since FROM customer WHERE c_w_id = ? AND c_d_id = ? 
AND c_id = ? FOR UPDATE`
	paymentUpdateCustomer = `UPDATE customer SET c_balance = c_balance - ?, c_ytd_payment = c_ytd_payment + ?, 
c_payment_cnt = c_payment_cnt + 1 WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?`
	// Bad-credit ("BC") customers additionally carry payment history in c_data.
	paymentSelectCustomerData = `SELECT c_data FROM customer WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?`
	paymentUpdateCustomerWithData = `UPDATE customer SET c_balance = c_balance - ?, c_ytd_payment = c_ytd_payment + ?, 
c_payment_cnt = c_payment_cnt + 1, c_data = ? WHERE c_w_id = ? AND c_d_id = ? AND c_id = ?`
	paymentInsertHistory = `INSERT INTO history (h_c_d_id, h_c_w_id, h_c_id, h_d_id, h_w_id, h_date, h_amount, h_data) 
VALUES (?, ?, ?, ?, ?, ?, ?, ?)`
)
26 |
// paymentData carries the inputs and intermediate query results of a single
// Payment transaction so they can be threaded through its steps.
type paymentData struct {
	wID     int     // warehouse the payment is entered at
	dID     int     // district within wID
	cWID    int     // customer's warehouse (may differ from wID: remote payment)
	cDID    int     // customer's district
	hAmount float64 // payment amount

	// Warehouse address/name read back by paymentSelectWarehouse.
	wStreet1 string
	wStreet2 string
	wCity    string
	wState   string
	wZip     string
	wName    string

	// District address/name read back by paymentSelectDistrict.
	dStreet1 string
	dStreet2 string
	dCity    string
	dState   string
	dZip     string
	dName    string

	// Customer fields read back by paymentSelectCustomerForUpdate
	// (cID/cLast may also be randomly generated inputs).
	cID        int
	cFirst     string
	cMiddle    string
	cLast      string
	cStreet1   string
	cStreet2   string
	cCity      string
	cState     string
	cZip       string
	cPhone     string
	cSince     string
	cCredit    string
	cCreditLim float64
	cDiscount  float64
	cBalance   float64
	cData      string
}
65 |
66 | func (w *Workloader) runPayment(ctx context.Context, thread int) error {
67 | s := getTPCCState(ctx)
68 |
69 | d := paymentData{
70 | wID: randInt(s.R, 1, w.cfg.Warehouses),
71 | dID: randInt(s.R, 1, districtPerWarehouse),
72 | hAmount: float64(randInt(s.R, 100, 500000)) / float64(100.0),
73 | }
74 |
75 | // Refer 2.5.1.2, 60% by last name, 40% by customer ID
76 | if s.R.Intn(100) < 60 {
77 | d.cLast = randCLast(s.R, s.Buf)
78 | } else {
79 | d.cID = randCustomerID(s.R)
80 | }
81 |
82 | // Refer 2.5.1.2, 85% by local, 15% by remote
83 | if w.cfg.Warehouses == 1 || s.R.Intn(100) < 85 {
84 | d.cWID = d.wID
85 | d.cDID = d.dID
86 | } else {
87 | d.cWID = w.otherWarehouse(ctx, d.wID)
88 | d.cDID = randInt(s.R, 1, districtPerWarehouse)
89 | }
90 |
91 | tx, err := w.beginTx(ctx)
92 | if err != nil {
93 | return err
94 | }
95 | defer tx.Rollback()
96 |
97 | // Process 1
98 | if _, err := s.paymentStmts[paymentUpdateDistrict].ExecContext(ctx, d.hAmount, d.wID, d.dID); err != nil {
99 | return fmt.Errorf("exec %s failed %v", paymentUpdateDistrict, err)
100 | }
101 |
102 | // Process 2
103 | if err := s.paymentStmts[paymentSelectDistrict].QueryRowContext(ctx, d.wID, d.dID).Scan(&d.dStreet1, &d.dStreet2,
104 | &d.dCity, &d.dState, &d.dZip, &d.dName); err != nil {
105 | return fmt.Errorf("exec %s failed %v", paymentSelectDistrict, err)
106 | }
107 |
108 | // Process 3
109 | if _, err := s.paymentStmts[paymentUpdateWarehouse].ExecContext(ctx, d.hAmount, d.wID); err != nil {
110 | return fmt.Errorf("exec %s failed %v", paymentUpdateWarehouse, err)
111 | }
112 |
113 | // Process 4
114 | if err := s.paymentStmts[paymentSelectWarehouse].QueryRowContext(ctx, d.wID).Scan(&d.wStreet1, &d.wStreet2,
115 | &d.wCity, &d.wState, &d.wZip, &d.wName); err != nil {
116 | return fmt.Errorf("exec %s failed %v", paymentSelectDistrict, err)
117 | }
118 |
119 | if d.cID == 0 {
120 | // Process 5
121 | rows, err := s.paymentStmts[paymentSelectCustomerListByLast].QueryContext(ctx, d.cWID, d.cDID, d.cLast)
122 | if err != nil {
123 | return fmt.Errorf("exec %s failed %v", paymentSelectCustomerListByLast, err)
124 | }
125 | var ids []int
126 | for rows.Next() {
127 | var id int
128 | if err = rows.Scan(&id); err != nil {
129 | return fmt.Errorf("exec %s failed %v", paymentSelectCustomerListByLast, err)
130 | }
131 | ids = append(ids, id)
132 | }
133 | if len(ids) == 0 {
134 | return fmt.Errorf("customer for (%d, %d, %s) not found", d.cWID, d.cDID, d.cLast)
135 | }
136 | d.cID = ids[(len(ids)+1)/2-1]
137 | }
138 |
139 | // Process 6
140 | if err := s.paymentStmts[paymentSelectCustomerForUpdate].QueryRowContext(ctx, d.cWID, d.cDID, d.cID).Scan(&d.cFirst, &d.cMiddle, &d.cLast,
141 | &d.cStreet1, &d.cStreet2, &d.cCity, &d.cState, &d.cZip, &d.cPhone, &d.cCredit, &d.cCreditLim,
142 | &d.cDiscount, &d.cBalance, &d.cSince); err != nil {
143 | return fmt.Errorf("exec %s failed %v", paymentSelectCustomerForUpdate, err)
144 | }
145 |
146 | if d.cCredit == "BC" {
147 | // Process 7
148 | if err := s.paymentStmts[paymentSelectCustomerData].QueryRowContext(ctx, d.cWID, d.cDID, d.cID).Scan(&d.cData); err != nil {
149 | return fmt.Errorf("exec %s failed %v", paymentSelectCustomerData, err)
150 | }
151 |
152 | newData := fmt.Sprintf("| %4d %2d %4d %2d %4d $%7.2f %12s %24s", d.cID, d.cDID, d.cWID,
153 | d.dID, d.wID, d.hAmount, time.Now().Format(timeFormat), d.cData)
154 | if len(newData) >= 500 {
155 | newData = newData[0:500]
156 | } else {
157 | newData += d.cData[0 : 500-len(newData)]
158 | }
159 |
160 | // Process 8
161 | if _, err := s.paymentStmts[paymentUpdateCustomerWithData].ExecContext(ctx, d.hAmount, d.hAmount, newData, d.cWID, d.cDID, d.cID); err != nil {
162 | return fmt.Errorf("exec %s failed %v", paymentUpdateCustomerWithData, err)
163 | }
164 | } else {
165 | // Process 9
166 | if _, err := s.paymentStmts[paymentUpdateCustomer].ExecContext(ctx, d.hAmount, d.hAmount, d.cWID, d.cDID, d.cID); err != nil {
167 | return fmt.Errorf("exec %s failed %v", paymentUpdateCustomer, err)
168 | }
169 | }
170 |
171 | // Process 10
172 | hData := fmt.Sprintf("%10s %10s", d.wName, d.dName)
173 | if _, err := s.paymentStmts[paymentInsertHistory].ExecContext(ctx, d.cDID, d.cWID, d.cID, d.dID, d.wID, time.Now().Format(timeFormat), d.hAmount, hData); err != nil {
174 | return fmt.Errorf("exec %s failed %v", paymentInsertHistory, err)
175 | }
176 |
177 | return tx.Commit()
178 | }
179 |
--------------------------------------------------------------------------------
/cmd/go-tpc/tpch.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | "fmt"
7 | "os"
8 | "runtime"
9 | "strings"
10 |
11 | "github.com/pingcap/go-tpc/pkg/util"
12 | "github.com/pingcap/go-tpc/tpch"
13 | "github.com/spf13/cobra"
14 | )
15 |
// tpchConfig collects the CLI flag values for all tpch subcommands.
var tpchConfig tpch.Config

// queryTuningVars lists TiDB session variables (with the values to set)
// known to improve TPC-H query plans; applied by setTiDBQueryTuningVars
// when query tuning is enabled.
var queryTuningVars = []struct {
	name  string
	value string
}{
	// For optimal join order, esp. for q9.
	{"tidb_default_string_match_selectivity", "0.1"},
	// For optimal join order for all queries.
	{"tidb_opt_join_reorder_threshold", "60"},
	// For optimal join type between broadcast and hash partition join.
	{"tidb_prefer_broadcast_join_by_exchange_data_size", "ON"},
}
29 |
30 | // isSysVarSupported determines if a system variable is supported in given TiDB version
31 | // TODO: Every known sys var should have a minimal supported version and be checked individually. For now we just assume all sys vars are supported since 7.1.0.
32 | func isSysVarSupported(ver util.SemVersion, sysVar string) bool {
33 | return ver.Compare(util.SemVersion{Major: 7, Minor: 1, Patch: 0}) >= 0
34 | }
35 |
36 | func executeTpch(action string) {
37 | openDB()
38 | defer closeDB()
39 |
40 | if globalDB == nil {
41 | util.StdErrLogger.Printf("cannot connect to the database")
42 | os.Exit(1)
43 | }
44 | if maxProcs != 0 {
45 | runtime.GOMAXPROCS(maxProcs)
46 | }
47 |
48 | if action == "run" && driver == mysqlDriver && tpchConfig.EnableQueryTuning {
49 | serverVer, err := getServerVersion(globalDB)
50 | if err != nil {
51 | panic(fmt.Errorf("get server version failed: %v", err))
52 | }
53 | fmt.Printf("Server version: %s\n", serverVer)
54 |
55 | if semVer, ok := util.NewTiDBSemVersion(serverVer); ok {
56 | fmt.Printf("Enabling query tuning for TiDB version %s.\n", semVer.String())
57 | if err := setTiDBQueryTuningVars(globalDB, semVer); err != nil {
58 | panic(fmt.Errorf("set session variables failed: %v", err))
59 | }
60 | } else {
61 | fmt.Printf("Query tuning is enabled(by default) but server version doesn't appear to be TiDB, skipping tuning.\n")
62 | }
63 | }
64 |
65 | tpchConfig.PlanReplayerConfig.Host = hosts[0]
66 | tpchConfig.PlanReplayerConfig.StatusPort = statusPort
67 |
68 | tpchConfig.OutputStyle = outputStyle
69 | tpchConfig.Driver = driver
70 | tpchConfig.DBName = dbName
71 | tpchConfig.PrepareThreads = threads
72 | tpchConfig.QueryNames = strings.Split(tpchConfig.RawQueries, ",")
73 | w := tpch.NewWorkloader(globalDB, &tpchConfig)
74 | timeoutCtx, cancel := context.WithTimeout(globalCtx, totalTime)
75 | defer cancel()
76 |
77 | executeWorkload(timeoutCtx, w, threads, action)
78 | fmt.Println("Finished")
79 | w.OutputStats(true)
80 | }
81 |
// getServerVersion fetches the server's version string via `SELECT VERSION()`.
// On error the returned string is empty.
func getServerVersion(db *sql.DB) (string, error) {
	var ver string
	if err := db.QueryRow("SELECT VERSION()").Scan(&ver); err != nil {
		return ver, err
	}
	return ver, nil
}
87 |
88 | func setTiDBQueryTuningVars(db *sql.DB, ver util.SemVersion) error {
89 | for _, v := range queryTuningVars {
90 | if isSysVarSupported(ver, v.name) {
91 | if _, err := db.Exec(fmt.Sprintf("SET SESSION %s = %s", v.name, v.value)); err != nil {
92 | return err
93 | }
94 | } else {
95 | fmt.Printf("Unsupported query tunning var %s for TiDB version %s \n", v.name, ver.String())
96 | }
97 | }
98 | return nil
99 | }
100 |
// registerTpch wires the `tpch` command tree (prepare / run / cleanup) and
// all of its flags into the root command. Flag values are bound directly to
// the package-level tpchConfig.
func registerTpch(root *cobra.Command) {
	cmd := &cobra.Command{
		Use: "tpch",
	}

	// Flags shared by every tpch subcommand.
	cmd.PersistentFlags().StringVar(&tpchConfig.RawQueries,
		"queries",
		"q1,q2,q3,q4,q5,q6,q7,q8,q9,q10,q11,q12,q13,q14,q15,q16,q17,q18,q19,q20,q21,q22",
		"All queries")

	cmd.PersistentFlags().IntVar(&tpchConfig.ScaleFactor,
		"sf",
		1,
		"scale factor")

	cmd.PersistentFlags().BoolVar(&tpchConfig.ExecExplainAnalyze,
		"use-explain",
		false,
		"execute explain analyze")

	cmd.PersistentFlags().BoolVar(&tpchConfig.EnableOutputCheck,
		"check",
		false,
		"Check output data, only when the scale factor equals 1")

	var cmdPrepare = &cobra.Command{
		Use:   "prepare",
		Short: "Prepare data for the workload",
		Run: func(cmd *cobra.Command, args []string) {
			executeTpch("prepare")
		},
	}

	// prepare-only flags: data loading targets and statistics collection.
	cmdPrepare.PersistentFlags().IntVar(&tpchConfig.TiFlashReplica,
		"tiflash-replica",
		0,
		"Number of tiflash replica")

	cmdPrepare.PersistentFlags().BoolVar(&tpchConfig.AnalyzeTable.Enable,
		"analyze",
		false,
		"After data loaded, analyze table to collect column statistics")
	// https://pingcap.com/docs/stable/reference/performance/statistics/#control-analyze-concurrency
	cmdPrepare.PersistentFlags().IntVar(&tpchConfig.AnalyzeTable.BuildStatsConcurrency,
		"tidb_build_stats_concurrency",
		4,
		"tidb_build_stats_concurrency param for analyze jobs")
	cmdPrepare.PersistentFlags().IntVar(&tpchConfig.AnalyzeTable.DistsqlScanConcurrency,
		"tidb_distsql_scan_concurrency",
		15,
		"tidb_distsql_scan_concurrency param for analyze jobs")
	cmdPrepare.PersistentFlags().IntVar(&tpchConfig.AnalyzeTable.IndexSerialScanConcurrency,
		"tidb_index_serial_scan_concurrency",
		1,
		"tidb_index_serial_scan_concurrency param for analyze jobs")
	cmdPrepare.PersistentFlags().StringVar(&tpchConfig.OutputType,
		"output-type",
		"",
		"Output file type. If empty, then load data to db. Current only support csv")
	cmdPrepare.PersistentFlags().StringVar(&tpchConfig.OutputDir,
		"output-dir",
		"",
		"Output directory for generating file if specified")

	var cmdRun = &cobra.Command{
		Use:   "run",
		Short: "Run workload",
		Run: func(cmd *cobra.Command, args []string) {
			executeTpch("run")
		},
	}

	// run-only flags: Plan Replayer dumps and session query tuning.
	cmdRun.PersistentFlags().BoolVar(&tpchConfig.EnablePlanReplayer,
		"use-plan-replayer",
		false,
		"Use Plan Replayer to dump stats and variables before running queries")

	cmdRun.PersistentFlags().StringVar(&tpchConfig.PlanReplayerConfig.PlanReplayerDir,
		"plan-replayer-dir",
		"",
		"Dir of Plan Replayer file dumps")

	cmdRun.PersistentFlags().StringVar(&tpchConfig.PlanReplayerConfig.PlanReplayerFileName,
		"plan-replayer-file",
		"",
		"Name of plan Replayer file dumps")

	cmdRun.PersistentFlags().BoolVar(&tpchConfig.EnableQueryTuning,
		"enable-query-tuning",
		true,
		"Tune queries by setting some session variables known effective for tpch")

	var cmdCleanup = &cobra.Command{
		Use:   "cleanup",
		Short: "Cleanup data for the workload",
		Run: func(cmd *cobra.Command, args []string) {
			executeTpch("cleanup")
		},
	}

	cmd.AddCommand(cmdRun, cmdPrepare, cmdCleanup)

	root.AddCommand(cmd)
}
205 |
--------------------------------------------------------------------------------
/cmd/go-tpc/ch_benchmark.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "database/sql"
6 | "fmt"
7 | "os"
8 | "runtime"
9 | "strings"
10 | "sync"
11 | "time"
12 |
13 | "github.com/spf13/cobra"
14 |
15 | "github.com/pingcap/go-tpc/ch"
16 | "github.com/pingcap/go-tpc/pkg/workload"
17 | "github.com/pingcap/go-tpc/tpcc"
18 | )
19 |
// chConfig collects the CLI flag values for all ch subcommands.
var chConfig ch.Config

// Connection overrides for the analytical-processing (OLAP) side; when left
// empty they fall back to the primary connection settings in cmdRun's
// PreRun hook (see registerCHBenchmark).
var (
	apConnParams string
	apHosts      []string
	apPorts      []int
)
26 |
// registerCHBenchmark wires the `ch` (CH-benCHmark) command tree
// (prepare / run) and its flags into the root command. OLTP-side flags are
// bound to the shared tpccConfig; OLAP-side flags are bound to chConfig.
func registerCHBenchmark(root *cobra.Command) {
	cmd := &cobra.Command{
		Use: "ch",
	}
	cmd.PersistentFlags().IntVar(&tpccConfig.Parts, "parts", 1, "Number to partition warehouses")
	cmd.PersistentFlags().IntVar(&tpccConfig.Warehouses, "warehouses", 10, "Number of warehouses")
	cmd.PersistentFlags().BoolVar(&tpccConfig.CheckAll, "check-all", false, "Run all consistency checks")
	cmd.PersistentFlags().StringVar(&chConfig.RawQueries,
		"queries",
		"q1,q2,q3,q4,q5,q6,q7,q8,q9,q10,q11,q12,q13,q14,q15,q16,q17,q18,q19,q20,q21,q22",
		"All queries")
	cmd.PersistentFlags().DurationVar(&chConfig.RefreshConnWait, "refresh-conn-wait", 5*time.Second, "duration to wait before refreshing sql connection")

	var cmdPrepare = &cobra.Command{
		Use:   "prepare",
		Short: "Prepare data for the workload",
		Run: func(cmd *cobra.Command, args []string) {
			executeCH("prepare", nil)
		},
	}
	// prepare-only flags: data loading targets and statistics collection.
	cmdPrepare.PersistentFlags().IntVar(&chConfig.TiFlashReplica,
		"tiflash-replica",
		0,
		"Number of tiflash replica")

	cmdPrepare.PersistentFlags().BoolVar(&chConfig.AnalyzeTable.Enable,
		"analyze",
		false,
		"After data loaded, analyze table to collect column statistics")
	// https://pingcap.com/docs/stable/reference/performance/statistics/#control-analyze-concurrency
	cmdPrepare.PersistentFlags().IntVar(&chConfig.AnalyzeTable.BuildStatsConcurrency,
		"tidb_build_stats_concurrency",
		4,
		"tidb_build_stats_concurrency param for analyze jobs")
	cmdPrepare.PersistentFlags().IntVar(&chConfig.AnalyzeTable.DistsqlScanConcurrency,
		"tidb_distsql_scan_concurrency",
		15,
		"tidb_distsql_scan_concurrency param for analyze jobs")
	cmdPrepare.PersistentFlags().IntVar(&chConfig.AnalyzeTable.IndexSerialScanConcurrency,
		"tidb_index_serial_scan_concurrency",
		1,
		"tidb_index_serial_scan_concurrency param for analyze jobs")

	var cmdRun = &cobra.Command{
		Use:   "run",
		Short: "Run workload",
		// Default the OLAP connection settings to the primary (OLTP)
		// connection settings when no ap-* overrides were given.
		PreRun: func(cmd *cobra.Command, args []string) {
			if len(apConnParams) == 0 {
				apConnParams = connParams
			}
			if len(apHosts) == 0 {
				apHosts = hosts
			}
			if len(apPorts) == 0 {
				apPorts = ports
			}
		},
		Run: func(cmd *cobra.Command, _ []string) {
			// The OLAP side gets its own connection pool.
			executeCH("run", func() (*sql.DB, error) {
				return newDB(makeTargets(apHosts, apPorts), driver, user, password, dbName, apConnParams)
			})
		},
	}
	// run-only flags: Plan Replayer dumps, explain mode, transaction mix
	// weights, and OLAP connection overrides.
	cmdRun.PersistentFlags().BoolVar(&chConfig.EnablePlanReplayer,
		"use-plan-replayer",
		false,
		"Use Plan Replayer to dump stats and variables before running queries")

	cmdRun.PersistentFlags().StringVar(&chConfig.PlanReplayerConfig.PlanReplayerDir,
		"plan-replayer-dir",
		"",
		"Dir of Plan Replayer file dumps")

	cmdRun.PersistentFlags().StringVar(&chConfig.PlanReplayerConfig.PlanReplayerFileName,
		"plan-replayer-file",
		"",
		"Name of plan Replayer file dumps")

	cmdRun.PersistentFlags().BoolVar(&chConfig.ExecExplainAnalyze,
		"use-explain",
		false,
		"execute explain analyze")

	cmdRun.PersistentFlags().IntSliceVar(&tpccConfig.Weight, "weight", []int{45, 43, 4, 4, 4}, "Weight for NewOrder, Payment, OrderStatus, Delivery, StockLevel")
	cmdRun.Flags().DurationVar(&tpccConfig.ConnRefreshInterval, "conn-refresh-interval", 0, "automatically refresh database connections at specified intervals to balance traffic across new replicas (0 = disabled, e.g., 10s)")
	cmdRun.Flags().StringVar(&apConnParams, "ap-conn-params", "", "Connection parameters for analytical processing")
	cmdRun.Flags().StringSliceVar(&apHosts, "ap-host", nil, "Database host for analytical processing")
	cmdRun.Flags().IntSliceVar(&apPorts, "ap-port", nil, "Database port for analytical processing")

	cmd.AddCommand(cmdRun, cmdPrepare)
	root.AddCommand(cmd)
}
119 |
120 | func executeCH(action string, openAP func() (*sql.DB, error)) {
121 | if maxProcs != 0 {
122 | runtime.GOMAXPROCS(maxProcs)
123 | }
124 |
125 | openDB()
126 | defer closeDB()
127 |
128 | tpccConfig.OutputStyle = outputStyle
129 | tpccConfig.Driver = driver
130 | tpccConfig.DBName = dbName
131 | tpccConfig.Threads = threads
132 | tpccConfig.Isolation = isolationLevel
133 | chConfig.OutputStyle = outputStyle
134 | chConfig.Driver = driver
135 | chConfig.DBName = dbName
136 | chConfig.QueryNames = strings.Split(chConfig.RawQueries, ",")
137 | if action == "run" {
138 | chConfig.PlanReplayerConfig.Host = apHosts[0]
139 | } else {
140 | chConfig.PlanReplayerConfig.Host = hosts[0]
141 | }
142 | chConfig.PlanReplayerConfig.StatusPort = statusPort
143 |
144 | var (
145 | tp, ap workload.Workloader
146 | err error
147 | )
148 |
149 | // Set a reasonable connection max lifetime when auto-refresh is enabled
150 | // This ensures connections are actually closed and not just returned to pool
151 | if tpccConfig.ConnRefreshInterval > 0 {
152 | globalDB.SetConnMaxLifetime(tpccConfig.ConnRefreshInterval)
153 | fmt.Printf("Auto-setting connection max lifetime to %v (refresh interval)\n", tpccConfig.ConnRefreshInterval)
154 | }
155 | tp, err = tpcc.NewWorkloader(globalDB, &tpccConfig)
156 | if err != nil {
157 | fmt.Printf("Failed to init tp work loader: %v\n", err)
158 | os.Exit(1)
159 | }
160 | if openAP == nil {
161 | ap = ch.NewWorkloader(globalDB, &chConfig)
162 | } else {
163 | db, err := openAP()
164 | if err != nil {
165 | fmt.Printf("Failed to open db for analytical processing: %v\n", err)
166 | os.Exit(1)
167 | }
168 | db.SetMaxIdleConns(acThreads + 1)
169 | ap = ch.NewWorkloader(db, &chConfig)
170 | }
171 | if err != nil {
172 | fmt.Printf("Failed to init tp work loader: %v\n", err)
173 | os.Exit(1)
174 | }
175 | timeoutCtx, cancel := context.WithTimeout(globalCtx, totalTime)
176 | defer cancel()
177 |
178 | if action == "prepare" {
179 | executeWorkload(timeoutCtx, ap, 1, "prepare")
180 | return
181 | }
182 |
183 | type workLoaderSetting struct {
184 | workLoader workload.Workloader
185 | threads int
186 | }
187 | var doneWg sync.WaitGroup
188 | for _, workLoader := range []workLoaderSetting{{workLoader: tp, threads: threads}, {workLoader: ap, threads: acThreads}} {
189 | doneWg.Add(1)
190 | go func(workLoader workload.Workloader, threads int) {
191 | executeWorkload(timeoutCtx, workLoader, threads, "run")
192 | doneWg.Done()
193 | }(workLoader.workLoader, workLoader.threads)
194 | }
195 | doneWg.Wait()
196 | fmt.Printf("Finished: %d OLTP workers, %d OLAP workers\n", threads, acThreads)
197 | for _, workLoader := range []workLoaderSetting{{workLoader: tp, threads: threads}, {workLoader: ap, threads: acThreads}} {
198 | workLoader.workLoader.OutputStats(true)
199 | }
200 | }
201 |
--------------------------------------------------------------------------------
/docs/tidb-lightning.toml:
--------------------------------------------------------------------------------
1 | ### tidb-lightning configuration
2 | [lightning]
3 |
4 | # Listening address for the HTTP server (set to empty string to disable).
5 | # The server is responsible for the web interface, submitting import tasks,
6 | # serving Prometheus metrics and exposing debug profiling data.
7 | status-addr = ""
8 |
9 | # Toggle server mode.
10 | # If "false", running Lightning will immediately start the import job, and exit
11 | # after the job is finished.
12 | # If "true", running Lightning will wait for user to submit tasks, via the HTTP API
13 | # (`curl http://lightning-ip:8289/tasks --data-binary @tidb-lightning.toml`).
14 | # The program will keep running and waiting for more tasks, until receiving the SIGINT signal.
15 | server-mode = false
16 |
17 | # check if the cluster satisfies the minimum requirement before starting
18 | # check-requirements = true
19 |
20 | # index-concurrency controls the maximum handled index concurrently while reading Mydumper SQL files. It can affect the tikv-importer disk usage.
21 | index-concurrency = 2
22 | # table-concurrency controls the maximum handled tables concurrently while reading Mydumper SQL files. It can affect the tikv-importer memory usage.
23 | table-concurrency = 6
24 |
25 | # logging
26 | level = "info"
27 | file = "tidb-lightning.log"
28 | max-size = 128 # MB
29 | max-days = 28
30 | max-backups = 14
31 |
32 |
33 | [checkpoint]
34 | # Whether to enable checkpoints.
35 | # While importing, Lightning will record which tables have been imported, so even if Lightning or other component
36 | # crashed, we could start from a known good state instead of redoing everything.
37 | enable = true
38 | # The schema name (database name) to store the checkpoints
39 | schema = "tidb_lightning_checkpoint"
40 | # Where to store the checkpoints.
41 | # Set to "file" to store as a local file.
42 | # Set to "mysql" to store into a remote MySQL-compatible database
43 | driver = "file"
44 | # The data source name (DSN) indicating the location of the checkpoint storage.
45 | # For "file" driver, the DSN is a path. If not specified, Lightning would default to "/tmp/CHKPTSCHEMA.pb".
46 | # For "mysql" driver, the DSN is a URL in the form "USER:PASS@tcp(HOST:PORT)/".
47 | # If not specified, the TiDB server from the [tidb] section will be used to store the checkpoints.
48 | #dsn = "/tmp/tidb_lightning_checkpoint.pb"
49 | # Whether to keep the checkpoints after all data are imported. If false, the checkpoints will be deleted. The schema
50 | # needs to be dropped manually, however.
51 | #keep-after-success = false
52 |
53 |
54 | [tikv-importer]
55 | # Delivery backend, can be "importer" or "tidb".
56 | backend = "tidb"
57 |
58 | # What to do on duplicated record (unique key conflict) when the backend is 'tidb'. Possible values are:
59 | # - replace: replace the old record by the new record (i.e. insert rows using "REPLACE INTO")
60 | # - ignore: keep the old record and ignore the new record (i.e. insert rows using "INSERT IGNORE INTO")
61 | # - error: stop Lightning and report an error (i.e. insert rows using "INSERT INTO")
62 | #on-duplicate = "replace"
63 |
64 | [mydumper]
65 | # block size of file reading
66 | read-block-size = 65536 # Byte (default = 64 KB)
67 | # minimum size (in terms of source data file) of each batch of import.
68 | # Lightning will split a large table into multiple engine files according to this size.
69 | batch-size = 107_374_182_400 # Byte (default = 100 GiB)
70 |
71 | # Engine file needs to be imported sequentially. Due to table-concurrency, multiple engines will be
72 | # imported nearly the same time, and this will create a queue and this wastes resources. Therefore,
73 | # Lightning will slightly increase the size of the first few batches to properly distribute
74 | # resources. The scale up is controlled by this parameter, which expresses the ratio of duration
75 | # between the "import" and "write" steps with full concurrency. This can be calculated as the ratio
76 | # (import duration / write duration) of a single table of size around 1 GB. The exact timing can be
77 | # found in the log. If "import" is faster, the batch size anomaly is smaller, and a ratio of
78 | # zero means uniform batch size. This value should be in the range (0 <= batch-import-ratio < 1).
79 | batch-import-ratio = 0.75
80 |
81 | # mydumper local source data directory, please change to the directory of your csv file path
82 | data-source-dir = "/data"
83 | # if no-schema is set true, lightning will get schema information from tidb-server directly without creating them.
84 | no-schema=true
85 | # the character set of the schema files; only supports one of:
86 | # - utf8mb4: the schema files must be encoded as UTF-8, otherwise will emit errors
87 | # - gb18030: the schema files must be encoded as GB-18030, otherwise will emit errors
88 | # - auto: (default) automatically detect if the schema is UTF-8 or GB-18030, error if the encoding is neither
89 | # - binary: do not try to decode the schema files
90 | # note that the *data* files are always parsed as binary regardless of schema encoding.
91 | #character-set = "auto"
92 |
93 | # make table and database names case-sensitive, i.e. treats `DB`.`TBL` and `db`.`tbl` as two
94 | # different objects. Currently only affects [[routes]].
95 | case-sensitive = false
96 |
97 | # CSV files are imported according to MySQL's LOAD DATA INFILE rules.
98 | [mydumper.csv]
99 | # separator between fields, should be an ASCII character.
100 | separator = ','
101 | # string delimiter, can either be an ASCII character or empty string.
102 | delimiter = ""
103 | # whether the CSV files contain a header. If true, the first line will be skipped
104 | header = false
105 | # whether the CSV contains any NULL value. If true, all columns from CSV cannot be NULL.
106 | not-null = false
107 | # if not-null = false (i.e. CSV can contain NULL), fields equal to this value will be treated as NULL
108 | null = "NULL"
109 | # whether to interpret backslash-escape inside strings.
110 | backslash-escape = false
111 | # if a line ends with a separator, remove it.
112 | trim-last-separator = false
113 |
114 | # configuration for tidb server address(one is enough) and pd server address(one is enough).
115 | [tidb]
116 | host = "127.0.0.1"
117 | port = 4000
118 | user = "root"
119 | password = ""
120 | # table schema information is fetched from tidb via this status-port.
121 | status-port = 10080
122 | pd-addr = "127.0.0.1:2379"
123 | # lightning uses some code of tidb (used as a library), and this flag controls its log level.
124 | log-level = "error"
125 |
126 | # set tidb session variables to speed up checksum/analyze table.
127 | # see https://pingcap.com/docs/sql/statistics/#control-analyze-concurrency for the meaning of each setting
128 | build-stats-concurrency = 20
129 | distsql-scan-concurrency = 100
130 | index-serial-scan-concurrency = 20
131 | checksum-table-concurrency = 16
132 |
133 |
134 | # post-restore provide some options which will be executed after all kv data has been imported into the tikv cluster.
135 | # the execution order are(if set true): checksum -> analyze
136 | [post-restore]
137 | # if set true, checksum will do ADMIN CHECKSUM TABLE for each table.
138 | checksum = true
139 | # if set to true, compact will do level 1 compaction to tikv data.
140 | # if this setting is missing, the default value is false.
141 | level-1-compact = false
142 | # if set true, compact will do full compaction to tikv data.
143 | # if this setting is missing, the default value is false.
144 | compact = false
145 | # if set true, analyze will do ANALYZE TABLE for each table.
146 | analyze = true
147 |
148 | # cron performs some periodic actions in background
149 | [cron]
150 | # duration between which Lightning will automatically refresh the import mode status.
151 | # should be shorter than the corresponding TiKV setting
152 | switch-mode = "5m"
154 | # the interval at which import progress will be printed to the log.
154 | log-progress = "5m"
155 |
156 |
157 |
--------------------------------------------------------------------------------
/tpch/output/q20.out:
--------------------------------------------------------------------------------
1 | s_name|s_address
2 | Supplier#000000024|C4nPvLrVmKPPabFCj
3 | Supplier#000000037|cqjyB5h1nV
4 | Supplier#000000118|BYtvNtFpQAHHoBFWF
5 | Supplier#000000183|zAAIv68BEXvllrfgsW,i8e
6 | Supplier#000000261|vUT2UDI,GAqIA
7 | Supplier#000000291|0qDDQst1b1bznHQh5jsmOq8nxf8Pz1Kn
8 | Supplier#000000368|3o5w6T5HzjFmSf1
9 | Supplier#000000370|yyNSJAG9UXcWit4SeMkEIrNcdVq5
10 | Supplier#000000463|XOb4DatMUyqMuFM92ZRaapwsEQ
11 | Supplier#000000474|USHBMdX8iFodU
12 | Supplier#000000491|mTbDcJHQ7d
13 | Supplier#000000683|W0rFJpyes6atCIuwAmktnK
14 | Supplier#000000701|ijyXEKJPjoVzpXY9g
15 | Supplier#000000764|2qcwW0V7q3Ipei1tPW3
16 | Supplier#000000774|XVYeiG4,BopCyYAQwld4l0scarsoe8J0cQ
17 | Supplier#000000784|Or3 KncT1AHPPb
18 | Supplier#000000800|Z4 hpmBjpjBXREqzixsBCIaF
19 | Supplier#000000817|0GTKh7JybR8sVahPoJT8kbNtDV0TzA79Q
20 | Supplier#000000983|XOYb xohl2j0U7wTTUaT4F6DShKfH4Hv3p,hnP
21 | Supplier#000001097|1FeaDqTTemSqxWvrsh58K3YCwiG
22 | Supplier#000001143|Lm h,MliyudNgZYMKKEE1,CUh21kOOdrqdDPZqm
23 | Supplier#000001146|UKWRbMRr47499Kta
24 | Supplier#000001158|, tVY8orI3
25 | Supplier#000001203|jHCBBvYw1DqzKRAV7T1bGz
26 | Supplier#000001206|pFMSZD5MiEv
27 | Supplier#000001311|RkSRZhG0WUBIvJSU1X9js0hOmfx6SL6b1hmKW4bf
28 | Supplier#000001342|PLHRQAf4AK okYJGKWODElzeupU4y,ijWOd0Q2q,
29 | Supplier#000001362|3l5jqvUos9Zbu
30 | Supplier#000001363|gn CXPzlZVpy
31 | Supplier#000001418|FVmNlHh2wrXJMQUDZqnp8GNIlQCnmHHzplBS,CF
32 | Supplier#000001422|J48g9qobTEuBQPvZa6DH3TEHlL1VD11xtutv36pF
33 | Supplier#000001493|MEIytTTK27Z1YIyJ4fRh3FsLUJEzQxaM
34 | Supplier#000001549|MYsM43isIm8Y5u
35 | Supplier#000001592|R7WTSZx6J5eE0QdCP6Xrvwuz
36 | Supplier#000001689|eLZWHr5DsW8
37 | Supplier#000001717|iLO76fgaDH6DFsBfb6ZMnOK7F9LnqoZRvjQzVpj
38 | Supplier#000001720|ZTDR6fE4HR3fDSKs
39 | Supplier#000002024|8PEKfAVOdGCspXSXMAZln
40 | Supplier#000002099|xpHU3PIuVz5UfoiaKiQtIxqbmln5zf
41 | Supplier#000002177|6O,8q,u1FLWiKGGZmfhGg9ooXl1AHARWZIti
42 | Supplier#000002239|NL9w9GFCzq6N
43 | Supplier#000002258|bXE2ygKSjCOsmijmUy5TbL
44 | Supplier#000002445|gVkPmZC9v7zjro
45 | Supplier#000002458|c8eZIly7LJrl7buLs7OTvNeEStmEFHIJ4hOk
46 | Supplier#000002564|lZUJcDjtVMbdaBJ0w82CN
47 | Supplier#000002767|TH8T96SZPPsJOC5
48 | Supplier#000002962|gRQXWtrUwB6Al99PmX1O
49 | Supplier#000003096|,ZraWb5SVMxGHmoNlmKLSqKjyC Q9JSlujpQbW49
50 | Supplier#000003137|dlR1SeOSy6FG
51 | Supplier#000003162|ELupM21SsqcCJOgwvOl0V9j5uulbE13R
52 | Supplier#000003278|e2IO3KGtSZl18kn2rh6BNMe9U7LL7CW7CjCj
53 | Supplier#000003292|rdZxuvAOnGA5TGTgAZlaor Ah7pd1xqVUz4V7nYL
54 | Supplier#000003293|filufiVzCBVot7vAwPRvCimnQ
55 | Supplier#000003296|0c318ax2Hbuqd9qDJwxDVmMpTM9RLSFeXsXlLHck
56 | Supplier#000003504|FHr0MRFGEDInYaqPsqVOU24TLJCiJiR0UrVcmap
57 | Supplier#000003506|kJoSJKvCYigjYeOhM74tpsnkAdKto7u7jEE8B
58 | Supplier#000003539|uPKu8p0Vv2MzTU8y POo19yNgM4Hz6JB
59 | Supplier#000003612|bAV9ZD QRt2WxJGltie8o3ihwffMrqMrkvN95
60 | Supplier#000003718|VPtTYIzJwSIEvyOSe0BCtrY6I
61 | Supplier#000003771|fKMoUiWDIp8y mwFuVj7,K
62 | Supplier#000003824|m7fIwVues7ktkv5aSrQz1BJCAcnZXzNm
63 | Supplier#000003843|y 5Wl63UOfOUpa4l5no8poPlQ4PJJPlKE7s
64 | Supplier#000003853|,XhDB3dz2sL1PFx2BR4NnSVsoA sBiqj8pJKHvj9
65 | Supplier#000003871|0l9vr6DDagytSNo,SD2mkG4XezH8L1loBW7uXV
66 | Supplier#000003897|AOtckHviCl 1XE0HfVh
67 | Supplier#000004002|MbWjR,serF9TsLjrAnK
68 | Supplier#000004047|YH7KD9kGfJ4zQSm4r9fxlTwOg,MB,7c
69 | Supplier#000004067|ip9T3DacGd9CT79BTFzQKIOiF7AJaM
70 | Supplier#000004149|yMUXO0,YCZcmG
71 | Supplier#000004215|NEDU5ZeLyyn,EDkL
72 | Supplier#000004245|1oTU7eTB3MT5QmFK8pghsb1UC
73 | Supplier#000004259|GozazL5xiDkcFKmg04MYTxri
74 | Supplier#000004287|jxPJkPGiyju9JoiI7SZpsH
75 | Supplier#000004357|uzdO3uspHY 53emWnBc3eaiMxexRnlN8
76 | Supplier#000004363|l4P3TdjquM8tDcE
77 | Supplier#000004390|FaJZ b KdjxA06ZdUW3sdWEAddDR
78 | Supplier#000004405|YzK43TC6Bi,5glkw6gQoaIpn8FCt
79 | Supplier#000004417|ULVd9moW2Bb4QSaqPmgbEGqoPR0T6TJkA
80 | Supplier#000004440|k7bBHpIHLsEP0ITJDYNPLIdoO4WBU
81 | Supplier#000004447|PogpwSZyu8k
82 | Supplier#000004494|WCQNf4k5wKj1l
83 | Supplier#000004620|z,Qym,C,goAXpPmM0L9s1Slv4
84 | Supplier#000004637|CLqkPPUi,tCY8YoGXEXv9WTGM8rNMXZ7KLRykj
85 | Supplier#000004734|MAOjF4S,gSeBiN
86 | Supplier#000004800|uy,dK eND3Ogaubkl,otcLPvdISomww9btn1s
87 | Supplier#000004902|YdQIzRgwNou4BRRect7We
88 | Supplier#000004930|w0kSsrbeC1uvnXyo06Yqlf
89 | Supplier#000004943|x8duiqMOaVAjMxHCMbroi dMau2Kx1PgI72K8o,
90 | Supplier#000004964|OMsqenynW3u7XxruccNIC
91 | Supplier#000004970|wytxC,bck4YitciVKox5
92 | Supplier#000004977|VYeEHmvwp0D43z
93 | Supplier#000005021|qtiNfDL dv7lsLA
94 | Supplier#000005022|LtH4qtqST6dY,v,Bs
95 | Supplier#000005094|35COOPYtrR4fjqCH
96 | Supplier#000005113|8Oa4 hYPK2IY2XtgGmJ3OSO42KKYhYMm0JT
97 | Supplier#000005130|GhwYHR8ZBbrM28CrR,fMdeH8wgO2la
98 | Supplier#000005139|uxbu7kp0 6ntQzpSLIGhcpRZJqWfZVfrS8Mnu
99 | Supplier#000005183|DGEg4PqYMLuJbTf AmfG3zvPcQ,F
100 | Supplier#000005210|Gb1SnzOsnMke d5AIDzIJEooElYL
101 | Supplier#000005311|m2kwKAYNIe9n5ysrLXIVkPd
102 | Supplier#000005392|80rNQXvYgc8oa6Vud3f
103 | Supplier#000005394|FxETfUU3xA7Jk8dC4lU4dxEcGgNdG69
104 | Supplier#000005467|NfJBlcUJVG8lGL
105 | Supplier#000005489|XSNO3NJWPmIPRMBou7PJodUmTF6
106 | Supplier#000005518|DZpIFpba1VZ5Qn6 BLSrxFrXfytT
107 | Supplier#000005545|a9vTkE8sVY
108 | Supplier#000005549|BJ3aqeHYo7psALHM12UaVYr37xlsAnd
109 | Supplier#000005576|UXqU25sDkQRbz7YI8U
110 | Supplier#000005665|UQEs3xf5LpmhFLx4CIHM7JHYkGkYF
111 | Supplier#000005734|1hNLPg3gwSld5nRJJHoC
112 | Supplier#000005800|vgZ4An2oUXmsh
113 | Supplier#000005830|AFO48PCLrBgkBY3iSbpA5uvVTx ju8d oA
114 | Supplier#000005838|4SzERY e8s
115 | Supplier#000005867|Xr,svOB0nR0
116 | Supplier#000005894|x,96zxP3FAMchh8yAIWJq
117 | Supplier#000005948|,AVsj0Dbver
118 | Supplier#000006094|wq6EZr3njrSo0bUx50jU4cBWH14CEmNd
119 | Supplier#000006140|93EycC3P9tiKu5XEkgb3duqG0
120 | Supplier#000006314|JX6JZ7eMyZ,9JGsEOXA
121 | Supplier#000006389|hLJ01QW0RBHCuKNDT7iiiKz
122 | Supplier#000006412|Oy9uRyGgsCmWmwJeCS OL
123 | Supplier#000006417| JQv0FIkmE82xpAkcPMfYM91zJesK3qmruv83
124 | Supplier#000006461|yCnkZZac6nTbjBq6
125 | Supplier#000006551|MNg1Qmb3xuq
126 | Supplier#000006597|1672 fSWkBvfAP38n1wAx,u5XNCSd4Q0J9m9
127 | Supplier#000006685|E4KuBERFrh9BKiU
128 | Supplier#000006721|06QvzeHSKW
129 | Supplier#000006744|ybeO9yuPgFQ2W4CSQw
130 | Supplier#000006816|I9XsfxxkTTxiIleWJ
131 | Supplier#000006860|,,wvwtQ,iPnSGUDE9mJ45MUVBjx4oRyI
132 | Supplier#000006907|zeis6gjDRIeBkhAIK,SCDt55y0RAtAg05YY A
133 | Supplier#000006922|293uG72TawGSJ
134 | Supplier#000006932|R4ikTRIOmmuFaC,wGmx6iCQa
135 | Supplier#000006981|M5kfDPiO dfJfqWKrRmEUxFLK8rCLkSQ01Guj
136 | Supplier#000007057|y7FptKFu3YCmDFsZf6b,6xbmWP
137 | Supplier#000007150|FP7U7cbhQEybeMzPjQ2jatyYcp4
138 | Supplier#000007167|O,uP7xO,SCDILkyczq4VaoeotMvBC
139 | Supplier#000007225|F ykA n53QblHRXYMEOZr4WwJBF5T7JFLn
140 | Supplier#000007242|WQMe6sy53jRalzf
141 | Supplier#000007464|Q9WBcTjgT1okf3sTwJGmnYU3QAc
142 | Supplier#000007488|cATpp9zHpsa4Y1wdpbbxHA6tZm,gI
143 | Supplier#000007518|JbzpdeQoBLXf,2MURMK7l,3e
144 | Supplier#000007542|Pz IGM,Qrc5OV0 lETgInzHbIr6Uw
145 | Supplier#000007623|d9cXb8a3pGjdlc,DOkl,I8,aUr
146 | Supplier#000007631|eSImoUDXAQp
147 | Supplier#000007709|SmHoGfrBDxzZR2I9K
148 | Supplier#000007727|p6rs44lEg4AfmlK4oi4fLqEQJLcIfaGvGBy
149 | Supplier#000007853|MK40bQXnu0Z, zDH2q9n0tRlCrTTerJGAWy
150 | Supplier#000007928|nHaEdI 5SQDgEolYw
151 | Supplier#000007935|N2vTacDfwSUl2DP6h0,YyV8
152 | Supplier#000007993|r,FoztdN1 ,YCDgea5o
153 | Supplier#000008001|gGU5ucMbIv44xIiypL5KK9ZDgbMFpS4JmJQn7qg
154 | Supplier#000008125|eKfrrdSQ1g
155 | Supplier#000008172|nkVf4sERrJKgUGkvamajC
156 | Supplier#000008218|7hQ5Yk,vC6a C1Kir7YhdN9E8n2t8K70
157 | Supplier#000008335|dsmwQ616A2Fg7frTRzWtXys54mXmb
158 | Supplier#000008379|GeHYbtj,EQacbKAWsyz7SilH5BJ7cY6Pq
159 | Supplier#000008554|UVtVFbwh7wjCm
160 | Supplier#000008591|3G86tepvfRxn
161 | Supplier#000008766|RhYLwyTlHoNkIB ZqE8M,IBQlU PaLh
162 | Supplier#000008784|BZP73YHtEXj08SHlm5n0XuBYIhR35
163 | Supplier#000008819|c9mTo4u5,PzinQycB4SG0L7ob3 crGkyb0
164 | Supplier#000008871|ek1wFrXzUZd
165 | Supplier#000008898|qkoZBx4m 72,Svpfn8C1a b5bee0wYqUlqv,nl
166 | Supplier#000008927|LgwVct4dJnK59FBF50U4lvb6lNGeO9iD9lHkz
167 | Supplier#000008936|OkNmkz3NUGOKZ7j1uTJkR,
168 | Supplier#000009058|JDi9yv70YQ,5GEB6
169 | Supplier#000009060|4i8KTf6YCwNcKtshi2m52R8NtJYxLpAHlCn
170 | Supplier#000009250|9Zdg4CKQ4,AACYfH
171 | Supplier#000009265|El1z,MYnP5ckp
172 | Supplier#000009270|OVPczHDUsYBQGzLbG4dG3G 6Re9y8TqGyZ8F4
173 | Supplier#000009358|Ds5FfdEuXPXVjjRGeq
174 | Supplier#000009418|chJOHEWwW2Iec5roXAeLiyhe8fKYus
175 | Supplier#000009427|clo4KAvzhCQVT,N
176 | Supplier#000009502|eGKoX8yZf6qHMJdqQ0XRhH8,9gNs3z4
177 | Supplier#000009518|,GTEu,DAA9M8H,IDjKvLha80v
178 | Supplier#000009597|NzBmqXeWCAaQcPnn,nldM5XeD7md6G5qBqGQ
179 | Supplier#000009642|H6Mp3wghJzc61Rb
180 | Supplier#000009663|VNN6byIi5Ad1LPgRo8JcqwLsB8kb6ajig5s
181 | Supplier#000009754|EeJyCPZ5qf0BLodby91xROEJPvfGA7ayI
182 | Supplier#000009818|6jPfNtL1KnMUnRZdI0,TfvsiGmJbSTM4SCzEA3L
183 | Supplier#000009893|b85XhVuxBTTZTychS
184 | Supplier#000009913|Xv9xCmjx5N ms
185 | Supplier#000009979|Ote0EB9LmVAeCZHyK K
186 |
--------------------------------------------------------------------------------