├── .gitignore ├── Makefile ├── .github ├── issue_commands.json ├── dependabot.yml ├── CODEOWNERS └── workflows │ ├── issue_commands.yml │ └── ci.yml ├── response.go ├── CONTRIBUTING.md ├── macros.go ├── mock ├── mock_driver.go ├── sqlmock.go └── csv │ ├── csv_mock.go │ └── csv_data.go ├── README.md ├── errors.go ├── health.go ├── driver-mock.go ├── query_integration_test.go ├── metrics.go ├── driver.go ├── completion.go ├── health_test.go ├── datasource_connect_test.go ├── data-mock.go ├── dataframe_test.go ├── go.mod ├── connector.go ├── test └── driver.go ├── completion_test.go ├── CHANGELOG.md ├── datasource_rowlimit_test.go ├── query.go ├── LICENSE ├── query_test.go ├── datasource_test.go ├── datasource.go └── go.sum /.gitignore: -------------------------------------------------------------------------------- 1 | # Editor 2 | .idea 3 | .vscode 4 | .idea/ 5 | *.iml 6 | 7 | mock-data -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: drone 2 | 3 | drone: 4 | drone lint 5 | drone --server https://drone.grafana.net sign --save grafana/sqlds -------------------------------------------------------------------------------- /.github/issue_commands.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "type": "label", 4 | "name": "type/docs", 5 | "action": "addToProject", 6 | "addToProject": { 7 | "url": "https://github.com/orgs/grafana/projects/69" 8 | } 9 | } 10 | ] 11 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: 'gomod' 4 | directory: '/' 5 | schedule: 6 | interval: 'weekly' 7 | groups: 8 | all-go-dependencies: 9 | patterns: 10 | - '*' 11 | - package-ecosystem: 'github-actions' 12 | directory: '/' 13 | schedule: 14 | interval: 'weekly' 15 | groups: 16 | all-github-action-dependencies: 17 | patterns: 18 | - '*' 19 | -------------------------------------------------------------------------------- /response.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/grafana/grafana-plugin-sdk-go/backend" 7 | ) 8 | 9 | type Response struct { 10 | res *backend.QueryDataResponse 11 | mtx *sync.Mutex 12 | } 13 | 14 | func (r *Response) Set(refID string, res backend.DataResponse) { 15 | r.mtx.Lock() 16 | r.res.Responses[refID] = res 17 | r.mtx.Unlock() 18 | } 19 | 20 | func (r *Response) Response() *backend.QueryDataResponse { 21 | return r.res 22 | } 23 | 24 | func NewResponse(res *backend.QueryDataResponse) *Response { 25 | return &Response{ 26 | res: res, 27 | mtx: &sync.Mutex{}, 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to sqlds 2 | 3 | ## Releasing 4 | 5 | If you want to create a new version of the sqlds for release, follow these steps: 6 | 7 | - Checkout the commit you want to tag (`git checkout `) 8 | - Run `git tag ` (For example **v3.3.0**) 9 | - NOTE: We're using Lightweight Tags, so no other options are required 10 | - Run `git push origin ` 11 | - Verify that the tag was create successfully [here](https://github.com/grafana/sqlds/tags) 12 | - Create a 
release from the tag on GitHub. 13 | - Use the tag name as title. 14 | - Click on the _Generate release notes_ button. 15 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Lines starting with '#' are comments. 2 | # Each line is a file pattern followed by one or more owners. 3 | 4 | # More details are here: https://help.github.com/articles/about-codeowners/ 5 | 6 | # The '*' pattern is global owners. 7 | 8 | # Order is important. The last matching pattern has the most precedence. 9 | # The folders are ordered as follows: 10 | 11 | # In each subsection folders are ordered first by depth, then alphabetically. 12 | # This should make it easy to add new rules without breaking existing ones. 13 | 14 | * @grafana/aws-datasources 15 | * @grafana/oss-big-tent 16 | * @grafana/partner-datasources 17 | * @grafana/enterprise-datasources 18 | -------------------------------------------------------------------------------- /.github/workflows/issue_commands.yml: -------------------------------------------------------------------------------- 1 | name: Run commands when issues are labeled 2 | permissions: {} 3 | 4 | on: 5 | issues: 6 | types: [labeled] 7 | jobs: 8 | main: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Checkout Actions 12 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 13 | with: 14 | repository: "grafana/grafana-github-actions" 15 | persist-credentials: false 16 | path: ./actions 17 | ref: main 18 | - name: Install Actions 19 | run: npm install --production --prefix ./actions 20 | - name: Run Commands 21 | uses: ./actions/commands 22 | env: 23 | ISSUE_COMMANDS_TOKEN: ${{secrets.ISSUE_COMMANDS_TOKEN}} 24 | with: 25 | token: ${ISSUE_COMMANDS_TOKEN} 26 | configPath: issue_commands 27 | -------------------------------------------------------------------------------- /macros.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" 7 | ) 8 | 9 | var ( 10 | ErrorParsingMacroBrackets = errors.New("failed to parse macro arguments (missing close bracket?)") 11 | ) 12 | 13 | // MacroFunc defines a signature for applying a query macro 14 | // Query macro implementations are defined by users / consumers of this package 15 | // Deprecated: use sqlutil.MacroFunc directly 16 | type MacroFunc = sqlutil.MacroFunc 17 | 18 | // Macros is a list of MacroFuncs. 19 | // The "string" key is the name of the macro function. This name has to be regex friendly. 
20 | // Deprecated: use sqlutil.Macros directly 21 | type Macros = sqlutil.Macros 22 | 23 | // Deprecated: use sqlutil.DefaultMacros directly 24 | var DefaultMacros = sqlutil.DefaultMacros 25 | 26 | // Interpolate wraps sqlutil.Interpolate for temporary backwards-compatibility 27 | // Deprecated: use sqlutil.Interpolate directly 28 | func Interpolate(driver Driver, query *Query) (string, error) { 29 | return sqlutil.Interpolate(query, driver.Macros()) 30 | } 31 | -------------------------------------------------------------------------------- /mock/mock_driver.go: -------------------------------------------------------------------------------- 1 | package mock 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "database/sql/driver" 7 | "encoding/json" 8 | "errors" 9 | "sync" 10 | 11 | "github.com/grafana/grafana-plugin-sdk-go/backend" 12 | ) 13 | 14 | var pool *mockDriver 15 | 16 | func RegisterDriver(name string, handler DBHandler) *mockDriver { 17 | pool = &mockDriver{ 18 | conns: make(map[string]*sqlmock), 19 | handler: handler, 20 | } 21 | sql.Register(name, pool) 22 | return pool 23 | } 24 | 25 | type DBHandler interface { 26 | Ping(ctx context.Context) error 27 | Query(args []driver.Value) (driver.Rows, error) 28 | Columns() []string 29 | Next(dest []driver.Value) error 30 | } 31 | 32 | type mockDriver struct { 33 | sync.Mutex 34 | conns map[string]*sqlmock 35 | handler DBHandler 36 | } 37 | 38 | func (d *mockDriver) Open(dsn string) (driver.Conn, error) { 39 | if len(d.conns) == 0 { 40 | mock := &sqlmock{ 41 | drv: d, 42 | } 43 | d.conns = map[string]*sqlmock{ 44 | dsn: mock, 45 | } 46 | } 47 | return d.conns[dsn], nil 48 | } 49 | 50 | func (d *mockDriver) Connect(backend.DataSourceInstanceSettings, json.RawMessage) (db *sql.DB, err error) { 51 | return nil, errors.New("context deadline exceeded") 52 | } 53 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://drone.grafana.net/api/badges/grafana/sqlds/status.svg)](https://drone.grafana.net/grafana/sqlds) 2 | 3 | # sqlds 4 | 5 | `sqlds` stands for `SQL Datasource`. 6 | 7 | Most SQL-driven datasources, like `Postgres`, `MySQL`, and `MSSQL` share extremely similar codebases. 8 | 9 | The `sqlds` package is intended to remove the repetition of these datasources and centralize the datasource logic. The only thing that the datasources themselves should have to define is connecting to the database, and what driver to use, and the plugin frontend. 10 | 11 | **Usage** 12 | 13 | ```go 14 | if err := datasource.Manage("my-datasource", datasourceFactory, datasource.ManageOpts{}); err != nil { 15 | log.DefaultLogger.Error(err.Error()) 16 | os.Exit(1) 17 | } 18 | 19 | func datasourceFactory(ctx context.Context, s backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) { 20 | ds := sqlds.NewDatasource(&myDatasource{}) 21 | return ds.NewDatasource(ctx, s) 22 | } 23 | ``` 24 | 25 | ## Standardization 26 | 27 | ### Macros 28 | 29 | The `sqlds` package formerly defined a set of default macros, but those have been migrated to `grafana-plugin-sdk-go`: see [the code](https://github.com/grafana/grafana-plugin-sdk-go/blob/main/data/sqlutil/macros.go) for details. 
30 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | on: 3 | pull_request: 4 | push: 5 | branches: 6 | - main 7 | permissions: 8 | contents: read 9 | env: 10 | GO_VERSION: 1.24 11 | jobs: 12 | tests: 13 | runs-on: ubuntu-latest 14 | services: 15 | mysql: 16 | image: mysql:9.5@sha256:569c4128dfa625ac2ac62cdd8af588a3a6a60a049d1a8d8f0fac95880ecdbbe5 17 | env: 18 | MYSQL_ALLOW_EMPTY_PASSWORD: yes 19 | MYSQL_DATABASE: mysql 20 | MYSQL_USER: mysql 21 | MYSQL_PASSWORD: mysql 22 | MYSQL_HOST: 127.0.0.1 23 | ports: 24 | - 3306:3306 25 | options: --health-cmd="mysqladmin ping" --health-interval=10s --health-timeout=5s --health-retries=3 26 | steps: 27 | - name: checkout 28 | uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 29 | with: 30 | persist-credentials: false 31 | - name: setup Go environment 32 | uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 33 | with: 34 | go-version: ${{ env.GO_VERSION }} 35 | cache-dependency-path: "**/*.sum" 36 | - name: Test 37 | run: go test -v ./... 38 | - name: Integration tests 39 | run: go test -v ./... 40 | env: 41 | INTEGRATION_TESTS: "true" 42 | MYSQL_URL: "mysql:mysql@tcp(127.0.0.1:3306)/mysql" 43 | -------------------------------------------------------------------------------- /errors.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/grafana/grafana-plugin-sdk-go/backend" 7 | ) 8 | 9 | var ( 10 | // ErrorBadDatasource is returned if the data source could not be asserted to the correct type (this should basically never happen?) 
11 | ErrorBadDatasource = errors.New("type assertion to datasource failed") 12 | // ErrorJSON is returned when json.Unmarshal fails 13 | ErrorJSON = errors.New("error unmarshaling query JSON into the Query Model") 14 | // ErrorQuery is returned when the query could not complete / execute 15 | ErrorQuery = errors.New("error querying the database") 16 | // ErrorTimeout is returned if the query has timed out 17 | ErrorTimeout = errors.New("query timeout exceeded") 18 | // ErrorNoResults is returned if there were no results returned 19 | ErrorNoResults = errors.New("no results returned from query") 20 | // ErrorRowValidation is returned when SQL rows validation fails (e.g., connection issues, corrupt results) 21 | ErrorRowValidation = errors.New("SQL rows validation failed") 22 | // ErrorConnectionClosed is returned when the database connection is unexpectedly closed 23 | ErrorConnectionClosed = errors.New("database connection closed") 24 | ) 25 | 26 | func ErrorSource(err error) backend.ErrorSource { 27 | if backend.IsDownstreamError(err) { 28 | return backend.ErrorSourceDownstream 29 | } 30 | return backend.ErrorSourcePlugin 31 | } 32 | -------------------------------------------------------------------------------- /health.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/grafana/grafana-plugin-sdk-go/backend" 8 | ) 9 | 10 | type HealthChecker struct { 11 | Connector *Connector 12 | Metrics Metrics 13 | PreCheckHealth func(ctx context.Context, req *backend.CheckHealthRequest) *backend.CheckHealthResult 14 | PostCheckHealth func(ctx context.Context, req *backend.CheckHealthRequest) *backend.CheckHealthResult 15 | } 16 | 17 | func (hc *HealthChecker) Check(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { 18 | start := time.Now() 19 | if hc.PreCheckHealth != nil { 20 | if res := hc.PreCheckHealth(ctx, req); res != nil && res.Status == backend.HealthStatusError { 21 | hc.Metrics.CollectDuration(SourceDownstream, StatusError, time.Since(start).Seconds()) 22 | return res, nil 23 | } 24 | } 25 | if _, err := hc.Connector.Connect(ctx, req.GetHTTPHeaders()); err != nil { 26 | hc.Metrics.CollectDuration(SourceDownstream, StatusError, time.Since(start).Seconds()) 27 | return &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: err.Error()}, nil 28 | } 29 | if hc.PostCheckHealth != nil { 30 | if res := hc.PostCheckHealth(ctx, req); res != nil && res.Status == backend.HealthStatusError { 31 | hc.Metrics.CollectDuration(SourceDownstream, StatusError, time.Since(start).Seconds()) 32 | return res, nil 33 | } 34 | } 35 | hc.Metrics.CollectDuration(SourceDownstream, StatusOK, time.Since(start).Seconds()) 36 | return &backend.CheckHealthResult{Status: backend.HealthStatusOk, Message: "Data source is working"}, nil 37 | } 38 | -------------------------------------------------------------------------------- /mock/sqlmock.go: -------------------------------------------------------------------------------- 1 | package mock 2 | 3 | import ( 4 | "context" 5 | "database/sql/driver" 6 | ) 7 | 8 | type sqlmock struct { 9 | drv *mockDriver 10 | } 11 | 12 | // Begin meets http://golang.org/pkg/database/sql/driver/#Conn interface 13 | func (c *sqlmock) Begin() (driver.Tx, error) { 14 | return c, nil 15 | } 16 | 17 | // Prepare meets http://golang.org/pkg/database/sql/driver/#Conn interface 18 | func (c *sqlmock) Prepare(query string) (driver.Stmt, 
error) { 19 | return &statement{c, query}, nil 20 | } 21 | 22 | // Commit meets http://golang.org/pkg/database/sql/driver/#Conn interface 23 | func (c *sqlmock) Commit() error { 24 | return nil 25 | } 26 | 27 | // Rollback meets http://golang.org/pkg/database/sql/driver/#Conn interface 28 | func (c *sqlmock) Rollback() error { 29 | return nil 30 | } 31 | 32 | // Close meets http://golang.org/pkg/database/sql/driver/#Conn interface 33 | func (c *sqlmock) Close() error { 34 | return nil 35 | } 36 | 37 | func (c *sqlmock) Ping(ctx context.Context) error { 38 | return c.drv.handler.Ping(ctx) 39 | } 40 | 41 | // statement 42 | type statement struct { 43 | conn *sqlmock 44 | query string 45 | } 46 | 47 | func (stmt *statement) Exec(args []driver.Value) (driver.Result, error) { 48 | return nil, nil 49 | } 50 | 51 | func (stmt *statement) Query(args []driver.Value) (driver.Rows, error) { 52 | if stmt.conn.drv.handler != nil { 53 | return stmt.conn.drv.handler.Query(args) 54 | } 55 | return nil, nil 56 | } 57 | 58 | func (stmt *statement) Close() error { 59 | return nil 60 | } 61 | 62 | func (stmt *statement) NumInput() int { 63 | return -1 64 | } 65 | 66 | type rows struct { 67 | conn *sqlmock 68 | } 69 | 70 | func (r rows) Columns() []string { 71 | if r.conn.drv.handler != nil { 72 | return r.conn.drv.handler.Columns() 73 | } 74 | return []string{} 75 | } 76 | 77 | func (r rows) Close() error { 78 | return nil 79 | } 80 | 81 | func (r rows) Next(dest []driver.Value) error { 82 | if r.conn.drv.handler != nil { 83 | return r.conn.drv.handler.Next(dest) 84 | } 85 | return nil 86 | } 87 | -------------------------------------------------------------------------------- /driver-mock.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "encoding/json" 7 | "errors" 8 | "os" 9 | "path/filepath" 10 | "strings" 11 | "time" 12 | 13 | "github.com/grafana/grafana-plugin-sdk-go/backend" 14 | "github.com/grafana/grafana-plugin-sdk-go/data" 15 | "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" 16 | _ "github.com/mithrandie/csvq-driver" 17 | ) 18 | 19 | // SQLMock connects to a local folder with csv files 20 | type SQLMock struct { 21 | folder string 22 | ShouldFailToConnect bool 23 | } 24 | 25 | func (h *SQLMock) Settings(_ context.Context, _ backend.DataSourceInstanceSettings) DriverSettings { 26 | return DriverSettings{ 27 | FillMode: &data.FillMissing{ 28 | Mode: data.FillModeNull, 29 | }, 30 | Timeout: time.Second * time.Duration(30), 31 | } 32 | } 33 | 34 | // Connect opens a sql.DB connection using datasource settings 35 | func (h *SQLMock) Connect(_ context.Context, _ backend.DataSourceInstanceSettings, msg json.RawMessage) (*sql.DB, error) { 36 | if h.ShouldFailToConnect { 37 | return nil, errors.New("failed to create mock") 38 | } 39 | backend.Logger.Debug("connecting to mock data") 40 | folder := h.folder 41 | if folder == "" { 42 | folder = MockDataFolder 43 | } 44 | if !strings.HasPrefix(folder, "/") { 45 | folder = "/" + folder 46 | } 47 | err := CreateMockTable("users", folder) 48 | if err != nil { 49 | backend.Logger.Error("failed creating mock data: " + err.Error()) 50 | return nil, err 51 | } 52 | ex, err := os.Executable() 53 | if err != nil { 54 | backend.Logger.Error("failed accessing Mock path: " + err.Error()) 55 | } 56 | exPath := filepath.Dir(ex) 57 | db, err := sql.Open("csvq", exPath+folder) 58 | if err != nil { 59 | backend.Logger.Error("failed opening Mock sql: " + 
err.Error()) 60 | return nil, err 61 | } 62 | err = db.Ping() 63 | if err != nil { 64 | backend.Logger.Error("failed connecting to Mock: " + err.Error()) 65 | } 66 | return db, nil 67 | } 68 | 69 | // Converters defines the list of string converters 70 | func (h *SQLMock) Converters() []sqlutil.Converter { 71 | return []sqlutil.Converter{} 72 | } 73 | 74 | // Macros returns the list of macro functions that convert the macros of the raw query 75 | func (h *SQLMock) Macros() Macros { 76 | return Macros{} 77 | } 78 | -------------------------------------------------------------------------------- /query_integration_test.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "errors" 7 | "os" 8 | "strings" 9 | "testing" 10 | "time" 11 | 12 | "github.com/grafana/grafana-plugin-sdk-go/backend" 13 | "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" 14 | 15 | _ "github.com/go-sql-driver/mysql" 16 | ) 17 | 18 | type testArgs struct { 19 | MySQLURL string 20 | RunIntegrationTests bool 21 | } 22 | 23 | func testEnvArgs(t *testing.T) testArgs { 24 | t.Helper() 25 | var args testArgs 26 | if val, ok := os.LookupEnv("MYSQL_URL"); ok { 27 | args.MySQLURL = val 28 | } else { 29 | args.MySQLURL = "mysql:mysql@/mysql" 30 | } 31 | 32 | if _, ok := os.LookupEnv("INTEGRATION_TESTS"); ok { 33 | args.RunIntegrationTests = true 34 | } 35 | 36 | return args 37 | } 38 | 39 | func TestQuery_MySQL(t *testing.T) { 40 | var ( 41 | args = testEnvArgs(t) 42 | ctx = context.Background() 43 | 44 | db *sql.DB 45 | ) 46 | 47 | if !args.RunIntegrationTests { 48 | t.SkipNow() 49 | } 50 | 51 | ticker := time.NewTicker(time.Second * 5) 52 | defer ticker.Stop() 53 | 54 | // Attempt to connect multiple times because these tests are run in Drone, where the mysql server may not be immediately available when this test is run. 
55 | limit := 10 56 | for i := 0; i < limit; i++ { 57 | t.Log("Attempting mysql connection...") 58 | d, err := sql.Open("mysql", args.MySQLURL) 59 | if err == nil { 60 | if err := d.Ping(); err == nil { 61 | db = d 62 | break 63 | } 64 | } 65 | 66 | <-ticker.C 67 | } 68 | defer db.Close() 69 | 70 | t.Run("The query should return a context.Canceled if it exceeds the timeout", func(t *testing.T) { 71 | ctx, cancel := context.WithTimeout(ctx, time.Second) 72 | defer cancel() 73 | 74 | q := &Query{ 75 | RawSQL: "SELECT SLEEP(5)", 76 | } 77 | 78 | settings := backend.DataSourceInstanceSettings{ 79 | Name: "foo", 80 | } 81 | 82 | sqlQuery := NewQuery(db, settings, []sqlutil.Converter{}, nil, defaultRowLimit) 83 | _, err := sqlQuery.Run(ctx, q, nil) 84 | if err == nil { 85 | t.Fatal("expected an error but received none") 86 | } 87 | if !(errors.Is(err, context.Canceled) || strings.Contains(err.Error(), "context deadline exceeded")) { 88 | t.Fatal("expected a context.Canceled error but received:", err) 89 | } 90 | }) 91 | } 92 | -------------------------------------------------------------------------------- /metrics.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/grafana/grafana-plugin-sdk-go/backend" 8 | "github.com/prometheus/client_golang/prometheus" 9 | "github.com/prometheus/client_golang/prometheus/promauto" 10 | ) 11 | 12 | type Metrics struct { 13 | DSName string 14 | DSType string 15 | Endpoint Endpoint 16 | } 17 | 18 | type Status string 19 | type Endpoint string 20 | type Source string 21 | 22 | const ( 23 | StatusOK Status = "ok" 24 | StatusError Status = "error" 25 | EndpointHealth Endpoint = "health" 26 | EndpointQuery Endpoint = "query" 27 | SourceDownstream Source = "downstream" 28 | SourcePlugin Source = "plugin" 29 | ) 30 | 31 | var durationMetric = promauto.NewHistogramVec(prometheus.HistogramOpts{ 32 | Namespace: "plugins", 33 | Name: "plugin_request_duration_seconds", 34 | Help: "Duration of plugin execution", 35 | }, []string{"datasource_name", "datasource_type", "source", "endpoint", "status"}) 36 | 37 | func NewMetrics(dsName, dsType string, endpoint Endpoint) Metrics { 38 | dsName, ok := sanitizeLabelName(dsName) 39 | if !ok { 40 | backend.Logger.Warn("Failed to sanitize datasource name for prometheus label", dsName) 41 | } 42 | return Metrics{DSName: dsName, DSType: dsType, Endpoint: endpoint} 43 | } 44 | 45 | func (m *Metrics) WithEndpoint(endpoint Endpoint) Metrics { 46 | return Metrics{DSName: m.DSName, DSType: m.DSType, Endpoint: endpoint} 47 | } 48 | 49 | func (m *Metrics) CollectDuration(source Source, status Status, duration float64) { 50 | durationMetric.WithLabelValues(m.DSName, m.DSType, string(source), string(m.Endpoint), string(status)).Observe(duration) 51 | } 52 | 53 | // sanitizeLabelName removes all invalid chars from the label name. 54 | // If the label name is empty or contains only invalid chars, it will return false indicating it was not sanitized. 
55 | // copied from https://github.com/grafana/grafana/blob/main/pkg/infra/metrics/metricutil/utils.go#L14 56 | func sanitizeLabelName(name string) (string, bool) { 57 | if len(name) == 0 { 58 | backend.Logger.Warn(fmt.Sprintf("label name cannot be empty: %s", name)) 59 | return "", false 60 | } 61 | 62 | out := strings.Builder{} 63 | for i, b := range name { 64 | if (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0) { 65 | out.WriteRune(b) 66 | } else if b == ' ' { 67 | out.WriteRune('_') 68 | } 69 | } 70 | 71 | if out.Len() == 0 { 72 | backend.Logger.Warn(fmt.Sprintf("label name only contains invalid chars: %q", name)) 73 | return "", false 74 | } 75 | 76 | return out.String(), true 77 | } 78 | -------------------------------------------------------------------------------- /mock/csv/csv_mock.go: -------------------------------------------------------------------------------- 1 | package csv 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "encoding/json" 7 | "errors" 8 | "os" 9 | "path/filepath" 10 | "strings" 11 | "time" 12 | 13 | "github.com/grafana/grafana-plugin-sdk-go/backend" 14 | "github.com/grafana/grafana-plugin-sdk-go/data" 15 | "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" 16 | "github.com/grafana/sqlds/v5" 17 | _ "github.com/mithrandie/csvq-driver" 18 | ) 19 | 20 | // SQLCSVMock connects to a local folder with csv files 21 | type SQLCSVMock struct { 22 | folder string 23 | } 24 | 25 | func (h *SQLCSVMock) Settings(_ context.Context, _ backend.DataSourceInstanceSettings) sqlds.DriverSettings { 26 | return sqlds.DriverSettings{ 27 | FillMode: &data.FillMissing{ 28 | Mode: data.FillModeNull, 29 | }, 30 | Timeout: time.Second * time.Duration(30), 31 | } 32 | } 33 | 34 | // Connect opens a sql.DB connection using datasource settings 35 | func (h *SQLCSVMock) Connect(_ context.Context, _ backend.DataSourceInstanceSettings, msg json.RawMessage) (*sql.DB, error) { 36 | backend.Logger.Debug("connecting to mock data") 37 | folder := h.folder 38 | if folder == "" { 39 | folder = MockDataFolder 40 | } 41 | if !strings.HasPrefix(folder, "/") { 42 | folder = "/" + folder 43 | } 44 | err := CreateMockTable("users", folder) 45 | if err != nil { 46 | backend.Logger.Error("failed creating mock data: " + err.Error()) 47 | return nil, err 48 | } 49 | ex, err := os.Executable() 50 | if err != nil { 51 | backend.Logger.Error("failed accessing Mock path: " + err.Error()) 52 | } 53 | exPath := filepath.Dir(ex) 54 | db, err := sql.Open("csvq", exPath+folder) 55 | if err != nil { 56 | backend.Logger.Error("failed opening Mock sql: " + err.Error()) 57 | return nil, err 58 | } 59 | err = db.Ping() 60 | if err != nil { 61 | backend.Logger.Error("failed connecting to Mock: " + err.Error()) 62 | } 63 | 64 | timeout := time.Duration(1999) 65 | ctx, cancel := context.WithTimeout(context.Background(), timeout*time.Second) 66 | defer cancel() 67 | 68 | chErr := make(chan error, 1) 69 | go func() { 70 | err = db.PingContext(ctx) 71 | duration := time.Second * 60 72 | time.Sleep(duration) 73 | chErr <- err 74 | }() 75 | 76 | select { 77 | case err := <-chErr: 78 | if err != nil { 79 | // log.DefaultLogger.Error(err.Error()) 80 | return db, err 81 | } 82 | case <-time.After(timeout * time.Second): 83 | return db, errors.New("connection timed out") 84 | } 85 | return db, nil 86 | } 87 | 88 | // Converters defines list of string convertors 89 | func (h *SQLCSVMock) Converters() []sqlutil.Converter { 90 | return []sqlutil.Converter{} 91 | } 92 | 93 | // 
Macros returns the list of macro functions that convert the macros of the raw query 94 | func (h *SQLCSVMock) Macros() sqlds.Macros { 95 | return sqlds.Macros{} 96 | } 97 | -------------------------------------------------------------------------------- /driver.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "encoding/json" 7 | "net/http" 8 | "time" 9 | 10 | "github.com/grafana/grafana-plugin-sdk-go/backend" 11 | "github.com/grafana/grafana-plugin-sdk-go/data" 12 | "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" 13 | ) 14 | 15 | type DriverSettings struct { 16 | FillMode *data.FillMissing 17 | RetryOn []string 18 | Timeout time.Duration 19 | Retries int 20 | Pause int 21 | ForwardHeaders bool 22 | Errors bool 23 | RowLimit int64 24 | } 25 | 26 | // Driver is a simple interface that defines how to connect to a backend SQL datasource 27 | // Plugin creators will need to implement this in order to create a managed datasource 28 | type Driver interface { 29 | // Connect connects to the database. It does not need to call `db.Ping()` 30 | Connect(context.Context, backend.DataSourceInstanceSettings, json.RawMessage) (*sql.DB, error) 31 | // Settings are read whenever the plugin is initialized, or after the data source settings are updated 32 | Settings(context.Context, backend.DataSourceInstanceSettings) DriverSettings 33 | Macros() Macros 34 | Converters() []sqlutil.Converter 35 | } 36 | 37 | // Connection represents a SQL connection and is satisfied by the *sql.DB type 38 | // For now, we only add the functions that we need / actively use. Some other candidates for future use could include the ExecContext and BeginTxContext functions 39 | type Connection interface { 40 | Close() error 41 | Ping() error 42 | PingContext(ctx context.Context) error 43 | QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) 44 | } 45 | 46 | // QueryDataMutator is an additional interface that could be implemented by the driver. 47 | // This adds the ability to the driver to optionally mutate the query before it's run 48 | // with the QueryDataRequest. 49 | type QueryDataMutator interface { 50 | MutateQueryData(ctx context.Context, req *backend.QueryDataRequest) (context.Context, *backend.QueryDataRequest) 51 | } 52 | 53 | // CheckHealthMutator is an additional interface that could be implemented by the driver. 54 | // This adds the ability to the driver to optionally mutate the CheckHealth request before it's run 55 | type CheckHealthMutator interface { 56 | MutateCheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (context.Context, *backend.CheckHealthRequest) 57 | } 58 | 59 | // QueryMutator is an additional interface that could be implemented by the driver. 60 | // This adds the ability to the driver to mutate the query before it is run. 61 | type QueryMutator interface { 62 | MutateQuery(ctx context.Context, req backend.DataQuery) (context.Context, backend.DataQuery) 63 | } 64 | 65 | // QueryArgSetter is an additional interface that could be implemented by the driver. 66 | // This adds the ability to the driver to optionally set query args that are then sent down to the database. 67 | type QueryArgSetter interface { 68 | SetQueryArgs(ctx context.Context, headers http.Header) []interface{} 69 | } 70 | 71 | // ResponseMutator is an additional interface that could be implemented by the driver. 72 | // This adds the ability to the driver, so it can mutate a response from the driver before it's returned to the client. 
73 | type ResponseMutator interface { 74 | MutateResponse(ctx context.Context, res data.Frames) (data.Frames, error) 75 | } 76 | 77 | type QueryErrorMutator interface { 78 | MutateQueryError(err error) backend.ErrorWithSource 79 | } 80 | -------------------------------------------------------------------------------- /completion.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "net/http" 9 | 10 | "github.com/grafana/grafana-plugin-sdk-go/backend" 11 | ) 12 | 13 | const ( 14 | schemas = "schemas" 15 | tables = "tables" 16 | columns = "columns" 17 | ) 18 | 19 | var ( 20 | // ErrorNotImplemented is returned if the function is not implemented by the provided Driver (the Completable pointer is nil) 21 | ErrorNotImplemented = errors.New("not implemented") 22 | // ErrorWrongOptions when trying to parse Options with a invalid JSON 23 | ErrorWrongOptions = errors.New("error reading query options") 24 | ) 25 | 26 | // Options are used to query schemas, tables and columns. They will be encoded in the request body (e.g. {"database": "mydb"}) 27 | type Options map[string]string 28 | 29 | // Completable will be used to autocomplete Tables Schemas and Columns for SQL languages 30 | type Completable interface { 31 | Schemas(ctx context.Context, options Options) ([]string, error) 32 | Tables(ctx context.Context, options Options) ([]string, error) 33 | Columns(ctx context.Context, options Options) ([]string, error) 34 | } 35 | 36 | func handleError(rw http.ResponseWriter, err error) { 37 | rw.WriteHeader(http.StatusBadRequest) 38 | _, err = rw.Write([]byte(err.Error())) 39 | if err != nil { 40 | backend.Logger.Error(err.Error()) 41 | } 42 | } 43 | 44 | func sendResourceResponse(rw http.ResponseWriter, res []string) { 45 | rw.Header().Add("Content-Type", "application/json") 46 | if err := json.NewEncoder(rw).Encode(res); err != nil { 47 | handleError(rw, err) 48 | return 49 | } 50 | } 51 | 52 | func (ds *SQLDatasource) getResources(rtype string) func(rw http.ResponseWriter, req *http.Request) { 53 | return func(rw http.ResponseWriter, req *http.Request) { 54 | if ds.Completable == nil { 55 | handleError(rw, ErrorNotImplemented) 56 | return 57 | } 58 | 59 | options := Options{} 60 | if req.Body != nil { 61 | err := json.NewDecoder(req.Body).Decode(&options) 62 | if err != nil { 63 | handleError(rw, err) 64 | return 65 | } 66 | } 67 | 68 | var res []string 69 | var err error 70 | switch rtype { 71 | case schemas: 72 | res, err = ds.Completable.Schemas(req.Context(), options) 73 | case tables: 74 | res, err = ds.Completable.Tables(req.Context(), options) 75 | case columns: 76 | res, err = ds.Completable.Columns(req.Context(), options) 77 | default: 78 | err = fmt.Errorf("unexpected resource type: %s", rtype) 79 | } 80 | if err != nil { 81 | handleError(rw, err) 82 | return 83 | } 84 | 85 | sendResourceResponse(rw, res) 86 | } 87 | } 88 | 89 | func (ds *SQLDatasource) registerRoutes(mux *http.ServeMux) error { 90 | defaultRoutes := map[string]func(http.ResponseWriter, *http.Request){ 91 | "/tables": ds.getResources(tables), 92 | "/schemas": ds.getResources(schemas), 93 | "/columns": ds.getResources(columns), 94 | } 95 | for route, handler := range defaultRoutes { 96 | mux.HandleFunc(route, handler) 97 | } 98 | for route, handler := range ds.CustomRoutes { 99 | if _, ok := defaultRoutes[route]; ok { 100 | return fmt.Errorf("unable to redefine %s, use the Completable interface 
instead", route) 101 | } 102 | mux.HandleFunc(route, handler) 103 | } 104 | return nil 105 | } 106 | 107 | func ParseOptions(rawOptions json.RawMessage) (Options, error) { 108 | args := Options{} 109 | if rawOptions != nil { 110 | err := json.Unmarshal(rawOptions, &args) 111 | if err != nil { 112 | return nil, fmt.Errorf("%w: %v", ErrorWrongOptions, err) 113 | } 114 | } 115 | return args, nil 116 | } 117 | -------------------------------------------------------------------------------- /health_test.go: -------------------------------------------------------------------------------- 1 | package sqlds_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/grafana/grafana-plugin-sdk-go/backend" 8 | sqlds "github.com/grafana/sqlds/v5" 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | ) 12 | 13 | func getFakeConnector(t *testing.T, shouldFail bool) *sqlds.Connector { 14 | t.Helper() 15 | c, _ := sqlds.NewConnector(context.TODO(), &sqlds.SQLMock{ShouldFailToConnect: shouldFail}, backend.DataSourceInstanceSettings{}, false) 16 | return c 17 | } 18 | 19 | func TestHealthChecker_Check(t *testing.T) { 20 | tests := []struct { 21 | name string 22 | Connector *sqlds.Connector 23 | Metrics sqlds.Metrics 24 | PreCheckHealth func(ctx context.Context, req *backend.CheckHealthRequest) *backend.CheckHealthResult 25 | PostCheckHealth func(ctx context.Context, req *backend.CheckHealthRequest) *backend.CheckHealthResult 26 | ctx context.Context 27 | req *backend.CheckHealthRequest 28 | want *backend.CheckHealthResult 29 | wantErr error 30 | }{ 31 | { 32 | name: "default health check should return valid result", 33 | Connector: getFakeConnector(t, false), 34 | }, 35 | { 36 | name: "should not error when pre check succeed", 37 | Connector: getFakeConnector(t, false), 38 | PreCheckHealth: func(ctx context.Context, req *backend.CheckHealthRequest) *backend.CheckHealthResult { 39 | return &backend.CheckHealthResult{Status: backend.HealthStatusOk} 40 | }, 41 | }, 42 | { 43 | name: "should error when pre check failed", 44 | Connector: getFakeConnector(t, false), 45 | PreCheckHealth: func(ctx context.Context, req *backend.CheckHealthRequest) *backend.CheckHealthResult { 46 | return &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: "unknown error"} 47 | }, 48 | want: &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: "unknown error"}, 49 | }, 50 | { 51 | name: "should return actual error when pre and post health check succeed but actual connect failed", 52 | Connector: getFakeConnector(t, true), 53 | PreCheckHealth: func(ctx context.Context, req *backend.CheckHealthRequest) *backend.CheckHealthResult { 54 | return &backend.CheckHealthResult{Status: backend.HealthStatusOk} 55 | }, 56 | PostCheckHealth: func(ctx context.Context, req *backend.CheckHealthRequest) *backend.CheckHealthResult { 57 | return &backend.CheckHealthResult{Status: backend.HealthStatusOk} 58 | }, 59 | want: &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: "unable to get default db connection"}, 60 | }, 61 | { 62 | name: "should not error when post check succeed", 63 | Connector: getFakeConnector(t, false), 64 | PostCheckHealth: func(ctx context.Context, req *backend.CheckHealthRequest) *backend.CheckHealthResult { 65 | return &backend.CheckHealthResult{Status: backend.HealthStatusOk} 66 | }, 67 | }, 68 | { 69 | name: "should error when post check failed", 70 | Connector: getFakeConnector(t, false), 71 | PostCheckHealth: func(ctx 
context.Context, req *backend.CheckHealthRequest) *backend.CheckHealthResult { 72 | return &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: "unknown error"} 73 | }, 74 | want: &backend.CheckHealthResult{Status: backend.HealthStatusError, Message: "unknown error"}, 75 | }, 76 | } 77 | for _, tt := range tests { 78 | t.Run(tt.name, func(t *testing.T) { 79 | connector := tt.Connector 80 | if connector == nil { 81 | connector = &sqlds.Connector{} 82 | } 83 | req := tt.req 84 | if req == nil { 85 | req = &backend.CheckHealthRequest{} 86 | } 87 | want := tt.want 88 | if want == nil { 89 | want = &backend.CheckHealthResult{Status: backend.HealthStatusOk, Message: "Data source is working"} 90 | } 91 | ctx := tt.ctx 92 | if ctx == nil { 93 | ctx = context.Background() 94 | } 95 | hc := &sqlds.HealthChecker{ 96 | Connector: connector, 97 | Metrics: tt.Metrics, 98 | PreCheckHealth: tt.PreCheckHealth, 99 | PostCheckHealth: tt.PostCheckHealth, 100 | } 101 | got, err := hc.Check(ctx, req) 102 | if tt.wantErr != nil { 103 | require.NotNil(t, err) 104 | assert.Equal(t, tt.wantErr.Error(), err.Error()) 105 | return 106 | } 107 | require.Nil(t, err) 108 | require.NotNil(t, got) 109 | assert.Equal(t, want.Message, got.Message) 110 | assert.Equal(t, want.Status, got.Status) 111 | }) 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /datasource_connect_test.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "database/sql/driver" 7 | "encoding/json" 8 | "errors" 9 | "testing" 10 | 11 | "github.com/grafana/grafana-plugin-sdk-go/backend" 12 | "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" 13 | ) 14 | 15 | type fakeDriver struct { 16 | openDBfn func(msg json.RawMessage) (*sql.DB, error) 17 | 18 | Driver 19 | } 20 | 21 | func (d fakeDriver) Connect(_ context.Context, _ backend.DataSourceInstanceSettings, msg json.RawMessage) (db *sql.DB, err error) { 22 | return d.openDBfn(msg) 23 | } 24 | 25 | func (d fakeDriver) Macros() Macros { 26 | return Macros{} 27 | } 28 | 29 | func (d fakeDriver) Converters() []sqlutil.Converter { 30 | return []sqlutil.Converter{} 31 | } 32 | 33 | type fakeSQLConnector struct{} 34 | 35 | func (f fakeSQLConnector) Connect(_ context.Context) (driver.Conn, error) { 36 | return nil, nil 37 | } 38 | 39 | func (f fakeSQLConnector) Driver() driver.Driver { 40 | return nil 41 | } 42 | 43 | func Test_getDBConnectionFromQuery(t *testing.T) { 44 | db := &sql.DB{} 45 | db2 := &sql.DB{} 46 | db3 := &sql.DB{} 47 | d := &fakeDriver{openDBfn: func(msg json.RawMessage) (*sql.DB, error) { return db3, nil }} 48 | tests := []struct { 49 | existingDB *sql.DB 50 | expectedDB *sql.DB 51 | desc string 52 | dsUID string 53 | args string 54 | expectedKey string 55 | }{ 56 | { 57 | desc: "it should return the default db with no args", 58 | dsUID: "uid1", 59 | args: "", 60 | expectedKey: "uid1-default", 61 | expectedDB: db, 62 | }, 63 | { 64 | desc: "it should return the cached connection for the given args", 65 | dsUID: "uid1", 66 | args: "foo", 67 | expectedKey: "uid1-2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", 68 | existingDB: db2, 69 | expectedDB: db2, 70 | }, 71 | { 72 | desc: "it should create a new connection with the given args", 73 | dsUID: "uid1", 74 | args: "foo", 75 | expectedKey: "uid1-2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", 76 | expectedDB: db3, 77 | }, 78 | } 79 | 
for _, tt := range tests { 80 | t.Run(tt.desc, func(t *testing.T) { 81 | conn := &Connector{UID: tt.dsUID, driver: d, enableMultipleConnections: true, driverSettings: DriverSettings{}} 82 | settings := backend.DataSourceInstanceSettings{UID: tt.dsUID} 83 | key := defaultKey(tt.dsUID) 84 | // Add the mandatory default db 85 | conn.storeDBConnection(key, dbConnection{db, settings}) 86 | if tt.existingDB != nil { 87 | key = keyWithConnectionArgs(tt.dsUID, []byte(tt.args)) 88 | conn.storeDBConnection(key, dbConnection{tt.existingDB, settings}) 89 | } 90 | 91 | key, dbConn, err := conn.GetConnectionFromQuery(context.Background(), &Query{ConnectionArgs: json.RawMessage(tt.args)}) 92 | if err != nil { 93 | t.Fatalf("unexpected error %v", err) 94 | } 95 | if key != tt.expectedKey { 96 | t.Fatalf("unexpected cache key %s", key) 97 | } 98 | if dbConn.db != tt.expectedDB { 99 | t.Fatalf("unexpected result %v", dbConn.db) 100 | } 101 | }) 102 | } 103 | 104 | t.Run("it should return an error if connection args are used without enabling multiple connections", func(t *testing.T) { 105 | conn := &Connector{driver: d, enableMultipleConnections: false} 106 | _, _, err := conn.GetConnectionFromQuery(context.Background(), &Query{ConnectionArgs: json.RawMessage("foo")}) 107 | if err == nil || !errors.Is(err, MissingMultipleConnectionsConfig) { 108 | t.Errorf("expecting error: %v", MissingMultipleConnectionsConfig) 109 | } 110 | }) 111 | 112 | t.Run("it should return an error if the default connection is missing", func(t *testing.T) { 113 | conn := &Connector{driver: d} 114 | _, _, err := conn.GetConnectionFromQuery(context.Background(), &Query{}) 115 | if err == nil || !errors.Is(err, MissingDBConnection) { 116 | t.Errorf("expecting error: %v", MissingDBConnection) 117 | } 118 | }) 119 | } 120 | 121 | func Test_Dispose(t *testing.T) { 122 | t.Run("it should close connections", func(t *testing.T) { 123 | db := sql.OpenDB(fakeSQLConnector{}) 124 | d := &fakeDriver{openDBfn: func(msg json.RawMessage) (*sql.DB, error) { return db, nil }} 125 | conn := &Connector{driver: d} 126 | ds := &SQLDatasource{connector: conn} 127 | conn.connections.Store(defaultKey("uid1"), dbConnection{db: db}) 128 | conn.connections.Store("foo", dbConnection{db: db}) 129 | ds.Dispose() 130 | count := 0 131 | conn.connections.Range(func(key, value interface{}) bool { 132 | count++ 133 | return true 134 | }) 135 | if count != 0 { 136 | t.Errorf("did not close all connections") 137 | } 138 | }) 139 | } 140 | -------------------------------------------------------------------------------- /data-mock.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 4 | "errors" 5 | "io/fs" 6 | "os" 7 | "path/filepath" 8 | 9 | "github.com/grafana/grafana-plugin-sdk-go/backend" 10 | ) 11 | 12 | // we could add more "tables" later 13 | var mockTables = map[string]string{ 14 | "users": users, 15 | } 16 | 17 | // MockDataFolder is the default folder that will contain data files 18 | const MockDataFolder = "/mock-data" 19 | 20 | // Create will create a "table" (csv file) in the data folder that can be queried with SQL 21 | func CreateMockTable(table string, folder string) error { 22 | return CreateMockData(table, folder, mockTables[table]) 23 | } 24 | 25 | // CreateData will create a "table" (csv file) in the data folder that can be queried with SQL 26 | func CreateMockData(table string, folder string, csvData string) error { 27 | if folder == "" { 28 | folder = MockDataFolder 29 | } 30 | ex, err 
:= os.Executable() 31 | if err != nil { 32 | backend.Logger.Error("failed getting to Hana Mock path: " + err.Error()) 33 | return err 34 | } 35 | exPath := filepath.Dir(ex) 36 | if _, err := os.Stat(exPath + folder); errors.Is(err, fs.ErrNotExist) { 37 | if err := os.Mkdir(exPath+folder, 0700); err != nil { 38 | backend.Logger.Error("failed creating mock folder: " + err.Error()) 39 | return err 40 | } 41 | } 42 | tablePath := exPath + folder + "/" + table 43 | _, err = os.Stat(tablePath) 44 | if err != nil { 45 | if errors.Is(err, fs.ErrNotExist) { 46 | if err := os.WriteFile(tablePath, []byte(csvData), 0700); err != nil { 47 | backend.Logger.Error("failed writing mock data: " + err.Error()) 48 | return err 49 | } 50 | return nil 51 | } 52 | return err 53 | } 54 | 55 | return nil 56 | } 57 | 58 | const users = `id,first_name,last_name,gender,country_code 59 | 1,Louis,Washington,Male,PS 60 | 2,Sean,Burton,Male,SE 61 | 3,Mildred,Gonzales,Female,ID 62 | 4,Kathy,Dunn,Female,PE 63 | 5,Brian,Fernandez,Male,ID 64 | 6,Aaron,Alvarez,Male,TN 65 | 7,Theresa,King,Female,PH 66 | 8,Catherine,Greene,Female,ID 67 | 9,Barbara,Sanders,Female,US 68 | 10,John,Garrett,Male,MT 69 | 11,Jerry,Tucker,Male,GT 70 | 12,James,Marshall,Male,PL 71 | 13,Cheryl,Perry,Female,CN 72 | 14,Gregory,Jones,Male,CA 73 | 15,Julie,Olson,Female,JP 74 | 16,Raymond,King,Male,AZ 75 | 17,Christina,Wagner,Female,AR 76 | 18,Evelyn,Harvey,Female,RU 77 | 19,Earl,Stewart,Male,ID 78 | 20,Jerry,Kelley,Male,RU 79 | 21,Russell,Ruiz,Male,ID 80 | 22,Rachel,Reynolds,Female,FR 81 | 23,Anne,Richards,Female,PA 82 | 24,Jimmy,Hudson,Male,GR 83 | 25,Brandon,Ward,Male,BR 84 | 26,Ruby,Stevens,Female,TH 85 | 27,Paula,Jordan,Female,ID 86 | 28,Jessica,Hayes,Female,CN 87 | 29,Kimberly,Butler,Female,AD 88 | 30,Jacqueline,Lee,Female,CN 89 | 31,Heather,Lopez,Female,ID 90 | 32,Cheryl,Burke,Female,AR 91 | 33,Sarah,Ryan,Female,CN 92 | 34,Donna,Kelly,Female,ID 93 | 35,Norma,Davis,Female,ID 94 | 36,Jack,Anderson,Male,CN 95 | 37,Albert,Gibson,Male,PH 96 | 38,Victor,Hayes,Male,SN 97 | 39,Mary,Lynch,Female,MN 98 | 40,Elizabeth,Fernandez,Female,PL 99 | 41,Brenda,Shaw,Female,GR 100 | 42,Jacqueline,Hernandez,Female,RU 101 | 43,Sarah,King,Female,PT 102 | 44,Christine,Nguyen,Female,MC 103 | 45,Johnny,Woods,Male,CN 104 | 46,Dennis,Thompson,Male,RU 105 | 47,Diana,Brooks,Female,CO 106 | 48,Wayne,Morales,Male,CR 107 | 49,Arthur,Howard,Male,PE 108 | 50,Earl,Daniels,Male,ID 109 | 51,Martin,Gonzales,Male,PL 110 | 52,Annie,Palmer,Female,PK 111 | 53,Rose,Griffin,Female,MN 112 | 54,Ruth,Garza,Female,TH 113 | 55,Gerald,Marshall,Male,CZ 114 | 56,Julie,Mills,Female,FI 115 | 57,Julia,Fowler,Female,PS 116 | 58,Bonnie,Dixon,Female,CN 117 | 59,Adam,Mendoza,Male,FR 118 | 60,Brian,Bailey,Male,HR 119 | 61,Linda,Hansen,Female,PL 120 | 62,Edward,Gordon,Male,JP 121 | 63,Pamela,Hill,Female,LV 122 | 64,Ruth,Gibson,Female,YE 123 | 65,John,Reynolds,Male,CN 124 | 66,Nancy,Berry,Female,PH 125 | 67,Kevin,Kelly,Male,PL 126 | 68,Sean,Williamson,Male,PH 127 | 69,Jeremy,Rogers,Male,CN 128 | 70,Emily,Carroll,Female,ME 129 | 71,Antonio,Torres,Male,RU 130 | 72,Willie,Barnes,Male,ID 131 | 73,Margaret,Lewis,Female,PL 132 | 74,Douglas,Dunn,Male,IL 133 | 75,Lois,Cruz,Female,LY 134 | 76,Lori,Reynolds,Female,CG 135 | 77,Debra,Ray,Female,CZ 136 | 78,Brandon,Garza,Male,MN 137 | 79,Norma,Smith,Female,MX 138 | 80,Jennifer,Murray,Female,PT 139 | 81,Howard,Chapman,Male,CN 140 | 82,Diane,Cox,Female,IE 141 | 83,Victor,Martinez,Male,ID 142 | 84,Sara,Olson,Female,CN 143 | 85,Joyce,Snyder,Female,CA 144 | 86,Bruce,Price,Male,KI 
145 | 87,Ryan,Lane,Male,IR 146 | 88,Jean,Richards,Female,RU 147 | 89,Nicholas,Carpenter,Male,CI 148 | 90,Richard,Burke,Male,AR 149 | 91,Harry,Young,Male,GT 150 | 92,Walter,Boyd,Male,PH 151 | 93,Roy,Cox,Male,BR 152 | 94,Judy,Myers,Female,CN 153 | 95,Jason,Alexander,Male,RU 154 | 96,Henry,James,Male,PT 155 | 97,George,Hawkins,Male,IR 156 | 98,Irene,Chavez,Female,FR 157 | 99,Willie,Alvarez,Male,ID 158 | 100,Evelyn,Kennedy,Female,HT` 159 | -------------------------------------------------------------------------------- /mock/csv/csv_data.go: -------------------------------------------------------------------------------- 1 | package csv 2 | 3 | import ( 4 | "errors" 5 | "io/fs" 6 | "os" 7 | "path/filepath" 8 | 9 | "github.com/grafana/grafana-plugin-sdk-go/backend" 10 | ) 11 | 12 | // we could add more "tables" later 13 | var mockTables = map[string]string{ 14 | "users": users, 15 | } 16 | 17 | // MockDataFolder is the default folder that will contain data files 18 | const MockDataFolder = "/mock-data" 19 | 20 | // Create will create a "table" (csv file) in the data folder that can be queried with SQL 21 | func CreateMockTable(table string, folder string) error { 22 | return CreateMockData(table, folder, mockTables[table]) 23 | } 24 | 25 | // CreateData will create a "table" (csv file) in the data folder that can be queried with SQL 26 | func CreateMockData(table string, folder string, csvData string) error { 27 | if folder == "" { 28 | folder = MockDataFolder 29 | } 30 | ex, err := os.Executable() 31 | if err != nil { 32 | backend.Logger.Error("failed getting to Hana Mock path: " + err.Error()) 33 | return err 34 | } 35 | exPath := filepath.Dir(ex) 36 | if _, err := os.Stat(exPath + folder); errors.Is(err, fs.ErrNotExist) { 37 | if err := os.Mkdir(exPath+folder, 0700); err != nil { 38 | backend.Logger.Error("failed creating mock folder: " + err.Error()) 39 | return err 40 | } 41 | } 42 | tablePath := exPath + folder + "/" + table 43 | _, err = os.Stat(tablePath) 44 | if err != nil { 45 | if errors.Is(err, fs.ErrNotExist) { 46 | if err := os.WriteFile(tablePath, []byte(csvData), 0700); err != nil { 47 | backend.Logger.Error("failed writing mock data: " + err.Error()) 48 | return err 49 | } 50 | return nil 51 | } 52 | return err 53 | } 54 | 55 | return nil 56 | } 57 | 58 | const users = `id,first_name,last_name,gender,country_code 59 | 1,Louis,Washington,Male,PS 60 | 2,Sean,Burton,Male,SE 61 | 3,Mildred,Gonzales,Female,ID 62 | 4,Kathy,Dunn,Female,PE 63 | 5,Brian,Fernandez,Male,ID 64 | 6,Aaron,Alvarez,Male,TN 65 | 7,Theresa,King,Female,PH 66 | 8,Catherine,Greene,Female,ID 67 | 9,Barbara,Sanders,Female,US 68 | 10,John,Garrett,Male,MT 69 | 11,Jerry,Tucker,Male,GT 70 | 12,James,Marshall,Male,PL 71 | 13,Cheryl,Perry,Female,CN 72 | 14,Gregory,Jones,Male,CA 73 | 15,Julie,Olson,Female,JP 74 | 16,Raymond,King,Male,AZ 75 | 17,Christina,Wagner,Female,AR 76 | 18,Evelyn,Harvey,Female,RU 77 | 19,Earl,Stewart,Male,ID 78 | 20,Jerry,Kelley,Male,RU 79 | 21,Russell,Ruiz,Male,ID 80 | 22,Rachel,Reynolds,Female,FR 81 | 23,Anne,Richards,Female,PA 82 | 24,Jimmy,Hudson,Male,GR 83 | 25,Brandon,Ward,Male,BR 84 | 26,Ruby,Stevens,Female,TH 85 | 27,Paula,Jordan,Female,ID 86 | 28,Jessica,Hayes,Female,CN 87 | 29,Kimberly,Butler,Female,AD 88 | 30,Jacqueline,Lee,Female,CN 89 | 31,Heather,Lopez,Female,ID 90 | 32,Cheryl,Burke,Female,AR 91 | 33,Sarah,Ryan,Female,CN 92 | 34,Donna,Kelly,Female,ID 93 | 35,Norma,Davis,Female,ID 94 | 36,Jack,Anderson,Male,CN 95 | 37,Albert,Gibson,Male,PH 96 | 38,Victor,Hayes,Male,SN 97 | 
39,Mary,Lynch,Female,MN 98 | 40,Elizabeth,Fernandez,Female,PL 99 | 41,Brenda,Shaw,Female,GR 100 | 42,Jacqueline,Hernandez,Female,RU 101 | 43,Sarah,King,Female,PT 102 | 44,Christine,Nguyen,Female,MC 103 | 45,Johnny,Woods,Male,CN 104 | 46,Dennis,Thompson,Male,RU 105 | 47,Diana,Brooks,Female,CO 106 | 48,Wayne,Morales,Male,CR 107 | 49,Arthur,Howard,Male,PE 108 | 50,Earl,Daniels,Male,ID 109 | 51,Martin,Gonzales,Male,PL 110 | 52,Annie,Palmer,Female,PK 111 | 53,Rose,Griffin,Female,MN 112 | 54,Ruth,Garza,Female,TH 113 | 55,Gerald,Marshall,Male,CZ 114 | 56,Julie,Mills,Female,FI 115 | 57,Julia,Fowler,Female,PS 116 | 58,Bonnie,Dixon,Female,CN 117 | 59,Adam,Mendoza,Male,FR 118 | 60,Brian,Bailey,Male,HR 119 | 61,Linda,Hansen,Female,PL 120 | 62,Edward,Gordon,Male,JP 121 | 63,Pamela,Hill,Female,LV 122 | 64,Ruth,Gibson,Female,YE 123 | 65,John,Reynolds,Male,CN 124 | 66,Nancy,Berry,Female,PH 125 | 67,Kevin,Kelly,Male,PL 126 | 68,Sean,Williamson,Male,PH 127 | 69,Jeremy,Rogers,Male,CN 128 | 70,Emily,Carroll,Female,ME 129 | 71,Antonio,Torres,Male,RU 130 | 72,Willie,Barnes,Male,ID 131 | 73,Margaret,Lewis,Female,PL 132 | 74,Douglas,Dunn,Male,IL 133 | 75,Lois,Cruz,Female,LY 134 | 76,Lori,Reynolds,Female,CG 135 | 77,Debra,Ray,Female,CZ 136 | 78,Brandon,Garza,Male,MN 137 | 79,Norma,Smith,Female,MX 138 | 80,Jennifer,Murray,Female,PT 139 | 81,Howard,Chapman,Male,CN 140 | 82,Diane,Cox,Female,IE 141 | 83,Victor,Martinez,Male,ID 142 | 84,Sara,Olson,Female,CN 143 | 85,Joyce,Snyder,Female,CA 144 | 86,Bruce,Price,Male,KI 145 | 87,Ryan,Lane,Male,IR 146 | 88,Jean,Richards,Female,RU 147 | 89,Nicholas,Carpenter,Male,CI 148 | 90,Richard,Burke,Male,AR 149 | 91,Harry,Young,Male,GT 150 | 92,Walter,Boyd,Male,PH 151 | 93,Roy,Cox,Male,BR 152 | 94,Judy,Myers,Female,CN 153 | 95,Jason,Alexander,Male,RU 154 | 96,Henry,James,Male,PT 155 | 97,George,Hawkins,Male,IR 156 | 98,Irene,Chavez,Female,FR 157 | 99,Willie,Alvarez,Male,ID 158 | 100,Evelyn,Kennedy,Female,HT` 159 | -------------------------------------------------------------------------------- /dataframe_test.go: -------------------------------------------------------------------------------- 1 | package sqlds_test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | "github.com/grafana/grafana-plugin-sdk-go/backend" 10 | "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" 11 | "github.com/grafana/sqlds/v5" 12 | "github.com/grafana/sqlds/v5/test" 13 | "github.com/stretchr/testify/require" 14 | ) 15 | 16 | // we test how no-rows sql responses are converted to dataframes 17 | func TestNoRowsFrame(t *testing.T) { 18 | 19 | tts := []struct { 20 | name string 21 | data test.Data 22 | format sqlutil.FormatQueryOption 23 | expectedFieldCount int 24 | }{ 25 | { 26 | name: "empty table", 27 | format: sqlutil.FormatOptionTable, 28 | data: test.Data{ 29 | Cols: []test.Column{ 30 | { 31 | Name: "name", 32 | DataType: "TEXT", 33 | Kind: "", 34 | }, 35 | { 36 | Name: "age", 37 | DataType: "INTEGER", 38 | Kind: int64(0), 39 | }, 40 | }, 41 | Rows: [][]any{}, 42 | }, 43 | expectedFieldCount: 2, 44 | }, 45 | { 46 | name: "empty wide", 47 | format: sqlutil.FormatOptionTimeSeries, 48 | data: test.Data{ 49 | Cols: []test.Column{ 50 | { 51 | Name: "time", 52 | DataType: "TIMESTAMP", 53 | Kind: time.Unix(0, 0), 54 | }, 55 | { 56 | Name: "v1", 57 | DataType: "FLOAT", 58 | Kind: float64(0), 59 | }, 60 | { 61 | Name: "v2", 62 | DataType: "FLOAT", 63 | Kind: float64(0), 64 | }, 65 | }, 66 | Rows: [][]any{}, 67 | }, 68 | expectedFieldCount: 0, 69 | }, 70 | { 71 | name: "empty long", 72 
| format: sqlutil.FormatOptionTimeSeries, 73 | data: test.Data{ 74 | Cols: []test.Column{ 75 | { 76 | Name: "time", 77 | DataType: "TIMESTAMP", 78 | Kind: time.Unix(0, 0), 79 | }, 80 | { 81 | Name: "tag", 82 | DataType: "TEXT", 83 | Kind: "", 84 | }, 85 | { 86 | Name: "value", 87 | DataType: "FLOAT", 88 | Kind: float64(0), 89 | }, 90 | }, 91 | Rows: [][]any{}, 92 | }, 93 | expectedFieldCount: 0, 94 | }, 95 | { 96 | name: "empty multi", 97 | format: sqlutil.FormatOptionMulti, 98 | data: test.Data{ 99 | Cols: []test.Column{ 100 | { 101 | Name: "time", 102 | DataType: "TIMESTAMP", 103 | Kind: time.Unix(0, 0), 104 | }, 105 | { 106 | Name: "tag", 107 | DataType: "TEXT", 108 | Kind: "", 109 | }, 110 | { 111 | Name: "value", 112 | DataType: "FLOAT", 113 | Kind: float64(0), 114 | }, 115 | }, 116 | Rows: [][]any{}, 117 | }, 118 | expectedFieldCount: 0, 119 | }, 120 | { 121 | name: "logs", 122 | format: sqlutil.FormatOptionLogs, 123 | data: test.Data{ 124 | Cols: []test.Column{ 125 | { 126 | Name: "time", 127 | DataType: "TIMESTAMP", 128 | Kind: time.Unix(0, 0), 129 | }, 130 | { 131 | Name: "text", 132 | DataType: "TEXT", 133 | Kind: "", 134 | }, 135 | }, 136 | Rows: [][]any{}, 137 | }, 138 | expectedFieldCount: 2, 139 | }, 140 | { 141 | name: "trace", 142 | format: sqlutil.FormatOptionLogs, 143 | data: test.Data{ 144 | Cols: []test.Column{ 145 | { 146 | Name: "time", 147 | DataType: "TIMESTAMP", 148 | Kind: time.Unix(0, 0), 149 | }, 150 | // FIXME: i do not know what kind of data is in trace-frames 151 | }, 152 | Rows: [][]any{}, 153 | }, 154 | expectedFieldCount: 1, 155 | }, 156 | } 157 | 158 | for _, tt := range tts { 159 | t.Run(tt.name, func(t *testing.T) { 160 | id := "empty-frames" + tt.name 161 | driver, _ := test.NewDriver(id, tt.data, nil, test.DriverOpts{}, nil) 162 | ds := sqlds.NewDatasource(driver) 163 | 164 | settings := backend.DataSourceInstanceSettings{UID: id, JSONData: []byte("{}")} 165 | _, err := ds.NewDatasource(context.Background(), settings) 166 | 167 | require.NoError(t, err) 168 | 169 | req := backend.QueryDataRequest{ 170 | PluginContext: backend.PluginContext{ 171 | DataSourceInstanceSettings: &settings, 172 | }, 173 | Queries: []backend.DataQuery{ 174 | { 175 | RefID: "A", 176 | JSON: []byte(fmt.Sprintf(`{ "rawSql": "SELECT 42", "format": %d }`, tt.format)), 177 | }, 178 | }, 179 | } 180 | 181 | r, err := ds.QueryData(context.Background(), &req) 182 | require.NoError(t, err) 183 | d := r.Responses["A"] 184 | require.NotNil(t, d) 185 | require.NoError(t, d.Error) 186 | require.Len(t, d.Frames, 1) 187 | require.Len(t, d.Frames[0].Fields, tt.expectedFieldCount) 188 | 189 | }) 190 | } 191 | } 192 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/grafana/sqlds/v5 2 | 3 | go 1.24.6 4 | 5 | require ( 6 | github.com/go-sql-driver/mysql v1.9.3 7 | github.com/google/go-cmp v0.7.0 8 | github.com/grafana/dataplane/sdata v0.0.9 9 | github.com/grafana/grafana-plugin-sdk-go v0.284.0 10 | github.com/mithrandie/csvq-driver v1.7.0 11 | github.com/prometheus/client_golang v1.23.2 12 | github.com/stretchr/testify v1.11.1 13 | ) 14 | 15 | require ( 16 | github.com/go-logr/logr v1.4.3 // indirect 17 | github.com/go-logr/stdr v1.2.2 // indirect 18 | github.com/goccy/go-json v0.10.5 // indirect 19 | github.com/gogo/protobuf v1.3.2 // indirect 20 | github.com/google/uuid v1.6.0 // indirect 21 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // 
indirect 22 | github.com/klauspost/cpuid/v2 v2.3.0 // indirect 23 | github.com/zeebo/xxh3 v1.0.2 // indirect 24 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect 25 | go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 // indirect 26 | go.opentelemetry.io/contrib/propagators/jaeger v1.38.0 // indirect 27 | go.opentelemetry.io/contrib/samplers/jaegerremote v0.32.0 // indirect 28 | go.opentelemetry.io/otel v1.38.0 // indirect 29 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect 30 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect 31 | go.opentelemetry.io/otel/metric v1.38.0 // indirect 32 | go.opentelemetry.io/otel/sdk v1.38.0 // indirect 33 | go.opentelemetry.io/otel/trace v1.38.0 // indirect 34 | go.opentelemetry.io/proto/otlp v1.7.1 // indirect 35 | golang.org/x/mod v0.29.0 // indirect 36 | golang.org/x/tools v0.38.0 // indirect 37 | google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect 38 | google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 // indirect 39 | ) 40 | 41 | require ( 42 | filippo.io/edwards25519 v1.1.0 // indirect 43 | github.com/apache/arrow-go/v18 v18.4.1 // indirect 44 | github.com/beorn7/perks v1.0.1 // indirect 45 | github.com/cenkalti/backoff/v5 v5.0.3 // indirect 46 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 47 | github.com/cheekybits/genny v1.0.0 // indirect 48 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 49 | github.com/fatih/color v1.15.0 // indirect 50 | github.com/gogo/googleapis v1.4.1 // indirect 51 | github.com/golang/protobuf v1.5.4 // indirect 52 | github.com/google/flatbuffers v25.2.10+incompatible // indirect 53 | github.com/grafana/otel-profiling-go v0.5.1 // indirect 54 | github.com/grafana/pyroscope-go/godeltaprof v0.1.9 // indirect 55 | github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 // indirect 56 | github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 // indirect 57 | github.com/hashicorp/go-hclog v1.6.3 // indirect 58 | github.com/hashicorp/go-plugin v1.7.0 // indirect 59 | github.com/hashicorp/yamux v0.1.2 // indirect 60 | github.com/jaegertracing/jaeger-idl v0.5.0 // indirect 61 | github.com/json-iterator/go v1.1.12 // indirect 62 | github.com/jszwedko/go-datemath v0.1.1-0.20230526204004-640a500621d6 // indirect 63 | github.com/klauspost/compress v1.18.0 // indirect 64 | github.com/mattetti/filebuffer v1.0.1 // indirect 65 | github.com/mattn/go-colorable v0.1.13 // indirect 66 | github.com/mattn/go-isatty v0.0.20 // indirect 67 | github.com/mattn/go-runewidth v0.0.16 // indirect 68 | github.com/mitchellh/go-homedir v1.1.0 // indirect 69 | github.com/mithrandie/csvq v1.18.1 // indirect 70 | github.com/mithrandie/go-file/v2 v2.1.0 // indirect 71 | github.com/mithrandie/go-text v1.6.0 // indirect 72 | github.com/mithrandie/ternary v1.1.1 // indirect 73 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 74 | github.com/modern-go/reflect2 v1.0.2 // indirect 75 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 76 | github.com/oklog/run v1.1.0 // indirect 77 | github.com/olekukonko/tablewriter v0.0.5 // indirect 78 | github.com/patrickmn/go-cache v2.1.0+incompatible // indirect 79 | github.com/pierrec/lz4/v4 v4.1.22 // indirect 80 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 81 | 
github.com/prometheus/client_model v0.6.2 // indirect 82 | github.com/prometheus/common v0.67.2 // indirect 83 | github.com/prometheus/procfs v0.16.1 // indirect 84 | github.com/rivo/uniseg v0.4.7 // indirect 85 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect 86 | go.yaml.in/yaml/v2 v2.4.3 // indirect 87 | golang.org/x/crypto v0.45.0 // indirect 88 | golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 // indirect 89 | golang.org/x/net v0.47.0 // indirect 90 | golang.org/x/sync v0.18.0 // indirect 91 | golang.org/x/sys v0.38.0 // indirect 92 | golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 // indirect 93 | golang.org/x/term v0.37.0 // indirect 94 | golang.org/x/text v0.31.0 // indirect 95 | golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect 96 | google.golang.org/grpc v1.76.0 // indirect 97 | google.golang.org/protobuf v1.36.10 // indirect 98 | gopkg.in/yaml.v3 v3.0.1 // indirect 99 | ) 100 | -------------------------------------------------------------------------------- /connector.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "fmt" 7 | "net/http" 8 | "strings" 9 | "sync" 10 | "time" 11 | 12 | "github.com/grafana/grafana-plugin-sdk-go/backend" 13 | ) 14 | 15 | type Connector struct { 16 | UID string 17 | connections sync.Map 18 | driver Driver 19 | driverSettings DriverSettings 20 | // Enabling multiple connections may cause that concurrent connection limits 21 | // are hit. The datasource enabling this should make sure connections are cached 22 | // if necessary. 23 | enableMultipleConnections bool 24 | } 25 | 26 | func NewConnector(ctx context.Context, driver Driver, settings backend.DataSourceInstanceSettings, enableMultipleConnections bool) (*Connector, error) { 27 | ds := driver.Settings(ctx, settings) 28 | db, err := driver.Connect(ctx, settings, nil) 29 | if err != nil { 30 | return nil, backend.DownstreamError(err) 31 | } 32 | 33 | conn := &Connector{ 34 | UID: settings.UID, 35 | driver: driver, 36 | driverSettings: ds, 37 | enableMultipleConnections: enableMultipleConnections, 38 | } 39 | key := defaultKey(settings.UID) 40 | conn.storeDBConnection(key, dbConnection{db, settings}) 41 | return conn, nil 42 | } 43 | 44 | func (c *Connector) Connect(ctx context.Context, headers http.Header) (*dbConnection, error) { 45 | key := defaultKey(c.UID) 46 | dbConn, ok := c.getDBConnection(key) 47 | if !ok { 48 | return nil, ErrorMissingDBConnection 49 | } 50 | 51 | if c.driverSettings.Retries == 0 { 52 | err := c.connect(ctx, dbConn) 53 | return nil, err 54 | } 55 | 56 | err := c.connectWithRetries(ctx, dbConn, key, headers) 57 | return &dbConn, err 58 | } 59 | 60 | func (c *Connector) connectWithRetries(ctx context.Context, conn dbConnection, key string, headers http.Header) error { 61 | q := &Query{} 62 | if c.driverSettings.ForwardHeaders { 63 | applyHeaders(q, headers) 64 | } 65 | 66 | var db *sql.DB 67 | var err error 68 | for i := 0; i < c.driverSettings.Retries; i++ { 69 | db, err = c.Reconnect(ctx, conn, q, key) 70 | if err != nil { 71 | return err 72 | } 73 | conn := dbConnection{ 74 | db: db, 75 | settings: conn.settings, 76 | } 77 | err = c.connect(ctx, conn) 78 | if err == nil { 79 | break 80 | } 81 | 82 | if !shouldRetry(c.driverSettings.RetryOn, err.Error()) { 83 | break 84 | } 85 | 86 | if i+1 == c.driverSettings.Retries { 87 | break 88 | } 89 | 90 | if c.driverSettings.Pause > 0 { 91 | time.Sleep(time.Duration(c.driverSettings.Pause * 
int(time.Second))) 92 | } 93 | backend.Logger.Warn(fmt.Sprintf("connect failed: %s. Retrying %d times", err.Error(), i+1)) 94 | } 95 | 96 | return err 97 | } 98 | 99 | func (c *Connector) connect(ctx context.Context, conn dbConnection) error { 100 | if err := c.ping(ctx, conn); err != nil { 101 | return backend.DownstreamError(err) 102 | } 103 | 104 | return nil 105 | } 106 | 107 | func (c *Connector) ping(ctx context.Context, conn dbConnection) error { 108 | if c.driverSettings.Timeout == 0 { 109 | return conn.db.PingContext(ctx) 110 | } 111 | 112 | ctx, cancel := context.WithTimeout(ctx, c.driverSettings.Timeout) 113 | defer cancel() 114 | 115 | return conn.db.PingContext(ctx) 116 | } 117 | 118 | func (c *Connector) Reconnect(ctx context.Context, dbConn dbConnection, q *Query, cacheKey string) (*sql.DB, error) { 119 | if err := dbConn.db.Close(); err != nil { 120 | backend.Logger.Warn(fmt.Sprintf("closing existing connection failed: %s", err.Error())) 121 | } 122 | 123 | db, err := c.driver.Connect(ctx, dbConn.settings, q.ConnectionArgs) 124 | if err != nil { 125 | return nil, backend.DownstreamError(err) 126 | } 127 | c.storeDBConnection(cacheKey, dbConnection{db, dbConn.settings}) 128 | return db, nil 129 | } 130 | 131 | func (ds *Connector) getDBConnection(key string) (dbConnection, bool) { 132 | conn, ok := ds.connections.Load(key) 133 | if !ok { 134 | return dbConnection{}, false 135 | } 136 | return conn.(dbConnection), true 137 | } 138 | 139 | func (ds *Connector) storeDBConnection(key string, dbConn dbConnection) { 140 | ds.connections.Store(key, dbConn) 141 | } 142 | 143 | // Dispose is called when an existing SQLDatasource needs to be replaced 144 | func (c *Connector) Dispose() { 145 | c.connections.Range(func(_, conn interface{}) bool { 146 | _ = conn.(dbConnection).db.Close() 147 | return true 148 | }) 149 | c.connections.Clear() 150 | } 151 | 152 | func (c *Connector) GetConnectionFromQuery(ctx context.Context, q *Query) (string, dbConnection, error) { 153 | if !c.enableMultipleConnections && !c.driverSettings.ForwardHeaders && len(q.ConnectionArgs) > 0 { 154 | return "", dbConnection{}, MissingMultipleConnectionsConfig 155 | } 156 | // The database connection may vary depending on query arguments 157 | // The raw arguments are used as key to store the db connection in memory so they can be reused 158 | key := defaultKey(c.UID) 159 | dbConn, ok := c.getDBConnection(key) 160 | if !ok { 161 | return "", dbConnection{}, MissingDBConnection 162 | } 163 | if !c.enableMultipleConnections || len(q.ConnectionArgs) == 0 { 164 | backend.Logger.Debug("using single user connection") 165 | return key, dbConn, nil 166 | } 167 | 168 | key = keyWithConnectionArgs(c.UID, q.ConnectionArgs) 169 | if cachedConn, ok := c.getDBConnection(key); ok { 170 | backend.Logger.Debug("cached connection") 171 | return key, cachedConn, nil 172 | } 173 | 174 | db, err := c.driver.Connect(ctx, dbConn.settings, q.ConnectionArgs) 175 | if err != nil { 176 | backend.Logger.Debug("connect error " + err.Error()) 177 | return "", dbConnection{}, backend.DownstreamError(err) 178 | } 179 | backend.Logger.Debug("new connection(multiple) created") 180 | // Assign this connection in the cache 181 | dbConn = dbConnection{db, dbConn.settings} 182 | c.storeDBConnection(key, dbConn) 183 | 184 | return key, dbConn, nil 185 | } 186 | 187 | func shouldRetry(retryOn []string, err string) bool { 188 | for _, r := range retryOn { 189 | if strings.Contains(err, r) { 190 | return true 191 | } 192 | } 193 | return false 194 | } 195 
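A minimal sketch of how a driver opts into the retry behaviour implemented above, via the DriverSettings it returns from Settings. The myDriver type and the literal values are illustrative assumptions, not code from this repository:

	package mydatasource

	import (
		"context"
		"time"

		"github.com/grafana/grafana-plugin-sdk-go/backend"
		"github.com/grafana/sqlds/v5"
	)

	// myDriver is a hypothetical driver; embedding sqlds.Driver stands in for the
	// remaining interface methods (Connect, Macros, Converters).
	type myDriver struct {
		sqlds.Driver
	}

	func (d *myDriver) Settings(_ context.Context, _ backend.DataSourceInstanceSettings) sqlds.DriverSettings {
		return sqlds.DriverSettings{
			Retries: 3,                            // connectWithRetries pings up to three times
			Pause:   2,                            // seconds slept between failed attempts
			RetryOn: []string{"connection reset"}, // shouldRetry only retries errors containing one of these substrings
			Timeout: 10 * time.Second,             // per-attempt deadline applied by ping()
		}
	}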
| -------------------------------------------------------------------------------- /test/driver.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "database/sql/driver" 7 | "encoding/json" 8 | "fmt" 9 | "io" 10 | "reflect" 11 | "time" 12 | 13 | "github.com/grafana/grafana-plugin-sdk-go/backend" 14 | "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" 15 | "github.com/grafana/sqlds/v5" 16 | "github.com/grafana/sqlds/v5/mock" 17 | ) 18 | 19 | var registered = map[string]*SqlHandler{} 20 | 21 | // NewDriver creates and registers a new test datasource driver 22 | func NewDriver(name string, dbdata Data, converters []sqlutil.Converter, opts DriverOpts, macros sqlds.Macros) (TestDS, *SqlHandler) { 23 | if registered[name] == nil { 24 | handler := NewDriverHandler(dbdata, opts) 25 | registered[name] = &handler 26 | mock.RegisterDriver(name, &handler) 27 | } 28 | 29 | return NewTestDS( 30 | func(msg json.RawMessage) (*sql.DB, error) { 31 | if opts.OnConnect != nil { 32 | opts.OnConnect(msg) 33 | } 34 | return sql.Open(name, "") 35 | }, 36 | converters, 37 | macros, 38 | ), registered[name] 39 | } 40 | 41 | // NewTestDS creates a new test datasource driver 42 | func NewTestDS(openDBfn func(msg json.RawMessage) (*sql.DB, error), converters []sqlutil.Converter, macros sqlds.Macros) TestDS { 43 | return TestDS{ 44 | openDBfn: openDBfn, 45 | converters: converters, 46 | macros: macros, 47 | } 48 | } 49 | 50 | // NewDriverHandler creates a new driver handler 51 | func NewDriverHandler(data Data, opts DriverOpts) SqlHandler { 52 | return SqlHandler{ 53 | Data: data, 54 | Opts: opts, 55 | } 56 | } 57 | 58 | // SqlHandler handles driver functions 59 | type SqlHandler struct { 60 | mock.DBHandler 61 | Data Data 62 | Opts DriverOpts 63 | State State 64 | row int 65 | } 66 | 67 | // Ping represents a database ping 68 | func (s *SqlHandler) Ping(ctx context.Context) error { 69 | s.State.ConnectAttempts += 1 70 | if s.Opts.ConnectDelay > 0 { 71 | time.Sleep(time.Duration(s.Opts.ConnectDelay * int(time.Second))) // simulate a connection delay 72 | } 73 | if s.Opts.ConnectError != nil && (s.Opts.ConnectFailTimes == 0 || s.State.ConnectAttempts <= s.Opts.ConnectFailTimes) { 74 | return s.Opts.ConnectError 75 | } 76 | return nil 77 | } 78 | 79 | // Query represents a database query 80 | func (s *SqlHandler) Query(args []driver.Value) (driver.Rows, error) { 81 | s.State.QueryAttempts += 1 82 | if s.Opts.QueryDelay > 0 { 83 | time.Sleep(time.Duration(s.Opts.QueryDelay * int(time.Second))) // simulate a query delay 84 | } 85 | s.row = 0 86 | // only show the error if we have not exceeded the fail times and the error is not nil 87 | if s.Opts.QueryError != nil && (s.Opts.QueryFailTimes == 0 || s.State.QueryAttempts <= s.Opts.QueryFailTimes) { 88 | return s, s.Opts.QueryError 89 | } 90 | 91 | return s, nil 92 | } 93 | 94 | // Columns represents columns from a query 95 | func (s *SqlHandler) Columns() []string { 96 | var cols []string 97 | for _, c := range s.Data.Cols { 98 | cols = append(cols, c.Name) 99 | } 100 | return cols 101 | } 102 | 103 | // Next iterates over rows 104 | func (s *SqlHandler) Next(dest []driver.Value) error { 105 | if s.row+1 > len(s.Data.Rows) { 106 | return io.EOF 107 | } 108 | 109 | row := s.Data.Rows[s.row] 110 | s.row++ 111 | 112 | for i, col := range row { 113 | dest[i] = col 114 | } 115 | return nil 116 | } 117 | 118 | // Close implements the database Close interface 119 | func (s 
SqlHandler) Close() error { 120 | return nil 121 | } 122 | 123 | // ColumnTypeScanType returns the scan type for the column 124 | func (s SqlHandler) ColumnTypeScanType(index int) reflect.Type { 125 | kind := s.Data.Cols[index].Kind 126 | return reflect.TypeOf(kind) 127 | } 128 | 129 | // ColumnTypeDatabaseTypeName returns the database type for the column 130 | func (s SqlHandler) ColumnTypeDatabaseTypeName(index int) string { 131 | return s.Data.Cols[index].DataType 132 | } 133 | 134 | // Data - the columns/rows 135 | type Data struct { 136 | Cols []Column 137 | Rows [][]any 138 | } 139 | 140 | // Column - the column meta 141 | type Column struct { 142 | Name string 143 | Kind any 144 | DataType string 145 | } 146 | 147 | // TestDS ... 148 | type TestDS struct { 149 | openDBfn func(msg json.RawMessage) (*sql.DB, error) 150 | converters []sqlutil.Converter 151 | macros sqlds.Macros 152 | sqlds.Driver 153 | } 154 | 155 | // Open - opens the test database 156 | func (s TestDS) Open() (*sql.DB, error) { 157 | return s.openDBfn(nil) 158 | } 159 | 160 | // Connect - connects to the test database 161 | func (s TestDS) Connect(ctx context.Context, cfg backend.DataSourceInstanceSettings, msg json.RawMessage) (*sql.DB, error) { 162 | return s.openDBfn(msg) 163 | } 164 | 165 | // Settings - Settings to the test database 166 | func (s TestDS) Settings(ctx context.Context, config backend.DataSourceInstanceSettings) sqlds.DriverSettings { 167 | settings, err := LoadSettings(ctx, config) 168 | if err != nil { 169 | fmt.Println("error loading settings") 170 | return sqlds.DriverSettings{} 171 | } 172 | return settings 173 | } 174 | 175 | // Macros - Macros for the test database 176 | func (s TestDS) Macros() sqlds.Macros { 177 | return s.macros 178 | } 179 | 180 | // Converters - Converters for the test database 181 | func (s TestDS) Converters() []sqlutil.Converter { 182 | return nil 183 | } 184 | 185 | // DriverOpts the optional settings 186 | type DriverOpts struct { 187 | ConnectDelay int 188 | ConnectError error 189 | ConnectFailTimes int 190 | OnConnect func(msg []byte) 191 | QueryDelay int 192 | QueryError error 193 | QueryFailTimes int 194 | } 195 | 196 | // State is the state of the connections/queries 197 | type State struct { 198 | QueryAttempts int 199 | ConnectAttempts int 200 | } 201 | 202 | // LoadSettings will read and validate Settings from the DataSourceConfig 203 | func LoadSettings(ctx context.Context, config backend.DataSourceInstanceSettings) (settings sqlds.DriverSettings, err error) { 204 | if err := json.Unmarshal(config.JSONData, &settings); err != nil { 205 | return settings, fmt.Errorf("%s: %s", err.Error(), "Invalid Settings") 206 | } 207 | return settings, nil 208 | } 209 | -------------------------------------------------------------------------------- /completion_test.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "errors" 8 | "fmt" 9 | "io" 10 | "net/http" 11 | "net/http/httptest" 12 | "testing" 13 | 14 | "github.com/google/go-cmp/cmp" 15 | ) 16 | 17 | func Test_handleError(t *testing.T) { 18 | t.Run("it should write an error code and a message", func(t *testing.T) { 19 | w := httptest.NewRecorder() 20 | handleError(w, fmt.Errorf("test!")) 21 | 22 | resp := w.Result() 23 | body, _ := io.ReadAll(resp.Body) 24 | 25 | if resp.StatusCode != http.StatusBadRequest { 26 | t.Errorf("expecting code %v got %v", http.StatusBadRequest, resp.StatusCode) 27 | } 28 
| if string(body) != "test!" { 29 | t.Errorf("expecting response test! got %v", string(body)) 30 | } 31 | }) 32 | } 33 | 34 | func Test_sendResourceResponse(t *testing.T) { 35 | t.Run("it should send a JSON response", func(t *testing.T) { 36 | w := httptest.NewRecorder() 37 | sendResourceResponse(w, []string{"foo", "bar"}) 38 | 39 | resp := w.Result() 40 | body, _ := io.ReadAll(resp.Body) 41 | 42 | if resp.StatusCode != http.StatusOK { 43 | t.Errorf("expecting code %v got %v", http.StatusOK, resp.StatusCode) 44 | } 45 | expectedResult := `["foo","bar"]` + "\n" 46 | if string(body) != expectedResult { 47 | t.Errorf("expecting response %v got %v", expectedResult, string(body)) 48 | } 49 | if resp.Header.Get("Content-Type") != "application/json" { 50 | t.Errorf("expecting content-type application/json got %v", resp.Header.Get("Content-Type")) 51 | } 52 | }) 53 | } 54 | 55 | type fakeCompletable struct { 56 | schemas map[string][]string 57 | tables map[string][]string 58 | columns map[string][]string 59 | err error 60 | } 61 | 62 | func (f *fakeCompletable) Schemas(ctx context.Context, options Options) ([]string, error) { 63 | return f.schemas[options["database"]], f.err 64 | } 65 | 66 | func (f *fakeCompletable) Tables(ctx context.Context, options Options) ([]string, error) { 67 | return f.tables[options["schema"]], f.err 68 | } 69 | 70 | func (f *fakeCompletable) Columns(ctx context.Context, options Options) ([]string, error) { 71 | return f.columns[options["table"]], f.err 72 | } 73 | 74 | func TestCompletable(t *testing.T) { 75 | tests := []struct { 76 | description string 77 | method string 78 | fakeImpl *fakeCompletable 79 | reqBody string 80 | expectedRes string 81 | }{ 82 | { 83 | "it should return schemas", 84 | schemas, 85 | &fakeCompletable{schemas: map[string][]string{"foobar": {"foo", "bar"}}}, 86 | `{"database":"foobar"}`, 87 | `["foo","bar"]` + "\n", 88 | }, 89 | { 90 | "it should return tables of a schema", 91 | tables, 92 | &fakeCompletable{tables: map[string][]string{"foobar": {"foo", "bar"}}}, 93 | `{"schema":"foobar"}`, 94 | `["foo","bar"]` + "\n", 95 | }, 96 | { 97 | "it should return columns of a table", 98 | columns, 99 | &fakeCompletable{columns: map[string][]string{"foobar": {"foo", "bar"}}}, 100 | `{"table":"foobar"}`, 101 | `["foo","bar"]` + "\n", 102 | }, 103 | } 104 | for _, test := range tests { 105 | t.Run(test.description, func(t *testing.T) { 106 | w := httptest.NewRecorder() 107 | 108 | sqlds := &SQLDatasource{} 109 | sqlds.Completable = test.fakeImpl 110 | 111 | b := io.NopCloser(bytes.NewReader([]byte(test.reqBody))) 112 | sqlds.getResources(test.method)(w, &http.Request{Body: b}) 113 | resp := w.Result() 114 | body, _ := io.ReadAll(resp.Body) 115 | 116 | if resp.StatusCode != http.StatusOK { 117 | t.Errorf("expecting code %v got %v", http.StatusOK, resp.StatusCode) 118 | } 119 | if string(body) != test.expectedRes { 120 | t.Errorf("expecting response %v got %v", test.expectedRes, string(body)) 121 | } 122 | if resp.Header.Get("Content-Type") != "application/json" { 123 | t.Errorf("expecting content-type application/json got %v", resp.Header.Get("Content-Type")) 124 | } 125 | }) 126 | } 127 | } 128 | 129 | func Test_registerRoutes(t *testing.T) { 130 | t.Run("it should add a new route", func(t *testing.T) { 131 | sqlds := &SQLDatasource{} 132 | sqlds.CustomRoutes = map[string]func(http.ResponseWriter, *http.Request){ 133 | "/foo": func(w http.ResponseWriter, r *http.Request) { 134 | _, err := w.Write([]byte("bar")) 135 | if err != nil { 136 |
t.Fatal((err)) 137 | } 138 | }, 139 | } 140 | 141 | mux := http.NewServeMux() 142 | err := sqlds.registerRoutes(mux) 143 | if err != nil { 144 | t.Fatalf("unexpected error %v", err) 145 | } 146 | resp := httptest.NewRecorder() 147 | req, err := http.NewRequest("GET", "/foo", nil) 148 | if err != nil { 149 | t.Fatalf("unexpected error %v", err) 150 | } 151 | mux.ServeHTTP(resp, req) 152 | 153 | respByte, err := io.ReadAll(resp.Body) 154 | if err != nil { 155 | t.Fatalf("unexpected error %v", err) 156 | } 157 | if string(respByte) != "bar" { 158 | t.Errorf("unexpected response %s", string(respByte)) 159 | } 160 | }) 161 | 162 | t.Run("it error if tried to add an existing route", func(t *testing.T) { 163 | sqlds := &SQLDatasource{} 164 | sqlds.CustomRoutes = map[string]func(http.ResponseWriter, *http.Request){ 165 | "/tables": func(w http.ResponseWriter, r *http.Request) {}, 166 | } 167 | 168 | mux := http.NewServeMux() 169 | err := sqlds.registerRoutes(mux) 170 | if err == nil || err.Error() != "unable to redefine /tables, use the Completable interface instead" { 171 | t.Errorf("unexpected error %v", err) 172 | } 173 | }) 174 | } 175 | 176 | func TestParseOptions(t *testing.T) { 177 | tests := []struct { 178 | err error 179 | result Options 180 | description string 181 | input json.RawMessage 182 | }{ 183 | { 184 | description: "parses input", 185 | input: json.RawMessage(`{"foo":"bar"}`), 186 | result: Options{"foo": "bar"}, 187 | }, 188 | { 189 | description: "returns an error", 190 | input: json.RawMessage(`not a json`), 191 | err: ErrorWrongOptions, 192 | }, 193 | } 194 | for _, tc := range tests { 195 | t.Run(tc.description, func(t *testing.T) { 196 | res, err := ParseOptions(tc.input) 197 | if (err != nil || tc.err != nil) && !errors.Is(err, tc.err) { 198 | t.Errorf("unexpected error %v", err) 199 | } 200 | if !cmp.Equal(res, tc.result) { 201 | t.Errorf("unexpected result: %v", cmp.Diff(res, tc.result)) 202 | } 203 | }) 204 | } 205 | } 206 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [5.0.2] 9 | 10 | ### Changed 11 | - Hash connectionArgs when used as cache key (#199) 12 | 13 | ## [5.0.1] 14 | 15 | ### Dependencies 16 | - Bumped github.com/grafana/grafana-plugin-sdk-go (#192) 17 | 18 | ## [5.0.0] 19 | 20 | ### Added 21 | - Added QueryErrorMutator (#190) 22 | 23 | ### Changed 24 | - **Breaking change** IsPGXConnectionError has been removed. 
Removed PGX v5/PostgreSQL-specific error detection (#190) 25 | 26 | ## [4.2.7] 27 | 28 | ### Changed 29 | - Fix error source in QueryData (#180) 30 | 31 | ## [4.2.6] 32 | 33 | ### Changed 34 | - Pass down Context to Ping method (#179) 35 | 36 | ## [4.2.5] 37 | 38 | ### Changed 39 | - Improved error handling (#177) 40 | 41 | ## [4.2.4] 42 | 43 | ### Changed 44 | - Improved panic error recovery (#176) 45 | 46 | ### Dependencies 47 | - Bumped github.com/grafana/grafana-plugin-sdk-go (#175) 48 | - Bumped github.com/go-sql-driver/mysql in the all-go-dependencies group (#174) 49 | 50 | ## [4.2.3] 51 | 52 | ### Added 53 | - Added panic recovery mechanism (#173) 54 | 55 | ### Dependencies 56 | - Bumped github.com/grafana/grafana-plugin-sdk-go (#171) 57 | - Bumped actions/setup-go in the all-github-action-dependencies group (#172) 58 | 59 | ### Infrastructure 60 | - Updated workflows (#170) 61 | 62 | ## [4.2.2] 63 | 64 | ### Dependencies 65 | - Bumped the all-go-dependencies group with 2 updates (#167) 66 | - Bumped golang.org/x/net from 0.36.0 to 0.38.0 in the go_modules group (#168) 67 | - Bumped github.com/go-sql-driver/mysql in the all-go-dependencies group (#166) 68 | - Bumped the all-go-dependencies group across 1 directory with 2 updates (#164) 69 | 70 | ## [4.2.1] 71 | 72 | ### Added 73 | - Made row limit configurable (#165) 74 | 75 | ## [4.2.0] 76 | 77 | ### Added 78 | - Enabled dataproxy.row_limit configuration option from Grafana (#162) 79 | 80 | ### Dependencies 81 | - Bumped the all-go-dependencies group with 2 updates (#160) 82 | - Bumped golang.org/x/net from 0.35.0 to 0.36.0 in the go_modules group (#161) 83 | - Bumped the all-go-dependencies group across 1 directory with 4 updates (#159) 84 | - Bumped github.com/grafana/grafana-plugin-sdk-go (#157) 85 | - Bumped github.com/grafana/grafana-plugin-sdk-go from 0.263.0 to 0.265.0 (#155) 86 | - Bumped grafana-plugin-sdk-go and removed experimental error source (#153) 87 | 88 | ## [4.1.7] 89 | 90 | ### Fixed 91 | - Fixed: return error if not nil when connecting (#151) 92 | 93 | ### Dependencies 94 | - Bumped x/net to v0.33.0 (#150) 95 | - Bumped github.com/grafana/grafana-plugin-sdk-go (#149) 96 | 97 | ## [4.1.6] 98 | 99 | ### Dependencies 100 | - Bumped golang.org/x/crypto from 0.29.0 to 0.31.0 in the go_modules group (#148) 101 | - Bumped github.com/grafana/grafana-plugin-sdk-go (#146) 102 | - Bumped the all-go-dependencies group with 2 updates (#145) 103 | - Bumped github.com/grafana/grafana-plugin-sdk-go (#144) 104 | 105 | ## [4.1.5] 106 | 107 | ### Added 108 | - Added pre and post health check methods (#147) 109 | 110 | ### Dependencies 111 | - Bumped golang.org/x/crypto from 0.29.0 to 0.31.0 in the go_modules group (#148) 112 | - Bumped github.com/grafana/grafana-plugin-sdk-go (#146) 113 | - Bumped the all-go-dependencies group with 2 updates (#145) 114 | - Bumped github.com/grafana/grafana-plugin-sdk-go (#144) 115 | 116 | ## [4.1.4] 117 | 118 | ### Fixed 119 | - Fixed error source coming from getFrames function (#142) 120 | - Fixed mistyped macros being incorrectly reported as plugin error (#141) 121 | 122 | ### Infrastructure 123 | - Migrated from Drone to GitHub Actions (#143) 124 | 125 | ## [4.1.3] 126 | 127 | ### Fixed 128 | - Fixed: mark incorrect macros arguments error as downstream (#140) 129 | - Fixed: implement InstanceDisposer to close db connections (#136) 130 | 131 | ### Dependencies 132 | - Updated dependencies and added Dependabot configuration (#138) 133 | 134 | ### Infrastructure 135 | - Bumped actions/checkout in 
the all-github-action-dependencies group (#139) 136 | 137 | ## [4.1.2] 138 | 139 | ### Security 140 | - Updated dependencies for CVE fixes (#134) 141 | 142 | ## [4.1.1] 143 | 144 | ### Changed 145 | - Updated Go version and SDK version (#130) 146 | 147 | ### Dependencies 148 | - Bumped google.golang.org/grpc from 1.64.0 to 1.64.1 (#129) 149 | 150 | ## [4.1.0] 151 | 152 | ### Added 153 | - Added support for context modification (#127) 154 | 155 | ## [4.0.0] 156 | 157 | ### Changed 158 | - **BREAKING**: Major version release v4.0.0 (#126) 159 | - Updated grafana-plugin-sdk-go to v0.233.0 (#125) 160 | 161 | ### Improved 162 | - Enhanced test coverage with more no-error checks (#123) 163 | 164 | ## [3.4.2] 165 | 166 | ### Fixed 167 | - Adjusted handling of zero-rows results (#121) 168 | 169 | ## [3.4.1] 170 | 171 | ### Added 172 | - Added SLO support: capture query and health duration with error source labeling (#122) 173 | 174 | ## [3.4.0] 175 | 176 | ### Added 177 | - Unit tests for zero-rows-returned situations (#119) 178 | - Capture duration and label with error source (#116) 179 | - Return executed query string when error occurs (#117) 180 | 181 | ### Fixed 182 | - Fixed Next() method in tests (#120) 183 | 184 | ### Documentation 185 | - Added release instructions (#115) 186 | 187 | ## [3.3.0] 188 | 189 | ### Dependencies 190 | - Updated grafana-plugin-sdk-go to v0.231.0 (#114) 191 | 192 | ## [3.2.0] 193 | 194 | ### Added 195 | - Added multi timeseries return format (#106) 196 | 197 | ### Changed 198 | - Used functions migrated to sqlutil (#107) 199 | 200 | ### Infrastructure 201 | - Added CODEOWNERS file (#104) 202 | 203 | ## [3.1.0] 204 | 205 | ### Added 206 | - Added error source support (#103) 207 | 208 | ## [3.0.0] 209 | 210 | ### Changed 211 | - **BREAKING**: Major version release v3.0.0 (#102) 212 | - Updated Go plugin SDK to 0.184.0 (#100) 213 | - Enhanced error source functionality (#99) 214 | 215 | ### Documentation 216 | - Updated installation instructions (#101) 217 | 218 | ## [2.7.2] 219 | 220 | ### Fixed 221 | - Fixed functions passed into macros with multiple arguments to parse correctly (#98) 222 | 223 | ### Added 224 | - Added capability for implementing datasources to set query args (#95) 225 | 226 | ## [2.7.1] 227 | 228 | ### Added 229 | - Added QueryFailTimes check to the test driver (#94) 230 | 231 | ## [2.7.0] 232 | 233 | ### Added 234 | - Added test driver functionality (#93) 235 | 236 | ## [2.6.0] 237 | 238 | ### Added 239 | - Added query errors handling (#92) 240 | 241 | ## [2.5.1] 242 | 243 | ### Added 244 | - Added header forwarding capability (#90) 245 | 246 | ## [2.5.0] 247 | 248 | ### Added 249 | - Added support for mutateResponse functionality (#89) 250 | 251 | ## [2.4.1] 252 | 253 | ### Fixed 254 | - Fixed map write panic (#88) 255 | 256 | ## [2.4.0] 257 | 258 | ### Added 259 | - Added QueryMutator interface for driver (#87) 260 | 261 | ### Infrastructure 262 | - Used organization ISSUE_COMMANDS_TOKEN with reduced scope (#86) 263 | 264 | ## [2.3.21] 265 | 266 | ### Added 267 | - Added format option for trace (#81) 268 | 269 | ### Fixed 270 | - Fixed macro parsing issues: 271 | - Close macro match on space if no arguments (#83) 272 | - Fixed parsing macros in more complex queries (#78) 273 | - Fixed connection leak on query retry reconnect (#79) 274 | 275 | ### Improved 276 | - Added retry on message functionality (#80) 277 | 278 | ## [2.3.0] 279 | 280 | ### Added 281 | - Updated Completable interface with custom options (#47) 282 | 283 | ## [2.2.0] 284 | 
285 | ### Added 286 | - Added default macros support (#45) 287 | 288 | ### Fixed 289 | - Fixed panic issues (#40) 290 | - Fixed integration test backoff/limit 291 | 292 | ## [2.1.0] 293 | 294 | ### Added 295 | - Added support for logs format (#38) 296 | 297 | ## [2.0.3] 298 | 299 | ### Fixed 300 | - Fixed error frame return during macro parsing errors (#37) 301 | 302 | ## [2.0.2] 303 | 304 | ### Added 305 | - Added integration tests 306 | - Added tests for query timeout 307 | - Added backoff mechanism with ticker 308 | 309 | ## [2.0.1] 310 | 311 | ### Changed 312 | - Updated import path to v2 313 | 314 | ## [2.0.0] 315 | 316 | ### Changed 317 | - **BREAKING**: Major version release v2.0.0 (#33) 318 | - Updated import path to v2 (#33) 319 | - Used QueryContext for context cancellation handling 320 | - Replaced github.com/pkg/errors with stdlib 321 | 322 | ### Added 323 | - Added query args to modify current DB (#30) 324 | - Added timeout functionality with no results fallback 325 | - Added support for any route (#29) 326 | 327 | ## [1.3.0] 328 | 329 | ### Added 330 | - Added ability to modify query FillMode (#27) 331 | 332 | ### Infrastructure 333 | - Signed Drone's configuration YAML file for repository protection 334 | 335 | ## [1.2.0] 336 | 337 | ### Added 338 | - Added schema evaluation when requesting tables (#24) 339 | - Added macros with no arguments support (#23) 340 | - Added possible macros to Query struct (#22) 341 | - Added ability to return tables, schemas and columns as resources (#21) 342 | - Used refId as frame name (#18) 343 | 344 | ## [1.0.0] 345 | 346 | ### Added 347 | - Initial stable release of sqlds 348 | - Core SQL datasource functionality 349 | - Basic macro support 350 | - Connection management 351 | 352 | ## About 353 | 354 | This changelog documents changes for the `sqlds` package, which provides a common foundation for SQL-driven datasources in Grafana. The package centralizes common SQL datasource logic to reduce code duplication across datasources like Postgres, MySQL, and MSSQL. 
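As a quick illustration of that foundation (a minimal sketch; `myDriver` stands for any type implementing the `Driver` interface, similar to the test driver in `test/driver.go`):

```go
driver := &myDriver{}             // hypothetical sqlds.Driver implementation
ds := sqlds.NewDatasource(driver) // shared SQL datasource plumbing from sqlds
ds.EnableRowLimit = true          // optional: honour Grafana's configured row limit
// ds.NewDatasource is the instance factory handed to the plugin SDK; the returned
// instance then serves QueryData, CheckHealth and the completion resource routes.
```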
355 | 356 | ### Categories 357 | 358 | - **Added**: New features 359 | - **Changed**: Changes in existing functionality 360 | - **Deprecated**: Soon-to-be removed features 361 | - **Removed**: Removed features 362 | - **Fixed**: Bug fixes 363 | - **Security**: Vulnerability fixes 364 | - **Dependencies**: Dependency updates 365 | - **Infrastructure**: CI/CD and tooling changes 366 | -------------------------------------------------------------------------------- /datasource_rowlimit_test.go: -------------------------------------------------------------------------------- 1 | package sqlds_test 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "testing" 7 | 8 | "github.com/grafana/grafana-plugin-sdk-go/backend" 9 | "github.com/grafana/sqlds/v5" 10 | "github.com/grafana/sqlds/v5/test" 11 | "github.com/stretchr/testify/assert" 12 | "github.com/stretchr/testify/require" 13 | ) 14 | 15 | type mockDriver struct { 16 | sqlds.SQLMock 17 | rowLimit int64 18 | } 19 | 20 | func (d *mockDriver) Settings(ctx context.Context, settings backend.DataSourceInstanceSettings) sqlds.DriverSettings { 21 | ds := d.SQLMock.Settings(ctx, settings) 22 | ds.RowLimit = d.rowLimit 23 | return ds 24 | } 25 | 26 | func getMockGrafanaCfg(rowLimit string) *backend.GrafanaCfg { 27 | // needs all these properties to be set to avoid errors 28 | return backend.NewGrafanaCfg(map[string]string{ 29 | "GF_SQL_ROW_LIMIT": rowLimit, 30 | "GF_SQL_MAX_OPEN_CONNS_DEFAULT": "10", 31 | "GF_SQL_MAX_IDLE_CONNS_DEFAULT": "5", 32 | "GF_SQL_MAX_CONN_LIFETIME_SECONDS_DEFAULT": "3600", 33 | }) 34 | } 35 | func TestRowLimitFromConfig(t *testing.T) { 36 | // Create a mock config using the proper API 37 | mockConfig := getMockGrafanaCfg("200") 38 | 39 | // Create context with config 40 | ctx := backend.WithGrafanaConfig(context.Background(), mockConfig) 41 | 42 | // Create datasource with row limit enabled 43 | driver := &mockDriver{} 44 | ds := sqlds.NewDatasource(driver) 45 | ds.EnableRowLimit = true 46 | 47 | // Create settings and initialize datasource 48 | settings := backend.DataSourceInstanceSettings{UID: "rowlimit-config", JSONData: []byte("{}")} 49 | instance, err := ds.NewDatasource(ctx, settings) 50 | require.NoError(t, err) 51 | 52 | // Verify row limit was set correctly from config 53 | sqlDS, ok := instance.(*sqlds.SQLDatasource) 54 | require.True(t, ok) 55 | assert.Equal(t, int64(200), sqlDS.GetRowLimit()) 56 | } 57 | 58 | func TestRowLimitFromDriverSettings(t *testing.T) { 59 | // Create datasource with driver that has row limit 60 | driver := &mockDriver{rowLimit: 300} 61 | ds := sqlds.NewDatasource(driver) 62 | ds.EnableRowLimit = true 63 | 64 | // Create settings and initialize datasource 65 | settings := backend.DataSourceInstanceSettings{UID: "rowlimit-driver", JSONData: []byte("{}")} 66 | instance, err := ds.NewDatasource(context.Background(), settings) 67 | require.NoError(t, err) 68 | 69 | // Verify driver settings row limit was used 70 | sqlDS, ok := instance.(*sqlds.SQLDatasource) 71 | require.True(t, ok) 72 | assert.Equal(t, int64(300), sqlDS.GetRowLimit()) 73 | } 74 | 75 | func TestRowLimitPrecedence(t *testing.T) { 76 | // Create a mock config using the proper API 77 | mockConfig := getMockGrafanaCfg("200") 78 | 79 | // Create context with config 80 | ctx := backend.WithGrafanaConfig(context.Background(), mockConfig) 81 | 82 | // Create datasource with driver that has row limit 83 | driver := &mockDriver{rowLimit: 300} 84 | ds := sqlds.NewDatasource(driver) 85 | ds.EnableRowLimit = true 86 | 87 | // Create settings and 
initialize datasource 88 | settings := backend.DataSourceInstanceSettings{UID: "rowlimit-precedence", JSONData: []byte("{}")} 89 | instance, err := ds.NewDatasource(ctx, settings) 90 | require.NoError(t, err) 91 | 92 | // Verify driver settings take precedence over config 93 | sqlDS, ok := instance.(*sqlds.SQLDatasource) 94 | require.True(t, ok) 95 | assert.Equal(t, int64(300), sqlDS.GetRowLimit()) 96 | } 97 | 98 | func TestRowLimitDisabled(t *testing.T) { 99 | // Create a mock config using the proper API 100 | mockConfig := getMockGrafanaCfg("200") 101 | // Create context with config 102 | ctx := backend.WithGrafanaConfig(context.Background(), mockConfig) 103 | 104 | // Create datasource with row limit disabled 105 | driver := &mockDriver{} 106 | ds := sqlds.NewDatasource(driver) 107 | ds.EnableRowLimit = false 108 | 109 | // Create settings and initialize datasource 110 | settings := backend.DataSourceInstanceSettings{UID: "rowlimit-disabled", JSONData: []byte("{}")} 111 | instance, err := ds.NewDatasource(ctx, settings) 112 | require.NoError(t, err) 113 | 114 | // Verify default row limit is used when feature is disabled 115 | sqlDS, ok := instance.(*sqlds.SQLDatasource) 116 | require.True(t, ok) 117 | assert.Equal(t, int64(-1), sqlDS.GetRowLimit()) 118 | } 119 | 120 | func TestRowLimitDefault(t *testing.T) { 121 | // Create a mock config using the proper API 122 | mockConfig := backend.NewGrafanaCfg(map[string]string{}) 123 | 124 | // Create context with config 125 | ctx := backend.WithGrafanaConfig(context.Background(), mockConfig) 126 | 127 | // Create datasource with row limit disabled 128 | driver := &mockDriver{} 129 | ds := sqlds.NewDatasource(driver) 130 | 131 | // Create settings and initialize datasource 132 | settings := backend.DataSourceInstanceSettings{UID: "rowlimit-disabled", JSONData: []byte("{}")} 133 | instance, err := ds.NewDatasource(ctx, settings) 134 | require.NoError(t, err) 135 | 136 | // Verify default row limit is used when feature is disabled 137 | sqlDS, ok := instance.(*sqlds.SQLDatasource) 138 | require.True(t, ok) 139 | assert.Equal(t, int64(-1), sqlDS.GetRowLimit()) 140 | } 141 | 142 | func TestSetDefaultRowLimit(t *testing.T) { 143 | // Create datasource 144 | driver := &mockDriver{} 145 | ds := sqlds.NewDatasource(driver) 146 | 147 | // Initialize datasource 148 | settings := backend.DataSourceInstanceSettings{UID: "rowlimit-set", JSONData: []byte("{}")} 149 | instance, err := ds.NewDatasource(context.Background(), settings) 150 | require.NoError(t, err) 151 | 152 | // Cast to SQLDatasource 153 | sqlDS, ok := instance.(*sqlds.SQLDatasource) 154 | require.True(t, ok) 155 | 156 | // Set row limit 157 | sqlDS.SetDefaultRowLimit(500) 158 | 159 | // Verify row limit was set correctly 160 | assert.Equal(t, int64(500), sqlDS.GetRowLimit()) 161 | assert.True(t, sqlDS.EnableRowLimit) 162 | } 163 | 164 | func TestRowLimitPassedToQuery(t *testing.T) { 165 | // Set up test data 166 | testData := test.Data{ 167 | Cols: []test.Column{ 168 | {Name: "id", DataType: "INTEGER", Kind: int64(0)}, 169 | {Name: "name", DataType: "TEXT", Kind: ""}, 170 | }, 171 | Rows: [][]any{ 172 | {int64(1), "test1"}, 173 | {int64(2), "test2"}, 174 | {int64(3), "test3"}, 175 | }, 176 | } 177 | 178 | // Create datasource with row limit 179 | driver, _ := test.NewDriver("rowlimit-query", testData, nil, test.DriverOpts{}, nil) 180 | ds := sqlds.NewDatasource(driver) 181 | 182 | // Create settings and initialize datasource 183 | settings := backend.DataSourceInstanceSettings{UID: 
"rowlimit-query", JSONData: []byte("{}")} 184 | instance, err := ds.NewDatasource(context.Background(), settings) 185 | require.NoError(t, err) 186 | 187 | // Cast to SQLDatasource and set row limit 188 | sqlDS, ok := instance.(*sqlds.SQLDatasource) 189 | require.True(t, ok) 190 | sqlDS.SetDefaultRowLimit(2) 191 | 192 | // Create query request 193 | req := &backend.QueryDataRequest{ 194 | PluginContext: backend.PluginContext{ 195 | DataSourceInstanceSettings: &settings, 196 | }, 197 | Queries: []backend.DataQuery{ 198 | { 199 | RefID: "A", 200 | JSON: []byte(`{"rawSql": "SELECT * FROM test"}`), 201 | }, 202 | }, 203 | } 204 | 205 | // Execute query 206 | resp, err := sqlDS.QueryData(context.Background(), req) 207 | assert.NoError(t, err) 208 | 209 | // Verify response 210 | queryResp := resp.Responses["A"] 211 | assert.NoError(t, queryResp.Error) 212 | assert.NotNil(t, queryResp.Frames) 213 | assert.Len(t, queryResp.Frames, 1) 214 | 215 | // Verify row limit was applied (should only have 2 rows) 216 | frame := queryResp.Frames[0] 217 | rowCount, _ := frame.RowLen() 218 | assert.Equal(t, 2, rowCount) 219 | } 220 | 221 | func TestRowLimitFromEnvVar(t *testing.T) { 222 | // Save original env var value to restore later 223 | originalValue, originalExists := os.LookupEnv("GF_DATAPROXY_ROW_LIMIT") 224 | 225 | // Clean up after test 226 | defer func() { 227 | if originalExists { 228 | os.Setenv("GF_DATAPROXY_ROW_LIMIT", originalValue) 229 | } else { 230 | os.Unsetenv("GF_DATAPROXY_ROW_LIMIT") 231 | } 232 | }() 233 | 234 | tests := []struct { 235 | name string 236 | envValue string 237 | expectedLimit int64 238 | configValue string 239 | driverRowLimit int64 240 | }{ 241 | { 242 | name: "valid env var", 243 | envValue: "400", 244 | expectedLimit: 400, 245 | }, 246 | { 247 | name: "invalid env var", 248 | envValue: "not-a-number", 249 | expectedLimit: -1, 250 | }, 251 | { 252 | name: "negative env var", 253 | envValue: "-10", 254 | expectedLimit: -1, 255 | }, 256 | { 257 | name: "env var precedence over config", 258 | envValue: "400", 259 | configValue: "200", 260 | expectedLimit: 400, 261 | }, 262 | { 263 | name: "driver settings precedence over env var", 264 | envValue: "400", 265 | driverRowLimit: 300, 266 | expectedLimit: 300, 267 | }, 268 | } 269 | 270 | for _, tt := range tests { 271 | t.Run(tt.name, func(t *testing.T) { 272 | // Set env var for test 273 | os.Setenv("GF_DATAPROXY_ROW_LIMIT", tt.envValue) 274 | 275 | // Create context with config if needed 276 | ctx := context.Background() 277 | if tt.configValue != "" { 278 | mockConfig := getMockGrafanaCfg(tt.configValue) 279 | ctx = backend.WithGrafanaConfig(ctx, mockConfig) 280 | } 281 | 282 | // Create datasource with driver that may have row limit 283 | driver := &mockDriver{rowLimit: tt.driverRowLimit} 284 | ds := sqlds.NewDatasource(driver) 285 | ds.EnableRowLimit = true 286 | 287 | // Create settings and initialize datasource 288 | settings := backend.DataSourceInstanceSettings{UID: "rowlimit-env-" + tt.name, JSONData: []byte("{}")} 289 | instance, err := ds.NewDatasource(ctx, settings) 290 | require.NoError(t, err) 291 | 292 | // Verify row limit was set correctly 293 | sqlDS, ok := instance.(*sqlds.SQLDatasource) 294 | require.True(t, ok) 295 | assert.Equal(t, tt.expectedLimit, sqlDS.GetRowLimit()) 296 | }) 297 | } 298 | } 299 | -------------------------------------------------------------------------------- /query.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 
4 | "context" 5 | "database/sql" 6 | "encoding/json" 7 | "errors" 8 | "fmt" 9 | "net/http" 10 | "time" 11 | 12 | "runtime/debug" 13 | 14 | "github.com/grafana/dataplane/sdata/timeseries" 15 | "github.com/grafana/grafana-plugin-sdk-go/backend" 16 | "github.com/grafana/grafana-plugin-sdk-go/data" 17 | "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" 18 | ) 19 | 20 | // FormatQueryOption defines how the user has chosen to represent the data 21 | // Deprecated: use sqlutil.FormatQueryOption directly instead 22 | type FormatQueryOption = sqlutil.FormatQueryOption 23 | 24 | // Deprecated: use the values in sqlutil directly instead 25 | const ( 26 | // FormatOptionTimeSeries formats the query results as a timeseries using "LongToWide" 27 | FormatOptionTimeSeries = sqlutil.FormatOptionTimeSeries 28 | // FormatOptionTable formats the query results as a table using "LongToWide" 29 | FormatOptionTable = sqlutil.FormatOptionTable 30 | // FormatOptionLogs sets the preferred visualization to logs 31 | FormatOptionLogs = sqlutil.FormatOptionLogs 32 | // FormatOptionsTrace sets the preferred visualization to trace 33 | FormatOptionTrace = sqlutil.FormatOptionTrace 34 | // FormatOptionMulti formats the query results as a timeseries using "LongToMulti" 35 | FormatOptionMulti = sqlutil.FormatOptionMulti 36 | ) 37 | 38 | // Deprecated: use sqlutil.Query directly instead 39 | type Query = sqlutil.Query 40 | 41 | // GetQuery wraps sqlutil's GetQuery to add headers if needed 42 | func GetQuery(query backend.DataQuery, headers http.Header, setHeaders bool) (*Query, error) { 43 | model, err := sqlutil.GetQuery(query) 44 | if err != nil { 45 | return nil, backend.PluginError(err) 46 | } 47 | 48 | if setHeaders { 49 | applyHeaders(model, headers) 50 | } 51 | 52 | return model, nil 53 | } 54 | 55 | type DBQuery struct { 56 | DB Connection 57 | fillMode *data.FillMissing 58 | Settings backend.DataSourceInstanceSettings 59 | metrics Metrics 60 | DSName string 61 | converters []sqlutil.Converter 62 | rowLimit int64 63 | } 64 | 65 | func NewQuery(db Connection, settings backend.DataSourceInstanceSettings, converters []sqlutil.Converter, fillMode *data.FillMissing, rowLimit int64) *DBQuery { 66 | return &DBQuery{ 67 | DB: db, 68 | DSName: settings.Name, 69 | converters: converters, 70 | fillMode: fillMode, 71 | metrics: NewMetrics(settings.Name, settings.Type, EndpointQuery), 72 | rowLimit: rowLimit, 73 | } 74 | } 75 | 76 | // Run sends the query to the connection and converts the rows to a dataframe. 77 | func (q *DBQuery) Run(ctx context.Context, query *Query, queryErrorMutator QueryErrorMutator, args ...interface{}) (data.Frames, error) { 78 | start := time.Now() 79 | rows, err := q.DB.QueryContext(ctx, query.RawSQL, args...) 
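// The error branch that follows classifies failures from QueryContext: a cancelled
// context is reported as a downstream error; any other failure is wrapped with
// ErrorQuery so the datasource's retry logic can recognise it, an optional
// QueryErrorMutator may reclassify the error source, the query-duration metric is
// recorded with that source, and an error frame built from the query is returned.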
80 | if err != nil { 81 | var errWithSource backend.ErrorWithSource 82 | defer func() { 83 | q.metrics.CollectDuration(Source(errWithSource.ErrorSource()), StatusError, time.Since(start).Seconds()) 84 | }() 85 | 86 | if errors.Is(err, context.Canceled) { 87 | errWithSource := backend.NewErrorWithSource(err, backend.ErrorSourceDownstream) 88 | return sqlutil.ErrorFrameFromQuery(query), errWithSource 89 | } 90 | 91 | // Wrap with ErrorQuery to enable retry logic in datasource 92 | queryErr := fmt.Errorf("%w: %w", ErrorQuery, err) 93 | 94 | // Handle driver specific errors 95 | if queryErrorMutator != nil { 96 | errWithSource = queryErrorMutator.MutateQueryError(queryErr) 97 | return sqlutil.ErrorFrameFromQuery(query), errWithSource 98 | } 99 | 100 | // If we get to this point, assume the error is from the plugin 101 | errWithSource = backend.NewErrorWithSource(queryErr, backend.DefaultErrorSource) 102 | 103 | return sqlutil.ErrorFrameFromQuery(query), errWithSource 104 | } 105 | q.metrics.CollectDuration(SourceDownstream, StatusOK, time.Since(start).Seconds()) 106 | 107 | // Check for an error response 108 | if err := rows.Err(); err != nil { 109 | queryErr := fmt.Errorf("%w: %w", ErrorQuery, err) 110 | errWithSource := backend.NewErrorWithSource(queryErr, backend.DefaultErrorSource) 111 | if errors.Is(err, sql.ErrNoRows) { 112 | // Should we even response with an error here? 113 | // The panel will simply show "no data" 114 | errWithSource = backend.NewErrorWithSource(fmt.Errorf("%w: %s", err, "Error response from database"), backend.ErrorSourceDownstream) 115 | return sqlutil.ErrorFrameFromQuery(query), errWithSource 116 | } 117 | if queryErrorMutator != nil { 118 | errWithSource = queryErrorMutator.MutateQueryError(queryErr) 119 | } 120 | 121 | q.metrics.CollectDuration(Source(errWithSource.ErrorSource()), StatusError, time.Since(start).Seconds()) 122 | return sqlutil.ErrorFrameFromQuery(query), errWithSource 123 | } 124 | 125 | defer func() { 126 | if err := rows.Close(); err != nil { 127 | backend.Logger.Error(err.Error()) 128 | } 129 | }() 130 | 131 | return q.convertRowsToFrames(rows, query, queryErrorMutator) 132 | } 133 | 134 | func (q *DBQuery) convertRowsToFrames(rows *sql.Rows, query *Query, queryErrorMutator QueryErrorMutator) (data.Frames, error) { 135 | source := SourcePlugin 136 | status := StatusOK 137 | start := time.Now() 138 | defer func() { 139 | q.metrics.CollectDuration(source, status, time.Since(start).Seconds()) 140 | }() 141 | 142 | res, err := getFrames(rows, q.rowLimit, q.converters, q.fillMode, query) 143 | if err != nil { 144 | status = StatusError 145 | 146 | // Additional checks for processing errors 147 | if backend.IsDownstreamHTTPError(err) { 148 | source = SourceDownstream 149 | } else if queryErrorMutator != nil { 150 | errWithSource := queryErrorMutator.MutateQueryError(err) 151 | source = Source(errWithSource.ErrorSource()) 152 | } 153 | 154 | return sqlutil.ErrorFrameFromQuery(query), backend.NewErrorWithSource( 155 | fmt.Errorf("%w: %s", err, "Could not process SQL results"), 156 | backend.ErrorSource(source), 157 | ) 158 | } 159 | return res, nil 160 | } 161 | 162 | // getFrames converts rows to dataframes 163 | func getFrames(rows *sql.Rows, limit int64, converters []sqlutil.Converter, fillMode *data.FillMissing, query *Query) (data.Frames, error) { 164 | // Validate rows before processing to prevent panics 165 | if err := validateRows(rows); err != nil { 166 | backend.Logger.Error("Invalid SQL rows", "error", err.Error()) 167 | return nil, err 168 
| } 169 | 170 | frame, err := sqlutil.FrameFromRows(rows, limit, converters...) 171 | if err != nil { 172 | return nil, err 173 | } 174 | frame.Name = query.RefID 175 | if frame.Meta == nil { 176 | frame.Meta = &data.FrameMeta{} 177 | } 178 | 179 | count, err := frame.RowLen() 180 | if err != nil { 181 | return nil, err 182 | } 183 | 184 | // the handling of zero-rows differs between various "format"s. 185 | zeroRows := count == 0 186 | 187 | frame.Meta.ExecutedQueryString = query.RawSQL 188 | frame.Meta.PreferredVisualization = data.VisTypeGraph 189 | 190 | switch query.Format { 191 | case FormatOptionMulti: 192 | if zeroRows { 193 | return nil, ErrorNoResults 194 | } 195 | 196 | if frame.TimeSeriesSchema().Type == data.TimeSeriesTypeLong { 197 | 198 | err = fixFrameForLongToMulti(frame) 199 | if err != nil { 200 | return nil, err 201 | } 202 | 203 | frames, err := timeseries.LongToMulti(×eries.LongFrame{frame}) 204 | if err != nil { 205 | return nil, err 206 | } 207 | return frames.Frames(), nil 208 | } 209 | case FormatOptionTable: 210 | frame.Meta.PreferredVisualization = data.VisTypeTable 211 | case FormatOptionLogs: 212 | frame.Meta.PreferredVisualization = data.VisTypeLogs 213 | case FormatOptionTrace: 214 | frame.Meta.PreferredVisualization = data.VisTypeTrace 215 | // Format as timeSeries 216 | default: 217 | if zeroRows { 218 | return nil, ErrorNoResults 219 | } 220 | 221 | if frame.TimeSeriesSchema().Type == data.TimeSeriesTypeLong { 222 | frame, err = data.LongToWide(frame, fillMode) 223 | if err != nil { 224 | return nil, err 225 | } 226 | } 227 | } 228 | return data.Frames{frame}, nil 229 | } 230 | 231 | // accessColumns checks whether we can access rows.Columns, checking 232 | // for error or panic. In the case of panic, logs the stack trace at debug level 233 | // for security 234 | func accessColumns(rows *sql.Rows) (columnErr error) { 235 | defer func() { 236 | if r := recover(); r != nil { 237 | columnErr = fmt.Errorf("panic accessing columns: %v", r) 238 | stack := string(debug.Stack()) 239 | backend.Logger.Debug("accessColumns panic stack trace", "stack", stack) 240 | } 241 | }() 242 | _, columnErr = rows.Columns() 243 | return columnErr 244 | } 245 | 246 | // validateRows performs safety checks on SQL rows to prevent panics 247 | func validateRows(rows *sql.Rows) error { 248 | if rows == nil { 249 | return fmt.Errorf("%w: rows is nil", ErrorRowValidation) 250 | } 251 | 252 | err := accessColumns(rows) 253 | if err != nil { 254 | return fmt.Errorf("%w: %w", ErrorRowValidation, err) 255 | } 256 | return nil 257 | } 258 | 259 | // fixFrameForLongToMulti edits the passed in frame so that it's first time field isn't nullable and has the correct meta 260 | func fixFrameForLongToMulti(frame *data.Frame) error { 261 | if frame == nil { 262 | return fmt.Errorf("can not convert to wide series, input is nil") 263 | } 264 | 265 | timeFields := frame.TypeIndices(data.FieldTypeTime, data.FieldTypeNullableTime) 266 | if len(timeFields) == 0 { 267 | return fmt.Errorf("can not convert to wide series, input is missing a time field") 268 | } 269 | 270 | // the timeseries package expects the first time field in the frame to be non-nullable and ignores the rest 271 | timeField := frame.Fields[timeFields[0]] 272 | if timeField.Type() == data.FieldTypeNullableTime { 273 | newValues := []time.Time{} 274 | for i := 0; i < timeField.Len(); i++ { 275 | val, ok := timeField.ConcreteAt(i) 276 | if !ok { 277 | return fmt.Errorf("can not convert to wide series, input has null time values") 278 | } 
279 | newValues = append(newValues, val.(time.Time)) 280 | } 281 | newField := data.NewField(timeField.Name, timeField.Labels, newValues) 282 | newField.Config = timeField.Config 283 | frame.Fields[timeFields[0]] = newField 284 | 285 | // LongToMulti requires the meta to be set for the frame 286 | if frame.Meta == nil { 287 | frame.Meta = &data.FrameMeta{} 288 | } 289 | frame.Meta.Type = data.FrameTypeTimeSeriesLong 290 | frame.Meta.TypeVersion = data.FrameTypeVersion{0, 1} 291 | } 292 | return nil 293 | } 294 | 295 | func applyHeaders(query *Query, headers http.Header) *Query { 296 | var args map[string]interface{} 297 | if query.ConnectionArgs == nil { 298 | query.ConnectionArgs = []byte("{}") 299 | } 300 | err := json.Unmarshal(query.ConnectionArgs, &args) 301 | if err != nil { 302 | backend.Logger.Warn(fmt.Sprintf("Failed to apply headers: %s", err.Error())) 303 | return query 304 | } 305 | args[HeaderKey] = headers 306 | raw, err := json.Marshal(args) 307 | if err != nil { 308 | backend.Logger.Warn(fmt.Sprintf("Failed to apply headers: %s", err.Error())) 309 | return query 310 | } 311 | 312 | query.ConnectionArgs = raw 313 | 314 | return query 315 | } 316 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2021 Grafana Labs 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /query_test.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "errors" 7 | "fmt" 8 | "testing" 9 | "time" 10 | 11 | "github.com/grafana/grafana-plugin-sdk-go/backend" 12 | "github.com/grafana/grafana-plugin-sdk-go/data" 13 | "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" 14 | "github.com/stretchr/testify/assert" 15 | "github.com/stretchr/testify/require" 16 | ) 17 | 18 | var ( 19 | errorPingCompleted = errors.New("ping completed") 20 | errorQueryCompleted = errors.New("query completed") 21 | ) 22 | 23 | type testConnection struct { 24 | PingWait time.Duration 25 | 26 | QueryWait time.Duration 27 | QueryRunCount int 28 | } 29 | 30 | func (t *testConnection) Close() error { 31 | t.QueryRunCount = 0 32 | return nil 33 | } 34 | 35 | func (t *testConnection) Ping() error { 36 | return errorPingCompleted 37 | } 38 | 39 | func (t *testConnection) PingContext(ctx context.Context) error { 40 | done := make(chan bool) 41 | go func() { 42 | time.Sleep(t.QueryWait) 43 | done <- true 44 | }() 45 | 46 | select { 47 | case <-ctx.Done(): 48 | return context.Canceled 49 | case <-done: 50 | return errorPingCompleted 51 | } 52 | } 53 | 54 | func (t *testConnection) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) { 55 | t.QueryRunCount++ 56 | 57 | done := make(chan bool) 58 | go func() { 59 | time.Sleep(t.QueryWait) 60 | done <- true 61 | }() 62 | 63 | select { 64 | case <-ctx.Done(): 65 | return nil, context.Canceled 66 | case <-done: 67 | return nil, errorQueryCompleted 68 | } 69 | } 70 | 71 | func TestQuery_Timeout(t *testing.T) { 72 | t.Run("it should return context.Canceled if the query timeout is exceeded", func(t *testing.T) { 73 | ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) 74 | defer cancel() 75 | 76 | conn := &testConnection{ 77 | PingWait: time.Second * 5, 78 | QueryWait: time.Second * 5, 79 | } 80 | 81 | defer conn.Close() 82 | 83 | settings := backend.DataSourceInstanceSettings{ 84 | Name: "foo", 85 | } 86 | 87 | sqlQuery := NewQuery(conn, settings, []sqlutil.Converter{}, nil, defaultRowLimit) 88 | _, err := sqlQuery.Run(ctx, &Query{}, nil) 89 | 90 | if !errors.Is(err, context.Canceled) { 91 | t.Fatal("expected error to be context.Canceled, received", err) 92 | } 93 | 94 | if conn.QueryRunCount != 1 { 95 | t.Fatal("expected the querycontext function to run only once, but ran", conn.QueryRunCount, "times") 96 | } 97 | }) 98 | 99 | t.Run("it should run to completion and not return a query timeout error", func(t *testing.T) { 100 | ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) 101 | defer cancel() 102 | 103 | conn := &testConnection{ 104 | PingWait: time.Second, 105 | QueryWait: time.Second, 106 | } 107 | 108 | defer conn.Close() 109 | 110 | settings := backend.DataSourceInstanceSettings{ 111 | Name: "foo", 112 | } 113 | 114 | sqlQuery := NewQuery(conn, settings, []sqlutil.Converter{}, nil, defaultRowLimit) 115 | _, err := sqlQuery.Run(ctx, &Query{}, nil) 116 | 117 | if !errors.Is(err, errorQueryCompleted) { 118 | t.Fatal("expected function to complete, received error: ", err) 119 | } 120 | }) 121 | } 122 | 123 | func TestFixFrameForLongToMulti(t *testing.T) { 124 | t.Run("fix time", func(t *testing.T) { 125 | time1 := time.UnixMilli(1) 126 | time2 := time.UnixMilli(2) 127 | frame := data.NewFrame("", 128 | 
data.NewField("time", nil, []*time.Time{&time1, &time2}), 129 | data.NewField("host", nil, []string{"a", "b"}), 130 | data.NewField("iface", nil, []string{"eth0", "eth0"}), 131 | data.NewField("in_bytes", nil, []float64{1, 2}), 132 | data.NewField("out_bytes", nil, []int64{3, 4}), 133 | ) 134 | 135 | err := fixFrameForLongToMulti(frame) 136 | require.NoError(t, err) 137 | 138 | require.Equal(t, frame.Fields[0].Type(), data.FieldTypeTime) 139 | require.Equal(t, frame.Fields[0].Len(), 2) 140 | require.Equal(t, frame.Fields[0].At(0).(time.Time), time1) 141 | require.Equal(t, frame.Fields[0].At(1).(time.Time), time2) 142 | 143 | require.Equal(t, frame.Meta.Type, data.FrameTypeTimeSeriesLong) 144 | require.Equal(t, frame.Meta.TypeVersion, data.FrameTypeVersion{0, 1}) 145 | }) 146 | t.Run("errors for null time", func(t *testing.T) { 147 | time1 := time.UnixMilli(1) 148 | frame := data.NewFrame("", 149 | data.NewField("time", nil, []*time.Time{&time1, nil}), 150 | data.NewField("host", nil, []string{"a", "b"}), 151 | data.NewField("in_bytes", nil, []float64{1, 2}), 152 | ) 153 | 154 | err := fixFrameForLongToMulti(frame) 155 | require.Equal(t, err, fmt.Errorf("can not convert to wide series, input has null time values")) 156 | }) 157 | t.Run("error for no time", func(t *testing.T) { 158 | frame := data.NewFrame("", 159 | data.NewField("host", nil, []string{"a", "b"}), 160 | data.NewField("in_bytes", nil, []float64{1, 2}), 161 | ) 162 | 163 | err := fixFrameForLongToMulti(frame) 164 | require.Equal(t, err, fmt.Errorf("can not convert to wide series, input is missing a time field")) 165 | }) 166 | } 167 | 168 | func TestLabelNameSanitization(t *testing.T) { 169 | testcases := []struct { 170 | input string 171 | expected string 172 | err bool 173 | }{ 174 | {input: "job", expected: "job"}, 175 | {input: "job._loal['", expected: "job_loal"}, 176 | {input: "", expected: "", err: true}, 177 | {input: ";;;", expected: "", err: true}, 178 | {input: "Data source", expected: "Data_source"}, 179 | } 180 | 181 | for _, tc := range testcases { 182 | got, ok := sanitizeLabelName(tc.input) 183 | if tc.err { 184 | assert.Equal(t, false, ok) 185 | } else { 186 | assert.Equal(t, true, ok) 187 | assert.Equal(t, tc.expected, got) 188 | } 189 | } 190 | } 191 | 192 | func TestValidateRows(t *testing.T) { 193 | t.Run("returns error for nil rows", func(t *testing.T) { 194 | err := validateRows(nil) 195 | require.Error(t, err) 196 | require.ErrorIs(t, err, ErrorRowValidation) 197 | require.Contains(t, err.Error(), "rows is nil") 198 | }) 199 | } 200 | 201 | func TestFixFrameForLongToMulti_NilFrame(t *testing.T) { 202 | err := fixFrameForLongToMulti(nil) 203 | require.Error(t, err) 204 | require.Contains(t, err.Error(), "can not convert to wide series, input is nil") 205 | } 206 | 207 | func TestFixFrameForLongToMulti_NonNullableTime(t *testing.T) { 208 | time1 := time.UnixMilli(1) 209 | time2 := time.UnixMilli(2) 210 | frame := data.NewFrame("", 211 | data.NewField("time", nil, []time.Time{time1, time2}), 212 | data.NewField("host", nil, []string{"a", "b"}), 213 | data.NewField("value", nil, []float64{1, 2}), 214 | ) 215 | 216 | err := fixFrameForLongToMulti(frame) 217 | require.NoError(t, err) 218 | 219 | // Verify the frame wasn't modified since time was already non-nullable 220 | require.Equal(t, data.FieldTypeTime, frame.Fields[0].Type()) 221 | require.Equal(t, 2, frame.Fields[0].Len()) 222 | } 223 | 224 | func TestNewQuery(t *testing.T) { 225 | conn := &testConnection{} 226 | settings := 
backend.DataSourceInstanceSettings{ 227 | Name: "test-datasource", 228 | Type: "test-type", 229 | } 230 | converters := []sqlutil.Converter{} 231 | fillMode := &data.FillMissing{} 232 | rowLimit := int64(1000) 233 | 234 | dbQuery := NewQuery(conn, settings, converters, fillMode, rowLimit) 235 | 236 | require.NotNil(t, dbQuery) 237 | require.Equal(t, conn, dbQuery.DB) 238 | require.Equal(t, "test-datasource", dbQuery.DSName) 239 | require.Equal(t, converters, dbQuery.converters) 240 | require.Equal(t, fillMode, dbQuery.fillMode) 241 | require.Equal(t, rowLimit, dbQuery.rowLimit) 242 | require.NotNil(t, dbQuery.metrics) 243 | } 244 | 245 | func TestRun_WithDownStreamErrorMutator(t *testing.T) { 246 | ctx := context.Background() 247 | conn := &testConnection{ 248 | QueryWait: 0, 249 | } 250 | settings := backend.DataSourceInstanceSettings{ 251 | Name: "test", 252 | } 253 | 254 | query := &Query{ 255 | RawSQL: "SELECT * FROM test", 256 | RefID: "A", 257 | } 258 | 259 | // Create a mock ErrorMutator 260 | mockMutator := &mockErrorMutator{ 261 | shouldMutate: true, 262 | } 263 | 264 | dbQuery := NewQuery(conn, settings, []sqlutil.Converter{}, nil, defaultRowLimit) 265 | _, err := dbQuery.Run(ctx, query, mockMutator) 266 | 267 | require.Error(t, err) 268 | require.True(t, mockMutator.called, "ErrorMutator should have been called") 269 | } 270 | 271 | func TestRun_ErrorQueryWrapping(t *testing.T) { 272 | t.Run("query errors are wrapped with ErrorQuery for retry logic", func(t *testing.T) { 273 | ctx := context.Background() 274 | conn := &testConnection{ 275 | QueryWait: 0, 276 | } 277 | settings := backend.DataSourceInstanceSettings{ 278 | Name: "test", 279 | } 280 | 281 | query := &Query{ 282 | RawSQL: "SELECT * FROM test", 283 | RefID: "A", 284 | } 285 | 286 | dbQuery := NewQuery(conn, settings, []sqlutil.Converter{}, nil, defaultRowLimit) 287 | _, err := dbQuery.Run(ctx, query, nil) 288 | 289 | require.Error(t, err) 290 | // Verify the error is wrapped with ErrorQuery to enable retry logic in datasource.go 291 | require.True(t, errors.Is(err, ErrorQuery), "Error should be wrapped with ErrorQuery for retry detection") 292 | // Verify the original error is preserved in the chain 293 | require.True(t, errors.Is(err, errorQueryCompleted), "Original error should be preserved") 294 | }) 295 | 296 | t.Run("context.Canceled is NOT wrapped with ErrorQuery", func(t *testing.T) { 297 | ctx, cancel := context.WithCancel(context.Background()) 298 | cancel() 299 | 300 | conn := &testConnection{ 301 | QueryWait: 0, 302 | } 303 | settings := backend.DataSourceInstanceSettings{ 304 | Name: "test", 305 | } 306 | 307 | query := &Query{ 308 | RawSQL: "SELECT * FROM test", 309 | RefID: "A", 310 | } 311 | 312 | dbQuery := NewQuery(conn, settings, []sqlutil.Converter{}, nil, defaultRowLimit) 313 | _, err := dbQuery.Run(ctx, query, nil) 314 | 315 | require.Error(t, err) 316 | // Context cancellation should NOT be wrapped with ErrorQuery 317 | require.True(t, errors.Is(err, context.Canceled), "Error should be context.Canceled") 318 | // Verify it's classified as downstream error 319 | var errWithSource backend.ErrorWithSource 320 | require.True(t, errors.As(err, &errWithSource), "Error should implement ErrorWithSource") 321 | require.Equal(t, backend.ErrorSourceDownstream, errWithSource.ErrorSource()) 322 | }) 323 | 324 | t.Run("QueryErrorMutator receives ErrorQuery-wrapped errors", func(t *testing.T) { 325 | ctx := context.Background() 326 | conn := &testConnection{ 327 | QueryWait: 0, 328 | } 329 | settings := 
backend.DataSourceInstanceSettings{ 330 | Name: "test", 331 | } 332 | 333 | query := &Query{ 334 | RawSQL: "SELECT * FROM test", 335 | RefID: "A", 336 | } 337 | 338 | var receivedErr error 339 | mockMutator := &mockErrorMutatorWithCapture{ 340 | capturedErr: &receivedErr, 341 | } 342 | 343 | dbQuery := NewQuery(conn, settings, []sqlutil.Converter{}, nil, defaultRowLimit) 344 | _, err := dbQuery.Run(ctx, query, mockMutator) 345 | 346 | require.Error(t, err) 347 | require.NotNil(t, receivedErr, "Mutator should have received an error") 348 | // Verify the mutator received an ErrorQuery-wrapped error 349 | require.True(t, errors.Is(receivedErr, ErrorQuery), "Mutator should receive ErrorQuery-wrapped error") 350 | require.True(t, errors.Is(receivedErr, errorQueryCompleted), "Original error should be in chain") 351 | }) 352 | } 353 | 354 | // mockErrorMutator is a simple implementation for testing 355 | type mockErrorMutator struct { 356 | shouldMutate bool 357 | called bool 358 | } 359 | 360 | func (m *mockErrorMutator) MutateQueryError(err error) backend.ErrorWithSource { 361 | m.called = true 362 | if m.shouldMutate { 363 | return backend.NewErrorWithSource(err, backend.ErrorSourceDownstream) 364 | } 365 | return backend.NewErrorWithSource(err, backend.ErrorSourcePlugin) 366 | } 367 | 368 | // mockErrorMutatorWithCapture captures the error it receives for testing 369 | type mockErrorMutatorWithCapture struct { 370 | capturedErr *error 371 | } 372 | 373 | func (m *mockErrorMutatorWithCapture) MutateQueryError(err error) backend.ErrorWithSource { 374 | if m.capturedErr != nil { 375 | *m.capturedErr = err 376 | } 377 | return backend.NewErrorWithSource(err, backend.ErrorSourceDownstream) 378 | } 379 | -------------------------------------------------------------------------------- /datasource_test.go: -------------------------------------------------------------------------------- 1 | package sqlds_test 2 | 3 | import ( 4 | "context" 5 | "database/sql" 6 | "database/sql/driver" 7 | "encoding/json" 8 | "errors" 9 | "fmt" 10 | "testing" 11 | 12 | "github.com/grafana/grafana-plugin-sdk-go/backend" 13 | "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" 14 | "github.com/grafana/sqlds/v5" 15 | "github.com/grafana/sqlds/v5/mock" 16 | "github.com/grafana/sqlds/v5/test" 17 | "github.com/stretchr/testify/assert" 18 | ) 19 | 20 | func Test_health_retries(t *testing.T) { 21 | opts := test.DriverOpts{ 22 | ConnectError: errors.New("foo"), 23 | } 24 | cfg := `{ "timeout": 0, "retries": 5, "retryOn": ["foo"] }` 25 | req, handler, ds := healthRequest(t, "timeout", opts, cfg) 26 | 27 | _, err := ds.CheckHealth(context.Background(), &req) 28 | 29 | assert.Equal(t, nil, err) 30 | assert.Equal(t, 5, handler.State.ConnectAttempts) 31 | } 32 | 33 | func Test_query_retries(t *testing.T) { 34 | cfg := `{ "timeout": 0, "retries": 5, "retryOn": ["foo"] }` 35 | opts := test.DriverOpts{ 36 | QueryError: errors.New("foo"), 37 | } 38 | 39 | req, handler, ds := queryRequest(t, "error", opts, cfg, nil) 40 | 41 | data, err := ds.QueryData(context.Background(), req) 42 | assert.Nil(t, err) 43 | assert.NotNil(t, data.Responses) 44 | assert.Equal(t, 6, handler.State.QueryAttempts) 45 | 46 | res := data.Responses["foo"] 47 | assert.NotNil(t, res.Error) 48 | assert.Equal(t, backend.ErrorSourceDownstream, res.ErrorSource) 49 | } 50 | 51 | func Test_query_apply_headers(t *testing.T) { 52 | var message []byte 53 | onConnect := func(msg []byte) { 54 | message = msg 55 | } 56 | 57 | opts := test.DriverOpts{ 58 | QueryError: 
errors.New("missing token"), 59 | QueryFailTimes: 1, // first check always fails since headers are not available on initial connect 60 | OnConnect: onConnect, 61 | } 62 | cfg := `{ "timeout": 0, "retries": 1, "retryOn": ["missing token"], "forwardHeaders": true }` 63 | 64 | req, handler, ds := queryRequest(t, "headers", opts, cfg, nil) 65 | 66 | req.SetHTTPHeader("foo", "bar") 67 | 68 | data, err := ds.QueryData(context.Background(), req) 69 | assert.Nil(t, err) 70 | assert.NotNil(t, data.Responses) 71 | assert.Equal(t, 2, handler.State.QueryAttempts) 72 | 73 | assert.Contains(t, string(message), "bar") 74 | } 75 | 76 | func Test_check_health_with_headers(t *testing.T) { 77 | var message json.RawMessage 78 | onConnect := func(msg []byte) { 79 | message = msg 80 | } 81 | opts := test.DriverOpts{ 82 | ConnectError: errors.New("missing token"), 83 | ConnectFailTimes: 1, // first check always fails since headers are not available on initial connect 84 | OnConnect: onConnect, 85 | } 86 | cfg := `{ "timeout": 0, "retries": 2, "retryOn": ["missing token"], "forwardHeaders": true }` 87 | req, handler, ds := healthRequest(t, "health-headers", opts, cfg) 88 | r := &req 89 | r.SetHTTPHeader("foo", "bar") 90 | 91 | res, err := ds.CheckHealth(context.Background(), r) 92 | assert.Nil(t, err) 93 | assert.Equal(t, "Data source is working", res.Message) 94 | assert.Equal(t, 2, handler.State.ConnectAttempts) 95 | assert.Contains(t, string(message), "bar") 96 | } 97 | 98 | func Test_no_errors(t *testing.T) { 99 | req, _, ds := healthRequest(t, "pass", test.DriverOpts{}, "{}") 100 | result, err := ds.CheckHealth(context.Background(), &req) 101 | 102 | assert.Nil(t, err) 103 | expected := "Data source is working" 104 | assert.Equal(t, expected, result.Message) 105 | } 106 | 107 | func Test_custom_marco_errors(t *testing.T) { 108 | cfg := `{ "timeout": 0, "retries": 0, "retryOn": ["foo"], query: "badArgumentCount" }` 109 | opts := test.DriverOpts{} 110 | 111 | badArgumentCountFunc := func(query *sqlds.Query, args []string) (string, error) { 112 | return "", sqlutil.ErrorBadArgumentCount 113 | } 114 | macros := sqlds.Macros{ 115 | "foo": badArgumentCountFunc, 116 | } 117 | 118 | req, _, ds := queryRequest(t, "interpolate", opts, cfg, macros) 119 | 120 | req.Queries[0].JSON = []byte(`{ "rawSql": "select $__foo from bar;" }`) 121 | 122 | data, err := ds.QueryData(context.Background(), req) 123 | assert.Nil(t, err) 124 | 125 | res := data.Responses["foo"] 126 | assert.NotNil(t, res.Error) 127 | assert.Equal(t, backend.ErrorSourceDownstream, res.ErrorSource) 128 | assert.Contains(t, res.Error.Error(), sqlutil.ErrorBadArgumentCount.Error()) 129 | } 130 | 131 | func Test_default_macro_errors(t *testing.T) { 132 | tests := []struct { 133 | name string 134 | rawSQL string 135 | wantError string 136 | }{ 137 | { 138 | name: "missing parameters", 139 | rawSQL: "select * from bar where $__timeGroup(", 140 | wantError: "missing close bracket", 141 | }, 142 | { 143 | name: "incorrect argument count 0 - timeGroup", 144 | rawSQL: "select * from bar where $__timeGroup()", 145 | wantError: sqlutil.ErrorBadArgumentCount.Error(), 146 | }, 147 | { 148 | name: "incorrect argument count 3 - timeGroup", 149 | rawSQL: "select * from bar where $__timeGroup(1,2,3)", 150 | wantError: sqlutil.ErrorBadArgumentCount.Error(), 151 | }, 152 | { 153 | name: "incorrect argument count 0 - timeFilter", 154 | rawSQL: "select * from bar where $__timeFilter", 155 | wantError: sqlutil.ErrorBadArgumentCount.Error(), 156 | }, 157 | { 158 | name: 
"incorrect argument count 3 - timeFilter", 159 | rawSQL: "select * from bar where $__timeFilter(1,2,3)", 160 | wantError: sqlutil.ErrorBadArgumentCount.Error(), 161 | }, 162 | { 163 | name: "incorrect argument count 0 - timeFrom", 164 | rawSQL: "select * from bar where $__timeFrom", 165 | wantError: sqlutil.ErrorBadArgumentCount.Error(), 166 | }, 167 | { 168 | name: "incorrect argument count 3 - timeFrom", 169 | rawSQL: "select * from bar where $__timeFrom(1,2,3)", 170 | wantError: sqlutil.ErrorBadArgumentCount.Error(), 171 | }, 172 | } 173 | 174 | // Common test configuration 175 | cfg := `{ "timeout": 0, "retries": 0, "retryOn": ["foo"], query: "badArgumentCount" }` 176 | opts := test.DriverOpts{} 177 | 178 | for _, tt := range tests { 179 | t.Run(tt.name, func(t *testing.T) { 180 | // Setup request 181 | req, _, ds := queryRequest(t, "interpolate", opts, cfg, nil) 182 | req.Queries[0].JSON = []byte(fmt.Sprintf(`{ "rawSql": "%s" }`, tt.rawSQL)) 183 | 184 | // Execute query 185 | data, err := ds.QueryData(context.Background(), req) 186 | assert.Nil(t, err) 187 | 188 | // Verify response 189 | res := data.Responses["foo"] 190 | assert.NotNil(t, res.Error) 191 | assert.Equal(t, backend.ErrorSourceDownstream, res.ErrorSource) 192 | assert.Contains(t, res.Error.Error(), tt.wantError) 193 | }) 194 | } 195 | } 196 | 197 | func Test_query_panic_recovery(t *testing.T) { 198 | cfg := `{ "timeout": 0, "retries": 0, "retryOn": [] }` 199 | opts := test.DriverOpts{} 200 | 201 | // Create a macro that triggers a panic 202 | panicMacro := func(query *sqlds.Query, args []string) (string, error) { 203 | panic("Random panic for testing purposes") 204 | } 205 | macros := sqlds.Macros{ 206 | "panicTest": panicMacro, 207 | } 208 | 209 | req, _, ds := queryRequest(t, "panic-test", opts, cfg, macros) 210 | 211 | // Set up a query that uses the panic-triggering macro 212 | req.Queries[0].JSON = []byte(`{ "rawSql": "SELECT $__panicTest() FROM test_table;" }`) 213 | 214 | // Execute the query 215 | data, err := ds.QueryData(context.Background(), req) 216 | 217 | // Verify that the panic was caught and converted to an error 218 | assert.Nil(t, err) 219 | assert.NotNil(t, data.Responses) 220 | 221 | res := data.Responses["foo"] 222 | assert.NotNil(t, res.Error) 223 | assert.Equal(t, backend.ErrorSourcePlugin, res.ErrorSource) 224 | assert.Contains(t, res.Error.Error(), "SQL datasource query execution panic") 225 | assert.Contains(t, res.Error.Error(), "Random panic for testing purposes") 226 | assert.Nil(t, res.Frames) 227 | } 228 | 229 | func Test_query_panic_in_rows_validation(t *testing.T) { 230 | cfg := `{ "timeout": 0, "retries": 0, "retryOn": [] }` 231 | opts := test.DriverOpts{} 232 | 233 | // Set up a driver that returns rows which will cause a panic when accessing columns 234 | // This will be caught by our validateRows function 235 | customQueryFunc := func(args []driver.Value) (driver.Rows, error) { 236 | // Return a panicking rows implementation that will cause a panic when columns are accessed 237 | return &panickingRows{}, nil 238 | } 239 | 240 | // Create a custom driver with a wrapper for the query method 241 | driverName := "panic-rows-test" 242 | 243 | // Create and register the handler 244 | handler := test.NewDriverHandler(test.Data{}, opts) 245 | 246 | // Create a custom handler that overrides the Query method 247 | customHandler := &panickingDBHandler{ 248 | SqlHandler: handler, 249 | customQueryFunc: customQueryFunc, 250 | } 251 | 252 | mock.RegisterDriver(driverName, customHandler) 253 | 254 
| // Create datasource with the custom driver 255 | testDS := &testDriver{driverName: driverName} 256 | ds := sqlds.NewDatasource(testDS) 257 | 258 | // Set up the query request 259 | req, settings := setupQueryRequest("panic-rows-validation", cfg) 260 | _, err := ds.NewDatasource(context.Background(), settings) 261 | assert.Nil(t, err) 262 | 263 | // Execute the query 264 | data, err := ds.QueryData(context.Background(), req) 265 | 266 | // Verify that the panic was caught and converted to an error 267 | assert.Nil(t, err) 268 | assert.NotNil(t, data.Responses) 269 | 270 | res := data.Responses["foo"] 271 | assert.NotNil(t, res.Error) 272 | assert.Contains(t, res.Error.Error(), "SQL rows validation failed") 273 | assert.NotNil(t, res.Frames) // Error frame is returned, not nil 274 | } 275 | 276 | // panickingRows is a custom rows implementation that panics when columns are accessed 277 | type panickingRows struct{} 278 | 279 | func (r *panickingRows) Columns() []string { 280 | panic("panic in Columns method") 281 | } 282 | 283 | func (r *panickingRows) Close() error { 284 | return nil 285 | } 286 | 287 | func (r *panickingRows) Next(dest []driver.Value) error { 288 | return nil 289 | } 290 | 291 | func queryRequest(t *testing.T, name string, opts test.DriverOpts, cfg string, marcos sqlds.Macros) (*backend.QueryDataRequest, *test.SqlHandler, *sqlds.SQLDatasource) { 292 | driver, handler := test.NewDriver(name, test.Data{}, nil, opts, marcos) 293 | ds := sqlds.NewDatasource(driver) 294 | 295 | req, settings := setupQueryRequest(name, cfg) 296 | 297 | _, err := ds.NewDatasource(context.Background(), settings) 298 | assert.Equal(t, nil, err) 299 | return req, handler, ds 300 | } 301 | 302 | func setupQueryRequest(id string, cfg string) (*backend.QueryDataRequest, backend.DataSourceInstanceSettings) { 303 | s := backend.DataSourceInstanceSettings{UID: id, JSONData: []byte(cfg)} 304 | return &backend.QueryDataRequest{ 305 | PluginContext: backend.PluginContext{ 306 | DataSourceInstanceSettings: &s, 307 | }, 308 | Queries: []backend.DataQuery{ 309 | { 310 | RefID: "foo", 311 | JSON: []byte(`{ "rawSql": "foo" }`), 312 | }, 313 | }, 314 | }, s 315 | } 316 | 317 | func healthRequest(t *testing.T, name string, opts test.DriverOpts, cfg string) (backend.CheckHealthRequest, *test.SqlHandler, *sqlds.SQLDatasource) { 318 | driver, handler := test.NewDriver(name, test.Data{}, nil, opts, nil) 319 | ds := sqlds.NewDatasource(driver) 320 | 321 | req, settings := setupHealthRequest(name, cfg) 322 | 323 | _, err := ds.NewDatasource(context.Background(), settings) 324 | assert.Equal(t, nil, err) 325 | return req, handler, ds 326 | } 327 | 328 | func setupHealthRequest(id string, cfg string) (backend.CheckHealthRequest, backend.DataSourceInstanceSettings) { 329 | settings := backend.DataSourceInstanceSettings{UID: id, JSONData: []byte(cfg)} 330 | req := backend.CheckHealthRequest{ 331 | PluginContext: backend.PluginContext{ 332 | DataSourceInstanceSettings: &settings, 333 | }, 334 | } 335 | return req, settings 336 | } 337 | 338 | // testDriver implements sqlds.Driver interface for testing 339 | type testDriver struct { 340 | driverName string 341 | } 342 | 343 | func (d *testDriver) Connect(ctx context.Context, cfg backend.DataSourceInstanceSettings, msg json.RawMessage) (*sql.DB, error) { 344 | return sql.Open(d.driverName, "") 345 | } 346 | 347 | func (d *testDriver) Settings(ctx context.Context, config backend.DataSourceInstanceSettings) sqlds.DriverSettings { 348 | settings, _ := test.LoadSettings(ctx, 
config) 349 | return settings 350 | } 351 | 352 | func (d *testDriver) Macros() sqlds.Macros { 353 | return nil 354 | } 355 | 356 | func (d *testDriver) Converters() []sqlutil.Converter { 357 | return nil 358 | } 359 | 360 | // panickingDBHandler implements mock.DBHandler and causes panics when querying 361 | type panickingDBHandler struct { 362 | test.SqlHandler 363 | customQueryFunc func(args []driver.Value) (driver.Rows, error) 364 | } 365 | 366 | func (h *panickingDBHandler) Query(args []driver.Value) (driver.Rows, error) { 367 | if h.customQueryFunc != nil { 368 | return h.customQueryFunc(args) 369 | } 370 | return h.SqlHandler.Query(args) 371 | } 372 | 373 | func (h *panickingDBHandler) Ping(ctx context.Context) error { 374 | return nil 375 | } 376 | 377 | func (h *panickingDBHandler) Columns() []string { 378 | return []string{"test_column"} 379 | } 380 | 381 | func (h *panickingDBHandler) Next(dest []driver.Value) error { 382 | return errors.New("no more rows") 383 | } 384 | -------------------------------------------------------------------------------- /datasource.go: -------------------------------------------------------------------------------- 1 | package sqlds 2 | 3 | import ( 4 | "context" 5 | "crypto/sha256" 6 | "database/sql" 7 | "encoding/json" 8 | "errors" 9 | "fmt" 10 | "net/http" 11 | "os" 12 | "runtime/debug" 13 | "strconv" 14 | "sync" 15 | "time" 16 | 17 | "github.com/grafana/grafana-plugin-sdk-go/data/sqlutil" 18 | 19 | "github.com/grafana/grafana-plugin-sdk-go/backend" 20 | "github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt" 21 | "github.com/grafana/grafana-plugin-sdk-go/backend/log" 22 | "github.com/grafana/grafana-plugin-sdk-go/backend/resource/httpadapter" 23 | "github.com/grafana/grafana-plugin-sdk-go/data" 24 | ) 25 | 26 | const defaultKeySuffix = "default" 27 | const defaultRowLimit = int64(-1) 28 | const envRowLimit = "GF_DATAPROXY_ROW_LIMIT" 29 | 30 | var ( 31 | ErrorMissingMultipleConnectionsConfig = backend.PluginError(errors.New("received connection arguments but the feature is not enabled")) 32 | ErrorMissingDBConnection = backend.PluginError(errors.New("unable to get default db connection")) 33 | HeaderKey = "grafana-http-headers" 34 | // Deprecated: ErrorMissingMultipleConnectionsConfig should be used instead 35 | MissingMultipleConnectionsConfig = ErrorMissingMultipleConnectionsConfig 36 | // Deprecated: ErrorMissingDBConnection should be used instead 37 | MissingDBConnection = ErrorMissingDBConnection 38 | ) 39 | 40 | func defaultKey(datasourceUID string) string { 41 | return fmt.Sprintf("%s-%s", datasourceUID, defaultKeySuffix) 42 | } 43 | 44 | func keyWithConnectionArgs(datasourceUID string, connArgs json.RawMessage) string { 45 | connectionArgsHash := sha256.Sum256(connArgs) 46 | return fmt.Sprintf("%s-%x", datasourceUID, connectionArgsHash) 47 | } 48 | 49 | type dbConnection struct { 50 | db *sql.DB 51 | settings backend.DataSourceInstanceSettings 52 | } 53 | 54 | type SQLDatasource struct { 55 | Completable 56 | backend.CallResourceHandler 57 | connector *Connector 58 | CustomRoutes map[string]func(http.ResponseWriter, *http.Request) 59 | metrics Metrics 60 | EnableMultipleConnections bool 61 | // EnableRowLimit: enables using the dataproxy.row_limit setting to limit the number of rows returned by the query 62 | // https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#row_limit 63 | EnableRowLimit bool 64 | rowLimit int64 65 | // PreCheckHealth (optional). 
Performs custom health check before the Connect method 66 | PreCheckHealth func(ctx context.Context, req *backend.CheckHealthRequest) *backend.CheckHealthResult 67 | // PostCheckHealth (optional).Performs custom health check after the Connect method 68 | PostCheckHealth func(ctx context.Context, req *backend.CheckHealthRequest) *backend.CheckHealthResult 69 | } 70 | 71 | // NewDatasource creates a new `SQLDatasource`. 72 | // It uses the provided settings argument to call the ds.Driver to connect to the SQL server 73 | func (ds *SQLDatasource) NewDatasource(ctx context.Context, settings backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) { 74 | conn, err := NewConnector(ctx, ds.driver(), settings, ds.EnableMultipleConnections) 75 | if err != nil { 76 | return nil, backend.DownstreamError(err) 77 | } 78 | 79 | ds.connector = conn 80 | mux := http.NewServeMux() 81 | err = ds.registerRoutes(mux) 82 | if err != nil { 83 | return nil, backend.PluginError(err) 84 | } 85 | 86 | ds.CallResourceHandler = httpadapter.New(mux) 87 | ds.metrics = NewMetrics(settings.Name, settings.Type, EndpointQuery) 88 | 89 | ds.rowLimit = ds.newRowLimit(ctx, conn) 90 | 91 | return ds, nil 92 | } 93 | 94 | // NewDatasource initializes the Datasource wrapper and instance manager 95 | func NewDatasource(c Driver) *SQLDatasource { 96 | return &SQLDatasource{ 97 | connector: &Connector{driver: c}, 98 | } 99 | } 100 | 101 | // Dispose cleans up datasource instance resources. 102 | // Note: Called when testing and saving a datasource 103 | func (ds *SQLDatasource) Dispose() { 104 | ds.connector.Dispose() 105 | } 106 | 107 | // QueryData creates the Responses list and executes each query 108 | func (ds *SQLDatasource) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { 109 | headers := req.GetHTTPHeaders() 110 | 111 | var ( 112 | response = NewResponse(backend.NewQueryDataResponse()) 113 | wg = sync.WaitGroup{} 114 | ) 115 | 116 | wg.Add(len(req.Queries)) 117 | 118 | if queryDataMutator, ok := ds.driver().(QueryDataMutator); ok { 119 | ctx, req = queryDataMutator.MutateQueryData(ctx, req) 120 | } 121 | 122 | // Execute each query and store the results by query RefID 123 | for _, q := range req.Queries { 124 | go func(query backend.DataQuery) { 125 | defer wg.Done() 126 | 127 | // Panic recovery 128 | defer func() { 129 | if r := recover(); r != nil { 130 | stack := string(debug.Stack()) 131 | errorMsg := fmt.Sprintf("SQL datasource query execution panic: %v", r) 132 | 133 | // Log panic without sensitive query data 134 | backend.Logger.Error(errorMsg, 135 | "panic", r, 136 | "refID", query.RefID, 137 | "queryType", query.QueryType, 138 | "maxDataPoints", query.MaxDataPoints, 139 | "interval", query.Interval) 140 | 141 | // Log stack trace separately at debug level to avoid exposing in production 142 | backend.Logger.Debug("Panic stack trace", "stack", stack) 143 | 144 | response.Set(query.RefID, backend.DataResponse{ 145 | Frames: nil, 146 | Error: backend.PluginError(errors.New(errorMsg)), 147 | ErrorSource: backend.ErrorSourcePlugin, 148 | }) 149 | } 150 | }() 151 | 152 | frames, err := ds.handleQuery(ctx, query, headers) 153 | if err == nil { 154 | if responseMutator, ok := ds.driver().(ResponseMutator); ok { 155 | frames, err = responseMutator.MutateResponse(ctx, frames) 156 | if err != nil { 157 | err = backend.PluginError(err) 158 | } 159 | } 160 | } 161 | 162 | response.Set(query.RefID, backend.DataResponse{ 163 | Frames: frames, 164 | Error: err, 165 | 
ErrorSource: ErrorSource(err), 166 | }) 167 | }(q) 168 | } 169 | 170 | wg.Wait() 171 | 172 | errs := ds.errors(response) 173 | if ds.DriverSettings().Errors { 174 | return response.Response(), errs 175 | } 176 | 177 | return response.Response(), nil 178 | } 179 | 180 | func (ds *SQLDatasource) GetDBFromQuery(ctx context.Context, q *Query) (*sql.DB, error) { 181 | _, dbConn, err := ds.connector.GetConnectionFromQuery(ctx, q) 182 | return dbConn.db, err 183 | } 184 | 185 | // handleQuery will call query, and attempt to reconnect if the query failed 186 | func (ds *SQLDatasource) handleQuery(ctx context.Context, req backend.DataQuery, headers http.Header) (data.Frames, error) { 187 | if queryMutator, ok := ds.driver().(QueryMutator); ok { 188 | ctx, req = queryMutator.MutateQuery(ctx, req) 189 | } 190 | 191 | // Convert the backend.DataQuery into a Query object 192 | q, err := GetQuery(req, headers, ds.DriverSettings().ForwardHeaders) 193 | if err != nil { 194 | return nil, err 195 | } 196 | 197 | // Apply supported macros to the query 198 | q.RawSQL, err = Interpolate(ds.driver(), q) 199 | if err != nil { 200 | if errors.Is(err, sqlutil.ErrorBadArgumentCount) || err.Error() == ErrorParsingMacroBrackets.Error() { 201 | err = backend.DownstreamError(err) 202 | } 203 | return sqlutil.ErrorFrameFromQuery(q), fmt.Errorf("%s: %w", "Could not apply macros", err) 204 | } 205 | 206 | // Apply the default FillMode, overwritting it if the query specifies it 207 | fillMode := ds.DriverSettings().FillMode 208 | if q.FillMissing != nil { 209 | fillMode = q.FillMissing 210 | } 211 | 212 | // Retrieve the database connection 213 | cacheKey, dbConn, err := ds.connector.GetConnectionFromQuery(ctx, q) 214 | if err != nil { 215 | return sqlutil.ErrorFrameFromQuery(q), err 216 | } 217 | 218 | if ds.DriverSettings().Timeout != 0 { 219 | tctx, cancel := context.WithTimeout(ctx, ds.DriverSettings().Timeout) 220 | defer cancel() 221 | 222 | ctx = tctx 223 | } 224 | 225 | var args []interface{} 226 | if argSetter, ok := ds.driver().(QueryArgSetter); ok { 227 | args = argSetter.SetQueryArgs(ctx, headers) 228 | } 229 | 230 | var queryErrorMutator QueryErrorMutator 231 | if mutator, ok := ds.driver().(QueryErrorMutator); ok { 232 | queryErrorMutator = mutator 233 | } 234 | 235 | // FIXES: 236 | // * Some datasources (snowflake) expire connections or have an authentication token that expires if not used in 1 or 4 hours. 237 | // Because the datasource driver does not include an option for permanent connections, we retry the connection 238 | // if the query fails. NOTE: this does not include some errors like "ErrNoRows" 239 | dbQuery := NewQuery(dbConn.db, dbConn.settings, ds.driver().Converters(), fillMode, ds.rowLimit) 240 | res, err := dbQuery.Run(ctx, q, queryErrorMutator, args...) 241 | if err == nil { 242 | return res, nil 243 | } 244 | 245 | if errors.Is(err, ErrorNoResults) { 246 | return res, nil 247 | } 248 | 249 | // If there's a query error that didn't exceed the 250 | // context deadline retry the query 251 | if errors.Is(err, ErrorQuery) && !errors.Is(err, context.DeadlineExceeded) { 252 | // only retry on messages that contain specific errors 253 | if shouldRetry(ds.DriverSettings().RetryOn, err.Error()) { 254 | for i := 0; i < ds.DriverSettings().Retries; i++ { 255 | backend.Logger.Warn(fmt.Sprintf("query failed: %s. 
Retrying %d times", err.Error(), i)) 256 | db, err := ds.connector.Reconnect(ctx, dbConn, q, cacheKey) 257 | if err != nil { 258 | return nil, backend.DownstreamError(err) 259 | } 260 | 261 | if ds.DriverSettings().Pause > 0 { 262 | time.Sleep(time.Duration(ds.DriverSettings().Pause * int(time.Second))) 263 | } 264 | 265 | dbQuery := NewQuery(db, dbConn.settings, ds.driver().Converters(), fillMode, ds.rowLimit) 266 | res, err = dbQuery.Run(ctx, q, queryErrorMutator, args...) 267 | if err == nil { 268 | return res, err 269 | } 270 | if !shouldRetry(ds.DriverSettings().RetryOn, err.Error()) { 271 | return res, err 272 | } 273 | backend.Logger.Warn(fmt.Sprintf("Retry failed: %s", err.Error())) 274 | } 275 | } 276 | } 277 | 278 | // Check if the error is retryable and convert to downstream error if so 279 | if errors.Is(err, ErrorQuery) && shouldRetry(ds.DriverSettings().RetryOn, err.Error()) { 280 | // Convert retryable errors to downstream errors 281 | if !backend.IsDownstreamError(err) { 282 | err = backend.DownstreamError(err) 283 | } 284 | } 285 | 286 | // allow retries on timeouts 287 | if errors.Is(err, context.DeadlineExceeded) { 288 | for i := 0; i < ds.DriverSettings().Retries; i++ { 289 | backend.Logger.Warn(fmt.Sprintf("connection timed out. retrying %d times", i)) 290 | db, err := ds.connector.Reconnect(ctx, dbConn, q, cacheKey) 291 | if err != nil { 292 | continue 293 | } 294 | 295 | dbQuery := NewQuery(db, dbConn.settings, ds.driver().Converters(), fillMode, ds.rowLimit) 296 | res, err = dbQuery.Run(ctx, q, queryErrorMutator, args...) 297 | if err == nil { 298 | return res, err 299 | } 300 | } 301 | } 302 | 303 | return res, err 304 | } 305 | 306 | // CheckHealth pings the connected SQL database 307 | func (ds *SQLDatasource) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { 308 | if checkHealthMutator, ok := ds.driver().(CheckHealthMutator); ok { 309 | ctx, req = checkHealthMutator.MutateCheckHealth(ctx, req) 310 | } 311 | healthChecker := &HealthChecker{ 312 | Connector: ds.connector, 313 | Metrics: ds.metrics.WithEndpoint(EndpointHealth), 314 | PreCheckHealth: ds.PreCheckHealth, 315 | PostCheckHealth: ds.PostCheckHealth, 316 | } 317 | return healthChecker.Check(ctx, req) 318 | } 319 | 320 | func (ds *SQLDatasource) DriverSettings() DriverSettings { 321 | return ds.connector.driverSettings 322 | } 323 | 324 | func (ds *SQLDatasource) driver() Driver { 325 | return ds.connector.driver 326 | } 327 | 328 | func (ds *SQLDatasource) errors(response *Response) error { 329 | if response == nil { 330 | return nil 331 | } 332 | res := response.Response() 333 | if res == nil { 334 | return nil 335 | } 336 | var err error 337 | for _, r := range res.Responses { 338 | err = errors.Join(err, r.Error) 339 | } 340 | if err != nil { 341 | backend.Logger.Error(err.Error()) 342 | } 343 | return err 344 | } 345 | 346 | func (ds *SQLDatasource) GetRowLimit() int64 { 347 | return ds.rowLimit 348 | } 349 | 350 | func (ds *SQLDatasource) SetDefaultRowLimit(limit int64) { 351 | ds.EnableRowLimit = true 352 | ds.rowLimit = limit 353 | } 354 | 355 | // newRowLimit returns the row limit for the datasource 356 | // It checks in the following order: 357 | // 1. set in the datasource configuration page 358 | // 2. set via the environment variable 359 | // 3. set is set on grafana_ini and passed via grafana context 360 | // 4. 
default row limit set by SetDefaultRowLimit 361 | func (ds *SQLDatasource) newRowLimit(ctx context.Context, conn *Connector) int64 { 362 | if !ds.EnableRowLimit { 363 | return defaultRowLimit 364 | } 365 | 366 | // Handles when row limit is set in the datasource configuration page 367 | settingsLimit := conn.driverSettings.RowLimit 368 | if settingsLimit != 0 { 369 | return settingsLimit 370 | } 371 | 372 | // Handles when row limit is set via environment variable 373 | envLimit := os.Getenv(envRowLimit) 374 | if envLimit != "" { 375 | l, err := strconv.ParseInt(envLimit, 10, 64) 376 | if err == nil && l >= 0 { 377 | return l 378 | } 379 | log.DefaultLogger.Error(fmt.Sprintf("failed setting row limit from environment variable: %s", err)) 380 | } 381 | 382 | // Handles row limit from sql config from grafana instance 383 | config := backend.GrafanaConfigFromContext(ctx) 384 | if ds.EnableRowLimit && config != nil { 385 | sqlConfig, err := config.SQL() 386 | if err != nil { 387 | backend.Logger.Error(fmt.Sprintf("failed setting row limit from sql config: %s", err)) 388 | } else { 389 | return sqlConfig.RowLimit 390 | } 391 | } 392 | 393 | // handles SetDefaultRowLimit where it is set before the datasource is initialized 394 | if ds.rowLimit != 0 { 395 | return ds.rowLimit 396 | } 397 | 398 | return defaultRowLimit 399 | } 400 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= 2 | filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= 3 | github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ= 4 | github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY= 5 | github.com/apache/arrow-go/v18 v18.4.1 h1:q/jVkBWCJOB9reDgaIZIdruLQUb1kbkvOnOFezVH1C4= 6 | github.com/apache/arrow-go/v18 v18.4.1/go.mod h1:tLyFubsAl17bvFdUAy24bsSvA/6ww95Iqi67fTpGu3E= 7 | github.com/apache/thrift v0.22.0 h1:r7mTJdj51TMDe6RtcmNdQxgn9XcyfGDOzegMDRg47uc= 8 | github.com/apache/thrift v0.22.0/go.mod h1:1e7J/O1Ae6ZQMTYdy9xa3w9k+XHWPfRvdPyJeynQ+/g= 9 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 10 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 11 | github.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw= 12 | github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= 13 | github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= 14 | github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= 15 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 16 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 17 | github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= 18 | github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= 19 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 20 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 21 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 22 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 23 | github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= 24 | github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= 25 | github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= 26 | github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= 27 | github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= 28 | github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 29 | github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 30 | github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= 31 | github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 32 | github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 33 | github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 34 | github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo= 35 | github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= 36 | github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= 37 | github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= 38 | github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= 39 | github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= 40 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 41 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 42 | github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 43 | github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= 44 | github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= 45 | github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 46 | github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= 47 | github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= 48 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 49 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 50 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 51 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 52 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 53 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 54 | github.com/grafana/dataplane/sdata v0.0.9 h1:AGL1LZnCUG4MnQtnWpBPbQ8ZpptaZs14w6kE/MWfg7s= 55 | github.com/grafana/dataplane/sdata v0.0.9/go.mod h1:Jvs5ddpGmn6vcxT7tCTWAZ1mgi4sbcdFt9utQx5uMAU= 56 | github.com/grafana/grafana-plugin-sdk-go v0.284.0 h1:1bK7eWsnPBLUWDcWJWe218Ik5ad0a5JpEL4mH9ry7Ws= 57 | github.com/grafana/grafana-plugin-sdk-go v0.284.0/go.mod h1:lHPniaSxq3SL5MxDIPy04TYB1jnTp/ivkYO+xn5Rz3E= 58 | github.com/grafana/otel-profiling-go v0.5.1 h1:stVPKAFZSa7eGiqbYuG25VcqYksR6iWvF3YH66t4qL8= 59 | github.com/grafana/otel-profiling-go v0.5.1/go.mod h1:ftN/t5A/4gQI19/8MoWurBEtC6gFw8Dns1sJZ9W4Tls= 60 | github.com/grafana/pyroscope-go/godeltaprof v0.1.9 
h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og= 61 | github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= 62 | github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0 h1:QGLs/O40yoNK9vmy4rhUGBVyMf1lISBGtXRpsu/Qu/o= 63 | github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0/go.mod h1:hM2alZsMUni80N33RBe6J0e423LB+odMj7d3EMP9l20= 64 | github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 h1:B+8ClL/kCQkRiU82d9xajRPKYMrB7E0MbtzWVi1K4ns= 65 | github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3/go.mod h1:NbCUVmiS4foBGBHOYlCT25+YmGpJ32dZPi75pGEUpj4= 66 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= 67 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= 68 | github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= 69 | github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= 70 | github.com/hashicorp/go-plugin v1.7.0 h1:YghfQH/0QmPNc/AZMTFE3ac8fipZyZECHdDPshfk+mA= 71 | github.com/hashicorp/go-plugin v1.7.0/go.mod h1:BExt6KEaIYx804z8k4gRzRLEvxKVb+kn0NMcihqOqb8= 72 | github.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8= 73 | github.com/hashicorp/yamux v0.1.2/go.mod h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns= 74 | github.com/jaegertracing/jaeger-idl v0.5.0 h1:zFXR5NL3Utu7MhPg8ZorxtCBjHrL3ReM1VoB65FOFGE= 75 | github.com/jaegertracing/jaeger-idl v0.5.0/go.mod h1:ON90zFo9eoyXrt9F/KN8YeF3zxcnujaisMweFY/rg5k= 76 | github.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94= 77 | github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= 78 | github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= 79 | github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 80 | github.com/jszwedko/go-datemath v0.1.1-0.20230526204004-640a500621d6 h1:SwcnSwBR7X/5EHJQlXBockkJVIMRVt5yKaesBPMtyZQ= 81 | github.com/jszwedko/go-datemath v0.1.1-0.20230526204004-640a500621d6/go.mod h1:WrYiIuiXUMIvTDAQw97C+9l0CnBmCcvosPjN3XDqS/o= 82 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 83 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 84 | github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= 85 | github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= 86 | github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= 87 | github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= 88 | github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= 89 | github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= 90 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 91 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 92 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 93 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 94 | github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 95 | github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 96 | github.com/mattetti/filebuffer v1.0.1 
h1:gG7pyfnSIZCxdoKq+cPa8T0hhYtD9NxCdI4D7PTjRLM= 97 | github.com/mattetti/filebuffer v1.0.1/go.mod h1:YdMURNDOttIiruleeVr6f56OrMc+MydEnTcXwtkxNVs= 98 | github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= 99 | github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= 100 | github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= 101 | github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= 102 | github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 103 | github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= 104 | github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= 105 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 106 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 107 | github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= 108 | github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= 109 | github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= 110 | github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= 111 | github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= 112 | github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= 113 | github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= 114 | github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= 115 | github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 116 | github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= 117 | github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= 118 | github.com/mithrandie/csvq v1.18.1 h1:f7NB2scbb7xx2ffPduJ2VtZ85RpWXfvanYskAkGlCBU= 119 | github.com/mithrandie/csvq v1.18.1/go.mod h1:MRJj7AtcXfk7jhNGxLuJGP3LORmh4lpiPWxQ7VyCRn8= 120 | github.com/mithrandie/csvq-driver v1.7.0 h1:ejiavXNWwTPMyr3fJFnhcqd1L1cYudA0foQy9cZrqhw= 121 | github.com/mithrandie/csvq-driver v1.7.0/go.mod h1:HcN3xL9UCJnBYA/AIQOOB/KlyfXAiYr5yxDmiwrGk5o= 122 | github.com/mithrandie/go-file/v2 v2.1.0 h1:XA5Tl+73GXMDvgwSE3Sg0uC5FkLr3hnXs8SpUas0hyg= 123 | github.com/mithrandie/go-file/v2 v2.1.0/go.mod h1:9YtTF3Xo59GqC1Pxw6KyGVcM/qubAMlxVsqI/u9r++c= 124 | github.com/mithrandie/go-text v1.6.0 h1:8gOXTMPbMY8DJbKMTv8kHhADcJlDWXqS/YQH4SyWO6s= 125 | github.com/mithrandie/go-text v1.6.0/go.mod h1:xCgj1xiNbI/d4xA9sLVvXkjh5B2tNx2ZT2/3rpmh8to= 126 | github.com/mithrandie/ternary v1.1.1 h1:k/joD6UGVYxHixYmSR8EGgDFNONBMqyD373xT4QRdC4= 127 | github.com/mithrandie/ternary v1.1.1/go.mod h1:0D9Ba3+09K2TdSZO7/bFCC0GjSXetCvYuYq0u8FY/1g= 128 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 129 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 130 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 131 | github.com/modern-go/reflect2 v1.0.2 
h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= 132 | github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 133 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 134 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 135 | github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= 136 | github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= 137 | github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= 138 | github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= 139 | github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= 140 | github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= 141 | github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= 142 | github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= 143 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 144 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 145 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 146 | github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= 147 | github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= 148 | github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= 149 | github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= 150 | github.com/prometheus/common v0.67.2 h1:PcBAckGFTIHt2+L3I33uNRTlKTplNzFctXcWhPyAEN8= 151 | github.com/prometheus/common v0.67.2/go.mod h1:63W3KZb1JOKgcjlIr64WW/LvFGAqKPj0atm+knVGEko= 152 | github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= 153 | github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= 154 | github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= 155 | github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= 156 | github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= 157 | github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= 158 | github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= 159 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 160 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 161 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 162 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 163 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 164 | github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= 165 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 166 | github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 167 | github.com/stretchr/testify v1.11.1 
h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= 168 | github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= 169 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 170 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 171 | github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= 172 | github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= 173 | github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= 174 | github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= 175 | go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= 176 | go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= 177 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= 178 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= 179 | go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0 h1:2pn7OzMewmYRiNtv1doZnLo3gONcnMHlFnmOR8Vgt+8= 180 | go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.63.0/go.mod h1:rjbQTDEPQymPE0YnRQp9/NuPwwtL0sesz/fnqRW/v84= 181 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= 182 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= 183 | go.opentelemetry.io/contrib/propagators/jaeger v1.38.0 h1:nXGeLvT1QtCAhkASkP/ksjkTKZALIaQBIW+JSIw1KIc= 184 | go.opentelemetry.io/contrib/propagators/jaeger v1.38.0/go.mod h1:oMvOXk78ZR3KEuPMBgp/ThAMDy9ku/eyUVztr+3G6Wo= 185 | go.opentelemetry.io/contrib/samplers/jaegerremote v0.32.0 h1:oPW/SRFyHgIgxrvNhSBzqvZER2N5kRlci3/rGTOuyWo= 186 | go.opentelemetry.io/contrib/samplers/jaegerremote v0.32.0/go.mod h1:B9Oka5QVD0bnmZNO6gBbBta6nohD/1Z+f9waH2oXyBs= 187 | go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= 188 | go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= 189 | go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= 190 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= 191 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= 192 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54= 193 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk= 194 | go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= 195 | go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= 196 | go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= 197 | go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= 198 | go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= 199 | go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= 200 | go.opentelemetry.io/otel/sdk/metric v1.38.0 
h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= 201 | go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= 202 | go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= 203 | go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= 204 | go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= 205 | go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= 206 | go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= 207 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 208 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 209 | go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= 210 | go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= 211 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 212 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 213 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 214 | golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= 215 | golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= 216 | golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 h1:TQwNpfvNkxAVlItJf6Cr5JTsVZoC/Sj7K3OZv2Pc14A= 217 | golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk= 218 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 219 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 220 | golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= 221 | golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= 222 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 223 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 224 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 225 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 226 | golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= 227 | golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= 228 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 229 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 230 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 231 | golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= 232 | golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= 233 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 234 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 235 | golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 236 | golang.org/x/sys 
v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 237 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 238 | golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 239 | golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 240 | golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 241 | golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 242 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 243 | golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 244 | golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= 245 | golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= 246 | golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8 h1:LvzTn0GQhWuvKH/kVRS3R3bVAsdQWI7hvfLHGgh9+lU= 247 | golang.org/x/telemetry v0.0.0-20251008203120-078029d740a8/go.mod h1:Pi4ztBfryZoJEkyFTI5/Ocsu2jXyDr6iSdgJiYE/uwE= 248 | golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= 249 | golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= 250 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 251 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 252 | golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= 253 | golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= 254 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 255 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 256 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 257 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 258 | golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= 259 | golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= 260 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 261 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 262 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 263 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 264 | golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= 265 | golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= 266 | gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= 267 | gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= 268 | google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= 269 | google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= 270 | google.golang.org/genproto/googleapis/rpc 
v0.0.0-20251002232023-7c0ddcbb5797 h1:CirRxTOwnRWVLKzDNrs0CXAaVozJoR4G9xvdRecrdpk= 271 | google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ= 272 | google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= 273 | google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= 274 | google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= 275 | google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= 276 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 277 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 278 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 279 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 280 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 281 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 282 | --------------------------------------------------------------------------------